From 4c60248a6dccb674059641816161fb9b38ea1718 Mon Sep 17 00:00:00 2001 From: dvorst <87502756+dvorst@users.noreply.github.com> Date: Tue, 22 Aug 2023 23:14:12 +0200 Subject: [PATCH 0001/1018] DOC: add description of dtype b1 in arrays.dtypes.rst Closes #23366 --- doc/source/reference/arrays.dtypes.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/doc/source/reference/arrays.dtypes.rst b/doc/source/reference/arrays.dtypes.rst index 3f2b6d9d0e4c..2f317cc01fc8 100644 --- a/doc/source/reference/arrays.dtypes.rst +++ b/doc/source/reference/arrays.dtypes.rst @@ -230,12 +230,12 @@ One-character strings Array-protocol type strings (see :ref:`arrays.interface`) The first character specifies the kind of data and the remaining characters specify the number of bytes per item, except for Unicode, - where it is interpreted as the number of characters. The item size - must correspond to an existing type, or an error will be raised. The - supported kinds are + where it is interpreted as the number of characters, and except ``b1`` + which represents boolean. The item size must correspond to an existing + type, or an error will be raised. The supported kinds are - ================ ======================== - ``'?'`` boolean + ================= ======================== + ``'?'``, ``'b1'`` boolean ``'b'`` (signed) byte ``'B'`` unsigned byte ``'i'`` (signed) integer @@ -248,7 +248,7 @@ Array-protocol type strings (see :ref:`arrays.interface`) ``'S'``, ``'a'`` zero-terminated bytes (not recommended) ``'U'`` Unicode string ``'V'`` raw data (:class:`void`) - ================ ======================== + ================= ======================== .. admonition:: Example From 0d0709421a497d49e3c1cf0a6d332c5d432b4ef6 Mon Sep 17 00:00:00 2001 From: UV Date: Sat, 11 Jan 2025 08:57:00 +0530 Subject: [PATCH 0002/1018] DOC: Fix ambiguity in polyfit description --- numpy/lib/_polynomial_impl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_polynomial_impl.py b/numpy/lib/_polynomial_impl.py index 420e357eb354..d919373ea1f4 100644 --- a/numpy/lib/_polynomial_impl.py +++ b/numpy/lib/_polynomial_impl.py @@ -457,7 +457,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): A summary of the differences can be found in the :doc:`transition guide `. - Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg` + Fit a polynomial ``f(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg` to points `(x, y)`. Returns a vector of coefficients `p` that minimises the squared error in the order `deg`, `deg-1`, ... `0`. From 1c577f4d5744fa49bc0b784e066860ae82cfb4de Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Sat, 19 Apr 2025 20:33:25 +0200 Subject: [PATCH 0003/1018] Fix NPY_FINLINE macro for C++ class methods Remove 'static' qualifier in C++ mode to allow NPY_FINLINE usage within class methods. 
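For illustration, a minimal sketch of the usage this unlocks (the `Example` struct
is hypothetical, not part of the diff): with 'static' dropped in C++ mode,
NPY_FINLINE can qualify a non-static member function, where the previous
expansion's 'static' turned the method into a static member function, which
cannot be const-qualified and cannot use 'this':

```cpp
struct Example {
    int x_ = 21;
    // Fails to compile when NPY_FINLINE expands to 'static __forceinline' /
    // 'static inline ...'; compiles once 'static' is removed in C++ mode.
    NPY_FINLINE int Twice() const { return 2 * x_; }
};
```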
---
 numpy/_core/include/numpy/npy_common.h | 18 +++++++++++++++---
 1 file changed, 15 insertions(+), 3 deletions(-)

diff --git a/numpy/_core/include/numpy/npy_common.h b/numpy/_core/include/numpy/npy_common.h
index 79ad8ad78cb2..96b6ce67bf64 100644
--- a/numpy/_core/include/numpy/npy_common.h
+++ b/numpy/_core/include/numpy/npy_common.h
@@ -98,11 +98,23 @@
 #endif

 #ifdef _MSC_VER
-    #define NPY_FINLINE static __forceinline
+    #ifdef __cplusplus
+        #define NPY_FINLINE __forceinline
+    #else
+        #define NPY_FINLINE static __forceinline
+    #endif
 #elif defined(__GNUC__)
-    #define NPY_FINLINE static inline __attribute__((always_inline))
+    #ifdef __cplusplus
+        #define NPY_FINLINE inline __attribute__((always_inline))
+    #else
+        #define NPY_FINLINE static inline __attribute__((always_inline))
+    #endif
 #else
-    #define NPY_FINLINE static
+    #ifdef __cplusplus
+        #define NPY_FINLINE inline
+    #else
+        #define NPY_FINLINE static NPY_INLINE
+    #endif
 #endif

 #if defined(_MSC_VER)

From c952b9a82a120934793efc4e30ffca3858d04397 Mon Sep 17 00:00:00 2001
From: Sayed Adel
Date: Fri, 24 Nov 2023 11:01:14 +0200
Subject: [PATCH 0004/1018] ENH: Enable native half-precision scalar conversion
 operations on ARM

---
 numpy/_core/src/common/half.hpp       | 81 ++++++---------------------
 numpy/_core/src/npymath/halffloat.cpp | 28 ++-------
 2 files changed, 20 insertions(+), 89 deletions(-)

diff --git a/numpy/_core/src/common/half.hpp b/numpy/_core/src/common/half.hpp
index 484750ad84cd..14dabbe79d7f 100644
--- a/numpy/_core/src/common/half.hpp
+++ b/numpy/_core/src/common/half.hpp
@@ -9,8 +9,6 @@
 // TODO(@seiko2plus):
 // - covers half-precision operations that being supported by numpy/halffloat.h
 // - add support for arithmetic operations
-// - enables __fp16 causes massive FP exceptions on aarch64,
-//   needs a deep investigation

 namespace np {

@@ -19,42 +17,19 @@ namespace np {

 /// Provides a type that implements 16-bit floating point (half-precision).
 /// This type is ensured to be 16-bit size.
-#if 1 // ndef __ARM_FP16_FORMAT_IEEE
 class Half final {
   public:
-    /// Whether `Half` has a full native HW support.
-    static constexpr bool kNative = false;
-    /// Whether `Half` has a native HW support for single/double conversion.
-    template <typename T>
-    static constexpr bool kNativeConversion = (
-        (
-            std::is_same_v<T, float> &&
-        #if defined(NPY_HAVE_FP16) || defined(NPY_HAVE_VSX3)
-            true
-        #else
-            false
-        #endif
-        ) || (
-            std::is_same_v<T, double> &&
-        #if defined(NPY_HAVE_AVX512FP16) || (defined(NPY_HAVE_VSX3) && defined(NPY_HAVE_VSX3_HALF_DOUBLE))
-            true
-        #else
-            false
-        #endif
-        )
-    );
-
     /// Default constructor. initialize nothing.
     Half() = default;

     /// Construct from float
     /// If there are no hardware optimization available, rounding will always
     /// be set to ties to even.
-    explicit Half(float f)
+    NPY_FINLINE explicit Half(float f)
     {
     #if defined(NPY_HAVE_FP16)
         __m128 mf = _mm_load_ss(&f);
-        bits_ = static_cast<uint16_t>(_mm_cvtsi128_si32(_mm_cvtps_ph(mf, _MM_FROUND_TO_NEAREST_INT)));
+        bits_ = _mm_extract_epi16(_mm_cvtps_ph(mf, _MM_FROUND_TO_NEAREST_INT), 0);
     #elif defined(NPY_HAVE_VSX3) && defined(NPY_HAVE_VSX_ASM)
         __vector float vf32 = vec_splats(f);
         __vector unsigned short vf16;
         __asm__ __volatile__ ("xvcvsphp %x0,%x1" : "=wa" (vf16) : "wa" (vf32));
     #ifdef __BIG_ENDIAN__
         bits_ = vec_extract(vf16, 1);
     #else
         bits_ = vec_extract(vf16, 0);
     #endif
+    #elif defined(__ARM_FP16_FORMAT_IEEE)
+        __fp16 f16 = __fp16(f);
+        bits_ = BitCast<uint16_t>(f16);
     #else
         bits_ = half_private::FromFloatBits(BitCast<uint32_t>(f));
     #endif
     }

     /// Construct from double.
     /// If there are no hardware optimization available, rounding will always
     /// be set to ties to even.
-    explicit Half(double f)
+    NPY_FINLINE explicit Half(double f)
     {
     #if defined(NPY_HAVE_AVX512FP16)
         __m128d md = _mm_load_sd(&f);
-        bits_ = static_cast<uint16_t>(_mm_cvtsi128_si32(_mm_castph_si128(_mm_cvtpd_ph(md))));
+        bits_ = _mm_extract_epi16(_mm_castph_si128(_mm_cvtpd_ph(md)), 0);
     #elif defined(NPY_HAVE_VSX3) && defined(NPY_HAVE_VSX3_HALF_DOUBLE)
         __asm__ __volatile__ ("xscvdphp %x0,%x1" : "=wa" (bits_) : "wa" (f));
+    #elif defined(__ARM_FP16_FORMAT_IEEE)
+        __fp16 f16 = __fp16(f);
+        bits_ = BitCast<uint16_t>(f16);
     #else
         bits_ = half_private::FromDoubleBits(BitCast<uint64_t>(f));
     #endif
     }

     /// Cast to float
-    explicit operator float() const
+    NPY_FINLINE explicit operator float() const
     {
     #if defined(NPY_HAVE_FP16)
         float ret;
@@ -99,13 +80,15 @@ class Half final {
             : "=wa"(vf32)
             : "wa"(vec_splats(bits_)));
         return vec_extract(vf32, 0);
+    #elif defined(__ARM_FP16_FORMAT_IEEE)
+        return float(BitCast<__fp16>(bits_));
     #else
         return BitCast<float>(half_private::ToFloatBits(bits_));
     #endif
     }

     /// Cast to double
-    explicit operator double() const
+    NPY_FINLINE explicit operator double() const
     {
     #if defined(NPY_HAVE_AVX512FP16)
         double ret;
@@ -117,6 +100,8 @@ class Half final {
             : "=wa"(f64)
             : "wa"(bits_));
         return f64;
+    #elif defined(__ARM_FP16_FORMAT_IEEE)
+        return double(BitCast<__fp16>(bits_));
     #else
         return BitCast<double>(half_private::ToDoubleBits(bits_));
     #endif
     }
@@ -223,40 +208,6 @@ class Half final {
   private:
     uint16_t bits_;
 };
-#else // __ARM_FP16_FORMAT_IEEE
-class Half final {
-  public:
-    static constexpr bool kNative = true;
-    template <typename T>
-    static constexpr bool kNativeConversion = (
-        std::is_same_v<T, float> || std::is_same_v<T, double>
-    );
-    Half() = default;
-    constexpr Half(__fp16 h) : half_(h)
-    {}
-    constexpr operator __fp16() const
-    { return half_; }
-    static Half FromBits(uint16_t bits)
-    {
-        Half h;
-        h.half_ = BitCast<__fp16>(bits);
-        return h;
-    }
-    uint16_t Bits() const
-    { return BitCast<uint16_t>(half_); }
-    constexpr bool Less(Half r) const
-    { return half_ < r.half_; }
-    constexpr bool LessEqual(Half r) const
-    { return half_ <= r.half_; }
-    constexpr bool Equal(Half r) const
-    { return half_ == r.half_; }
-    constexpr bool IsNaN() const
-    { return half_ != half_; }
-
-  private:
-    __fp16 half_;
-};
-#endif // __ARM_FP16_FORMAT_IEEE

 /// @} cpp_core_types

diff --git a/numpy/_core/src/npymath/halffloat.cpp b/numpy/_core/src/npymath/halffloat.cpp
index aa582c1b9517..9289a659f5f5 100644
--- a/numpy/_core/src/npymath/halffloat.cpp
+++ b/numpy/_core/src/npymath/halffloat.cpp
@@ -198,41 +198,21 @@ npy_half npy_half_divmod(npy_half h1, npy_half h2, npy_half *modulus)

 npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f)
 {
-    if constexpr (Half::kNativeConversion<float>) {
-        return BitCast<npy_uint16>(Half(BitCast<float>(f)));
-    }
-    else {
-        return half_private::FromFloatBits(f);
-    }
+    return BitCast<npy_uint16>(Half(BitCast<float>(f)));
 }

 npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d)
 {
-    if constexpr (Half::kNativeConversion<double>) {
-        return BitCast<npy_uint16>(Half(BitCast<double>(d)));
-    }
-    else {
-        return half_private::FromDoubleBits(d);
-    }
+    return BitCast<npy_uint16>(Half(BitCast<double>(d)));
 }

 npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h)
 {
-    if constexpr (Half::kNativeConversion<float>) {
-        return BitCast<npy_uint32>(static_cast<float>(Half::FromBits(h)));
-    }
-    else {
-        return half_private::ToFloatBits(h);
-    }
+    return BitCast<npy_uint32>(static_cast<float>(Half::FromBits(h)));
 }

 npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h)
 {
-    if constexpr (Half::kNativeConversion<double>) {
-        return BitCast<npy_uint64>(static_cast<double>(Half::FromBits(h)));
-    }
-    else {
-        return half_private::ToDoubleBits(h);
-    }
+    return BitCast<npy_uint64>(static_cast<double>(Half::FromBits(h)));
 }

From 94039f36c381a7ed214b1f95890c7a811b11eba2 Mon Sep 17 00:00:00 2001
From: Marco Barbosa
Date: Thu, 15 May 2025 11:01:41 -0300
Subject: [PATCH 0005/1018] DOC: improves np.fromfile file description (#28840)

---
 numpy/_core/_add_newdocs.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py
index 65a29476a075..ea851e16e751 100644
--- a/numpy/_core/_add_newdocs.py
+++ b/numpy/_core/_add_newdocs.py
@@ -1528,7 +1528,9 @@
     Parameters
     ----------
     file : file or str or Path
-        Open file object or filename.
+        An open file object, a string containing the filename, or a Path object.
+        When reading from a file object, it must support random access
+        (i.e. it must have tell and seek methods).
     dtype : data-type
         Data type of the returned array.
         For binary files, it is used to determine the size and byte-order

From db5535a0a565b6847a9054b40c8941dd46ec75de Mon Sep 17 00:00:00 2001
From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com>
Date: Fri, 16 May 2025 12:39:17 +0200
Subject: [PATCH 0006/1018] MNT: constant string arrays instead of pointers in C

---
 numpy/_core/src/common/npy_cpu_features.c        |  2 +-
 numpy/_core/src/multiarray/compiled_base.c       |  6 +++---
 numpy/_core/src/multiarray/stringdtype/casts.cpp | 16 ++++++++--------
 .../src/multiarray/stringdtype/static_string.c   |  2 +-
 numpy/f2py/rules.py                              |  2 +-
 5 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c
index 8810182812e5..e1fd8404d3bf 100644
--- a/numpy/_core/src/common/npy_cpu_features.c
+++ b/numpy/_core/src/common/npy_cpu_features.c
@@ -277,7 +277,7 @@ npy__cpu_check_env(int disable, const char *env) {
     char *notsupp_cur = &notsupp[0];
     //comma and space including (htab, vtab, CR, LF, FF)
-    const char *delim = ", \t\v\r\n\f";
+    const char delim[] = ", \t\v\r\n\f";
     char *feature = strtok(features, delim);
     while (feature) {
         if (npy__cpu_baseline_fid(feature) > 0){
diff --git a/numpy/_core/src/multiarray/compiled_base.c b/numpy/_core/src/multiarray/compiled_base.c
index 26b898fa1479..86b60cf75944 100644
--- a/numpy/_core/src/multiarray/compiled_base.c
+++ b/numpy/_core/src/multiarray/compiled_base.c
@@ -920,11 +920,11 @@ arr_interp_complex(PyObject *NPY_UNUSED(self), PyObject *const *args, Py_ssize_t
     return NULL;
 }

-static const char *EMPTY_SEQUENCE_ERR_MSG = "indices must be integral: the provided " \
+static const char EMPTY_SEQUENCE_ERR_MSG[] = "indices must be integral: the provided " \
    "empty sequence was inferred as float. Wrap it with " \
    "'np.array(indices, dtype=np.intp)'";

-static const char *NON_INTEGRAL_ERROR_MSG = "only int indices permitted";
+static const char NON_INTEGRAL_ERROR_MSG[] = "only int indices permitted";

 /* Convert obj to an ndarray with integer dtype or fail */
 static PyArrayObject *
@@ -1465,7 +1465,7 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t
     PyObject *obj;
     PyObject *str;
     const char *docstr;
-    static char *msg = "already has a different docstring";
+    static const char msg[] = "already has a different docstring";

     /* Don't add docstrings */
 #if PY_VERSION_HEX > 0x030b0000
diff --git a/numpy/_core/src/multiarray/stringdtype/casts.cpp b/numpy/_core/src/multiarray/stringdtype/casts.cpp
index f66727501f97..c69818173e24 100644
--- a/numpy/_core/src/multiarray/stringdtype/casts.cpp
+++ b/numpy/_core/src/multiarray/stringdtype/casts.cpp
@@ -605,7 +605,7 @@ load_non_nullable_string(char *in, int has_null, const npy_static_string *defaul
     const npy_packed_static_string *ps = (npy_packed_static_string *)in;
     int isnull = NpyString_load(allocator, ps, string_to_load);
     if (isnull == -1) {
-        const char *msg = "Failed to load string for conversion to a non-nullable type";
+        const char msg[] = "Failed to load string for conversion to a non-nullable type";
         if (has_gil) {
             PyErr_SetString(PyExc_MemoryError, msg);
@@ -617,7 +617,7 @@ load_non_nullable_string(char *in, int has_null, const npy_static_string *defaul
     }
     else if (isnull) {
         if (has_null) {
-            const char *msg = "Arrays with missing data cannot be converted to a non-nullable type";
+            const char msg[] = "Arrays with missing data cannot be converted to a non-nullable type";
             if (has_gil) {
                 PyErr_SetString(PyExc_ValueError, msg);
@@ -821,8 +821,8 @@ static PyType_Slot s2int_slots[] = {
 static const char *
 make_s2type_name(NPY_TYPES typenum) {
-    const char *prefix = "cast_StringDType_to_";
-    size_t plen = strlen(prefix);
+    const char prefix[] = "cast_StringDType_to_";
+    size_t plen = sizeof(prefix)/sizeof(char) - 1;

     const char *type_name = typenum_to_cstr(typenum);
     size_t nlen = strlen(type_name);
@@ -842,14 +842,14 @@ make_s2type_name(NPY_TYPES typenum) {
 static const char *
 make_type2s_name(NPY_TYPES typenum) {
-    const char *prefix = "cast_";
-    size_t plen = strlen(prefix);
+    const char prefix[] = "cast_";
+    size_t plen = sizeof(prefix)/sizeof(char) - 1;

     const char *type_name = typenum_to_cstr(typenum);
     size_t nlen = strlen(type_name);

-    const char *suffix = "_to_StringDType";
-    size_t slen = strlen(suffix);
+    const char suffix[] = "_to_StringDType";
+    size_t slen = sizeof(suffix)/sizeof(char) - 1;

     char *buf = (char *)PyMem_RawCalloc(sizeof(char),
                                         plen + nlen + slen + 1);
diff --git a/numpy/_core/src/multiarray/stringdtype/static_string.c b/numpy/_core/src/multiarray/stringdtype/static_string.c
index 02ab7d246a7a..1c29bbb67f7e 100644
--- a/numpy/_core/src/multiarray/stringdtype/static_string.c
+++ b/numpy/_core/src/multiarray/stringdtype/static_string.c
@@ -404,7 +404,7 @@ NpyString_release_allocators(size_t length, npy_string_allocator *allocators[])
     }
 }

-static const char * const EMPTY_STRING = "";
+static const char EMPTY_STRING[] = "";

 /*NUMPY_API
  * Extract the packed contents of *packed_string* into *unpacked_string*.
diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index 4122f0a49f17..c10d2afdd097 100644 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -1154,7 +1154,7 @@ 'frompyobj': [ ' #setdims#;', ' capi_#varname#_intent |= #intent#;', - (' const char * capi_errmess = "#modulename#.#pyname#:' + (' const char capi_errmess[] = "#modulename#.#pyname#:' ' failed to create array from the #nth# `#varname#`";'), {isintent_hide: ' capi_#varname#_as_array = ndarray_from_pyobj(' From a5401fd0599aa7cb7044c6983d0d7df17d4ed06f Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Fri, 16 May 2025 12:51:22 +0200 Subject: [PATCH 0007/1018] MNT: alternative handling of trailing null character Trying to avoid this compiler warning: ../numpy/_core/src/multiarray/stringdtype/casts.cpp:860:12: warning: 'char* strncat(char*, const char*, size_t)' specified bound 15 equals source length [-Wstringop-overflow=] 860 | strncat(buf, suffix, slen); | ~~~~~~~^~~~~~~~~~~~~~~~~~~ --- .../src/multiarray/stringdtype/casts.cpp | 23 +++++++++++-------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/numpy/_core/src/multiarray/stringdtype/casts.cpp b/numpy/_core/src/multiarray/stringdtype/casts.cpp index c69818173e24..3632e359c9a9 100644 --- a/numpy/_core/src/multiarray/stringdtype/casts.cpp +++ b/numpy/_core/src/multiarray/stringdtype/casts.cpp @@ -833,10 +833,12 @@ make_s2type_name(NPY_TYPES typenum) { return NULL; } - // memcpy instead of strcpy to avoid stringop-truncation warning, since - // we are not including the trailing null character - memcpy(buf, prefix, plen); - strncat(buf, type_name, nlen); + // memcpy instead of strcpy/strncat to avoid stringop-truncation warning, + // since we are not including the trailing null character + char *p = buf; + memcpy(p, prefix, plen); + p += plen; + memcpy(p, type_name, nlen); return buf; } @@ -853,11 +855,14 @@ make_type2s_name(NPY_TYPES typenum) { char *buf = (char *)PyMem_RawCalloc(sizeof(char), plen + nlen + slen + 1); - // memcpy instead of strcpy to avoid stringop-truncation warning, since - // we are not including the trailing null character - memcpy(buf, prefix, plen); - strncat(buf, type_name, nlen); - strncat(buf, suffix, slen); + // memcpy instead of strcpy/strncat to avoid stringop-truncation warning, + // since we are not including the trailing null character + char *p = buf; + memcpy(p, prefix, plen); + p += plen; + memcpy(p, type_name, nlen); + p += nlen; + memcpy(p, suffix, slen); return buf; } From 22132f1522f8ac3d278a3773fe071b699df3fc31 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Fri, 16 May 2025 13:23:05 +0200 Subject: [PATCH 0008/1018] MNT: more const'ness for arrays of string literals in C --- numpy/_core/src/common/npy_cpu_features.c | 2 +- numpy/_core/src/umath/string_ufuncs.cpp | 12 ++++++------ numpy/_core/src/umath/stringdtype_ufuncs.cpp | 10 +++++----- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c index e1fd8404d3bf..f15f636cdb1e 100644 --- a/numpy/_core/src/common/npy_cpu_features.c +++ b/numpy/_core/src/common/npy_cpu_features.c @@ -246,7 +246,7 @@ npy__cpu_validate_baseline(void) static int npy__cpu_check_env(int disable, const char *env) { - static const char *names[] = { + static const char *const names[] = { "enable", "disable", "NPY_ENABLE_CPU_FEATURES", "NPY_DISABLE_CPU_FEATURES", "During parsing 
environment variable: 'NPY_ENABLE_CPU_FEATURES':\n", diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index 5b4b67cda625..ffd757815364 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -1521,7 +1521,7 @@ init_string_ufuncs(PyObject *umath) dtypes[0] = NPY_OBJECT; dtypes[1] = NPY_BOOL; - const char *unary_buffer_method_names[] = { + const char *const unary_buffer_method_names[] = { "isalpha", "isalnum", "isdigit", "isspace", "islower", "isupper", "istitle", "isdecimal", "isnumeric", }; @@ -1635,7 +1635,7 @@ init_string_ufuncs(PyObject *umath) dtypes[2] = dtypes[3] = NPY_INT64; dtypes[4] = NPY_BOOL; - const char *startswith_endswith_names[] = { + const char *const startswith_endswith_names[] = { "startswith", "endswith" }; @@ -1664,7 +1664,7 @@ init_string_ufuncs(PyObject *umath) dtypes[0] = dtypes[1] = NPY_OBJECT; - const char *strip_whitespace_names[] = { + const char *const strip_whitespace_names[] = { "_lstrip_whitespace", "_rstrip_whitespace", "_strip_whitespace" }; @@ -1691,7 +1691,7 @@ init_string_ufuncs(PyObject *umath) dtypes[0] = dtypes[1] = dtypes[2] = NPY_OBJECT; - const char *strip_chars_names[] = { + const char *const strip_chars_names[] = { "_lstrip_chars", "_rstrip_chars", "_strip_chars" }; @@ -1750,7 +1750,7 @@ init_string_ufuncs(PyObject *umath) dtypes[1] = NPY_INT64; - const char *center_ljust_rjust_names[] = { + const char *const center_ljust_rjust_names[] = { "_center", "_ljust", "_rjust" }; @@ -1827,7 +1827,7 @@ init_string_ufuncs(PyObject *umath) dtypes[0] = dtypes[1] = dtypes[3] = dtypes[4] = dtypes[5] = NPY_OBJECT; dtypes[2] = NPY_INT64; - const char *partition_names[] = {"_partition_index", "_rpartition_index"}; + const char *const partition_names[] = {"_partition_index", "_rpartition_index"}; static STARTPOSITION partition_startpositions[] = { STARTPOSITION::FRONT, STARTPOSITION::BACK diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index 37ae0a39a349..adcbfd3b7480 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -2605,7 +2605,7 @@ add_object_and_unicode_promoters(PyObject *umath, const char* ufunc_name, NPY_NO_EXPORT int init_stringdtype_ufuncs(PyObject *umath) { - static const char *comparison_ufunc_names[6] = { + static const char *const comparison_ufunc_names[6] = { "equal", "not_equal", "less", "less_equal", "greater_equal", "greater", }; @@ -2654,7 +2654,7 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } - const char *unary_loop_names[] = { + const char *const unary_loop_names[] = { "isalpha", "isdecimal", "isdigit", "isnumeric", "isspace", "isalnum", "istitle", "isupper", "islower", }; @@ -2874,7 +2874,7 @@ init_stringdtype_ufuncs(PyObject *umath) &PyArray_StringDType, &PyArray_StringDType }; - const char *strip_whitespace_names[] = { + const char *const strip_whitespace_names[] = { "_lstrip_whitespace", "_rstrip_whitespace", "_strip_whitespace", }; @@ -2898,7 +2898,7 @@ init_stringdtype_ufuncs(PyObject *umath) &PyArray_StringDType, &PyArray_StringDType, &PyArray_StringDType }; - const char *strip_chars_names[] = { + const char *const strip_chars_names[] = { "_lstrip_chars", "_rstrip_chars", "_strip_chars", }; @@ -3082,7 +3082,7 @@ init_stringdtype_ufuncs(PyObject *umath) &PyArray_StringDType }; - const char *partition_names[] = {"_partition", "_rpartition"}; + const char *const partition_names[] = {"_partition", "_rpartition"}; static 
STARTPOSITION partition_startpositions[] = { STARTPOSITION::FRONT, STARTPOSITION::BACK From 92f2622e376e05620fd4e63fb453bc5d1e5b160a Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Tue, 1 Apr 2025 22:58:10 +0200 Subject: [PATCH 0009/1018] ENH, SIMD: Initial implementation of Highway wrapper A thin wrapper over Google's Highway SIMD library to simplify its interface. This commit provides the implementation of that wrapper, consisting of: - simd.hpp: Main header defining the SIMD namespaces and configuration - simd.inc.hpp: Template header included multiple times with different namespaces The wrapper eliminates Highway's class tags by: - Using lane types directly which can be deduced from arguments - Leveraging namespaces (np::simd and np::simd128) for different register widths A README is included to guide usage and document design decisions. --- numpy/_core/src/common/simd/README.md | 258 +++++++++++++++++++++++ numpy/_core/src/common/simd/simd.hpp | 80 +++++++ numpy/_core/src/common/simd/simd.inc.hpp | 102 +++++++++ 3 files changed, 440 insertions(+) create mode 100644 numpy/_core/src/common/simd/README.md create mode 100644 numpy/_core/src/common/simd/simd.hpp create mode 100644 numpy/_core/src/common/simd/simd.inc.hpp diff --git a/numpy/_core/src/common/simd/README.md b/numpy/_core/src/common/simd/README.md new file mode 100644 index 000000000000..ac58c4a6bd94 --- /dev/null +++ b/numpy/_core/src/common/simd/README.md @@ -0,0 +1,258 @@ +# NumPy SIMD Wrapper for Highway + +This directory contains a lightweight C++ wrapper over Google's [Highway](https://github.com/google/highway) SIMD library, designed specifically for NumPy's needs. + +> **Note**: This directory also contains the C interface of universal intrinsics (under `simd.h`) which is no longer supported. The Highway wrapper described in this document should be used instead for all new SIMD code. + +## Overview + +The wrapper simplifies Highway's SIMD interface by eliminating class tags and using lane types directly, which can be deduced from arguments in most cases. This design makes the SIMD code more intuitive and easier to maintain while still leveraging Highway generic intrinsics. + +## Architecture + +The wrapper consists of two main headers: + +1. `simd.hpp`: The main header that defines namespaces and includes configuration macros +2. `simd.inc.hpp`: Implementation details included by `simd.hpp` multiple times for different namespaces + +Additionally, this directory contains legacy C interface files for universal intrinsics (`simd.h` and related files) which are deprecated and should not be used for new code. All new SIMD code should use the Highway wrapper. + + +## Usage + +### Basic Usage + +```cpp +#include "simd/simd.hpp" + +// Use np::simd for maximum width SIMD operations +using namespace np::simd; +float *data = /* ... 
*/;
+Vec<float> v = LoadU(data);
+v = Add(v, v);
+StoreU(v, data);
+
+// Use np::simd128 for fixed 128-bit SIMD operations
+using namespace np::simd128;
+Vec<float> v128 = LoadU(data);
+v128 = Add(v128, v128);
+StoreU(v128, data);
+```
+
+### Checking for SIMD Support
+
+```cpp
+#include "simd/simd.hpp"
+
+// Check if SIMD is enabled
+#if NPY_SIMDX
+    // SIMD code
+#else
+    // Scalar fallback code
+#endif
+
+// Check for float64 support
+#if NPY_SIMDX_F64
+    // Use float64 SIMD operations
+#endif
+
+// Check for FMA support
+#if NPY_SIMDX_FMA
+    // Use FMA operations
+#endif
+```
+
+## Type Support and Constraints
+
+The wrapper provides type constraints to help with SFINAE (Substitution Failure Is Not An Error) and compile-time type checking:
+
+- `kSupportLane`: Determines whether the specified lane type is supported by the SIMD extension.
+  ```cpp
+  // Base template - always defined, even when SIMD is not enabled (for SFINAE)
+  template <typename TLane>
+  constexpr bool kSupportLane = NPY_SIMDX != 0;
+  template <>
+  constexpr bool kSupportLane<double> = NPY_SIMDX_F64 != 0;
+  ```
+
+
+```cpp
+#include "simd/simd.hpp"
+
+// Check if float64 operations are supported
+if constexpr (np::simd::kSupportLane<double>) {
+    // Use float64 operations
+}
+```
+
+These constraints allow for compile-time checking of which lane types are supported, which can be used in SFINAE contexts to enable or disable functions based on type support.
+
+## Available Operations
+
+The wrapper provides the following common operations that are used in NumPy:
+
+- Vector creation operations:
+  - `Zero`: Returns a vector with all lanes set to zero
+  - `Set`: Returns a vector with all lanes set to the given value
+  - `Undefined`: Returns an uninitialized vector
+
+- Memory operations:
+  - `LoadU`: Unaligned load of a vector from memory
+  - `StoreU`: Unaligned store of a vector to memory
+
+- Vector information:
+  - `Lanes`: Returns the number of vector lanes based on the lane type
+
+- Type conversion:
+  - `BitCast`: Reinterprets a vector to a different type without modifying the underlying data
+  - `VecFromMask`: Converts a mask to a vector
+
+- Comparison operations:
+  - `Eq`: Element-wise equality comparison
+  - `Le`: Element-wise less than or equal comparison
+  - `Lt`: Element-wise less than comparison
+  - `Gt`: Element-wise greater than comparison
+  - `Ge`: Element-wise greater than or equal comparison
+
+- Arithmetic operations:
+  - `Add`: Element-wise addition
+  - `Sub`: Element-wise subtraction
+  - `Mul`: Element-wise multiplication
+  - `Div`: Element-wise division
+  - `Min`: Element-wise minimum
+  - `Max`: Element-wise maximum
+  - `Abs`: Element-wise absolute value
+  - `Sqrt`: Element-wise square root
+
+- Logical operations:
+  - `And`: Bitwise AND
+  - `Or`: Bitwise OR
+  - `Xor`: Bitwise XOR
+  - `AndNot`: Bitwise AND NOT (a & ~b)
+
+Additional Highway operations can be accessed via the `hn` namespace alias inside the `simd` or `simd128` namespaces.
+
+## Extending
+
+To add more operations from Highway:
+
+1. Import them in the `simd.inc.hpp` file using the `using` directive if they don't require a tag:
+   ```cpp
+   // For operations that don't require a tag
+   using hn::FunctionName;
+   ```
+
+2. Define wrapper functions for intrinsics that require a class tag:
+   ```cpp
+   // For operations that require a tag
+   template <typename TLane>
+   HWY_API ReturnType FunctionName(Args... args) {
+       return hn::FunctionName(_Tag<TLane>(), args...);
+   }
+   ```
+
+3. Add appropriate documentation and SFINAE constraints if needed
+
+
+## Build Configuration
+
+The SIMD wrapper automatically disables SIMD operations when optimizations are disabled:
+
+- When `NPY_DISABLE_OPTIMIZATION` is defined, SIMD operations are disabled
+- SIMD is enabled only when the Highway target is not scalar (`HWY_TARGET != HWY_SCALAR`)
+
+## Design Notes
+
+1. **Why avoid Highway scalar operations?**
+   - NumPy already provides kernels for scalar operations
+   - Compilers can better optimize standard library implementations
+   - Not all Highway intrinsics are fully supported in scalar mode
+
+2. **Legacy Universal Intrinsics**
+   - The older universal intrinsics C interface (in `simd.h` and accessible via `NPY_SIMD` macros) is deprecated
+   - All new SIMD code should use this Highway-based wrapper (accessible via `NPY_SIMDX` macros)
+   - The legacy code is maintained for compatibility but will eventually be removed
+
+3. **Feature Detection Constants vs. Highway Constants**
+   - NumPy-specific constants (`NPY_SIMDX_F16`, `NPY_SIMDX_F64`, `NPY_SIMDX_FMA`) provide additional safety beyond raw Highway constants
+   - Highway constants (e.g., `HWY_HAVE_FLOAT16`) only check hardware capabilities but don't consider NumPy's build configuration
+   - Our constants combine both checks:
+     ```cpp
+     #define NPY_SIMDX_F16 (NPY_SIMDX && HWY_HAVE_FLOAT16)
+     ```
+   - This ensures SIMD features won't be used when:
+     - Hardware supports it but NumPy optimization is disabled via meson option:
+       ```
+       option('disable-optimization', type: 'boolean', value: false,
+              description: 'Disable CPU optimized code (dispatch,simd,unroll...)')
+       ```
+     - Highway target is scalar (`HWY_TARGET == HWY_SCALAR`)
+   - Using these constants ensures consistent behavior across different compilation settings
+   - Without this additional layer, code might incorrectly try to use SIMD paths in scalar mode
+
+4. **Namespace Design**
+   - `np::simd`: Maximum width SIMD operations (scalable)
+   - `np::simd128`: Fixed 128-bit SIMD operations
+   - `hn`: Highway namespace alias (available within the SIMD namespaces)
+
+5. **Why Namespaces and Why Not Just Use Highway Directly?**
+   - Highway's design uses class tag types as template parameters (e.g., `Vec<ScalableTag<float>>`) when defining vector types
+   - Many Highway functions require explicitly passing a tag instance as the first parameter
+   - This class tag-based approach increases verbosity and complexity in user code
+   - Our wrapper eliminates this by internally managing tags through namespaces, letting users directly use types e.g. `Vec<float>`
+   - Simple example with raw Highway:
+     ```cpp
+     // Highway's approach
+     float *data = /* ... */;
+
+     namespace hn = hwy::HWY_NAMESPACE;
+     using namespace hn;
+
+     // Full-width operations
+     ScalableTag<float> df;  // Create a tag instance
+     Vec<ScalableTag<float>> v = LoadU(df, data);  // LoadU requires a tag instance
+     StoreU(v, df, data);  // StoreU requires a tag instance
+
+     // 128-bit operations
+     Full128<float> df128;  // Create a 128-bit tag instance
+     Vec<Full128<float>> v128 = LoadU(df128, data);  // LoadU requires a tag instance
+     StoreU(v128, df128, data);  // StoreU requires a tag instance
+     ```
+
+   - Simple example with our wrapper:
+     ```cpp
+     // Our wrapper approach
+     float *data = /* ... */;
+
+     // Full-width operations
+     using namespace np::simd;
+     Vec<float> v = LoadU(data);  // Full-width vector load
+     StoreU(v, data);
+
+     // 128-bit operations
+     using namespace np::simd128;
+     Vec<float> v128 = LoadU(data);  // 128-bit vector load
+     StoreU(v128, data);
+     ```
+
+   - The namespaced approach simplifies code, reduces errors, and provides a more intuitive interface
+   - It preserves all Highway operations benefits while reducing cognitive overhead
+
+6. **Why Namespaces Are Essential for This Design?**
+   - Namespaces allow us to define different internal tag types (`hn::ScalableTag<TLane>` in `np::simd` vs `hn::Full128<TLane>` in `np::simd128`)
+   - This provides a consistent type-based interface (`Vec<TLane>`) without requiring users to manually create tags
+   - Enables using the same function names (like `LoadU`) with different implementations based on SIMD width
+   - Without namespaces, we'd have to either reintroduce tags (defeating the purpose of the wrapper) or create different function names for each variant (e.g., `LoadU` vs `LoadU128`)
+
+7. **Template Type Parameters**
+   - `TLane`: The scalar type for each vector lane (e.g., uint8_t, float, double)
+
+
+## Requirements
+
+- C++17 or later
+- Google Highway library
+
+## License
+
+Same as NumPy's license
diff --git a/numpy/_core/src/common/simd/simd.hpp b/numpy/_core/src/common/simd/simd.hpp
new file mode 100644
index 000000000000..698da4adf865
--- /dev/null
+++ b/numpy/_core/src/common/simd/simd.hpp
@@ -0,0 +1,80 @@
+#ifndef NUMPY__CORE_SRC_COMMON_SIMD_SIMD_HPP_
+#define NUMPY__CORE_SRC_COMMON_SIMD_SIMD_HPP_
+
+/**
+ * This header provides a thin wrapper over Google's Highway SIMD library.
+ *
+ * The wrapper aims to simplify the SIMD interface of Google's Highway by
+ * getting rid of its class tags and using lane types directly, which can be
+ * deduced from the args in most cases.
+ */
+/**
+ * Since `NPY_SIMD` is only limited to NumPy C universal intrinsics,
+ * `NPY_SIMDX` is defined to indicate the SIMD availability for Google's Highway
+ * C++ code.
+ *
+ * Highway SIMD is only available when optimization is enabled.
+ * When NPY_DISABLE_OPTIMIZATION is defined, SIMD operations are disabled
+ * and the code falls back to scalar implementations.
+ */
+#ifndef NPY_DISABLE_OPTIMIZATION
+#include <hwy/highway.h>
+
+/**
+ * We avoid using Highway scalar operations for the following reasons:
+ * 1. We already provide kernels for scalar operations, so falling back to
+ *    the NumPy implementation is more appropriate. Compilers can often
+ *    optimize these better since they rely on standard libraries.
+ * 2. Not all Highway intrinsics are fully supported in scalar mode.
+ *
+ * Therefore, we only enable SIMD when the Highway target is not scalar.
+ */
+#define NPY_SIMDX (HWY_TARGET != HWY_SCALAR)
+
+// Indicates if the SIMD operations are available for float16.
+#define NPY_SIMDX_F16 (NPY_SIMDX && HWY_HAVE_FLOAT16)
+// Note: Highway requires SIMD extensions with native float32 support, so we don't need
+// to check for it.
+
+// Indicates if the SIMD operations are available for float64.
+#define NPY_SIMDX_F64 (NPY_SIMDX && HWY_HAVE_FLOAT64)
+
+// Indicates if the SIMD floating-point operations natively support fma.
+#define NPY_SIMDX_FMA (NPY_SIMDX && HWY_NATIVE_FMA)
+
+#else
+#define NPY_SIMDX 0
+#define NPY_SIMDX_F16 0
+#define NPY_SIMDX_F64 0
+#define NPY_SIMDX_FMA 0
+#endif
+
+namespace np {
+
+/// Represents the max SIMD width supported by the platform.
+namespace simd {
+#if NPY_SIMDX
+/// The highway namespace alias.
+/// We cannot import all the symbols from the HWY_NAMESPACE because it will
+/// conflict with the existing symbols in the numpy namespace.
+namespace hn = hwy::HWY_NAMESPACE;
+// internally used by the template header
+template <typename TLane>
+using _Tag = hn::ScalableTag<TLane>;
+#endif
+#include "simd.inc.hpp"
+}  // namespace simd
+
+/// Represents the 128-bit SIMD width.
+namespace simd128 {
+#if NPY_SIMDX
+namespace hn = hwy::HWY_NAMESPACE;
+template <typename TLane>
+using _Tag = hn::Full128<TLane>;
+#endif
+#include "simd.inc.hpp"
+}  // namespace simd128
+
+}  // namespace np
+
+#endif  // NUMPY__CORE_SRC_COMMON_SIMD_SIMD_HPP_
diff --git a/numpy/_core/src/common/simd/simd.inc.hpp b/numpy/_core/src/common/simd/simd.inc.hpp
new file mode 100644
index 000000000000..d052d829720a
--- /dev/null
+++ b/numpy/_core/src/common/simd/simd.inc.hpp
@@ -0,0 +1,102 @@
+#ifndef NPY_SIMDX
+#error "This is not a standalone header. Include simd.hpp instead."
+#endif
+
+// NOTE: This file is included by simd.hpp multiple times with different namespaces
+// so avoid including any headers here
+// #define NPY_SIMDX 1 // uncomment to enable Highlighting
+
+/**
+ * Determines whether the specified lane type is supported by the SIMD extension.
+ * Always defined as false when SIMD is not enabled, so it can be used in SFINAE.
+ *
+ * @tparam TLane The lane type to check for support.
+ */
+template <typename TLane>
+constexpr bool kSupportLane = NPY_SIMDX != 0;
+
+#if NPY_SIMDX
+// Define lane type support based on Highway capabilities
+template <>
+constexpr bool kSupportLane<hwy::float16_t> = HWY_HAVE_FLOAT16 != 0;
+template <>
+constexpr bool kSupportLane<double> = HWY_HAVE_FLOAT64 != 0;
+
+/// Represents an N-lane vector based on the specified lane type.
+/// @tparam TLane The scalar type for each vector lane
+template <typename TLane>
+using Vec = hn::Vec<_Tag<TLane>>;
+
+/// Represents a mask vector with boolean values or as a bitmask.
+/// @tparam TLane The scalar type the mask corresponds to
+template <typename TLane>
+using Mask = hn::Mask<_Tag<TLane>>;
+
+/// Unaligned load of a vector from memory.
+template <typename TLane>
+HWY_API Vec<TLane> LoadU(const TLane* ptr) {
+    return hn::LoadU(_Tag<TLane>(), ptr);
+}
+
+/// Unaligned store of a vector to memory.
+template <typename TLane>
+HWY_API void StoreU(const Vec<TLane>& a, TLane* ptr) {
+    hn::StoreU(a, _Tag<TLane>(), ptr);
+}
+
+/// Returns the number of vector lanes based on the lane type.
+template <typename TLane>
+HWY_API constexpr size_t Lanes(TLane tag = 0) {
+    return hn::Lanes(_Tag<TLane>());
+}
+
+/// Returns an uninitialized N-lane vector.
+template <typename TLane>
+HWY_API Vec<TLane> Undefined(TLane tag = 0) {
+    return hn::Undefined(_Tag<TLane>());
+}
+
+/// Returns N-lane vector with all lanes equal to zero.
+template <typename TLane>
+HWY_API Vec<TLane> Zero(TLane tag = 0) {
+    return hn::Zero(_Tag<TLane>());
+}
+
+/// Returns N-lane vector with all lanes equal to the given value of type `TLane`.
+template <typename TLane>
+HWY_API Vec<TLane> Set(TLane val) {
+    return hn::Set(_Tag<TLane>(), val);
+}
+
+/// Converts a mask to a vector based on the specified lane type.
+template <typename TLane, typename TMask>
+HWY_API Vec<TLane> VecFromMask(const TMask &m) {
+    return hn::VecFromMask(_Tag<TLane>(), m);
+}
+
+/// Convert (Reinterpret) an N-lane vector to a different type without modifying the underlying data.
+template <typename TLane, typename TVec>
+HWY_API Vec<TLane> BitCast(const TVec &v) {
+    return hn::BitCast(_Tag<TLane>(), v);
+}
+
+// Import common Highway intrinsics
+using hn::Eq;
+using hn::Le;
+using hn::Lt;
+using hn::Gt;
+using hn::Ge;
+using hn::And;
+using hn::Or;
+using hn::Xor;
+using hn::AndNot;
+using hn::Sub;
+using hn::Add;
+using hn::Mul;
+using hn::Div;
+using hn::Min;
+using hn::Max;
+using hn::Abs;
+using hn::Sqrt;
+
+#endif // NPY_SIMDX

From 5d48ec32b7e4f17e9c5934f1e029c25f683dc840 Mon Sep 17 00:00:00 2001
From: Sayed Adel
Date: Sat, 12 Apr 2025 19:25:27 +0200
Subject: [PATCH 0010/1018] SIMD: Update wrapper with improved docs and type
 support

- Fix hardware/platform terminology in documentation for clarity
- Add support for long double in template specializations
- Add kMaxLanes constant to expose maximum vector width information
- Follows clang formatting style for consistency with NumPy codebase.

---
 numpy/_core/src/common/simd/README.md    |  9 ++-
 numpy/_core/src/common/simd/simd.inc.hpp | 70 ++++++++++++++--------
 2 files changed, 54 insertions(+), 25 deletions(-)

diff --git a/numpy/_core/src/common/simd/README.md b/numpy/_core/src/common/simd/README.md
index ac58c4a6bd94..9a68d1aa1bfc 100644
--- a/numpy/_core/src/common/simd/README.md
+++ b/numpy/_core/src/common/simd/README.md
@@ -75,6 +75,11 @@ The wrapper provides type constraints to help with SFINAE (Substitution Failure
   constexpr bool kSupportLane<double> = NPY_SIMDX_F64 != 0;
   ```
 
+- `kMaxLanes`: Maximum number of lanes supported by the SIMD extension for the specified lane type.
+  ```cpp
+  template <typename TLane>
+  constexpr size_t kMaxLanes = HWY_MAX_LANES_D(_Tag<TLane>);
+  ```
 
 ```cpp
 #include "simd/simd.hpp"
@@ -175,13 +180,13 @@ The SIMD wrapper automatically disables SIMD operations when optimizations are d
 
 3. **Feature Detection Constants vs. Highway Constants**
    - NumPy-specific constants (`NPY_SIMDX_F16`, `NPY_SIMDX_F64`, `NPY_SIMDX_FMA`) provide additional safety beyond raw Highway constants
-   - Highway constants (e.g., `HWY_HAVE_FLOAT16`) only check hardware capabilities but don't consider NumPy's build configuration
+   - Highway constants (e.g., `HWY_HAVE_FLOAT16`) only check platform capabilities but don't consider NumPy's build configuration
    - Our constants combine both checks:
      ```cpp
     #define NPY_SIMDX_F16 (NPY_SIMDX && HWY_HAVE_FLOAT16)
     ```
    - This ensures SIMD features won't be used when:
-     - Hardware supports it but NumPy optimization is disabled via meson option:
+     - Platform supports it but NumPy optimization is disabled via meson option:
       ```
       option('disable-optimization', type: 'boolean', value: false,
              description: 'Disable CPU optimized code (dispatch,simd,unroll...)')
diff --git a/numpy/_core/src/common/simd/simd.inc.hpp b/numpy/_core/src/common/simd/simd.inc.hpp
index d052d829720a..6d12c9a3caeb 100644
--- a/numpy/_core/src/common/simd/simd.inc.hpp
+++ b/numpy/_core/src/common/simd/simd.inc.hpp
@@ -1,10 +1,10 @@
 #ifndef NPY_SIMDX
 #error "This is not a standalone header. Include simd.hpp instead."
+#define NPY_SIMDX 1 // Prevent editors from graying out the happy branch
 #endif
 
 // NOTE: This file is included by simd.hpp multiple times with different namespaces
 // so avoid including any headers here
-// #define NPY_SIMDX 1 // uncomment to enable Highlighting
 
 /**
  * Determines whether the specified lane type is supported by the SIMD extension.
@@ -21,6 +21,13 @@ template <>
 constexpr bool kSupportLane<hwy::float16_t> = HWY_HAVE_FLOAT16 != 0;
 template <>
 constexpr bool kSupportLane<double> = HWY_HAVE_FLOAT64 != 0;
+template <>
+constexpr bool kSupportLane<long double> =
+        HWY_HAVE_FLOAT64 != 0 && sizeof(long double) == sizeof(double);
+
+/// Maximum number of lanes supported by the SIMD extension for the specified lane type.
+template <typename TLane>
+constexpr size_t kMaxLanes = HWY_MAX_LANES_D(_Tag<TLane>);
 
 /// Represents an N-lane vector based on the specified lane type.
 /// @tparam TLane The scalar type for each vector lane
@@ -34,69 +41,86 @@ using Mask = hn::Mask<_Tag<TLane>>;
 
 /// Unaligned load of a vector from memory.
 template <typename TLane>
-HWY_API Vec<TLane> LoadU(const TLane* ptr) {
+HWY_API Vec<TLane>
+LoadU(const TLane *ptr)
+{
     return hn::LoadU(_Tag<TLane>(), ptr);
 }
 
 /// Unaligned store of a vector to memory.
 template <typename TLane>
-HWY_API void StoreU(const Vec<TLane>& a, TLane* ptr) {
+HWY_API void
+StoreU(const Vec<TLane> &a, TLane *ptr)
+{
     hn::StoreU(a, _Tag<TLane>(), ptr);
 }
 
 /// Returns the number of vector lanes based on the lane type.
 template <typename TLane>
-HWY_API constexpr size_t Lanes(TLane tag = 0) {
+HWY_API constexpr size_t
+Lanes(TLane tag = 0)
+{
     return hn::Lanes(_Tag<TLane>());
 }
 
 /// Returns an uninitialized N-lane vector.
 template <typename TLane>
-HWY_API Vec<TLane> Undefined(TLane tag = 0) {
+HWY_API Vec<TLane>
+Undefined(TLane tag = 0)
+{
     return hn::Undefined(_Tag<TLane>());
 }
 
 /// Returns N-lane vector with all lanes equal to zero.
 template <typename TLane>
-HWY_API Vec<TLane> Zero(TLane tag = 0) {
+HWY_API Vec<TLane>
+Zero(TLane tag = 0)
+{
     return hn::Zero(_Tag<TLane>());
 }
 
 /// Returns N-lane vector with all lanes equal to the given value of type `TLane`.
 template <typename TLane>
-HWY_API Vec<TLane> Set(TLane val) {
+HWY_API Vec<TLane>
+Set(TLane val)
+{
     return hn::Set(_Tag<TLane>(), val);
 }
 
 /// Converts a mask to a vector based on the specified lane type.
 template <typename TLane, typename TMask>
-HWY_API Vec<TLane> VecFromMask(const TMask &m) {
+HWY_API Vec<TLane>
+VecFromMask(const TMask &m)
+{
     return hn::VecFromMask(_Tag<TLane>(), m);
 }
 
-/// Convert (Reinterpret) an N-lane vector to a different type without modifying the underlying data.
+/// Convert (Reinterpret) an N-lane vector to a different type without modifying the
+/// underlying data.
 template <typename TLane, typename TVec>
-HWY_API Vec<TLane> BitCast(const TVec &v) {
+HWY_API Vec<TLane>
+BitCast(const TVec &v)
+{
     return hn::BitCast(_Tag<TLane>(), v);
 }
 
 // Import common Highway intrinsics
-using hn::Eq;
-using hn::Le;
-using hn::Lt;
-using hn::Gt;
-using hn::Ge;
+using hn::Abs;
+using hn::Add;
 using hn::And;
-using hn::Or;
-using hn::Xor;
 using hn::AndNot;
-using hn::Sub;
-using hn::Add;
-using hn::Mul;
 using hn::Div;
-using hn::Min;
+using hn::Eq;
+using hn::Ge;
+using hn::Gt;
+using hn::Le;
+using hn::Lt;
 using hn::Max;
-using hn::Abs;
+using hn::Min;
+using hn::Mul;
+using hn::Or;
 using hn::Sqrt;
+using hn::Sub;
+using hn::Xor;
 
-#endif // NPY_SIMDX
+#endif  // NPY_SIMDX

From e099bba5cba528036593321e539c6565a5e69765 Mon Sep 17 00:00:00 2001
From: Sayed Adel
Date: Thu, 17 Apr 2025 04:37:59 +0200
Subject: [PATCH 0011/1018] SIMD: Improve isolation and constexpr handling in
 wrapper

- Add anonymous namespace around implementation to ensure each
  translation unit gets its own constants based on local flags
- Use HWY_LANES_CONSTEXPR for Lanes function to ensure proper
  constexpr evaluation across platforms

---
 numpy/_core/src/common/simd/simd.inc.hpp               | 8 +++++++-
 numpy/_core/src/umath/loops_trigonometric.dispatch.cpp | 2 ++
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/numpy/_core/src/common/simd/simd.inc.hpp b/numpy/_core/src/common/simd/simd.inc.hpp
index 6d12c9a3caeb..64d28bc47118 100644
--- a/numpy/_core/src/common/simd/simd.inc.hpp
+++ b/numpy/_core/src/common/simd/simd.inc.hpp
@@ -3,6 +3,10 @@
 #define NPY_SIMDX 1 // Prevent editors from graying out the happy branch
 #endif
 
+// Using anonymous namespace instead of inline to ensure each translation unit
+// gets its own copy of constants based on local compilation flags
+namespace {
+
 // NOTE: This file is included by simd.hpp multiple times with different namespaces
 // so avoid including any headers here
 
@@ -57,7 +61,7 @@ StoreU(const Vec<TLane> &a, TLane *ptr)
 
 /// Returns the number of vector lanes based on the lane type.
 template <typename TLane>
-HWY_API constexpr size_t
+HWY_API HWY_LANES_CONSTEXPR size_t
 Lanes(TLane tag = 0)
 {
     return hn::Lanes(_Tag<TLane>());
@@ -124,3 +128,5 @@ using hn::Sub;
 using hn::Xor;
 
 #endif  // NPY_SIMDX
+
+}  // namespace anonymous
diff --git a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp
index ae696db4cd4a..9ce2571a1528 100644
--- a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp
+++ b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp
@@ -3,7 +3,9 @@
 
 #include "loops_utils.h"
 #include "simd/simd.h"
+#include "simd/simd.hpp"
 #include <hwy/highway.h>
+
 namespace hn = hwy::HWY_NAMESPACE;
 
 /*

From 1fd2076a37e35b0d2f9cf84195117f65637c185d Mon Sep 17 00:00:00 2001
From: Sayed Adel
Date: Thu, 17 Apr 2025 16:13:20 +0200
Subject: [PATCH 0012/1018] Update Highway submodule to latest master

---
 numpy/_core/src/highway | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/_core/src/highway b/numpy/_core/src/highway
index 0b696633f9ad..12b325bc1793 160000
--- a/numpy/_core/src/highway
+++ b/numpy/_core/src/highway
@@ -1 +1 @@
-Subproject commit 0b696633f9ad89497dd5532b55eaa01625ad71ca
+Subproject commit 12b325bc1793dee68ab2157995a690db859fe9e0

From 80e6a30f3efc30d067d7aa28568ff854572f8a89 Mon Sep 17 00:00:00 2001
From: Sayed Adel
Date: Thu, 17 Apr 2025 16:27:30 +0200
Subject: [PATCH 0013/1018] SIMD: Fix compile error by using MaxLanes instead
 of Lanes for array size

Replace hn::Lanes(f64) with hn::MaxLanes(f64) when defining the index
array size to fix error C2131: "expression did not evaluate to a
constant". This error occurs because Lanes() isn't always constexpr
compatible, especially with scalable vector extensions. MaxLanes()
provides a compile-time constant value suitable for static array
allocation and should be used with non-scalable SIMD extensions when
defining fixed-size arrays.
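For reference, a small illustrative sketch of the distinction (not part of
the diff; the buffer and loop are invented for the example):

```cpp
namespace hn = hwy::HWY_NAMESPACE;
hn::ScalableTag<double> f64;
// MaxLanes() yields a compile-time constant (an upper bound on scalable
// targets), so it can size a stack array.
double buf[hn::MaxLanes(f64)];
// Lanes() may only be known at run time (e.g. on SVE or RVV), so it can
// drive loop bounds but not static array sizes.
for (size_t i = 0; i < hn::Lanes(f64); ++i) {
    buf[i] = 0.0;
}
```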
---
 numpy/_core/src/umath/loops_hyperbolic.dispatch.cpp.src | 2 +-
 numpy/_core/src/umath/loops_trigonometric.dispatch.cpp  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/numpy/_core/src/umath/loops_hyperbolic.dispatch.cpp.src b/numpy/_core/src/umath/loops_hyperbolic.dispatch.cpp.src
index 8c66229942ee..93d288fbdb2e 100755
--- a/numpy/_core/src/umath/loops_hyperbolic.dispatch.cpp.src
+++ b/numpy/_core/src/umath/loops_hyperbolic.dispatch.cpp.src
@@ -385,7 +385,7 @@ simd_tanh_f64(const double *src, npy_intp ssrc, double *dst, npy_intp sdst, npy_
     vec_f64 b, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16;
     if constexpr(hn::MaxLanes(f64) == 2){
         vec_f64 e0e1_0, e0e1_1;
-        uint64_t index[hn::Lanes(f64)];
+        uint64_t index[hn::MaxLanes(f64)];
         hn::StoreU(idx, u64, index);
 
 /**begin repeat
diff --git a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp
index 9ce2571a1528..d298a8596cc4 100644
--- a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp
+++ b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp
@@ -186,7 +186,7 @@ simd_sincos_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst,
                       "larger than 256 bits.");
         simd_maski = ((uint8_t *)&simd_maski)[0];
 #endif
-        float NPY_DECL_ALIGNED(NPY_SIMD_WIDTH) ip_fback[hn::Lanes(f32)];
+        float NPY_DECL_ALIGNED(NPY_SIMD_WIDTH) ip_fback[hn::MaxLanes(f32)];
         hn::Store(x_in, f32, ip_fback);
 
         // process elements using libc for large elements

From 6519b288d66dcc7ed27cfeb664a1eb2d279d8492 Mon Sep 17 00:00:00 2001
From: Sayed Adel
Date: Sat, 17 May 2025 10:18:39 +0300
Subject: [PATCH 0014/1018] SIMD: Rename NPY_SIMDX to NPY_HWY
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Rename Highway wrapper macros for clarity:

- NPY_SIMDX     → NPY_HWY
- NPY_SIMDX_F16 → NPY_HWY_F16
- NPY_SIMDX_F64 → NPY_HWY_F64
- NPY_SIMDX_FMA → NPY_HWY_FMA

To avoid confusion with legacy SIMD macros.

---
 numpy/_core/src/common/simd/README.md    | 16 ++++++++--------
 numpy/_core/src/common/simd/simd.hpp     | 22 +++++++++++-----------
 numpy/_core/src/common/simd/simd.inc.hpp | 12 ++++++------
 3 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/numpy/_core/src/common/simd/README.md b/numpy/_core/src/common/simd/README.md
index 9a68d1aa1bfc..e69bab0e7ba5 100644
--- a/numpy/_core/src/common/simd/README.md
+++ b/numpy/_core/src/common/simd/README.md
@@ -45,19 +45,19 @@ StoreU(v128, data);
 #include "simd/simd.hpp"
 
 // Check if SIMD is enabled
-#if NPY_SIMDX
+#if NPY_HWY
     // SIMD code
 #else
     // Scalar fallback code
 #endif
 
 // Check for float64 support
-#if NPY_SIMDX_F64
+#if NPY_HWY_F64
     // Use float64 SIMD operations
 #endif
 
 // Check for FMA support
-#if NPY_SIMDX_FMA
+#if NPY_HWY_FMA
     // Use FMA operations
 #endif
 ```
@@ -70,9 +70,9 @@ The wrapper provides type constraints to help with SFINAE (Substitution Failure
   ```cpp
   // Base template - always defined, even when SIMD is not enabled (for SFINAE)
   template <typename TLane>
-  constexpr bool kSupportLane = NPY_SIMDX != 0;
+  constexpr bool kSupportLane = NPY_HWY != 0;
   template <>
-  constexpr bool kSupportLane<double> = NPY_SIMDX_F64 != 0;
+  constexpr bool kSupportLane<double> = NPY_HWY_F64 != 0;
   ```
 
@@ -175,15 +175,15 @@ The SIMD wrapper automatically disables SIMD operations when optimizations are d
 
 2. **Legacy Universal Intrinsics**
    - The older universal intrinsics C interface (in `simd.h` and accessible via `NPY_SIMD` macros) is deprecated
-   - All new SIMD code should use this Highway-based wrapper (accessible via `NPY_SIMDX` macros)
+   - All new SIMD code should use this Highway-based wrapper (accessible via `NPY_HWY` macros)
    - The legacy code is maintained for compatibility but will eventually be removed
 
 3. **Feature Detection Constants vs. Highway Constants**
-   - NumPy-specific constants (`NPY_SIMDX_F16`, `NPY_SIMDX_F64`, `NPY_SIMDX_FMA`) provide additional safety beyond raw Highway constants
+   - NumPy-specific constants (`NPY_HWY_F16`, `NPY_HWY_F64`, `NPY_HWY_FMA`) provide additional safety beyond raw Highway constants
    - Highway constants (e.g., `HWY_HAVE_FLOAT16`) only check platform capabilities but don't consider NumPy's build configuration
    - Our constants combine both checks:
      ```cpp
-     #define NPY_SIMDX_F16 (NPY_SIMDX && HWY_HAVE_FLOAT16)
+     #define NPY_HWY_F16 (NPY_HWY && HWY_HAVE_FLOAT16)
      ```
diff --git a/numpy/_core/src/common/simd/simd.hpp b/numpy/_core/src/common/simd/simd.hpp
index 698da4adf865..c4e572dc8d0f 100644
--- a/numpy/_core/src/common/simd/simd.hpp
+++ b/numpy/_core/src/common/simd/simd.hpp
@@ -10,7 +10,7 @@
  */
 /**
  * Since `NPY_SIMD` is only limited to NumPy C universal intrinsics,
- * `NPY_SIMDX` is defined to indicate the SIMD availability for Google's Highway
+ * `NPY_HWY` is defined to indicate the SIMD availability for Google's Highway
  * C++ code.
 
 *
 * Highway SIMD is only available when optimization is enabled.
@@ -29,31 +29,31 @@
  *
  * Therefore, we only enable SIMD when the Highway target is not scalar.
  */
-#define NPY_SIMDX (HWY_TARGET != HWY_SCALAR)
+#define NPY_HWY (HWY_TARGET != HWY_SCALAR)
 
 // Indicates if the SIMD operations are available for float16.
-#define NPY_SIMDX_F16 (NPY_SIMDX && HWY_HAVE_FLOAT16)
+#define NPY_HWY_F16 (NPY_HWY && HWY_HAVE_FLOAT16)
 // Note: Highway requires SIMD extensions with native float32 support, so we don't need
 // to check for it.
 
 // Indicates if the SIMD operations are available for float64.
-#define NPY_SIMDX_F64 (NPY_SIMDX && HWY_HAVE_FLOAT64)
+#define NPY_HWY_F64 (NPY_HWY && HWY_HAVE_FLOAT64)
 
 // Indicates if the SIMD floating-point operations natively support fma.
-#define NPY_SIMDX_FMA (NPY_SIMDX && HWY_NATIVE_FMA)
+#define NPY_HWY_FMA (NPY_HWY && HWY_NATIVE_FMA)
 
 #else
-#define NPY_SIMDX 0
-#define NPY_SIMDX_F16 0
-#define NPY_SIMDX_F64 0
-#define NPY_SIMDX_FMA 0
+#define NPY_HWY 0
+#define NPY_HWY_F16 0
+#define NPY_HWY_F64 0
+#define NPY_HWY_FMA 0
 #endif
 
 namespace np {
 
 /// Represents the max SIMD width supported by the platform.
 namespace simd {
-#if NPY_SIMDX
+#if NPY_HWY
 /// The highway namespace alias.
 /// We cannot import all the symbols from the HWY_NAMESPACE because it will
 /// conflict with the existing symbols in the numpy namespace.
@@ -67,7 +67,7 @@ using _Tag = hn::ScalableTag<TLane>;
 
 /// Represents the 128-bit SIMD width.
 namespace simd128 {
-#if NPY_SIMDX
+#if NPY_HWY
 namespace hn = hwy::HWY_NAMESPACE;
 template <typename TLane>
 using _Tag = hn::Full128<TLane>;
diff --git a/numpy/_core/src/common/simd/simd.inc.hpp b/numpy/_core/src/common/simd/simd.inc.hpp
index 64d28bc47118..f4a2540927dd 100644
--- a/numpy/_core/src/common/simd/simd.inc.hpp
+++ b/numpy/_core/src/common/simd/simd.inc.hpp
@@ -1,6 +1,6 @@
-#ifndef NPY_SIMDX
+#ifndef NPY_HWY
 #error "This is not a standalone header. Include simd.hpp instead."
-#define NPY_SIMDX 1 // Prevent editors from graying out the happy branch
+#define NPY_HWY 1 // Prevent editors from graying out the happy branch
 #endif
 
 // Using anonymous namespace instead of inline to ensure each translation unit
 // gets its own copy of constants based on local compilation flags
@@ -17,9 +17,9 @@ namespace {
  * @tparam TLane The lane type to check for support.
  */
 template <typename TLane>
-constexpr bool kSupportLane = NPY_SIMDX != 0;
+constexpr bool kSupportLane = NPY_HWY != 0;
 
-#if NPY_SIMDX
+#if NPY_HWY
 // Define lane type support based on Highway capabilities
 template <>
 constexpr bool kSupportLane<hwy::float16_t> = HWY_HAVE_FLOAT16 != 0;
@@ -127,6 +127,6 @@ using hn::Sqrt;
 using hn::Sub;
 using hn::Xor;
 
-#endif  // NPY_SIMDX
+#endif  // NPY_HWY
 
-}  // namespace anonymous
+}  // namespace

From 275e45ccc3c59759a77306031f3ca66601a70807 Mon Sep 17 00:00:00 2001
From: Sayed Adel
Date: Mon, 19 May 2025 18:27:58 +0300
Subject: [PATCH 0015/1018] Disable Highway EMU128 scalar emulation

Skip Highway's EMU128 in favor of NumPy's scalar implementations due to
strict IEEE 754 floating-point compliance requirements

---
 numpy/_core/src/common/simd/README.md |  3 +++
 numpy/_core/src/common/simd/simd.hpp  | 18 ++++++++++++------
 2 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/numpy/_core/src/common/simd/README.md b/numpy/_core/src/common/simd/README.md
index e69bab0e7ba5..a13a0f75b6fc 100644
--- a/numpy/_core/src/common/simd/README.md
+++ b/numpy/_core/src/common/simd/README.md
@@ -165,6 +165,7 @@ The SIMD wrapper automatically disables SIMD operations when optimizations are d
 
 - When `NPY_DISABLE_OPTIMIZATION` is defined, SIMD operations are disabled
 - SIMD is enabled only when the Highway target is not scalar (`HWY_TARGET != HWY_SCALAR`)
+  and not EMU128 (`HWY_TARGET != HWY_EMU128`)
 
 ## Design Notes
 
@@ -172,6 +173,8 @@ The SIMD wrapper automatically disables SIMD operations when optimizations are d
    - NumPy already provides kernels for scalar operations
    - Compilers can better optimize standard library implementations
    - Not all Highway intrinsics are fully supported in scalar mode
+   - For strict IEEE 754 floating-point compliance requirements, direct scalar
+     implementations offer more predictable behavior than EMU128
 
 2. **Legacy Universal Intrinsics**
    - The older universal intrinsics C interface (in `simd.h` and accessible via `NPY_SIMD` macros) is deprecated
diff --git a/numpy/_core/src/common/simd/simd.hpp b/numpy/_core/src/common/simd/simd.hpp
index c4e572dc8d0f..40556a68c59d 100644
--- a/numpy/_core/src/common/simd/simd.hpp
+++ b/numpy/_core/src/common/simd/simd.hpp
@@ -22,14 +22,20 @@
 
 /**
  * We avoid using Highway scalar operations for the following reasons:
- * 1. We already provide kernels for scalar operations, so falling back to
- *    the NumPy implementation is more appropriate. Compilers can often
- *    optimize these better since they rely on standard libraries.
- * 2. Not all Highway intrinsics are fully supported in scalar mode.
  *
- * Therefore, we only enable SIMD when the Highway target is not scalar.
+ * 1. NumPy already provides optimized kernels for scalar operations. Using these
+ *    existing implementations is more consistent with NumPy's architecture and
+ *    allows for compiler optimizations specific to standard library calls.
+ *
+ * 2. Not all Highway intrinsics are fully supported in scalar mode, which could
+ *    lead to compilation errors or unexpected behavior for certain operations.
+ *
+ * 3. For NumPy's strict IEEE 754 floating-point compliance requirements, direct scalar
+ *    implementations offer more predictable behavior than EMU128.
+ * + * Therefore, we only enable Highway SIMD when targeting actual SIMD instruction sets. */ -#define NPY_HWY (HWY_TARGET != HWY_SCALAR) +#define NPY_HWY ((HWY_TARGET != HWY_SCALAR) && (HWY_TARGET != HWY_EMU128)) // Indicates if the SIMD operations are available for float16. #define NPY_HWY_F16 (NPY_HWY && HWY_HAVE_FLOAT16) From 530409d43dbfa27df91ad26752a2cc42d6cf7d37 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 19 May 2025 20:45:51 -0400 Subject: [PATCH 0016/1018] CI: update cibuildwheel to 3.0.0b1 and enable cp314 [wheel build] --- .github/workflows/wheels.yml | 7 +++++-- tools/wheels/cibw_test_command.sh | 4 ++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index e96021775f3c..f502e6d64d1f 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -92,7 +92,7 @@ jobs: - [windows-2019, win_amd64, ""] - [windows-2019, win32, ""] - [windows-11-arm, win_arm64, ""] - python: ["cp311", "cp312", "cp313", "cp313t", "pp311"] + python: ["cp311", "cp312", "cp313", "cp313t", "cp314", "cp314t", "pp311"] exclude: # Don't build PyPy 32-bit windows - buildplat: [windows-2019, win32, ""] @@ -107,6 +107,8 @@ jobs: python: "pp311" - buildplat: [ macos13, macosx_x86_64, openblas ] python: "cp313t" + - buildplat: [ macos13, macosx_x86_64, openblas ] + python: "cp314t" env: IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} @@ -175,9 +177,10 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@faf86a6ed7efa889faf6996aa23820831055001a # v2.23.3 + uses: pypa/cibuildwheel@90a0ddeff0f23eebc21630e65d66d0f4955e9b94 # v3.0.0b1 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} + CIBW_ENABLE: cpython-prerelease cpython-freethreading pypy - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: diff --git a/tools/wheels/cibw_test_command.sh b/tools/wheels/cibw_test_command.sh index 60e90ef5beb6..45dbc8a102cf 100644 --- a/tools/wheels/cibw_test_command.sh +++ b/tools/wheels/cibw_test_command.sh @@ -4,6 +4,10 @@ set -xe PROJECT_DIR="$1" +if [ -d tools ]; then + cd tools +fi + python -m pip install threadpoolctl python -c "import numpy; numpy.show_config()" From d68ba1ebfc356c4d3df8e4996d76035148573de5 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 20 May 2025 08:55:52 -0400 Subject: [PATCH 0017/1018] MNT: respond to code review [wheel build] --- .github/workflows/wheels.yml | 3 ++- pyproject.toml | 2 +- tools/wheels/cibw_test_command.sh | 4 +--- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index f502e6d64d1f..bede99dd6e9a 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -180,7 +180,8 @@ jobs: uses: pypa/cibuildwheel@90a0ddeff0f23eebc21630e65d66d0f4955e9b94 # v3.0.0b1 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} - CIBW_ENABLE: cpython-prerelease cpython-freethreading pypy + # delete when we switch 3.14(t)-dev to 3.14(t) + CIBW_ENABLE: cpython-prerelease - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: diff --git a/pyproject.toml b/pyproject.toml index b62d71cbba73..b1eba50044fe 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -148,7 +148,7 @@ before-build = "bash {project}/tools/wheels/cibw_before_build.sh {project}" config-settings = "setup-args=-Duse-ilp64=true setup-args=-Dallow-noblas=false build-dir=build" before-test = "pip install -r 
{project}/requirements/test_requirements.txt" test-command = "bash {project}/tools/wheels/cibw_test_command.sh {project}" -enable = ["cpython-freethreading", "pypy", "cpython-prerelease"] +enable = ["cpython-freethreading", "pypy"] [tool.cibuildwheel.linux] manylinux-x86_64-image = "manylinux_2_28" diff --git a/tools/wheels/cibw_test_command.sh b/tools/wheels/cibw_test_command.sh index 45dbc8a102cf..4763ad512b22 100644 --- a/tools/wheels/cibw_test_command.sh +++ b/tools/wheels/cibw_test_command.sh @@ -4,9 +4,7 @@ set -xe PROJECT_DIR="$1" -if [ -d tools ]; then - cd tools -fi +export PYTHONSAFEPATH=1 python -m pip install threadpoolctl python -c "import numpy; numpy.show_config()" From 198cc9aade98ef880d4d09315aef9c565ea69b52 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 20 May 2025 09:08:21 -0400 Subject: [PATCH 0018/1018] MNT: move back to 'cd tools' hack [wheel build] --- tools/wheels/cibw_test_command.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/wheels/cibw_test_command.sh b/tools/wheels/cibw_test_command.sh index 4763ad512b22..2d39687a861b 100644 --- a/tools/wheels/cibw_test_command.sh +++ b/tools/wheels/cibw_test_command.sh @@ -4,7 +4,9 @@ set -xe PROJECT_DIR="$1" -export PYTHONSAFEPATH=1 +if [ -d tools ]; then + cd tools +fi python -m pip install threadpoolctl python -c "import numpy; numpy.show_config()" From 084f183a66cdaa888205ffeb599aff6c083178cc Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 20 May 2025 16:11:29 -0600 Subject: [PATCH 0019/1018] BEG, MAINT: Begin NumPy 2.4.0 development. - Create 2.4.0-notes.rst - Update release.rst - Update pavement.py - Update pyproject.toml - Update cversions.txt - Update numpyconfig.h - Delete release fragments --- doc/release/upcoming_changes/26018.change.rst | 7 ----- .../upcoming_changes/26018.performance.rst | 7 ----- .../upcoming_changes/26745.highlight.rst | 10 ------- .../upcoming_changes/27288.improvement.rst | 3 -- .../upcoming_changes/27789.new_function.rst | 5 ---- doc/release/upcoming_changes/27883.c_api.rst | 4 --- doc/release/upcoming_changes/27883.change.rst | 17 ----------- doc/release/upcoming_changes/27998.c_api.rst | 10 ------- doc/release/upcoming_changes/28080.c_api.rst | 1 - .../upcoming_changes/28080.improvement.rst | 2 -- doc/release/upcoming_changes/28102.change.rst | 6 ---- .../upcoming_changes/28105.improvement.rst | 2 -- .../upcoming_changes/28129.deprecation.rst | 4 --- .../upcoming_changes/28205.improvement.rst | 6 ---- .../upcoming_changes/28214.new_feature.rst | 23 --------------- .../upcoming_changes/28250.improvement.rst | 3 -- .../upcoming_changes/28254.expired.rst | 29 ------------------- doc/release/upcoming_changes/28343.change.rst | 1 - doc/release/upcoming_changes/28426.change.rst | 6 ---- doc/release/upcoming_changes/28436.change.rst | 10 ------- .../upcoming_changes/28442.improvement.rst | 1 - doc/release/upcoming_changes/28569.change.rst | 2 -- .../upcoming_changes/28576.new_feature.rst | 15 ---------- doc/release/upcoming_changes/28615.change.rst | 5 ---- .../upcoming_changes/28619.highlight.rst | 6 ---- .../upcoming_changes/28619.performance.rst | 7 ----- .../upcoming_changes/28669.new_feature.rst | 3 -- doc/release/upcoming_changes/28703.change.rst | 3 -- doc/release/upcoming_changes/28713.change.rst | 1 - doc/release/upcoming_changes/28741.change.rst | 1 - .../upcoming_changes/28769.performance.rst | 8 ----- .../upcoming_changes/28856.improvement.rst | 5 ---- .../upcoming_changes/28884.deprecation.rst | 28 ------------------ 
.../upcoming_changes/28940.new_feature.rst | 6 ---- .../upcoming_changes/28961.expired.rst | 1 - doc/source/release.rst | 1 + doc/source/release/2.4.0-notes.rst | 19 ++++++++++++ numpy/_core/code_generators/cversions.txt | 3 +- numpy/_core/include/numpy/numpyconfig.h | 2 +- pavement.py | 2 +- pyproject.toml | 2 +- 41 files changed, 25 insertions(+), 252 deletions(-) delete mode 100644 doc/release/upcoming_changes/26018.change.rst delete mode 100644 doc/release/upcoming_changes/26018.performance.rst delete mode 100644 doc/release/upcoming_changes/26745.highlight.rst delete mode 100644 doc/release/upcoming_changes/27288.improvement.rst delete mode 100644 doc/release/upcoming_changes/27789.new_function.rst delete mode 100644 doc/release/upcoming_changes/27883.c_api.rst delete mode 100644 doc/release/upcoming_changes/27883.change.rst delete mode 100644 doc/release/upcoming_changes/27998.c_api.rst delete mode 100644 doc/release/upcoming_changes/28080.c_api.rst delete mode 100644 doc/release/upcoming_changes/28080.improvement.rst delete mode 100644 doc/release/upcoming_changes/28102.change.rst delete mode 100644 doc/release/upcoming_changes/28105.improvement.rst delete mode 100644 doc/release/upcoming_changes/28129.deprecation.rst delete mode 100644 doc/release/upcoming_changes/28205.improvement.rst delete mode 100644 doc/release/upcoming_changes/28214.new_feature.rst delete mode 100644 doc/release/upcoming_changes/28250.improvement.rst delete mode 100644 doc/release/upcoming_changes/28254.expired.rst delete mode 100644 doc/release/upcoming_changes/28343.change.rst delete mode 100644 doc/release/upcoming_changes/28426.change.rst delete mode 100644 doc/release/upcoming_changes/28436.change.rst delete mode 100644 doc/release/upcoming_changes/28442.improvement.rst delete mode 100644 doc/release/upcoming_changes/28569.change.rst delete mode 100644 doc/release/upcoming_changes/28576.new_feature.rst delete mode 100644 doc/release/upcoming_changes/28615.change.rst delete mode 100644 doc/release/upcoming_changes/28619.highlight.rst delete mode 100644 doc/release/upcoming_changes/28619.performance.rst delete mode 100644 doc/release/upcoming_changes/28669.new_feature.rst delete mode 100644 doc/release/upcoming_changes/28703.change.rst delete mode 100644 doc/release/upcoming_changes/28713.change.rst delete mode 100644 doc/release/upcoming_changes/28741.change.rst delete mode 100644 doc/release/upcoming_changes/28769.performance.rst delete mode 100644 doc/release/upcoming_changes/28856.improvement.rst delete mode 100644 doc/release/upcoming_changes/28884.deprecation.rst delete mode 100644 doc/release/upcoming_changes/28940.new_feature.rst delete mode 100644 doc/release/upcoming_changes/28961.expired.rst create mode 100644 doc/source/release/2.4.0-notes.rst diff --git a/doc/release/upcoming_changes/26018.change.rst b/doc/release/upcoming_changes/26018.change.rst deleted file mode 100644 index 9d7c139be183..000000000000 --- a/doc/release/upcoming_changes/26018.change.rst +++ /dev/null @@ -1,7 +0,0 @@ -``unique_values`` may return unsorted data ------------------------------------------- -The relatively new function (added in NumPy 2.0) ``unique_values`` may now -return unsorted results. Just as ``unique_counts`` and ``unique_all`` -these never guaranteed a sorted result, however, the result -was sorted until now. In cases where these do return a sorted result, this -may change in future releases to improve performance. 
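For context, the removed ``unique_values`` note above is easiest to see in a short
sketch (assuming NumPy >= 2.3, where the sorted guarantee was dropped; the exact
output order is unspecified and shown only for illustration):

```python
import numpy as np

a = np.array([3, 1, 2, 3, 1])

# Each value appears once, but the order is no longer guaranteed to be sorted.
vals = np.unique_values(a)

# Callers that relied on the previously sorted result should sort explicitly.
sorted_vals = np.sort(vals)  # array([1, 2, 3])
```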
diff --git a/doc/release/upcoming_changes/26018.performance.rst b/doc/release/upcoming_changes/26018.performance.rst deleted file mode 100644 index ffeab51dbdf6..000000000000 --- a/doc/release/upcoming_changes/26018.performance.rst +++ /dev/null @@ -1,7 +0,0 @@ -Performance improvements to ``np.unique`` ------------------------------------------ -``np.unique`` now tries to use a hash table to find unique values instead of sorting -values before finding unique values. This is limited to certain dtypes for now, and -the function is now faster for those dtypes. The function now also exposes a ``sorted`` -parameter to allow returning unique values as they were found, instead of sorting them -afterwards. \ No newline at end of file diff --git a/doc/release/upcoming_changes/26745.highlight.rst b/doc/release/upcoming_changes/26745.highlight.rst deleted file mode 100644 index 5636f919c80d..000000000000 --- a/doc/release/upcoming_changes/26745.highlight.rst +++ /dev/null @@ -1,10 +0,0 @@ -Interactive examples in the NumPy documentation ------------------------------------------------ - -The NumPy documentation includes a number of examples that -can now be run interactively in your browser using WebAssembly -and Pyodide. - -Please note that the examples are currently experimental in -nature and may not work as expected for all methods in the -public API. diff --git a/doc/release/upcoming_changes/27288.improvement.rst b/doc/release/upcoming_changes/27288.improvement.rst deleted file mode 100644 index c7319554c63f..000000000000 --- a/doc/release/upcoming_changes/27288.improvement.rst +++ /dev/null @@ -1,3 +0,0 @@ -* Scalar comparisons between non-comparable dtypes such as - `np.array(1) == np.array('s')` now return a NumPy bool instead of - a Python bool. \ No newline at end of file diff --git a/doc/release/upcoming_changes/27789.new_function.rst b/doc/release/upcoming_changes/27789.new_function.rst deleted file mode 100644 index 734a0c3bc2b5..000000000000 --- a/doc/release/upcoming_changes/27789.new_function.rst +++ /dev/null @@ -1,5 +0,0 @@ -New function `numpy.strings.slice` ----------------------------------- -The new function `numpy.strings.slice` was added, which implements fast -native slicing of string arrays. It supports the full slicing API including -negative slice offsets and steps. \ No newline at end of file diff --git a/doc/release/upcoming_changes/27883.c_api.rst b/doc/release/upcoming_changes/27883.c_api.rst deleted file mode 100644 index 107e0036c5c2..000000000000 --- a/doc/release/upcoming_changes/27883.c_api.rst +++ /dev/null @@ -1,4 +0,0 @@ -* `NpyIter_GetTransferFlags` is now available to check if - the iterator needs the Python API or if casts may cause floating point - errors (FPE). FPEs can for example be set when casting ``float64(1e300)`` - to ``float32`` (overflow to infinity) or a NaN to an integer (invalid value). \ No newline at end of file diff --git a/doc/release/upcoming_changes/27883.change.rst b/doc/release/upcoming_changes/27883.change.rst deleted file mode 100644 index ea68771efba3..000000000000 --- a/doc/release/upcoming_changes/27883.change.rst +++ /dev/null @@ -1,17 +0,0 @@ -Changes to the main iterator and potential numerical changes ------------------------------------------------------------- -The main iterator, used in math functions and via ``np.nditer`` from Python -and ``NpyIter`` in C, now behaves differently for some buffered iterations. 
-This means that: - -* The buffer size used will often be smaller than the maximum buffer sized - allowed by the ``buffersize`` parameter. -* The "growinner" flag is now honored with buffered reductions when no operand - requires buffering. - -For ``np.sum()`` such changes in buffersize may slightly change numerical -results of floating point operations. -Users who use "growinner" for custom reductions could notice -changes in precision (for example, in NumPy we removed it from -``einsum`` to avoid most precision changes and improve precision -for some 64bit floating point inputs). diff --git a/doc/release/upcoming_changes/27998.c_api.rst b/doc/release/upcoming_changes/27998.c_api.rst deleted file mode 100644 index edc6371af1f9..000000000000 --- a/doc/release/upcoming_changes/27998.c_api.rst +++ /dev/null @@ -1,10 +0,0 @@ -New `NpyIter_GetTransferFlags` and ``NpyIter_IterationNeedsAPI`` change ------------------------------------------------------------------------ -NumPy now has the new `NpyIter_GetTransferFlags` function as a more precise -way checking of iterator/buffering needs. I.e. whether the Python API/GIL is -required or floating point errors may occur. -This function is also faster if you already know your needs without buffering. - -The ``NpyIter_IterationNeedsAPI`` function now performs all the checks that were -previously performed at setup time. While it was never necessary to call it -multiple times, doing so will now have a larger cost. diff --git a/doc/release/upcoming_changes/28080.c_api.rst b/doc/release/upcoming_changes/28080.c_api.rst deleted file mode 100644 index f72be7ef52fe..000000000000 --- a/doc/release/upcoming_changes/28080.c_api.rst +++ /dev/null @@ -1 +0,0 @@ -* ``NpyIter`` now has no limit on the number of operands it supports. diff --git a/doc/release/upcoming_changes/28080.improvement.rst b/doc/release/upcoming_changes/28080.improvement.rst deleted file mode 100644 index 19b85ae3c96a..000000000000 --- a/doc/release/upcoming_changes/28080.improvement.rst +++ /dev/null @@ -1,2 +0,0 @@ -* ``np.nditer`` now has no limit on the number of supported operands - (C-integer). diff --git a/doc/release/upcoming_changes/28102.change.rst b/doc/release/upcoming_changes/28102.change.rst deleted file mode 100644 index bd54378a652e..000000000000 --- a/doc/release/upcoming_changes/28102.change.rst +++ /dev/null @@ -1,6 +0,0 @@ -The minimum supported GCC version is now 9.3.0 ----------------------------------------------- -The minimum supported version was updated from 8.4.0 to 9.3.0, -primarily in order to reduce the chance of platform-specific bugs in old GCC -versions from causing issues. - diff --git a/doc/release/upcoming_changes/28105.improvement.rst b/doc/release/upcoming_changes/28105.improvement.rst deleted file mode 100644 index 537467575234..000000000000 --- a/doc/release/upcoming_changes/28105.improvement.rst +++ /dev/null @@ -1,2 +0,0 @@ -* No-copy pickling is now supported for any - array that can be transposed to a C-contiguous array. \ No newline at end of file diff --git a/doc/release/upcoming_changes/28129.deprecation.rst b/doc/release/upcoming_changes/28129.deprecation.rst deleted file mode 100644 index b1beb0c5cca3..000000000000 --- a/doc/release/upcoming_changes/28129.deprecation.rst +++ /dev/null @@ -1,4 +0,0 @@ -* The ``numpy.typing.mypy_plugin`` has been deprecated in favor of platform-agnostic - static type inference. Please remove ``numpy.typing.mypy_plugin`` from the ``plugins`` - section of your mypy configuration. 
If this change results in new errors being - reported, kindly open an issue. diff --git a/doc/release/upcoming_changes/28205.improvement.rst b/doc/release/upcoming_changes/28205.improvement.rst deleted file mode 100644 index 42eaaac98363..000000000000 --- a/doc/release/upcoming_changes/28205.improvement.rst +++ /dev/null @@ -1,6 +0,0 @@ -Added warnings to `np.isclose` ---------------------------------- -Added warning messages if at least one of atol or rtol are -either `np.nan` or `np.inf` within `np.isclose` - -* Warnings follow the user's `np.seterr` settings diff --git a/doc/release/upcoming_changes/28214.new_feature.rst b/doc/release/upcoming_changes/28214.new_feature.rst deleted file mode 100644 index eb95a0739e79..000000000000 --- a/doc/release/upcoming_changes/28214.new_feature.rst +++ /dev/null @@ -1,23 +0,0 @@ -NumPy now registers its pkg-config paths with the pkgconf_ PyPI package ------------------------------------------------------------------------ - -The pkgconf_ PyPI package provides an interface for projects like NumPy to -register their own paths to be added to the pkg-config search path. This means -that when using pkgconf_ from PyPI, NumPy will be discoverable without needing -for any custom environment configuration. - -.. attention:: Attention - - This only applies when using the pkgconf_ package from PyPI_, or put another - way, this only applies when installing pkgconf_ via a Python package - manager. - - If you are using ``pkg-config`` or ``pkgconf`` provided by your system, or - any other source that does not use the pkgconf-pypi_ project, the NumPy - pkg-config directory will not be automatically added to the search path. In - these situations, you might want to use ``numpy-config``. - - -.. _pkgconf: https://github.com/pypackaging-native/pkgconf-pypi -.. _PyPI: https://pypi.org/ -.. _pkgconf-pypi: https://github.com/pypackaging-native/pkgconf-pypi diff --git a/doc/release/upcoming_changes/28250.improvement.rst b/doc/release/upcoming_changes/28250.improvement.rst deleted file mode 100644 index 703a8bb0c2e1..000000000000 --- a/doc/release/upcoming_changes/28250.improvement.rst +++ /dev/null @@ -1,3 +0,0 @@ -* The ``__repr__`` for user-defined dtypes now prefers the ``__name__`` of the - custom dtype over a more generic name constructed from its ``kind`` and - ``itemsize``. 
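The removed ``np.isclose`` snippet a few entries above describes new warnings for
non-finite tolerances; a minimal illustration (assuming NumPy >= 2.3 and the
default ``np.seterr`` state; the exact warning text is an implementation detail):

```python
import numpy as np
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    # A non-finite atol (np.nan or np.inf) now triggers a warning.
    np.isclose(1.0, 1.0, atol=np.inf)

print([str(w.message) for w in caught])
```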
diff --git a/doc/release/upcoming_changes/28254.expired.rst b/doc/release/upcoming_changes/28254.expired.rst deleted file mode 100644 index 5f391eb6cbe2..000000000000 --- a/doc/release/upcoming_changes/28254.expired.rst +++ /dev/null @@ -1,29 +0,0 @@ -* Remove deprecated macros like ``NPY_OWNDATA`` from cython interfaces in favor of ``NPY_ARRAY_OWNDATA`` (deprecated since 1.7) -* Remove ``numpy/npy_1_7_deprecated_api.h`` and C macros like ``NPY_OWNDATA`` in favor of ``NPY_ARRAY_OWNDATA`` (deprecated since 1.7) -* Remove alias ``generate_divbyzero_error`` to ``npy_set_floatstatus_divbyzero`` and ``generate_overflow_error`` to ``npy_set_floatstatus_overflow`` (deprecated since 1.10) -* Remove ``np.tostring`` (deprecated since 1.19) -* Raise on ``np.conjugate`` of non-numeric types (deprecated since 1.13) -* Raise when using ``np.bincount(...minlength=None)``, use 0 instead (deprecated since 1.14) -* Passing ``shape=None`` to functions with a non-optional shape argument errors, use ``()`` instead (deprecated since 1.20) -* Inexact matches for ``mode`` and ``searchside`` raise (deprecated since 1.20) -* Setting ``__array_finalize__ = None`` errors (deprecated since 1.23) -* ``np.fromfile`` and ``np.fromstring`` error on bad data, previously they would guess (deprecated since 1.18) -* ``datetime64`` and ``timedelta64`` construction with a tuple no longer accepts an ``event`` value, either use a two-tuple of (unit, num) or a 4-tuple of (unit, num, den, 1) (deprecated since 1.14) -* When constructing a ``dtype`` from a class with a ``dtype`` attribute, that attribute must be a dtype-instance rather than a thing that can be parsed as a dtype instance (deprecated in 1.19). At some point the whole construct of using a dtype attribute will be deprecated (see #25306) -* Passing booleans as partition index errors (deprecated since 1.23) -* Out-of-bounds indexes error even on empty arrays (deprecated since 1.20) -* ``np.tostring`` has been removed, use ``tobytes`` instead (deprecated since 1.19) -* Disallow make a non-writeable array writeable for arrays with a base that do not own their data (deprecated since 1.17) -* ``concatenate()`` with ``axis=None`` uses ``same-kind`` casting by default, not ``unsafe`` (deprecated since 1.20) -* Unpickling a scalar with object dtype errors (deprecated since 1.20) -* The binary mode of ``fromstring`` now errors, use ``frombuffer`` instead (deprecated since 1.14) -* Converting ``np.inexact`` or ``np.floating`` to a dtype errors (deprecated since 1.19) -* Converting ``np.complex``, ``np.integer``, ``np.signedinteger``, ``np.unsignedinteger``, ``np.generic`` to a dtype errors (deprecated since 1.19) -* The Python built-in ``round`` errors for complex scalars. Use ``np.round`` or ``scalar.round`` instead (deprecated since 1.19) -* 'np.bool' scalars can no longer be interpreted as an index (deprecated since 1.19) -* Parsing an integer via a float string is no longer supported. (deprecated since 1.23) To avoid this error you can - * make sure the original data is stored as integers. - * use the ``converters=float`` keyword argument. - * Use ``np.loadtxt(...).astype(np.int64)`` -* The use of a length 1 tuple for the ufunc ``signature`` errors. Use ``dtype`` or fill the tuple with ``None`` (deprecated since 1.19) -* Special handling of matrix is in np.outer is removed. 
Convert to a ndarray via ``matrix.A`` (deprecated since 1.20) diff --git a/doc/release/upcoming_changes/28343.change.rst b/doc/release/upcoming_changes/28343.change.rst deleted file mode 100644 index 378ef775b62e..000000000000 --- a/doc/release/upcoming_changes/28343.change.rst +++ /dev/null @@ -1 +0,0 @@ -* The vector norm ``ord=inf`` and the matrix norms ``ord={1, 2, inf, 'nuc'}`` now always returns zero for empty arrays. Empty arrays have at least one axis of size zero. This affects `np.linalg.norm`, `np.linalg.vector_norm`, and `np.linalg.matrix_norm`. Previously, NumPy would raises errors or return zero depending on the shape of the array. diff --git a/doc/release/upcoming_changes/28426.change.rst b/doc/release/upcoming_changes/28426.change.rst deleted file mode 100644 index d1c48640eed0..000000000000 --- a/doc/release/upcoming_changes/28426.change.rst +++ /dev/null @@ -1,6 +0,0 @@ -Changes to automatic bin selection in numpy.histogram ------------------------------------------------------ -The automatic bin selection algorithm in ``numpy.histogram`` has been modified -to avoid out-of-memory errors for samples with low variation. -For full control over the selected bins the user can use set -the ``bin`` or ``range`` parameters of ``numpy.histogram``. diff --git a/doc/release/upcoming_changes/28436.change.rst b/doc/release/upcoming_changes/28436.change.rst deleted file mode 100644 index 60149e55a4d0..000000000000 --- a/doc/release/upcoming_changes/28436.change.rst +++ /dev/null @@ -1,10 +0,0 @@ -Build manylinux_2_28 wheels ---------------------------- - -Wheels for linux systems will use the ``manylinux_2_28`` tag (instead of the ``manylinux2014`` tag), which means -dropping support for redhat7/centos7, amazonlinux2, debian9, ubuntu18.04, and -other pre-glibc2.28 operating system versions, as per the `PEP 600 support -table`_. - -.. _`PEP 600 support table`: https://github.com/mayeut/pep600_compliance?tab=readme-ov-file#pep600-compliance-check - diff --git a/doc/release/upcoming_changes/28442.improvement.rst b/doc/release/upcoming_changes/28442.improvement.rst deleted file mode 100644 index 16d71bde19c5..000000000000 --- a/doc/release/upcoming_changes/28442.improvement.rst +++ /dev/null @@ -1 +0,0 @@ -* ``np.dot`` now reports floating point exceptions. diff --git a/doc/release/upcoming_changes/28569.change.rst b/doc/release/upcoming_changes/28569.change.rst deleted file mode 100644 index f9d26fda0484..000000000000 --- a/doc/release/upcoming_changes/28569.change.rst +++ /dev/null @@ -1,2 +0,0 @@ -* A spelling error in the error message returned when converting a string to a float with the - method ``np.format_float_positional`` has been fixed. diff --git a/doc/release/upcoming_changes/28576.new_feature.rst b/doc/release/upcoming_changes/28576.new_feature.rst deleted file mode 100644 index 2c50887a49f2..000000000000 --- a/doc/release/upcoming_changes/28576.new_feature.rst +++ /dev/null @@ -1,15 +0,0 @@ -Allow ``out=...`` in ufuncs to ensure array result --------------------------------------------------- -NumPy has the sometimes difficult behavior that it currently usually -returns scalars rather than 0-D arrays (even if the inputs were 0-D arrays). -This is especially problematic for non-numerical dtypes (e.g. ``object``). - -For ufuncs (i.e. most simple math functions) it is now possible -to use ``out=...`` (literally `...`, e.g. ``out=Ellipsis``) which is identical in behavior to ``out`` not -being passed, but will ensure a non-scalar return. 
-This spelling is borrowed from ``arr1d[0, ...]`` where the ``...`` -also ensures a non-scalar return. - -Other functions with an ``out=`` kwarg should gain support eventually. -Downstream libraries that interoperate via ``__array_ufunc__`` or -``__array_function__`` may need to adapt to support this. diff --git a/doc/release/upcoming_changes/28615.change.rst b/doc/release/upcoming_changes/28615.change.rst deleted file mode 100644 index 58b751e40704..000000000000 --- a/doc/release/upcoming_changes/28615.change.rst +++ /dev/null @@ -1,5 +0,0 @@ -* NumPy's ``__array_api_version__`` was upgraded from ``2023.12`` to ``2024.12``. -* `numpy.count_nonzero` for ``axis=None`` (default) now returns a NumPy scalar - instead of a Python integer. -* The parameter ``axis`` in `numpy.take_along_axis` function has now a default - value of ``-1``. diff --git a/doc/release/upcoming_changes/28619.highlight.rst b/doc/release/upcoming_changes/28619.highlight.rst deleted file mode 100644 index 6c296b92899e..000000000000 --- a/doc/release/upcoming_changes/28619.highlight.rst +++ /dev/null @@ -1,6 +0,0 @@ -Building NumPy with OpenMP Parallelization -------------------------------------------- -NumPy now supports OpenMP parallel processing capabilities when built with the -``-Denable_openmp=true`` Meson build flag. This feature is disabled by default. -When enabled, ``np.sort`` and ``np.argsort`` functions can utilize OpenMP for -parallel thread execution, improving performance for these operations. diff --git a/doc/release/upcoming_changes/28619.performance.rst b/doc/release/upcoming_changes/28619.performance.rst deleted file mode 100644 index 904decbe0ba6..000000000000 --- a/doc/release/upcoming_changes/28619.performance.rst +++ /dev/null @@ -1,7 +0,0 @@ -Performance improvements to ``np.sort`` and ``np.argsort`` ----------------------------------------------------------- -``np.sort`` and ``np.argsort`` functions now can leverage OpenMP for parallel -thread execution, resulting in up to 3.5x speedups on x86 architectures with -AVX2 or AVX-512 instructions. This opt-in feature requires NumPy to be built -with the -Denable_openmp Meson flag. Users can control the number of threads -used by setting the OMP_NUM_THREADS environment variable. diff --git a/doc/release/upcoming_changes/28669.new_feature.rst b/doc/release/upcoming_changes/28669.new_feature.rst deleted file mode 100644 index 2953a5123ccc..000000000000 --- a/doc/release/upcoming_changes/28669.new_feature.rst +++ /dev/null @@ -1,3 +0,0 @@ -* The type parameter of ``np.dtype`` now defaults to ``typing.Any``. - This way, static type-checkers will infer ``dtype: np.dtype`` as - ``dtype: np.dtype[Any]``, without reporting an error. diff --git a/doc/release/upcoming_changes/28703.change.rst b/doc/release/upcoming_changes/28703.change.rst deleted file mode 100644 index 87bb431951f9..000000000000 --- a/doc/release/upcoming_changes/28703.change.rst +++ /dev/null @@ -1,3 +0,0 @@ -* Printing of ``np.float16`` and ``np.float32`` scalars and arrays have been improved by - adjusting the transition to scientific notation based on the floating point precision. - A new legacy ``np.printoptions`` mode ``'2.2'`` has been added for backwards compatibility. diff --git a/doc/release/upcoming_changes/28713.change.rst b/doc/release/upcoming_changes/28713.change.rst deleted file mode 100644 index 5e5c5adde88b..000000000000 --- a/doc/release/upcoming_changes/28713.change.rst +++ /dev/null @@ -1 +0,0 @@ -Remove use of -Wl,-ld_classic on macOS. 
This hack is no longer needed by Spack, and results in libraries that cannot link to other libraries built with ld (new). diff --git a/doc/release/upcoming_changes/28741.change.rst b/doc/release/upcoming_changes/28741.change.rst deleted file mode 100644 index ca9531f490d8..000000000000 --- a/doc/release/upcoming_changes/28741.change.rst +++ /dev/null @@ -1 +0,0 @@ -Re-enable overriding functions in the :mod:`numpy.strings` module. diff --git a/doc/release/upcoming_changes/28769.performance.rst b/doc/release/upcoming_changes/28769.performance.rst deleted file mode 100644 index 7fb8f02282f6..000000000000 --- a/doc/release/upcoming_changes/28769.performance.rst +++ /dev/null @@ -1,8 +0,0 @@ -Performance improvements for ``np.float16`` casts --------------------------------------------------- -Earlier, floating point casts to and from ``np.float16`` types -were emulated in software on all platforms. - -Now, on ARM devices that support Neon float16 intrinsics (such as -recent Apple Silicon), the native float16 path is used to achieve -the best performance. diff --git a/doc/release/upcoming_changes/28856.improvement.rst b/doc/release/upcoming_changes/28856.improvement.rst deleted file mode 100644 index 83911035f097..000000000000 --- a/doc/release/upcoming_changes/28856.improvement.rst +++ /dev/null @@ -1,5 +0,0 @@ -* ``np.dtypes.StringDType`` is now a - `generic type `_ which - accepts a type argument for ``na_object`` that defaults to ``typing.Never``. - For example, ``StringDType(na_object=None)`` returns a ``StringDType[None]``, - and ``StringDType()`` returns a ``StringDType[typing.Never]``. diff --git a/doc/release/upcoming_changes/28884.deprecation.rst b/doc/release/upcoming_changes/28884.deprecation.rst deleted file mode 100644 index c1be55fb0dd3..000000000000 --- a/doc/release/upcoming_changes/28884.deprecation.rst +++ /dev/null @@ -1,28 +0,0 @@ -``numpy.typing.NBitBase`` deprecation -------------------------------------- -The ``numpy.typing.NBitBase`` type has been deprecated and will be removed in a future version. - -This type was previously intended to be used as a generic upper bound for type-parameters, for example: - -.. code-block:: python - - import numpy as np - import numpy.typing as npt - - def f[NT: npt.NBitBase](x: np.complexfloating[NT]) -> np.floating[NT]: ... - -But in NumPy 2.2.0, ``float64`` and ``complex128`` were changed to concrete subtypes, causing static type-checkers to reject ``x: np.float64 = f(np.complex128(42j))``. - -So instead, the better approach is to use ``typing.overload``: - -.. code-block:: python - - import numpy as np - from typing import overload - - @overload - def f(x: np.complex64) -> np.float32: ... - @overload - def f(x: np.complex128) -> np.float64: ... - @overload - def f(x: np.clongdouble) -> np.longdouble: ... diff --git a/doc/release/upcoming_changes/28940.new_feature.rst b/doc/release/upcoming_changes/28940.new_feature.rst deleted file mode 100644 index e0d3dc8888c3..000000000000 --- a/doc/release/upcoming_changes/28940.new_feature.rst +++ /dev/null @@ -1,6 +0,0 @@ -* Static type-checkers now interpret: - - - ``_: np.ndarray`` as ``_: npt.NDArray[typing.Any]``. - - ``_: np.flatiter`` as ``_: np.flatiter[np.ndarray]``. - - This is because their type parameters now have default values. 
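The removed typing snippet above (type-parameter defaults for ``np.ndarray`` and
``np.flatiter``) is easiest to read as an annotated module; a minimal sketch of
what static type-checkers now infer (assuming the new NumPy type stubs):

```python
from typing import Any

import numpy as np
import numpy.typing as npt

# A bare annotation now defaults its type parameters, so these two lines
# are equivalent for static type-checkers:
x: np.ndarray
y: npt.NDArray[Any]

# Likewise, a bare flatiter annotation is inferred as np.flatiter[np.ndarray].
it: np.flatiter
```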
diff --git a/doc/release/upcoming_changes/28961.expired.rst b/doc/release/upcoming_changes/28961.expired.rst deleted file mode 100644 index 92031de35e62..000000000000 --- a/doc/release/upcoming_changes/28961.expired.rst +++ /dev/null @@ -1 +0,0 @@ -* Removed the ``np.compat`` package source code (removed in 2.0) diff --git a/doc/source/release.rst b/doc/source/release.rst index 36d5e6731f4f..6c6a853b06f5 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,6 +5,7 @@ Release notes .. toctree:: :maxdepth: 2 + 2.4.0 2.3.0 2.2.6 2.2.5 diff --git a/doc/source/release/2.4.0-notes.rst b/doc/source/release/2.4.0-notes.rst new file mode 100644 index 000000000000..29a7e5ce6073 --- /dev/null +++ b/doc/source/release/2.4.0-notes.rst @@ -0,0 +1,19 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.4.0 Release Notes +========================== + + +Highlights +========== + +*We'll choose highlights for this release near the end of the release cycle.* + + +.. if release snippets have been incorporated already, uncomment the follow + line (leave the `.. include:: directive) + +.. **Content from release note snippets in doc/release/upcoming_changes:** + +.. include:: notes-towncrier.rst diff --git a/numpy/_core/code_generators/cversions.txt b/numpy/_core/code_generators/cversions.txt index d448df066a19..0d642d760b21 100644 --- a/numpy/_core/code_generators/cversions.txt +++ b/numpy/_core/code_generators/cversions.txt @@ -78,5 +78,6 @@ # Version 19 (NumPy 2.1.0) Only header additions # Version 19 (NumPy 2.2.0) No change 0x00000013 = 2b8f1f4da822491ff030b2b37dff07e3 -# Version 19 (NumPy 2.3.0) +# Version 20 (NumPy 2.3.0) +# Version 20 (NumPy 2.4.0) No change 0x00000014 = e56b74d32a934d085e7c3414cb9999b8, diff --git a/numpy/_core/include/numpy/numpyconfig.h b/numpy/_core/include/numpy/numpyconfig.h index ba44c28b9d0f..52d7e2b5d7d7 100644 --- a/numpy/_core/include/numpy/numpyconfig.h +++ b/numpy/_core/include/numpy/numpyconfig.h @@ -172,7 +172,7 @@ #define NPY_FEATURE_VERSION_STRING "2.0" #elif NPY_FEATURE_VERSION == NPY_2_1_API_VERSION #define NPY_FEATURE_VERSION_STRING "2.1" -#elif NPY_FEATURE_VERSION == NPY_2_3_API_VERSION +#elif NPY_FEATURE_VERSION == NPY_2_3_API_VERSION /* also 2.4 */ #define NPY_FEATURE_VERSION_STRING "2.3" #else #error "Missing version string define for new NumPy version." 
diff --git a/pavement.py b/pavement.py index e00b9647f5e3..369b8703b0ba 100644 --- a/pavement.py +++ b/pavement.py @@ -35,7 +35,7 @@ #----------------------------------- # Path to the release notes -RELEASE_NOTES = 'doc/source/release/2.3.0-notes.rst' +RELEASE_NOTES = 'doc/source/release/2.4.0-notes.rst' #------------------------------------------------------- diff --git a/pyproject.toml b/pyproject.toml index b62d71cbba73..5cf75b20a6b6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ requires = [ [project] name = "numpy" -version = "2.3.0.dev0" +version = "2.4.0.dev0" # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) license = {file = "LICENSE.txt"} From bae7d087c54f5a4ce25f4906b8fcefc92fed799c Mon Sep 17 00:00:00 2001 From: Stan Ulbrych <89152624+StanFromIreland@users.noreply.github.com> Date: Wed, 21 May 2025 20:50:31 +0100 Subject: [PATCH 0020/1018] Fix workflow error --- .github/workflows/windows.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index f61419df09cd..6c02563150da 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -35,7 +35,7 @@ jobs: persist-credentials: false - name: Setup Python - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: python-version: ${{ matrix.compiler-pyversion[1] }} From 54529e22743cffe6f997b725a7fcff650736395b Mon Sep 17 00:00:00 2001 From: Angus Gibson Date: Thu, 22 May 2025 11:42:18 +1000 Subject: [PATCH 0021/1018] BUG: Avoid compile errors in f2py modules Some of the casts from cfuncs pass PyObject* to PyArrayObject*, which causes compile errors due to incompatible pointer types on at least GCC 14. --- numpy/f2py/cfuncs.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index a9a56b2455f2..6c48c1ef0175 100644 --- a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -1047,9 +1047,12 @@ def errmess(s: str) -> None: PyArray_ScalarAsCtype(obj, v); return 1; } - else if (PyArray_Check(obj) && PyArray_TYPE(obj) == NPY_LONGDOUBLE) { - (*v) = *((npy_longdouble *)PyArray_DATA(obj)); - return 1; + else if (PyArray_Check(obj)) { + PyArrayObject *arr = (PyArrayObject *)obj; + if (PyArray_TYPE(arr) == NPY_LONGDOUBLE) { + (*v) = *((npy_longdouble *)PyArray_DATA(arr)); + return 1; + } } } if (double_from_pyobj(&d, obj, errmess)) { @@ -1131,10 +1134,13 @@ def errmess(s: str) -> None: PyArray_ScalarAsCtype(obj, v); return 1; } - else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_CLONGDOUBLE) { - (*v).r = npy_creall(*(((npy_clongdouble *)PyArray_DATA(obj)))); - (*v).i = npy_cimagl(*(((npy_clongdouble *)PyArray_DATA(obj)))); - return 1; + else if (PyArray_Check(obj)) { + PyArrayObject *arr = (PyArrayObject *)obj; + if (PyArray_TYPE(arr)==NPY_CLONGDOUBLE) { + (*v).r = npy_creall(*(((npy_clongdouble *)PyArray_DATA(arr)))); + (*v).i = npy_cimagl(*(((npy_clongdouble *)PyArray_DATA(arr)))); + return 1; + } } } if (complex_double_from_pyobj(&cd,obj,errmess)) { From ce6a72c41efe545f68dcd831e6193fcdd450180b Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 22 May 2025 12:09:04 +0200 Subject: [PATCH 0022/1018] DOC: Expand/clean up extension module import error We get a lot of these reports and it is never us... But unfortunately, Python (currently) doesn't report *why* the module wasn't found. 
(I have opened an issue asking for that.)

Until Python does, try to figure it out ourselves, i.e. list
C modules (I guess it's always one, but OK).

If anything it'll give *us* an immediate thing to point out if an issue
is reported...

I also hid the "source import" thing to only occur if __config__
doesn't exist. Not sure that catches this fully, but I also feel like
this isn't an actual problem anymore (i.e. we could just delete it
also).

Tested locally by renaming or deleting `_multiarray_umath`.
---
 numpy/__init__.py       | 10 ++++++----
 numpy/_core/__init__.py | 43 +++++++++++++++++++++++++++++++++--------
 2 files changed, 41 insertions(+), 12 deletions(-)

diff --git a/numpy/__init__.py b/numpy/__init__.py
index 8fb2e742dfc4..3a67bd221247 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -111,10 +111,12 @@
 try:
     from numpy.__config__ import show_config
 except ImportError as e:
-    msg = """Error importing numpy: you should not try to import numpy from
-        its source directory; please exit the numpy source tree, and relaunch
-        your python interpreter from there."""
-    raise ImportError(msg) from e
+    if isinstance(e, ModuleNotFoundError) and e.name == "numpy.__config__":
+        # The __config__ module itself was not found, so add this info:
+        msg = """Error importing numpy: you should not try to import numpy from
+            its source directory; please exit the numpy source tree, and relaunch
+            your python interpreter from there."""
+        raise ImportError(msg) from e
 
 from . import _core
 from ._core import (
diff --git a/numpy/_core/__init__.py b/numpy/_core/__init__.py
index d0da7e0ad9ed..f19557277756 100644
--- a/numpy/_core/__init__.py
+++ b/numpy/_core/__init__.py
@@ -22,29 +22,56 @@
     from . import multiarray
 except ImportError as exc:
     import sys
-    msg = """
+
+    # Basically always, the problem should be that the C module is wrong/missing...
+    if isinstance(exc, ModuleNotFoundError) and exc.name == "numpy._core._multiarray_umath":
+        import sys
+        candidates = []
+        for path in __path__:
+            candidates.extend(f for f in os.listdir(path) if f.startswith("_multiarray_umath"))
+        if len(candidates) == 0:
+            bad_c_module_info = (
+                "We found no compiled module, was NumPy build successfully?\n")
+        else:
+            candidate_str = '\n * '.join(candidates)
+            # cache_tag is documented to be possibly None, so just use name if it is
+            # this guesses at cache_tag being the same as the extension module scheme
+            tag = sys.implementation.cache_tag or sys.implementation.name
+            bad_c_module_info = (
+                f"The following compiled module files exist, but seem incompatible\n"
+                f"with with either python '{tag}' or the "
+                f"platform '{sys.platform}':\n\n * {candidate_str}\n"
+            )
+    else:
+        bad_c_module_info = ""
+
+    major, minor, *_ = sys.version_info
+    msg = f"""
 
 IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE!
 
 Importing the numpy C-extensions failed. This error can happen for
 many reasons, often due to issues with your setup or how NumPy was
 installed.
-
+{bad_c_module_info}
 We have compiled some common reasons and troubleshooting tips at:
 
     https://numpy.org/devdocs/user/troubleshooting-importerror.html
 
 Please note and check the following:
 
-  * The Python version is: Python%d.%d from "%s"
-  * The NumPy version is: "%s"
+  * The Python version is: Python {major}.{minor} from "{sys.executable}"
+  * The NumPy version is: "{__version__}"
 
 and make sure that they are the versions you expect.
 
-Please carefully study the documentation linked above for further help.
-Original error was: %s -""" % (sys.version_info[0], sys.version_info[1], sys.executable, - __version__, exc) +Please carefully study the information and documentation linked above. +This is unlikely to be a NumPy issue but will be caused by a bad install +or environment on your machine. + +Original error was: {exc} +""" + raise ImportError(msg) from exc finally: for envkey in env_added: From 552ce2a6ddba62c874629d7d5c2dfb21af403d50 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 22 May 2025 12:15:36 +0200 Subject: [PATCH 0023/1018] appease linter --- numpy/_core/__init__.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/numpy/_core/__init__.py b/numpy/_core/__init__.py index f19557277756..268932649eac 100644 --- a/numpy/_core/__init__.py +++ b/numpy/_core/__init__.py @@ -24,11 +24,15 @@ import sys # Basically always, the problem should be that the C module is wrong/missing... - if isinstance(exc, ModuleNotFoundError) and exc.name == "numpy._core._multiarray_umath": + if ( + isinstance(exc, ModuleNotFoundError) + and exc.name == "numpy._core._multiarray_umath" + ): import sys candidates = [] for path in __path__: - candidates.extend(f for f in os.listdir(path) if f.startswith("_multiarray_umath")) + candidates.extend( + f for f in os.listdir(path) if f.startswith("_multiarray_umath")) if len(candidates) == 0: bad_c_module_info = ( "We found no compiled module, was NumPy build successfully?\n") From d63cc918f588d6eb92fd20b22dc509e345a728d8 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Thu, 22 May 2025 12:33:27 +0200 Subject: [PATCH 0024/1018] Revert enable/CIBW_ENABLE changes [wheel build] --- .github/workflows/wheels.yml | 2 -- pyproject.toml | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index bede99dd6e9a..fa2c1cb5ae97 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -180,8 +180,6 @@ jobs: uses: pypa/cibuildwheel@90a0ddeff0f23eebc21630e65d66d0f4955e9b94 # v3.0.0b1 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} - # delete when we switch 3.14(t)-dev to 3.14(t) - CIBW_ENABLE: cpython-prerelease - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: diff --git a/pyproject.toml b/pyproject.toml index 2d9fbb1e2675..5cf75b20a6b6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -148,7 +148,7 @@ before-build = "bash {project}/tools/wheels/cibw_before_build.sh {project}" config-settings = "setup-args=-Duse-ilp64=true setup-args=-Dallow-noblas=false build-dir=build" before-test = "pip install -r {project}/requirements/test_requirements.txt" test-command = "bash {project}/tools/wheels/cibw_test_command.sh {project}" -enable = ["cpython-freethreading", "pypy"] +enable = ["cpython-freethreading", "pypy", "cpython-prerelease"] [tool.cibuildwheel.linux] manylinux-x86_64-image = "manylinux_2_28" From dd95e3ec53570fea721d6ff82537d7e657792787 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 22 May 2025 12:34:46 +0200 Subject: [PATCH 0025/1018] explicitly re-raise again (worked before because the next line raises...) --- numpy/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/__init__.py b/numpy/__init__.py index 3a67bd221247..aadc1fab3407 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -117,6 +117,7 @@ its source directory; please exit the numpy source tree, and relaunch your python interpreter from there.""" raise ImportError(msg) from e + raise from . 
import _core from ._core import ( From ae01519952d0cf4bd8ace320453340a3e8a42ee7 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 23 May 2025 13:50:56 +0200 Subject: [PATCH 0026/1018] Update numpy/_core/__init__.py MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Melissa Weber Mendonça --- numpy/_core/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/__init__.py b/numpy/_core/__init__.py index 268932649eac..7b19cefb2f93 100644 --- a/numpy/_core/__init__.py +++ b/numpy/_core/__init__.py @@ -35,7 +35,7 @@ f for f in os.listdir(path) if f.startswith("_multiarray_umath")) if len(candidates) == 0: bad_c_module_info = ( - "We found no compiled module, was NumPy build successfully?\n") + "We found no compiled module, did NumPy build successfully?\n") else: candidate_str = '\n * '.join(candidates) # cache_tag is documented to be possibly None, so just use name if it is From 3cbb8cd321f7676cf3ba387e081c42a5ff08637b Mon Sep 17 00:00:00 2001 From: Abdu Zoghbi Date: Fri, 23 May 2025 10:00:09 -0400 Subject: [PATCH 0027/1018] BUG: handle the case of modules with derived types --- numpy/f2py/auxfuncs.py | 16 +++++++++++++++- numpy/f2py/auxfuncs.pyi | 2 ++ numpy/f2py/f90mod_rules.py | 4 ++++ 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index 6e58e6352224..a5af31d976ec 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -42,7 +42,7 @@ 'isunsigned_long_long', 'isunsigned_long_longarray', 'isunsigned_short', 'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess', 'replace', 'show', 'stripcomma', 'throw_error', 'isattr_value', 'getuseblocks', - 'process_f2cmap_dict', 'containscommon' + 'process_f2cmap_dict', 'containscommon', 'containsderivedtypes' ] @@ -569,6 +569,20 @@ def containscommon(rout): return 0 +def hasderivedtypes(rout): + return ('block' in rout) and rout['block'] == 'type' + + +def containsderivedtypes(rout): + if hasderivedtypes(rout): + return 1 + if hasbody(rout): + for b in rout['body']: + if hasderivedtypes(b): + return 1 + return 0 + + def containsmodule(block): if ismodule(block): return 1 diff --git a/numpy/f2py/auxfuncs.pyi b/numpy/f2py/auxfuncs.pyi index 2a0d4e106bcc..1212f229c660 100644 --- a/numpy/f2py/auxfuncs.pyi +++ b/numpy/f2py/auxfuncs.pyi @@ -200,11 +200,13 @@ def isintent_inplace(var: _Var) -> bool: ... def isintent_aux(var: _Var) -> bool: ... # +def containsderivedtypes(rout: _ROut) -> _Bool: ... def containscommon(rout: _ROut) -> _Bool: ... def hasexternals(rout: _ROut) -> bool: ... def hasresultnote(rout: _ROut) -> _Bool: ... def hasbody(rout: _ROut) -> _Bool: ... def hascommon(rout: _ROut) -> bool: ... +def hasderivedtypes(rout: _ROut) -> bool: ... def hascallstatement(rout: _ROut) -> bool: ... def isroutine(rout: _ROut) -> bool: ... def ismodule(rout: _ROut) -> bool: ... 
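The new ``hasderivedtypes``/``containsderivedtypes`` helpers follow the same
shape as the existing ``contains*`` predicates over f2py's parsed block tree; a
hedged sketch of that pattern (``block`` and ``body`` keys as produced by
``crackfortran``; the function names here are illustrative, not part of f2py):

```python
# Sketch only: a unit parsed by f2py is a dict whose 'block' key names its
# kind ('module', 'type', ...) and whose 'body' key holds the nested units.
def has_derived_type(unit):
    return unit.get('block') == 'type'

def contains_derived_type(unit):
    # True if the unit itself is a derived type or directly contains one.
    if has_derived_type(unit):
        return True
    return any(has_derived_type(b) for b in unit.get('body', []))
```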
diff --git a/numpy/f2py/f90mod_rules.py b/numpy/f2py/f90mod_rules.py index 29adbe78a26f..d13a42a9d71f 100644 --- a/numpy/f2py/f90mod_rules.py +++ b/numpy/f2py/f90mod_rules.py @@ -120,6 +120,10 @@ def dadd(line, s=doc): if m['name'] in usenames and containscommon(m): outmess(f"\t\t\tSkipping {m['name']} since it is in 'use' and contains a common block...\n") continue + # skip modules with derived types + if m['name'] in usenames and containsderivedtypes(m): + outmess(f"\t\t\tSkipping {m['name']} since it is in 'use' and contains a derived type...\n") + continue if onlyvars: outmess(f"\t\t Variables: {' '.join(onlyvars)}\n") chooks = [''] From f84c105194013644a6f2e305a3ac6a6d3c946eb3 Mon Sep 17 00:00:00 2001 From: Abdu Zoghbi Date: Fri, 23 May 2025 10:01:41 -0400 Subject: [PATCH 0028/1018] TST: tests for modules with derived types --- .../src/regression/mod_derived_types.f90 | 23 +++++++++++++++++++ numpy/f2py/tests/test_regression.py | 10 ++++++++ 2 files changed, 33 insertions(+) create mode 100644 numpy/f2py/tests/src/regression/mod_derived_types.f90 diff --git a/numpy/f2py/tests/src/regression/mod_derived_types.f90 b/numpy/f2py/tests/src/regression/mod_derived_types.f90 new file mode 100644 index 000000000000..7692c82cf42e --- /dev/null +++ b/numpy/f2py/tests/src/regression/mod_derived_types.f90 @@ -0,0 +1,23 @@ +module mtypes + implicit none + integer, parameter :: value1 = 100 + type :: master_data + integer :: idat = 200 + end type master_data + type(master_data) :: masterdata +end module mtypes + + +subroutine no_type_subroutine(ain, aout) + use mtypes, only: value1 + integer, intent(in) :: ain + integer, intent(out) :: aout + aout = ain + value1 +end subroutine no_type_subroutine + +subroutine type_subroutine(ain, aout) + use mtypes, only: masterdata + integer, intent(in) :: ain + integer, intent(out) :: aout + aout = ain + masterdata%idat +end subroutine type_subroutine \ No newline at end of file diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py index 1931ad21a48b..5e3bac38b287 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -37,6 +37,16 @@ def test_mdat(self): assert self.module.simple_subroutine(5) == 1014 +class TestModuleWithDerivedType(util.F2PyTest): + # Check that modules with derived types work + sources = [util.getpath("tests", "src", "regression", "mod_derived_types.f90")] + + @pytest.mark.slow + def test_mtypes(self): + assert self.module.no_type_subroutine(10) == 110 + assert self.module.type_subroutine(10) == 210 + + class TestNegativeBounds(util.F2PyTest): # Check that negative bounds work correctly sources = [util.getpath("tests", "src", "negative_bounds", "issue_20853.f90")] From ac9a024a1b469ccf5a684b74048ea76b0e0d7434 Mon Sep 17 00:00:00 2001 From: Abdu Zoghbi Date: Fri, 23 May 2025 10:21:36 -0400 Subject: [PATCH 0029/1018] fix W291 --- numpy/f2py/tests/test_regression.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py index 5e3bac38b287..93eb29e8e723 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -44,7 +44,7 @@ class TestModuleWithDerivedType(util.F2PyTest): @pytest.mark.slow def test_mtypes(self): assert self.module.no_type_subroutine(10) == 110 - assert self.module.type_subroutine(10) == 210 + assert self.module.type_subroutine(10) == 210 class TestNegativeBounds(util.F2PyTest): From cbd2fef887aa32af84bc6efb3653370560e4bcec Mon Sep 17 
00:00:00 2001 From: Sebastian Berg Date: Fri, 23 May 2025 17:17:57 +0200 Subject: [PATCH 0030/1018] BUG: Fix cache use regression gh-29006 got the branching wrong leaving the cache undefined on most GCC/clang, which means we wouldn't use it. Also move it up so that we can just remove the unused globals entirely. --- numpy/_core/src/multiarray/alloc.c | 35 +++++++++++++++--------------- 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/numpy/_core/src/multiarray/alloc.c b/numpy/_core/src/multiarray/alloc.c index 280ca81a35a7..64c2b854167d 100644 --- a/numpy/_core/src/multiarray/alloc.c +++ b/numpy/_core/src/multiarray/alloc.c @@ -27,9 +27,22 @@ #endif #endif -#define NBUCKETS 1024 /* number of buckets for data*/ -#define NBUCKETS_DIM 16 /* number of buckets for dimensions/strides */ -#define NCACHE 7 /* number of cache entries per bucket */ +/* Do not enable the alloc cache if the GIL is disabled, or if ASAN or MSAN + * instrumentation is enabled. The cache makes ASAN use-after-free or MSAN + * use-of-uninitialized-memory warnings less useful. */ +#define USE_ALLOC_CACHE 1 +#ifdef Py_GIL_DISABLED +# define USE_ALLOC_CACHE 0 +#elif defined(__has_feature) +# if __has_feature(address_sanitizer) || __has_feature(memory_sanitizer) +# define USE_ALLOC_CACHE 0 +# endif +#endif + +#if USE_ALLOC_CACHE +# define NBUCKETS 1024 /* number of buckets for data*/ +# define NBUCKETS_DIM 16 /* number of buckets for dimensions/strides */ +# define NCACHE 7 /* number of cache entries per bucket */ /* this structure fits neatly into a cacheline */ typedef struct { npy_uintp available; /* number of cached pointers */ @@ -37,7 +50,7 @@ typedef struct { } cache_bucket; static cache_bucket datacache[NBUCKETS]; static cache_bucket dimcache[NBUCKETS_DIM]; - +#endif /* USE_ALLOC_CACHE */ /* * This function tells whether NumPy attempts to call `madvise` with @@ -99,20 +112,6 @@ indicate_hugepages(void *p, size_t size) { } -/* Do not enable the alloc cache if the GIL is disabled, or if ASAN or MSAN - * instrumentation is enabled. The cache makes ASAN use-after-free or MSAN - * use-of-uninitialized-memory warnings less useful. 
*/ -#ifdef Py_GIL_DISABLED -#define USE_ALLOC_CACHE 0 -#elif defined(__has_feature) -# if __has_feature(address_sanitizer) || __has_feature(memory_sanitizer) -# define USE_ALLOC_CACHE 0 -# endif -#else -#define USE_ALLOC_CACHE 1 -#endif - - /* as the cache is managed in global variables verify the GIL is held */ /* From e57bb2b429fab1b8af27a85182d1ebebf3ea80aa Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 23 May 2025 17:31:42 +0200 Subject: [PATCH 0031/1018] ok, don't attempt to remove the statics (too ingrained) --- numpy/_core/src/multiarray/alloc.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/numpy/_core/src/multiarray/alloc.c b/numpy/_core/src/multiarray/alloc.c index 64c2b854167d..cc9c5762a196 100644 --- a/numpy/_core/src/multiarray/alloc.c +++ b/numpy/_core/src/multiarray/alloc.c @@ -39,7 +39,6 @@ # endif #endif -#if USE_ALLOC_CACHE # define NBUCKETS 1024 /* number of buckets for data*/ # define NBUCKETS_DIM 16 /* number of buckets for dimensions/strides */ # define NCACHE 7 /* number of cache entries per bucket */ @@ -50,7 +49,6 @@ typedef struct { } cache_bucket; static cache_bucket datacache[NBUCKETS]; static cache_bucket dimcache[NBUCKETS_DIM]; -#endif /* USE_ALLOC_CACHE */ /* * This function tells whether NumPy attempts to call `madvise` with From 06f4d5ed1b854869d4e9b1a3df7e9a7b9e60e77d Mon Sep 17 00:00:00 2001 From: lvllvl <24905907+lvllvl@users.noreply.github.com> Date: Fri, 23 May 2025 11:48:01 -0400 Subject: [PATCH 0032/1018] ENH: add __array_function__ protocol in polynomial (#28996) --- numpy/polynomial/polynomial.py | 9 ++++++++- numpy/polynomial/tests/test_polynomial.py | 22 ++++++++++++++++++++++ 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index 32b53b757a1c..6ec0dc58a1de 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -82,6 +82,7 @@ import numpy as np import numpy.linalg as la +from numpy._core.overrides import array_function_dispatch from numpy.lib.array_utils import normalize_axis_index from . import polyutils as pu @@ -841,7 +842,13 @@ def polyvalfromroots(x, r, tensor=True): raise ValueError("x.ndim must be < r.ndim when tensor == False") return np.prod(x - r, axis=0) +def _polyval2d_dispatcher(x, y, c): + return (x, y, c) +def _polygrid2d_dispatcher(x, y, c): + return (x, y, c) + +@array_function_dispatch(_polyval2d_dispatcher) def polyval2d(x, y, c): """ Evaluate a 2-D polynomial at points (x, y). @@ -893,7 +900,7 @@ def polyval2d(x, y, c): """ return pu._valnd(polyval, c, x, y) - +@array_function_dispatch(_polygrid2d_dispatcher) def polygrid2d(x, y, c): """ Evaluate a 2-D polynomial on the Cartesian product of x and y. 
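For background, ``array_function_dispatch`` is what wires these functions into
the ``__array_function__`` protocol: the dispatcher returns the arguments whose
types NumPy scans for an ``__array_function__`` method, and an implementation
that returns ``NotImplemented`` passes the call along to the next candidate. A
minimal sketch of a duck-typed participant after this change (the class name is
illustrative):

```python
from numpy.polynomial import polynomial as P

class DuckGrid:
    def __array_function__(self, func, types, args, kwargs):
        # func is the public NumPy callable being overridden, e.g. P.polyval2d.
        if func is P.polyval2d:
            return "DuckGrid handled polyval2d"
        return NotImplemented

print(P.polyval2d(DuckGrid(), DuckGrid(), DuckGrid()))  # "DuckGrid handled polyval2d"
```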
diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index 27513fd682e8..8bfa3c184cf7 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -667,3 +667,25 @@ def test_result_type(self): arr = np.polydiv(1, np.float32(1)) assert_equal(arr[0].dtype, np.float64) + +class ArrayFunctionInterceptor: + def __init__(self): + self.called = False + + def __array_function__(self, func, types, args, kwargs): + self.called = True + return "intercepted" + +def test_polyval2d_array_function_hook(): + x = ArrayFunctionInterceptor() + y = ArrayFunctionInterceptor() + c = ArrayFunctionInterceptor() + result = np.polynomial.polynomial.polyval2d(x, y, c) + assert result == "intercepted" + +def test_polygrid2d_array_function_hook(): + x = ArrayFunctionInterceptor() + y = ArrayFunctionInterceptor() + c = ArrayFunctionInterceptor() + result = np.polynomial.polynomial.polygrid2d(x, y, c) + assert result == "intercepted" From 4c1eb7076a7623937097544cdd6a4ae5fc1aee6d Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 25 May 2025 03:23:48 +0200 Subject: [PATCH 0033/1018] TYP: annotate strings.slice --- numpy/_core/strings.pyi | 15 +++++++++++++++ numpy/strings/__init__.pyi | 2 ++ numpy/typing/tests/data/reveal/strings.pyi | 4 ++++ 3 files changed, 21 insertions(+) diff --git a/numpy/_core/strings.pyi b/numpy/_core/strings.pyi index 52d244b36ccd..b187ce71d25c 100644 --- a/numpy/_core/strings.pyi +++ b/numpy/_core/strings.pyi @@ -54,6 +54,7 @@ __all__ = [ "translate", "upper", "zfill", + "slice", ] _StringDTypeArray: TypeAlias = np.ndarray[_AnyShape, np.dtypes.StringDType] @@ -494,3 +495,17 @@ def translate( table: str, deletechars: str | None = None, ) -> _StringDTypeOrUnicodeArray: ... + +# +@overload +def slice(a: U_co, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, /) -> NDArray[np.str_]: ... # type: ignore[overload-overlap] +@overload +def slice(a: S_co, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, /) -> NDArray[np.bytes_]: ... +@overload +def slice( + a: _StringDTypeSupportsArray, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, / +) -> _StringDTypeArray: ... +@overload +def slice( + a: T_co, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, / +) -> _StringDTypeOrUnicodeArray: ... 
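The four overloads above encode the per-dtype result types: ``str_`` arrays stay ``str_``, ``bytes_`` arrays stay ``bytes_``, and ``StringDType`` arrays keep their dtype. A small sketch of the runtime behaviour these annotations describe (illustrative values; ``np.strings.slice`` itself is assumed to already exist at runtime, since this patch only adds the annotations):

```python
import numpy as np

arr_u = np.array(["hello", "world"])    # '<U5'  -> first overload, str_ result
arr_s = np.array([b"hello", b"world"])  # 'S5'   -> second overload, bytes_ result

# Each element is sliced like a Python string: "hello"[1:4] == "ell"
print(np.strings.slice(arr_u, 1, 4))  # ['ell' 'orl']
print(np.strings.slice(arr_s, 1, 4))  # [b'ell' b'orl']
```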
diff --git a/numpy/strings/__init__.pyi b/numpy/strings/__init__.pyi index 2f924dca0b5d..b2fb363531d4 100644 --- a/numpy/strings/__init__.pyi +++ b/numpy/strings/__init__.pyi @@ -36,6 +36,7 @@ from numpy._core.strings import ( rjust, rpartition, rstrip, + slice, startswith, str_len, strip, @@ -92,4 +93,5 @@ __all__ = [ "decode", "encode", "translate", + "slice", ] diff --git a/numpy/typing/tests/data/reveal/strings.pyi b/numpy/typing/tests/data/reveal/strings.pyi index 746e804ce577..18bd252d5ff9 100644 --- a/numpy/typing/tests/data/reveal/strings.pyi +++ b/numpy/typing/tests/data/reveal/strings.pyi @@ -190,3 +190,7 @@ assert_type(np.strings.str_len(AR_T), npt.NDArray[np.int_]) assert_type(np.strings.translate(AR_U, ""), npt.NDArray[np.str_]) assert_type(np.strings.translate(AR_S, ""), npt.NDArray[np.bytes_]) assert_type(np.strings.translate(AR_T, ""), AR_T_alias) + +assert_type(np.strings.slice(AR_U, 1, 5, 2), npt.NDArray[np.str_]) +assert_type(np.strings.slice(AR_S, 1, 5, 2), npt.NDArray[np.bytes_]) +assert_type(np.strings.slice(AR_T, 1, 5, 2), AR_T_alias) From aaae32e45830a9b3d9c026d5e2d79390ccda50e6 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 25 May 2025 18:48:58 +0200 Subject: [PATCH 0034/1018] TYP: remove expired ``tostring`` methods --- numpy/lib/_user_array_impl.pyi | 4 +--- numpy/ma/core.pyi | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/numpy/lib/_user_array_impl.pyi b/numpy/lib/_user_array_impl.pyi index c1c72b2320f1..13c0a0163421 100644 --- a/numpy/lib/_user_array_impl.pyi +++ b/numpy/lib/_user_array_impl.pyi @@ -2,7 +2,7 @@ from types import EllipsisType from typing import Any, Generic, Self, SupportsIndex, TypeAlias, overload from _typeshed import Incomplete -from typing_extensions import TypeVar, deprecated, override +from typing_extensions import TypeVar, override import numpy as np import numpy.typing as npt @@ -220,8 +220,6 @@ class container(Generic[_ShapeT_co, _DTypeT_co]): # def copy(self, /) -> Self: ... - @deprecated("tostring() is deprecated. Use tobytes() instead.") - def tostring(self, /) -> bytes: ... def tobytes(self, /) -> bytes: ... def byteswap(self, /) -> Self: ... def astype(self, /, typecode: _DTypeLike[_ScalarT]) -> container[_ShapeT_co, np.dtype[_ScalarT]]: ... diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index f457f18d57bd..7e87611037b9 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -5,7 +5,7 @@ from collections.abc import Sequence from typing import Any, Literal, Self, SupportsIndex, TypeAlias, overload from _typeshed import Incomplete -from typing_extensions import TypeIs, TypeVar, deprecated +from typing_extensions import TypeIs, TypeVar import numpy as np from numpy import ( @@ -1057,8 +1057,6 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def toflex(self) -> Incomplete: ... def torecords(self) -> Incomplete: ... def tolist(self, fill_value: Incomplete | None = None) -> Incomplete: ... - @deprecated("tostring() is deprecated. Use tobytes() instead.") - def tostring(self, /, fill_value: Incomplete | None = None, order: _OrderKACF = "C") -> bytes: ... # type: ignore[override] def tobytes(self, /, fill_value: Incomplete | None = None, order: _OrderKACF = "C") -> bytes: ... # type: ignore[override] def tofile(self, /, fid: Incomplete, sep: str = "", format: str = "%s") -> Incomplete: ... 
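With the expired ``tostring`` stubs removed above, ``tobytes`` is the only remaining spelling in the type stubs, matching the runtime. A brief migration sketch (assuming a current NumPy where the deprecated method is gone):

```python
import numpy as np

m = np.ma.MaskedArray([1, 2, 3], mask=[False, True, False])

raw = m.tobytes(fill_value=0)      # previously spelled m.tostring(fill_value=0)
print(len(raw) == 3 * m.itemsize)  # True: one filled element per entry
```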
From 8ba2c63d1f30fc2aece18fcad78597d2a8fa94b6 Mon Sep 17 00:00:00 2001 From: mattip Date: Sun, 25 May 2025 21:11:10 +0300 Subject: [PATCH 0035/1018] use pypy 3.11 nightly which has a fix for ctypeslib [skip azp][skip circleci] --- .github/workflows/linux.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index dc9ef34db71d..742ca5c34144 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -82,7 +82,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: - python-version: 'pypy3.11-v7.3.19' + python-version: 'pypy3.11-nightly' - name: Setup using scipy-openblas run: | python -m pip install -r requirements/ci_requirements.txt From 3d4629c9de79fe95d7f3d0724693c875d91d48f8 Mon Sep 17 00:00:00 2001 From: Zebreus Date: Mon, 26 May 2025 09:24:10 +0200 Subject: [PATCH 0036/1018] BLD: allow targeting webassembly without emscripten --- numpy/_core/include/numpy/npy_cpu.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/_core/include/numpy/npy_cpu.h b/numpy/_core/include/numpy/npy_cpu.h index 4fb3fb406869..72f7331a0267 100644 --- a/numpy/_core/include/numpy/npy_cpu.h +++ b/numpy/_core/include/numpy/npy_cpu.h @@ -111,8 +111,9 @@ #endif #elif defined(__loongarch_lp64) #define NPY_CPU_LOONGARCH64 -#elif defined(__EMSCRIPTEN__) +#elif defined(__EMSCRIPTEN__) || defined(__wasm__) /* __EMSCRIPTEN__ is defined by emscripten: an LLVM-to-Web compiler */ + /* __wasm__ is defined by clang when targeting wasm */ #define NPY_CPU_WASM #else #error Unknown CPU, please report this to numpy maintainers with \ From 8389ff9d1e6cbe7e922592e2f3d27913cb2e2e99 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 26 May 2025 22:19:08 +0200 Subject: [PATCH 0037/1018] TYP: fix invalid overload definition in ``_core.defchararray.add`` --- numpy/_core/defchararray.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 776962c53998..43005745bfab 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -555,7 +555,7 @@ def add(x1: S_co, x2: S_co) -> NDArray[np.bytes_]: ... @overload def add(x1: _StringDTypeSupportsArray, x2: _StringDTypeSupportsArray) -> _StringDTypeArray: ... @overload -def add(x1: T_co, T_co) -> _StringDTypeOrUnicodeArray: ... +def add(x1: T_co, x2: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def multiply(a: U_co, i: i_co) -> NDArray[np.str_]: ... From 4cce8e4dbdf58ed8851d0ddbdf37bca14fe3d9f7 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 26 May 2025 22:27:15 +0200 Subject: [PATCH 0038/1018] TYP: annotate the ``*args`` and ``**kwargs`` of the ``ufunc`` methods --- numpy/_typing/_ufunc.pyi | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi index 766cde1ad420..104307da89db 100644 --- a/numpy/_typing/_ufunc.pyi +++ b/numpy/_typing/_ufunc.pyi @@ -146,10 +146,10 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i /, ) -> None: ... - def reduce(self, *args, **kwargs) -> NoReturn: ... - def accumulate(self, *args, **kwargs) -> NoReturn: ... - def reduceat(self, *args, **kwargs) -> NoReturn: ... - def outer(self, *args, **kwargs) -> NoReturn: ... + def reduce(self, *args: object, **kwargs: object) -> NoReturn: ... 
+ def accumulate(self, *args: object, **kwargs: object) -> NoReturn: ... + def reduceat(self, *args: object, **kwargs: object) -> NoReturn: ... + def outer(self, *args: object, **kwargs: object) -> NoReturn: ... @type_check_only class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] @@ -382,11 +382,11 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i signature: str | _3Tuple[str | None] = ..., ) -> _2Tuple[Any]: ... - def at(self, *args, **kwargs) -> NoReturn: ... - def reduce(self, *args, **kwargs) -> NoReturn: ... - def accumulate(self, *args, **kwargs) -> NoReturn: ... - def reduceat(self, *args, **kwargs) -> NoReturn: ... - def outer(self, *args, **kwargs) -> NoReturn: ... + def at(self, *args: object, **kwargs: object) -> NoReturn: ... + def reduce(self, *args: object, **kwargs: object) -> NoReturn: ... + def accumulate(self, *args: object, **kwargs: object) -> NoReturn: ... + def reduceat(self, *args: object, **kwargs: object) -> NoReturn: ... + def outer(self, *args: object, **kwargs: object) -> NoReturn: ... @type_check_only class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] @@ -439,11 +439,11 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i signature: str | _4Tuple[str | None] = ..., ) -> _2Tuple[NDArray[Any]]: ... - def at(self, *args, **kwargs) -> NoReturn: ... - def reduce(self, *args, **kwargs) -> NoReturn: ... - def accumulate(self, *args, **kwargs) -> NoReturn: ... - def reduceat(self, *args, **kwargs) -> NoReturn: ... - def outer(self, *args, **kwargs) -> NoReturn: ... + def at(self, *args: object, **kwargs: object) -> NoReturn: ... + def reduce(self, *args: object, **kwargs: object) -> NoReturn: ... + def accumulate(self, *args: object, **kwargs: object) -> NoReturn: ... + def reduceat(self, *args: object, **kwargs: object) -> NoReturn: ... + def outer(self, *args: object, **kwargs: object) -> NoReturn: ... @type_check_only class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature]): # type: ignore[misc] @@ -494,11 +494,11 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] axes: list[_2Tuple[SupportsIndex]] = ..., ) -> NDArray[Any]: ... - def at(self, *args, **kwargs) -> NoReturn: ... - def reduce(self, *args, **kwargs) -> NoReturn: ... - def accumulate(self, *args, **kwargs) -> NoReturn: ... - def reduceat(self, *args, **kwargs) -> NoReturn: ... - def outer(self, *args, **kwargs) -> NoReturn: ... + def at(self, *args: object, **kwargs: object) -> NoReturn: ... + def reduce(self, *args: object, **kwargs: object) -> NoReturn: ... + def accumulate(self, *args: object, **kwargs: object) -> NoReturn: ... + def reduceat(self, *args: object, **kwargs: object) -> NoReturn: ... + def outer(self, *args: object, **kwargs: object) -> NoReturn: ... 
@type_check_only class _PyFunc_Kwargs_Nargs2(TypedDict, total=False): From 466f64f961b9bc0845a7d4e69e9186c13b2012c7 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 26 May 2025 22:27:54 +0200 Subject: [PATCH 0039/1018] TYP: annotate the return type of ``numpy.typing.__getattr__`` --- numpy/typing/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index 173c094b40aa..163655bd7662 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -169,7 +169,7 @@ def __dir__() -> list[str]: return __DIR -def __getattr__(name: str): +def __getattr__(name: str) -> object: if name == "NBitBase": import warnings From 1707fa13355d32b487b1f628a8da9925be136581 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 26 May 2025 22:29:09 +0200 Subject: [PATCH 0040/1018] TYP: annotate ``numpy.lib._format_impl`` --- numpy/lib/_format_impl.pyi | 73 +++++++++++++++++++++++++++----------- 1 file changed, 52 insertions(+), 21 deletions(-) diff --git a/numpy/lib/_format_impl.pyi b/numpy/lib/_format_impl.pyi index f4898d9aefa4..870c2d761bb0 100644 --- a/numpy/lib/_format_impl.pyi +++ b/numpy/lib/_format_impl.pyi @@ -1,26 +1,57 @@ -from typing import Final, Literal +import os +from typing import Any, BinaryIO, Final, TypeAlias, TypeGuard -from numpy.lib._utils_impl import drop_metadata # noqa: F401 +from _typeshed import SupportsRead, SupportsWrite + +import numpy as np +import numpy.typing as npt +from numpy.lib._utils_impl import drop_metadata as drop_metadata __all__: list[str] = [] -EXPECTED_KEYS: Final[set[str]] -MAGIC_PREFIX: Final[bytes] -MAGIC_LEN: Literal[8] -ARRAY_ALIGN: Literal[64] -BUFFER_SIZE: Literal[262144] # 2**18 -GROWTH_AXIS_MAX_DIGITS: Literal[21] +_DTypeDescr: TypeAlias = list[tuple[str, str]] | list[tuple[str, str, tuple[int, ...]]] + +### + +EXPECTED_KEYS: Final[set[str]] = ... +MAGIC_PREFIX: Final = b"\x93NUMPY" +MAGIC_LEN: Final = 8 +ARRAY_ALIGN: Final = 64 +BUFFER_SIZE: Final = 262_144 # 1 << 18 +GROWTH_AXIS_MAX_DIGITS: Final = 21 +_MAX_HEADER_SIZE: Final = 10_000 -def magic(major, minor): ... -def read_magic(fp): ... -def dtype_to_descr(dtype): ... -def descr_to_dtype(descr): ... -def header_data_from_array_1_0(array): ... -def write_array_header_1_0(fp, d): ... -def write_array_header_2_0(fp, d): ... -def read_array_header_1_0(fp): ... -def read_array_header_2_0(fp): ... -def write_array(fp, array, version=..., allow_pickle=..., pickle_kwargs=...): ... -def read_array(fp, allow_pickle=..., pickle_kwargs=...): ... -def open_memmap(filename, mode=..., dtype=..., shape=..., fortran_order=..., version=...): ... -def isfileobj(f): ... +def magic(major: int, minor: int) -> bytes: ... +def read_magic(fp: SupportsRead[bytes]) -> tuple[int, int]: ... +def dtype_to_descr(dtype: np.dtype) -> _DTypeDescr: ... +def descr_to_dtype(descr: _DTypeDescr) -> np.dtype: ... +def header_data_from_array_1_0(array: np.ndarray) -> dict[str, Any]: ... +def write_array_header_1_0(fp: SupportsWrite[bytes], d: dict[str, Any]) -> None: ... +def write_array_header_2_0(fp: SupportsWrite[bytes], d: dict[str, Any]) -> None: ... +def read_array_header_1_0(fp: SupportsRead[bytes], max_header_size: int = 10_000) -> tuple[tuple[int, ...], bool, np.dtype]: ... +def read_array_header_2_0(fp: SupportsRead[bytes], max_header_size: int = 10_000) -> tuple[tuple[int, ...], bool, np.dtype]: ... 
+def write_array(
+    fp: SupportsWrite[bytes],
+    array: np.ndarray,
+    version: tuple[int, int] | None = None,
+    allow_pickle: bool = True,
+    pickle_kwargs: dict[str, Any] | None = None,
+) -> None: ...
+def read_array(
+    fp: SupportsRead[bytes],
+    allow_pickle: bool = False,
+    pickle_kwargs: dict[str, Any] | None = None,
+    *,
+    max_header_size: int = 10_000,
+) -> np.ndarray: ...
+def open_memmap(
+    filename: str | os.PathLike[Any],
+    mode: str = "r+",
+    dtype: npt.DTypeLike | None = None,
+    shape: tuple[int, ...] | None = None,
+    fortran_order: bool = False,
+    version: tuple[int, int] | None = None,
+    *,
+    max_header_size: int = 10_000,
+) -> np.memmap: ...
+def isfileobj(f: object) -> TypeGuard[BinaryIO]: ...  # don't use `typing.TypeIs`

From 436ec159fad4aafd99bba4543ae9aa925e64385f Mon Sep 17 00:00:00 2001
From: mattip
Date: Tue, 27 May 2025 07:02:02 +0300
Subject: [PATCH 0041/1018] BLD: use sonoma image on Cirrus for [wheel build][skip actions][skip azp]

---
 tools/ci/cirrus_wheels.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/ci/cirrus_wheels.yml b/tools/ci/cirrus_wheels.yml
index b531e953daee..6d02411df2e9 100644
--- a/tools/ci/cirrus_wheels.yml
+++ b/tools/ci/cirrus_wheels.yml
@@ -11,7 +11,7 @@ macosx_arm64_task:
     CIRRUS_CLONE_SUBMODULES: true
   macos_instance:
     matrix:
-      image: ghcr.io/cirruslabs/macos-monterey-xcode
+      image: ghcr.io/cirruslabs/macos-runner:sonoma

   matrix:
     - env:

From fba2e60fb95891e8e044113afe0a321405f581ea Mon Sep 17 00:00:00 2001
From: Hamza Meel
Date: Tue, 27 May 2025 11:07:56 +0200
Subject: [PATCH 0042/1018] DOC: fix typo in documentation of vecmat

The body of the summary uses the symbol v to reference the `x1`
parameter; however, in the displayed math, b is used instead. This
commit changes b to v in the displayed math for consistency.
---
 numpy/_core/code_generators/ufunc_docstrings.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py
index 5d3ba73c92f0..aa9fe4acea05 100644
--- a/numpy/_core/code_generators/ufunc_docstrings.py
+++ b/numpy/_core/code_generators/ufunc_docstrings.py
@@ -3030,7 +3030,7 @@ def add_newdoc(place, name, doc):
     vector-matrix product is defined as:

    ..
math:: - \\mathbf{b} \\cdot \\mathbf{A} = \\sum_{i=0}^{n-1} \\overline{v_i}A_{ij} + \\mathbf{v} \\cdot \\mathbf{A} = \\sum_{i=0}^{n-1} \\overline{v_i}A_{ij} where the sum is over the last dimension of ``x1`` and the one-but-last dimensions in ``x2`` (unless `axes` is specified) and where From 39a422230997bfb282503b74b39a26bc75be9a85 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Tue, 27 May 2025 11:30:44 +0200 Subject: [PATCH 0043/1018] MAINT: Enforce ruff E501 --- benchmarks/benchmarks/bench_core.py | 3 ++- benchmarks/benchmarks/bench_function_base.py | 3 ++- benchmarks/benchmarks/bench_ufunc_strides.py | 5 +++-- numpy/_typing/_dtype_like.py | 2 +- numpy/tests/test_public_api.py | 2 +- ruff.toml | 9 ++++----- 6 files changed, 13 insertions(+), 11 deletions(-) diff --git a/benchmarks/benchmarks/bench_core.py b/benchmarks/benchmarks/bench_core.py index 5e174704f105..a9a6c88b87a0 100644 --- a/benchmarks/benchmarks/bench_core.py +++ b/benchmarks/benchmarks/bench_core.py @@ -151,7 +151,8 @@ class CountNonzero(Benchmark): params = [ [1, 2, 3], [100, 10000, 1000000], - [bool, np.int8, np.int16, np.int32, np.int64, np.float32, np.float64, str, object] + [bool, np.int8, np.int16, np.int32, np.int64, np.float32, + np.float64, str, object] ] def setup(self, numaxes, size, dtype): diff --git a/benchmarks/benchmarks/bench_function_base.py b/benchmarks/benchmarks/bench_function_base.py index 57499dc761f8..9b770aeb60bf 100644 --- a/benchmarks/benchmarks/bench_function_base.py +++ b/benchmarks/benchmarks/bench_function_base.py @@ -241,7 +241,8 @@ class Sort(Benchmark): def setup(self, kind, dtype, array_type): rnd = np.random.RandomState(507582308) array_class = array_type[0] - self.arr = getattr(SortGenerator, array_class)(self.ARRAY_SIZE, dtype, *array_type[1:], rnd) + generate_array_method = getattr(SortGenerator, array_class) + self.arr = generate_array_method(self.ARRAY_SIZE, dtype, *array_type[1:], rnd) def time_sort(self, kind, dtype, array_type): # Using np.sort(...) instead of arr.sort(...) because it makes a copy. diff --git a/benchmarks/benchmarks/bench_ufunc_strides.py b/benchmarks/benchmarks/bench_ufunc_strides.py index b86be87f9e68..95df16e2cb5e 100644 --- a/benchmarks/benchmarks/bench_ufunc_strides.py +++ b/benchmarks/benchmarks/bench_ufunc_strides.py @@ -208,8 +208,9 @@ def train(self, max_epoch): for epoch in range(max_epoch): z = np.matmul(self.X_train, self.W) A = 1 / (1 + np.exp(-z)) # sigmoid(z) - loss = -np.mean(self.Y_train * np.log(A) + (1 - self.Y_train) * np.log(1 - A)) - dz = A - self.Y_train + Y_train = self.Y_train + loss = -np.mean(Y_train * np.log(A) + (1 - Y_train) * np.log(1 - A)) + dz = A - Y_train dw = (1 / self.size) * np.matmul(self.X_train.T, dz) self.W = self.W - self.alpha * dw diff --git a/numpy/_typing/_dtype_like.py b/numpy/_typing/_dtype_like.py index c406b3098384..d341db5dc23a 100644 --- a/numpy/_typing/_dtype_like.py +++ b/numpy/_typing/_dtype_like.py @@ -55,7 +55,7 @@ def dtype(self) -> _DTypeT_co: ... # A subset of `npt.DTypeLike` that can be parametrized w.r.t. 
`np.generic` -_DTypeLike: TypeAlias = type[_ScalarT] | np.dtype[_ScalarT] | _SupportsDType[np.dtype[_ScalarT]] +_DTypeLike: TypeAlias = type[_ScalarT] | np.dtype[_ScalarT] | _SupportsDType[np.dtype[_ScalarT]] # noqa: E501 # Would create a dtype[np.void] diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index a56cd13296e3..6a36358c3a06 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -780,7 +780,7 @@ def test___qualname___and___module___attribute(): inspect.ismodule(member) and # it's a module "numpy" in member.__name__ and # inside NumPy not member_name.startswith("_") and # not private - member_name not in {"tests", "typing"} and # 2024-12: type names don't match + member_name not in {"tests", "typing"} and # type names don't match "numpy._core" not in member.__name__ and # outside _core member not in visited_modules # not visited yet ): diff --git a/ruff.toml b/ruff.toml index 6b05d8de69ee..c0008da43226 100644 --- a/ruff.toml +++ b/ruff.toml @@ -68,16 +68,15 @@ ignore = [ "_tempita.py" = ["B909"] "bench_*.py" = ["B015", "B018"] "test*.py" = ["B015", "B018", "E201", "E714"] -"benchmarks/*py" = ["E501"] +"benchmarks/benchmarks/bench_linalg.py" = ["E501"] "numpy/_core/tests/**" = ["E501"] "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] "numpy/_core/code_generators/generate_umath.py" = ["E501"] -"numpy/_typing/*py" = ["E501"] +"numpy/_typing/_array_like.py" = ["E501"] "numpy/lib/tests/*py" = ["E501"] -"numpy/linalg/tests/*py" = ["E501"] -"numpy/ma/tests/*py" = ["E501"] -"numpy/tests/*py" = ["E501"] +"numpy/linalg/tests/test_linalg.py" = ["E501"] +"numpy/tests/test_configtool.py" = ["E501"] "numpy*pyi" = ["E501"] "numpy/f2py/*py" = ["E501"] "__init__.py" = ["F401", "F403", "F405"] From f8b37c123485032260319f8f79ede1d6ed7e7694 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Tue, 27 May 2025 12:13:12 +0200 Subject: [PATCH 0044/1018] enforce more files --- numpy/lib/tests/test_format.py | 3 --- numpy/lib/tests/test_histograms.py | 8 ++++---- numpy/lib/tests/test_recfunctions.py | 5 ++--- ruff.toml | 5 ++++- 4 files changed, 10 insertions(+), 11 deletions(-) diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index d805d3493ca4..2ab7026ccc7c 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -384,9 +384,6 @@ ('z', 'u1')] NbufferT = [ - # x Info color info y z - # value y2 Info2 name z2 Name Value - # name value y3 z3 ([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 'NN', True), 'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8), ([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False), diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py index b7752d1a8f1e..be5268d9813a 100644 --- a/numpy/lib/tests/test_histograms.py +++ b/numpy/lib/tests/test_histograms.py @@ -554,7 +554,7 @@ def test_outlier(self): assert_equal(len(a), numbins) def test_scott_vs_stone(self): - """Verify that Scott's rule and Stone's rule converges for normally distributed data""" + """Verify that Scott's rule and Stone's rule converges for normally distributed data""" # noqa: E501 def nbins_ratio(seed, size): rng = np.random.RandomState(seed) @@ -562,10 +562,10 @@ def nbins_ratio(seed, size): a, b = len(np.histogram(x, 'stone')[0]), len(np.histogram(x, 'scott')[0]) return a / (a + b) - ll = [[nbins_ratio(seed, size) for size in np.geomspace(start=10, stop=100, num=4).round().astype(int)] - for seed in range(10)] + geom_space 
= np.geomspace(start=10, stop=100, num=4).round().astype(int) + ll = [[nbins_ratio(seed, size) for size in geom_space] for seed in range(10)] - # the average difference between the two methods decreases as the dataset size increases. + # the average difference between the two methods decreases as the dataset size increases. # noqa: E501 avg = abs(np.mean(ll, axis=0) - 0.5) assert_almost_equal(avg, [0.15, 0.09, 0.08, 0.03], decimal=2) diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py index eee1f47f834f..72377b8f7c35 100644 --- a/numpy/lib/tests/test_recfunctions.py +++ b/numpy/lib/tests/test_recfunctions.py @@ -524,9 +524,8 @@ def test_flatten_wflexible(self): assert_equal(test, control) test = merge_arrays((x, w), flatten=False) - controldtype = [('f0', int), - ('f1', [('a', int), - ('b', [('ba', float), ('bb', int), ('bc', [])])])] + f1_descr = [('a', int), ('b', [('ba', float), ('bb', int), ('bc', [])])] + controldtype = [('f0', int), ('f1', f1_descr)] control = np.array([(1., (1, (2, 3.0, ()))), (2, (4, (5, 6.0, ())))], dtype=controldtype) assert_equal(test, control) diff --git a/ruff.toml b/ruff.toml index c0008da43226..7ea90ee57b69 100644 --- a/ruff.toml +++ b/ruff.toml @@ -74,7 +74,10 @@ ignore = [ "numpy/_core/_add_newdocs_scalars.py" = ["E501"] "numpy/_core/code_generators/generate_umath.py" = ["E501"] "numpy/_typing/_array_like.py" = ["E501"] -"numpy/lib/tests/*py" = ["E501"] +"numpy/lib/tests/test_function_base.py" = ["E501"] +"numpy/lib/tests/test_format.py" = ["E501"] +"numpy/lib/tests/test_io.py" = ["E501"] +"numpy/lib/tests/test_polynomial.py" = ["E501"] "numpy/linalg/tests/test_linalg.py" = ["E501"] "numpy/tests/test_configtool.py" = ["E501"] "numpy*pyi" = ["E501"] From 9ea773dec671da1c08068250d64813098d55bb3b Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Tue, 27 May 2025 12:24:05 +0200 Subject: [PATCH 0045/1018] more enforcement --- numpy/_core/tests/test_cython.py | 3 ++- numpy/_core/tests/test_function_base.py | 3 ++- numpy/_core/tests/test_half.py | 2 +- numpy/_core/tests/test_indexing.py | 2 +- numpy/_core/tests/test_mem_overlap.py | 4 ++-- numpy/_core/tests/test_scalarbuffer.py | 4 ++-- numpy/_core/tests/test_unicode.py | 3 ++- ruff.toml | 19 ++++++++++++++++++- 8 files changed, 30 insertions(+), 10 deletions(-) diff --git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py index fb3839fd2685..2c7b40c5614c 100644 --- a/numpy/_core/tests/test_cython.py +++ b/numpy/_core/tests/test_cython.py @@ -345,7 +345,8 @@ def test_npystring_allocators_other_dtype(install_temp): assert checks.npystring_allocators_other_types(arr1, arr2) == 0 -@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', reason='no checks module on win-arm64') +@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', + reason='no checks module on win-arm64') def test_npy_uintp_type_enum(): import checks assert checks.check_npy_uintp_type_enum() diff --git a/numpy/_core/tests/test_function_base.py b/numpy/_core/tests/test_function_base.py index c925cf1f77e5..3a8552de2d36 100644 --- a/numpy/_core/tests/test_function_base.py +++ b/numpy/_core/tests/test_function_base.py @@ -32,7 +32,8 @@ def _is_armhf(): # Check if the current platform is ARMHF (32-bit ARM architecture) - return platform.machine().startswith('arm') and platform.architecture()[0] == '32bit' + architecture = platform.architecture() + return platform.machine().startswith('arm') and architecture[0] == '32bit' class PhysicalQuantity(float): def __new__(cls, value): diff --git 
a/numpy/_core/tests/test_half.py b/numpy/_core/tests/test_half.py index 68f17b2a5e14..e2d6e6796db4 100644 --- a/numpy/_core/tests/test_half.py +++ b/numpy/_core/tests/test_half.py @@ -531,7 +531,7 @@ def test_half_fpe(self): assert_raises_fpe('overflow', lambda a, b: a - b, float16(-65504), float16(17)) assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf)) - assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf)) + assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf)) # noqa: E501 assert_raises_fpe('overflow', np.spacing, float16(65504)) # Invalid value errors diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index e722d0c1a9df..1d42cde48682 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -1352,7 +1352,7 @@ def test_boolean_indexing_fast_path(self): "size of axis is 3 but size of corresponding boolean axis is 1", lambda: a[idx1]) - # This used to incorrectly give a ValueError: operands could not be broadcast together + # This used to incorrectly give a ValueError: operands could not be broadcast together # noqa: E501 idx2 = np.array([[False] * 8 + [True]]) assert_raises_regex(IndexError, "boolean index did not match indexed array along axis 0; " diff --git a/numpy/_core/tests/test_mem_overlap.py b/numpy/_core/tests/test_mem_overlap.py index d1735670ad6b..b437a7e14298 100644 --- a/numpy/_core/tests/test_mem_overlap.py +++ b/numpy/_core/tests/test_mem_overlap.py @@ -166,7 +166,7 @@ def check_may_share_memory_exact(a, b): err_msg = "" if got != exact: err_msg = " " + "\n ".join([ - f"base_a - base_b = {a.__array_interface__['data'][0] - b.__array_interface__['data'][0]!r}", + f"base_a - base_b = {a.__array_interface__['data'][0] - b.__array_interface__['data'][0]!r}", # noqa: E501 f"shape_a = {a.shape!r}", f"shape_b = {b.shape!r}", f"strides_a = {a.strides!r}", @@ -402,7 +402,7 @@ def check(A, U, exists=None): exists = (X is not None) if X is not None: - assert_(sum(a * x for a, x in zip(A, X)) == sum(a * u // 2 for a, u in zip(A, U))) + assert_(sum(a * x for a, x in zip(A, X)) == sum(a * u // 2 for a, u in zip(A, U))) # noqa: E501 assert_(all(0 <= x <= u for x, u in zip(X, U))) assert_(any(x != u // 2 for x, u in zip(X, U))) diff --git a/numpy/_core/tests/test_scalarbuffer.py b/numpy/_core/tests/test_scalarbuffer.py index 4d6b5bdd73fc..c957aec4f9b2 100644 --- a/numpy/_core/tests/test_scalarbuffer.py +++ b/numpy/_core/tests/test_scalarbuffer.py @@ -128,8 +128,8 @@ def test_str_ucs4(self, s): s = np.str_(s) # only our subclass implements the buffer protocol # all the same, characters always encode as ucs4 - expected = {'strides': (), 'itemsize': 8, 'ndim': 0, 'shape': (), 'format': '2w', - 'readonly': True} + expected = {'strides': (), 'itemsize': 8, 'ndim': 0, 'shape': (), + 'format': '2w', 'readonly': True} v = memoryview(s) assert self._as_dict(v) == expected diff --git a/numpy/_core/tests/test_unicode.py b/numpy/_core/tests/test_unicode.py index 9fdc55b0e322..6a86503a35ae 100644 --- a/numpy/_core/tests/test_unicode.py +++ b/numpy/_core/tests/test_unicode.py @@ -134,7 +134,8 @@ def test_valuesSD(self): def test_valuesMD(self): # Check creation of multi-dimensional objects with values - ua = np.array([[[self.ucs_value * self.ulen] * 2] * 3] * 4, dtype=f'U{self.ulen}') + data = [[[self.ucs_value * self.ulen] * 2] * 3] * 4 + ua = np.array(data, dtype=f'U{self.ulen}') self.content_check(ua, ua[0, 0, 0], 4 * self.ulen * 2 * 3 * 4) 
self.content_check(ua, ua[-1, -1, -1], 4 * self.ulen * 2 * 3 * 4) diff --git a/ruff.toml b/ruff.toml index 7ea90ee57b69..b4190fe41787 100644 --- a/ruff.toml +++ b/ruff.toml @@ -69,7 +69,24 @@ ignore = [ "bench_*.py" = ["B015", "B018"] "test*.py" = ["B015", "B018", "E201", "E714"] "benchmarks/benchmarks/bench_linalg.py" = ["E501"] -"numpy/_core/tests/**" = ["E501"] +"numpy/_core/tests/test_api.py" = ["E501"] +"numpy/_core/tests/test_arrayprint.py" = ["E501"] +"numpy/_core/tests/test_cpu_dispatcher.py" = ["E501"] +"numpy/_core/tests/test_cpu_features.py" = ["E501"] +"numpy/_core/tests/test_datetime.py" = ["E501"] +"numpy/_core/tests/test_dtype.py" = ["E501"] +"numpy/_core/tests/test_defchararray.py" = ["E501"] +"numpy/_core/tests/test_einsum.py" = ["E501"] +"numpy/_core/tests/test_multiarray.py" = ["E501"] +"numpy/_core/tests/test_multithreading.py" = ["E501"] +"numpy/_core/tests/test_nditer*py" = ["E501"] +"numpy/_core/tests/test_ufunc*py" = ["E501"] +"numpy/_core/tests/test_umath*py" = ["E501"] +"numpy/_core/tests/test_numeric*.py" = ["E501"] +"numpy/_core/tests/test_regression.py" = ["E501"] +"numpy/_core/tests/test_shape_base.py" = ["E501"] +"numpy/_core/tests/test_simd*.py" = ["E501"] +"numpy/_core/tests/test_strings.py" = ["E501"] "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] "numpy/_core/code_generators/generate_umath.py" = ["E501"] From f84d9645584d85be1bdd688927f53792b424959d Mon Sep 17 00:00:00 2001 From: Hamza Meel Date: Tue, 27 May 2025 12:47:50 +0200 Subject: [PATCH 0046/1018] DOC: fix typo in documentation of matvec Same as for commit fba2e60fb9, the vector was referred to as v in the body of the summary but b in the displayed math. This commit fixes the inconsistency. --- numpy/_core/code_generators/ufunc_docstrings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index aa9fe4acea05..ddae87bd6012 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -2963,7 +2963,7 @@ def add_newdoc(place, name, doc): matrix-vector product is defined as: .. math:: - \\mathbf{A} \\cdot \\mathbf{b} = \\sum_{j=0}^{n-1} A_{ij} v_j + \\mathbf{A} \\cdot \\mathbf{v} = \\sum_{j=0}^{n-1} A_{ij} v_j where the sum is over the last dimensions in ``x1`` and ``x2`` (unless ``axes`` is specified). 
(For a matrix-vector product with the From 7eacb9efdb67733081eb3308cbe149f24800f3dc Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli Date: Tue, 27 May 2025 16:38:13 +0100 Subject: [PATCH 0047/1018] Merge pull request #29063 from MarcoGorelli/align-maskedarray-with-ndarray --- numpy/typing/tests/data/fail/ma.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/typing/tests/data/fail/ma.pyi b/numpy/typing/tests/data/fail/ma.pyi index 41306b23fe78..5dc6706ebf81 100644 --- a/numpy/typing/tests/data/fail/ma.pyi +++ b/numpy/typing/tests/data/fail/ma.pyi @@ -2,10 +2,10 @@ from typing import TypeAlias, TypeVar import numpy as np import numpy.typing as npt -from numpy._typing import _Shape +from numpy._typing import _AnyShape _ScalarT = TypeVar("_ScalarT", bound=np.generic) -MaskedArray: TypeAlias = np.ma.MaskedArray[_Shape, np.dtype[_ScalarT]] +MaskedArray: TypeAlias = np.ma.MaskedArray[_AnyShape, np.dtype[_ScalarT]] MAR_1d_f8: np.ma.MaskedArray[tuple[int], np.dtype[np.float64]] MAR_b: MaskedArray[np.bool] From ba27d95903ba9d3d201c051ce9c534380b5b3f4d Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 27 May 2025 12:16:06 -0600 Subject: [PATCH 0048/1018] BUG: add bounds-checking to in-place string multiply (#29060) * BUG: add bounds-checking to in-place string multiply * MNT: check for overflow and raise OverflowError * MNT: respond to review suggestion * MNT: handle overflow in one more spot * MNT: make test behave the same on all architectures * MNT: reorder to avoid work in some cases --- doc/release/upcoming_changes/29060.change.rst | 3 ++ numpy/_core/src/umath/string_buffer.h | 12 +++++ numpy/_core/src/umath/string_ufuncs.cpp | 48 ++++++++++++++----- numpy/_core/src/umath/stringdtype_ufuncs.cpp | 12 ++--- numpy/_core/strings.py | 2 +- numpy/_core/tests/test_stringdtype.py | 4 +- numpy/_core/tests/test_strings.py | 13 ++++- numpy/typing/tests/data/pass/ma.py | 3 +- 8 files changed, 74 insertions(+), 23 deletions(-) create mode 100644 doc/release/upcoming_changes/29060.change.rst diff --git a/doc/release/upcoming_changes/29060.change.rst b/doc/release/upcoming_changes/29060.change.rst new file mode 100644 index 000000000000..1561da7bf94e --- /dev/null +++ b/doc/release/upcoming_changes/29060.change.rst @@ -0,0 +1,3 @@ +* Multiplication between a string and integer now raises OverflowError instead + of MemoryError if the result of the multiplication would create a string that + is too large to be represented. This follows Python's behavior. 
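A behavioural sketch of the change described in that release note (requires this patch; ``dtype="T"`` denotes the variable-width ``StringDType``):

```python
import sys
import numpy as np

arr = np.array(["abc"], dtype="T")  # StringDType array
print(arr * 2)                      # ['abcabc'] -- ordinary multiplies still work

try:
    arr * (sys.maxsize + 1)         # result would be unrepresentably large
except OverflowError as exc:        # previously raised MemoryError
    print("OverflowError:", exc)
```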
diff --git a/numpy/_core/src/umath/string_buffer.h b/numpy/_core/src/umath/string_buffer.h index 554f9ece5197..dafedcbc03ff 100644 --- a/numpy/_core/src/umath/string_buffer.h +++ b/numpy/_core/src/umath/string_buffer.h @@ -297,6 +297,18 @@ struct Buffer { return num_codepoints; } + inline size_t + buffer_width() + { + switch (enc) { + case ENCODING::ASCII: + case ENCODING::UTF8: + return after - buf; + case ENCODING::UTF32: + return (after - buf) / sizeof(npy_ucs4); + } + } + inline Buffer& operator+=(npy_int64 rhs) { diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index ffd757815364..95f30ccb109e 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -15,6 +15,7 @@ #include "dtypemeta.h" #include "convert_datatype.h" #include "gil_utils.h" +#include "templ_common.h" /* for npy_mul_size_with_overflow_size_t */ #include "string_ufuncs.h" #include "string_fastsearch.h" @@ -166,26 +167,44 @@ string_add(Buffer buf1, Buffer buf2, Buffer out) template -static inline void +static inline int string_multiply(Buffer buf1, npy_int64 reps, Buffer out) { size_t len1 = buf1.num_codepoints(); if (reps < 1 || len1 == 0) { out.buffer_fill_with_zeros_after_index(0); - return; + return 0; } if (len1 == 1) { out.buffer_memset(*buf1, reps); out.buffer_fill_with_zeros_after_index(reps); + return 0; } - else { - for (npy_int64 i = 0; i < reps; i++) { - buf1.buffer_memcpy(out, len1); - out += len1; - } - out.buffer_fill_with_zeros_after_index(0); + + size_t newlen; + if (NPY_UNLIKELY(npy_mul_with_overflow_size_t(&newlen, reps, len1) != 0) || newlen > PY_SSIZE_T_MAX) { + return -1; + } + + size_t pad = 0; + size_t width = out.buffer_width(); + if (width < newlen) { + reps = width / len1; + pad = width % len1; } + + for (npy_int64 i = 0; i < reps; i++) { + buf1.buffer_memcpy(out, len1); + out += len1; + } + + buf1.buffer_memcpy(out, pad); + out += pad; + + out.buffer_fill_with_zeros_after_index(0); + + return 0; } @@ -238,7 +257,9 @@ string_multiply_strint_loop(PyArrayMethod_Context *context, while (N--) { Buffer buf(in1, elsize); Buffer outbuf(out, outsize); - string_multiply(buf, *(npy_int64 *)in2, outbuf); + if (NPY_UNLIKELY(string_multiply(buf, *(npy_int64 *)in2, outbuf) < 0)) { + npy_gil_error(PyExc_OverflowError, "Overflow detected in string multiply"); + } in1 += strides[0]; in2 += strides[1]; @@ -267,7 +288,9 @@ string_multiply_intstr_loop(PyArrayMethod_Context *context, while (N--) { Buffer buf(in2, elsize); Buffer outbuf(out, outsize); - string_multiply(buf, *(npy_int64 *)in1, outbuf); + if (NPY_UNLIKELY(string_multiply(buf, *(npy_int64 *)in1, outbuf) < 0)) { + npy_gil_error(PyExc_OverflowError, "Overflow detected in string multiply"); + } in1 += strides[0]; in2 += strides[1]; @@ -752,10 +775,11 @@ string_multiply_resolve_descriptors( if (given_descrs[2] == NULL) { PyErr_SetString( PyExc_TypeError, - "The 'out' kwarg is necessary. Use numpy.strings.multiply without it."); + "The 'out' kwarg is necessary when using the string multiply ufunc " + "directly. 
Use numpy.strings.multiply to multiply strings without " + "specifying 'out'."); return _NPY_ERROR_OCCURRED_IN_CAST; } - loop_descrs[0] = NPY_DT_CALL_ensure_canonical(given_descrs[0]); if (loop_descrs[0] == NULL) { return _NPY_ERROR_OCCURRED_IN_CAST; diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index adcbfd3b7480..b0181d4186c9 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -137,9 +137,9 @@ static int multiply_loop_core( size_t newsize; int overflowed = npy_mul_with_overflow_size_t( &newsize, cursize, factor); - if (overflowed) { - npy_gil_error(PyExc_MemoryError, - "Failed to allocate string in string multiply"); + if (overflowed || newsize > PY_SSIZE_T_MAX) { + npy_gil_error(PyExc_OverflowError, + "Overflow encountered in string multiply"); goto fail; } @@ -1748,9 +1748,9 @@ center_ljust_rjust_strided_loop(PyArrayMethod_Context *context, width - num_codepoints); newsize += s1.size; - if (overflowed) { - npy_gil_error(PyExc_MemoryError, - "Failed to allocate string in %s", ufunc_name); + if (overflowed || newsize > PY_SSIZE_T_MAX) { + npy_gil_error(PyExc_OverflowError, + "Overflow encountered in %s", ufunc_name); goto fail; } diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index fc0a2d0b4d1a..b4dc1656024f 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -218,7 +218,7 @@ def multiply(a, i): # Ensure we can do a_len * i without overflow. if np.any(a_len > sys.maxsize / np.maximum(i, 1)): - raise MemoryError("repeated string is too long") + raise OverflowError("Overflow encountered in string multiply") buffersizes = a_len * i out_dtype = f"{a.dtype.char}{buffersizes.max()}" diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 1c15c4895eaf..9bab810d4421 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -128,8 +128,8 @@ def test_null_roundtripping(): def test_string_too_large_error(): arr = np.array(["a", "b", "c"], dtype=StringDType()) - with pytest.raises(MemoryError): - arr * (2**63 - 2) + with pytest.raises(OverflowError): + arr * (sys.maxsize + 1) @pytest.mark.parametrize( diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index f6de208d7951..56e928df4d7b 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -224,9 +224,20 @@ def test_multiply_raises(self, dt): with pytest.raises(TypeError, match="unsupported type"): np.strings.multiply(np.array("abc", dtype=dt), 3.14) - with pytest.raises(MemoryError): + with pytest.raises(OverflowError): np.strings.multiply(np.array("abc", dtype=dt), sys.maxsize) + def test_inplace_multiply(self, dt): + arr = np.array(['foo ', 'bar'], dtype=dt) + arr *= 2 + if dt != "T": + assert_array_equal(arr, np.array(['foo ', 'barb'], dtype=dt)) + else: + assert_array_equal(arr, ['foo foo ', 'barbar']) + + with pytest.raises(OverflowError): + arr *= sys.maxsize + @pytest.mark.parametrize("i_dt", [np.int8, np.int16, np.int32, np.int64, np.int_]) def test_multiply_integer_dtypes(self, i_dt, dt): diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py index dc9474fe4069..e7915a583210 100644 --- a/numpy/typing/tests/data/pass/ma.py +++ b/numpy/typing/tests/data/pass/ma.py @@ -16,7 +16,8 @@ MAR_M_dt64: MaskedArray[np.datetime64] = np.ma.MaskedArray([np.datetime64(1, "D")]) MAR_S: MaskedArray[np.bytes_] = np.ma.MaskedArray([b'foo'], 
dtype=np.bytes_) MAR_U: MaskedArray[np.str_] = np.ma.MaskedArray(['foo'], dtype=np.str_) -MAR_T = cast(np.ma.MaskedArray[Any, np.dtypes.StringDType], np.ma.MaskedArray(["a"], "T")) +MAR_T = cast(np.ma.MaskedArray[Any, np.dtypes.StringDType], + np.ma.MaskedArray(["a"], dtype="T")) AR_b: npt.NDArray[np.bool] = np.array([True, False, True]) From 5a8775642acfa24907dd52684c4be26f1ad3e852 Mon Sep 17 00:00:00 2001 From: Adam Turner <9087854+AA-Turner@users.noreply.github.com> Date: Wed, 28 May 2025 12:01:46 +0100 Subject: [PATCH 0049/1018] MAINT: Convert umath_linalg to multi-phase init (PEP 489) --- .../upcoming_changes/29030.compatibility.rst | 6 ++ numpy/linalg/umath_linalg.cpp | 71 ++++++++++++------- numpy/tests/test_lazyloading.py | 36 +++++----- numpy/tests/test_reloading.py | 35 +++++---- 4 files changed, 91 insertions(+), 57 deletions(-) create mode 100644 doc/release/upcoming_changes/29030.compatibility.rst diff --git a/doc/release/upcoming_changes/29030.compatibility.rst b/doc/release/upcoming_changes/29030.compatibility.rst new file mode 100644 index 000000000000..cf08551e28ee --- /dev/null +++ b/doc/release/upcoming_changes/29030.compatibility.rst @@ -0,0 +1,6 @@ +* NumPy's C extension modules have begun to use multi-phase initialisation, + as defined by :pep:`489`. As part of this, a new explicit check has been added + that each such module is only imported once per Python process. This comes with + the side-effect that deleting ``numpy`` from ``sys.modules`` and re-importing + it will now fail with an ``ImportError``. This has always been unsafe, with + unexpected side-effects, though did not previously raise an error. diff --git a/numpy/linalg/umath_linalg.cpp b/numpy/linalg/umath_linalg.cpp index ead6d84a73a2..1b6850145bc8 100644 --- a/numpy/linalg/umath_linalg.cpp +++ b/numpy/linalg/umath_linalg.cpp @@ -4688,57 +4688,54 @@ static PyMethodDef UMath_LinAlgMethods[] = { {NULL, NULL, 0, NULL} /* Sentinel */ }; -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - UMATH_LINALG_MODULE_NAME, - NULL, - -1, - UMath_LinAlgMethods, - NULL, - NULL, - NULL, - NULL -}; +static int module_loaded = 0; -PyMODINIT_FUNC PyInit__umath_linalg(void) +static int +_umath_linalg_exec(PyObject *m) { - PyObject *m; PyObject *d; PyObject *version; - m = PyModule_Create(&moduledef); - if (m == NULL) { - return NULL; + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); + return -1; } + module_loaded = 1; - import_array(); - import_ufunc(); + if (PyArray_ImportNumPyAPI() < 0) { + return -1; + } + if (PyUFunc_ImportUFuncAPI() < 0) { + return -1; + } d = PyModule_GetDict(m); if (d == NULL) { - return NULL; + return -1; } version = PyUnicode_FromString(umath_linalg_version_string); if (version == NULL) { - return NULL; + return -1; } int ret = PyDict_SetItemString(d, "__version__", version); Py_DECREF(version); if (ret < 0) { - return NULL; + return -1; } /* Load the ufunc operators into the module's namespace */ if (addUfuncs(d) < 0) { - return NULL; + return -1; } #if PY_VERSION_HEX < 0x30d00b3 && !HAVE_EXTERNAL_LAPACK lapack_lite_lock = PyThread_allocate_lock(); if (lapack_lite_lock == NULL) { PyErr_NoMemory(); - return NULL; + return -1; } #endif @@ -4748,10 +4745,30 @@ PyMODINIT_FUNC PyInit__umath_linalg(void) PyDict_SetItemString(d, "_ilp64", Py_False); #endif -#if Py_GIL_DISABLED - // signal this module supports running 
with the GIL disabled - PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); + return 0; +} + +static struct PyModuleDef_Slot _umath_linalg_slots[] = { + {Py_mod_exec, (void*)_umath_linalg_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, #endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, +#endif + {0, NULL}, +}; + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, /* m_base */ + "_umath_linalg", /* m_name */ + NULL, /* m_doc */ + 0, /* m_size */ + UMath_LinAlgMethods, /* m_methods */ + _umath_linalg_slots, /* m_slots */ +}; - return m; +PyMODINIT_FUNC PyInit__umath_linalg(void) { + return PyModuleDef_Init(&moduledef); } diff --git a/numpy/tests/test_lazyloading.py b/numpy/tests/test_lazyloading.py index 5f6233f1c5cb..7b0324802611 100644 --- a/numpy/tests/test_lazyloading.py +++ b/numpy/tests/test_lazyloading.py @@ -1,23 +1,23 @@ +import subprocess import sys -from importlib.util import LazyLoader, find_spec, module_from_spec +import textwrap import pytest +from numpy.testing import IS_WASM -# Warning raised by _reload_guard() in numpy/__init__.py -@pytest.mark.filterwarnings("ignore:The NumPy module was reloaded") + +@pytest.mark.skipif(IS_WASM, reason="can't start subprocess") def test_lazy_load(): # gh-22045. lazyload doesn't import submodule names into the namespace - # muck with sys.modules to test the importing system - old_numpy = sys.modules.pop("numpy") - numpy_modules = {} - for mod_name, mod in list(sys.modules.items()): - if mod_name[:6] == "numpy.": - numpy_modules[mod_name] = mod - sys.modules.pop(mod_name) + # Test within a new process, to ensure that we do not mess with the + # global state during the test run (could lead to cryptic test failures). + # This is generally unsafe, especially, since we also reload the C-modules. + code = textwrap.dedent(r""" + import sys + from importlib.util import LazyLoader, find_spec, module_from_spec - try: # create lazy load of numpy as np spec = find_spec("numpy") module = module_from_spec(spec) @@ -31,8 +31,12 @@ def test_lazy_load(): # test triggering the import of the package np.ndarray - - finally: - if old_numpy: - sys.modules["numpy"] = old_numpy - sys.modules.update(numpy_modules) + """) + p = subprocess.run( + (sys.executable, '-c', code), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + encoding='utf-8', + check=False, + ) + assert p.returncode == 0, p.stdout diff --git a/numpy/tests/test_reloading.py b/numpy/tests/test_reloading.py index c21dc007b232..3e6ded326941 100644 --- a/numpy/tests/test_reloading.py +++ b/numpy/tests/test_reloading.py @@ -48,27 +48,34 @@ def test_novalue(): @pytest.mark.skipif(IS_WASM, reason="can't start subprocess") def test_full_reimport(): - """At the time of writing this, it is *not* truly supported, but - apparently enough users rely on it, for it to be an annoying change - when it started failing previously. - """ + # Reimporting numpy like this is not safe due to use of global C state, + # and has unexpected side effects. Test that an ImportError is raised. + # When all extension modules are isolated, this should test that clearing + # sys.modules and reimporting numpy works without error. + # Test within a new process, to ensure that we do not mess with the # global state during the test run (could lead to cryptic test failures). 
# This is generally unsafe, especially, since we also reload the C-modules. code = textwrap.dedent(r""" import sys - from pytest import warns import numpy as np - for k in list(sys.modules.keys()): - if "numpy" in k: - del sys.modules[k] + for k in [k for k in sys.modules if k.startswith('numpy')]: + del sys.modules[k] - with warns(UserWarning): + try: import numpy as np + except ImportError as err: + if str(err) != "cannot load module more than once per process": + raise SystemExit(f"Unexpected ImportError: {err}") + else: + raise SystemExit("DID NOT RAISE ImportError") """) - p = subprocess.run([sys.executable, '-c', code], capture_output=True) - if p.returncode: - raise AssertionError( - f"Non-zero return code: {p.returncode!r}\n\n{p.stderr.decode()}" - ) + p = subprocess.run( + (sys.executable, '-c', code), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + encoding='utf-8', + check=False, + ) + assert p.returncode == 0, p.stdout From 7ae80cd29f1ff734d7f42038ca89cb27d6a81a62 Mon Sep 17 00:00:00 2001 From: Adam Turner <9087854+AA-Turner@users.noreply.github.com> Date: Wed, 28 May 2025 14:42:26 +0100 Subject: [PATCH 0050/1018] MAINT: Convert dummymodule to multi-phase init (PEP 489) --- numpy/_core/src/dummymodule.c | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/numpy/_core/src/dummymodule.c b/numpy/_core/src/dummymodule.c index 2f293d6c4cd6..e1ef80ab3af3 100644 --- a/numpy/_core/src/dummymodule.c +++ b/numpy/_core/src/dummymodule.c @@ -14,25 +14,27 @@ static struct PyMethodDef methods[] = { {NULL, NULL, 0, NULL} }; +static struct PyModuleDef_Slot dummy_slots[] = { +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + // signal that this module can be imported in isolated subinterpreters + {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, +#endif + {0, NULL}, +}; static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "dummy", - NULL, - -1, - methods, - NULL, - NULL, - NULL, - NULL + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "dummy", + .m_size = 0, + .m_methods = methods, + .m_slots = dummy_slots, }; /* Initialization function for the module */ PyMODINIT_FUNC PyInit__dummy(void) { - PyObject *m; - m = PyModule_Create(&moduledef); - if (!m) { - return NULL; - } - return m; + return PyModuleDef_Init(&moduledef); } From 0145d7c379e245bfd259bf1c519441da4bea089b Mon Sep 17 00:00:00 2001 From: Adam Turner <9087854+AA-Turner@users.noreply.github.com> Date: Wed, 28 May 2025 14:47:45 +0100 Subject: [PATCH 0051/1018] MAINT: Convert lapack_lite to multi-phase init (PEP 489) --- numpy/linalg/lapack_litemodule.c | 62 ++++++++++++++++++++------------ 1 file changed, 39 insertions(+), 23 deletions(-) diff --git a/numpy/linalg/lapack_litemodule.c b/numpy/linalg/lapack_litemodule.c index e5f3af05af22..cad5f3f92f09 100644 --- a/numpy/linalg/lapack_litemodule.c +++ b/numpy/linalg/lapack_litemodule.c @@ -377,28 +377,25 @@ static struct PyMethodDef lapack_lite_module_methods[] = { { NULL,NULL,0, NULL} }; +static int module_loaded = 0; -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "lapack_lite", - NULL, - -1, - lapack_lite_module_methods, - NULL, - NULL, - NULL, - NULL -}; - -/* Initialization function for the module */ -PyMODINIT_FUNC PyInit_lapack_lite(void) +static int +lapack_lite_exec(PyObject *m) { - PyObject *m,*d; - m = 
PyModule_Create(&moduledef); - if (m == NULL) { - return NULL; + PyObject *d; + + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); + return -1; + } + module_loaded = 1; + + if (PyArray_ImportNumPyAPI() < 0) { + return -1; } - import_array(); + d = PyModule_GetDict(m); LapackError = PyErr_NewException("numpy.linalg.lapack_lite.LapackError", NULL, NULL); PyDict_SetItemString(d, "LapackError", LapackError); @@ -409,10 +406,29 @@ PyMODINIT_FUNC PyInit_lapack_lite(void) PyDict_SetItemString(d, "_ilp64", Py_False); #endif -#if Py_GIL_DISABLED - // signal this module supports running with the GIL disabled - PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); + return 0; +} + +static struct PyModuleDef_Slot lapack_lite_slots[] = { + {Py_mod_exec, lapack_lite_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, #endif + {0, NULL}, +}; + +static struct PyModuleDef moduledef = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "lapack_lite", + .m_size = 0, + .m_methods = lapack_lite_module_methods, + .m_slots = lapack_lite_slots, +}; - return m; +PyMODINIT_FUNC PyInit_lapack_lite(void) { + return PyModuleDef_Init(&moduledef); } From 3363b38d99f87601869c2c9d68c2b16e54509675 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli Date: Wed, 28 May 2025 16:06:24 +0100 Subject: [PATCH 0052/1018] TYP: Type ``MaskedArray.__{add,radd,sub,rsub}__`` (#29012) --- numpy/__init__.pyi | 14 +- numpy/ma/core.pyi | 202 +++++++++++++++++++- numpy/typing/tests/data/reveal/ma.pyi | 255 ++++++++++++++++++++++++++ 3 files changed, 462 insertions(+), 9 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 0359e605a1c3..0e8b4625e7d4 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2809,6 +2809,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rdivmod__(self: NDArray[timedelta64], lhs: _ArrayLike[timedelta64], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... + # Keep in sync with `MaskedArray.__add__` @overload def __add__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload @@ -2856,6 +2857,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __add__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__radd__` @overload # signature equivalent to __add__ def __radd__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload @@ -2903,6 +2905,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __radd__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__sub__` @overload def __sub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload @@ -2940,6 +2943,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __sub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
+ # Keep in sync with `MaskedArray.__rsub__` @overload def __rsub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload @@ -3359,6 +3363,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # object and its value is >= 0 # NOTE: Due to a mypy bug, overloading on e.g. `self: NDArray[SCT_floating]` won't # work, as this will lead to `false negatives` when using these inplace ops. + # Keep in sync with `MaskedArray.__iadd__` @overload def __iadd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload @@ -3386,7 +3391,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __iadd__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - # + # Keep in sync with `MaskedArray.__isub__` @overload def __isub__( self: NDArray[unsignedinteger], @@ -3404,7 +3409,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __isub__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - # + # Keep in sync with `MaskedArray.__imul__` @overload def __imul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload @@ -3426,6 +3431,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __imul__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + # Keep in sync with `MaskedArray.__ipow__` @overload def __ipow__( self: NDArray[unsignedinteger], @@ -3441,7 +3447,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __ipow__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - # + # Keep in sync with `MaskedArray.__itruediv__` @overload def __itruediv__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload @@ -3449,7 +3455,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __itruediv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
- # keep in sync with `__imod__` + # keep in sync with `__imod__` and `MaskedArray.__ifloordiv__` @overload def __ifloordiv__( self: NDArray[unsignedinteger], diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 7e87611037b9..388619e1a654 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -2,7 +2,7 @@ # ruff: noqa: ANN001, ANN002, ANN003, ANN201, ANN202 ANN204, ANN401 from collections.abc import Sequence -from typing import Any, Literal, Self, SupportsIndex, TypeAlias, overload +from typing import Any, Literal, NoReturn, Self, SupportsIndex, TypeAlias, overload from _typeshed import Incomplete from typing_extensions import TypeIs, TypeVar @@ -19,17 +19,23 @@ from numpy import ( bool_, bytes_, character, + complex128, complexfloating, datetime64, dtype, dtypes, expand_dims, + float16, + float32, float64, floating, generic, + inexact, int_, + integer, intp, ndarray, + number, object_, signedinteger, str_, @@ -40,14 +46,21 @@ from numpy._globals import _NoValueType from numpy._typing import ( ArrayLike, NDArray, + _32Bit, + _64Bit, _AnyShape, _ArrayLike, _ArrayLikeBool_co, _ArrayLikeBytes_co, + _ArrayLikeComplex128_co, _ArrayLikeComplex_co, + _ArrayLikeDT64_co, + _ArrayLikeFloat64_co, _ArrayLikeFloat_co, _ArrayLikeInt, _ArrayLikeInt_co, + _ArrayLikeNumber_co, + _ArrayLikeObject_co, _ArrayLikeStr_co, _ArrayLikeString_co, _ArrayLikeTD64_co, @@ -247,8 +260,18 @@ _DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) _ArrayT = TypeVar("_ArrayT", bound=ndarray[Any, Any]) _ScalarT = TypeVar("_ScalarT", bound=generic) _ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) +_NumberT = TypeVar("_NumberT", bound=number) # A subset of `MaskedArray` that can be parametrized w.r.t. `np.generic` _MaskedArray: TypeAlias = MaskedArray[_AnyShape, dtype[_ScalarT]] + +_MaskedArrayUInt_co: TypeAlias = _MaskedArray[unsignedinteger | np.bool] +_MaskedArrayInt_co: TypeAlias = _MaskedArray[integer | np.bool] +_MaskedArrayComplex_co: TypeAlias = _MaskedArray[inexact | integer | np.bool] +_MaskedArrayTD64_co: TypeAlias = _MaskedArray[timedelta64 | integer | np.bool] +_MaskedArrayFloat64_co: TypeAlias = _MaskedArray[floating[_64Bit] | float32 | float16 | integer | np.bool] +_MaskedArrayComplex128_co: TypeAlias = _MaskedArray[number[_64Bit] | number[_32Bit] | float16 | integer | np.bool] +_MaskedArrayFloat_co: TypeAlias = _MaskedArray[floating | integer | np.bool] + _Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] MaskType = bool_ @@ -463,10 +486,179 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __gt__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] def __le__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] def __lt__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] - def __add__(self, other): ... - def __radd__(self, other): ... - def __sub__(self, other): ... - def __rsub__(self, other): ... + + # Keep in sync with `ndarray.__add__` + @overload + def __add__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __add__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... 
# type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __add__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __add__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __add__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __add__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __add__(self: _MaskedArrayTD64_co, other: _ArrayLikeDT64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __add__(self: _MaskedArray[datetime64], other: _ArrayLikeTD64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __add__(self: _MaskedArray[bytes_], other: _ArrayLikeBytes_co, /) -> _MaskedArray[bytes_]: ... + @overload + def __add__(self: _MaskedArray[str_], other: _ArrayLikeStr_co, /) -> _MaskedArray[str_]: ... + @overload + def __add__( + self: MaskedArray[Any, dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> MaskedArray[_AnyShape, dtypes.StringDType]: ... + @overload + def __add__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __add__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__radd__` + @overload # signature equivalent to __add__ + def __radd__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __radd__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __radd__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __radd__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __radd__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... 
+ @overload + def __radd__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __radd__(self: _MaskedArrayTD64_co, other: _ArrayLikeDT64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __radd__(self: _MaskedArray[datetime64], other: _ArrayLikeTD64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __radd__(self: _MaskedArray[bytes_], other: _ArrayLikeBytes_co, /) -> _MaskedArray[bytes_]: ... + @overload + def __radd__(self: _MaskedArray[str_], other: _ArrayLikeStr_co, /) -> _MaskedArray[str_]: ... + @overload + def __radd__( + self: MaskedArray[Any, dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> MaskedArray[_AnyShape, dtypes.StringDType]: ... + @overload + def __radd__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __radd__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__sub__` + @overload + def __sub__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __sub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __sub__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __sub__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __sub__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __sub__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __sub__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... 
# type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __sub__(self: _MaskedArray[datetime64], other: _ArrayLikeTD64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __sub__(self: _MaskedArray[datetime64], other: _ArrayLikeDT64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __sub__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __sub__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rsub__` + @overload + def __rsub__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rsub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __rsub__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rsub__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rsub__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __rsub__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __rsub__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __rsub__(self: _MaskedArrayTD64_co, other: _ArrayLikeDT64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __rsub__(self: _MaskedArray[datetime64], other: _ArrayLikeDT64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __rsub__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rsub__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __mul__(self, other): ... def __rmul__(self, other): ... def __truediv__(self, other): ... diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index deda19c3d743..97f833b6a488 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -12,21 +12,35 @@ class MaskedArraySubclass(MaskedArray[np.complex128]): ... 
AR_b: NDArray[np.bool] AR_f4: NDArray[np.float32] +AR_u4: NDArray[np.uint32] AR_dt64: NDArray[np.datetime64] AR_td64: NDArray[np.timedelta64] AR_o: NDArray[np.timedelta64] +AR_LIKE_b: list[bool] +AR_LIKE_u: list[np.uint32] +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_c: list[complex] +AR_LIKE_td64: list[np.timedelta64] +AR_LIKE_dt64: list[np.datetime64] +AR_LIKE_o: list[np.object_] +AR_number: NDArray[np.number] + MAR_c16: MaskedArray[np.complex128] MAR_b: MaskedArray[np.bool] MAR_f4: MaskedArray[np.float32] MAR_f8: MaskedArray[np.float64] MAR_i8: MaskedArray[np.int64] +MAR_u4: MaskedArray[np.uint32] MAR_dt64: MaskedArray[np.datetime64] MAR_td64: MaskedArray[np.timedelta64] MAR_o: MaskedArray[np.object_] MAR_s: MaskedArray[np.str_] MAR_byte: MaskedArray[np.bytes_] MAR_V: MaskedArray[np.void] +MAR_floating: MaskedArray[np.floating] +MAR_number: MaskedArray[np.number] MAR_subclass: MaskedArraySubclass @@ -368,3 +382,244 @@ assert_type(MAR_2d_f4.T, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32] assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], *tuple[_Array1D[np.intp], ...]]) assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) + +# Masked Array addition + +assert_type(MAR_b + AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b + AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b + AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_b + AR_LIKE_o, Any) + +assert_type(AR_LIKE_u + MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i + MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f + MAR_b, MaskedArray[np.floating]) +assert_type(AR_LIKE_c + MAR_b, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 + MAR_b, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_b, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_b, Any) + +assert_type(MAR_u4 + AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 + AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 + AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 + AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_u4 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u + MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i + MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f + MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_c + MAR_u4, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 + MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_u4, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_u4, Any) + +assert_type(MAR_i8 + AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 + AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 + AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 + AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_i8 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_i8, MaskedArray[np.int64]) +assert_type(AR_LIKE_u + MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i + MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f + MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_c + MAR_i8, 
MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 + MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_i8, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_i8, Any) + +assert_type(MAR_f8 + AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_c + MAR_f8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o + MAR_f8, Any) + +assert_type(MAR_c16 + AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_u + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_i + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_f + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_c + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_o + MAR_c16, Any) + +assert_type(MAR_td64 + AR_LIKE_b, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_u + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_i + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_td64 + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_td64, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_td64, Any) + +assert_type(MAR_dt64 + AR_LIKE_b, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_u, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_i, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_td64, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_o + MAR_dt64, Any) + +assert_type(MAR_o + AR_LIKE_b, Any) +assert_type(MAR_o + AR_LIKE_u, Any) +assert_type(MAR_o + AR_LIKE_i, Any) +assert_type(MAR_o + AR_LIKE_f, Any) +assert_type(MAR_o + AR_LIKE_c, Any) +assert_type(MAR_o + AR_LIKE_td64, Any) +assert_type(MAR_o + AR_LIKE_dt64, Any) +assert_type(MAR_o + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_o, Any) +assert_type(AR_LIKE_u + MAR_o, Any) +assert_type(AR_LIKE_i + MAR_o, Any) +assert_type(AR_LIKE_f + MAR_o, Any) +assert_type(AR_LIKE_c + MAR_o, Any) +assert_type(AR_LIKE_td64 + MAR_o, Any) +assert_type(AR_LIKE_dt64 + MAR_o, Any) +assert_type(AR_LIKE_o + MAR_o, Any) + +# Masked Array subtraction +# Keep in sync with numpy/typing/tests/data/reveal/arithmetic.pyi + +assert_type(MAR_number - AR_number, MaskedArray[np.number]) + +assert_type(MAR_b - AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b - AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b - AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b - 
AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_b - AR_LIKE_o, Any) + +assert_type(AR_LIKE_u - MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i - MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f - MAR_b, MaskedArray[np.floating]) +assert_type(AR_LIKE_c - MAR_b, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 - MAR_b, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_b, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_b, Any) + +assert_type(MAR_u4 - AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 - AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 - AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 - AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_u4 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u - MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i - MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f - MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_c - MAR_u4, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 - MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_u4, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_u4, Any) + +assert_type(MAR_i8 - AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 - AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 - AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 - AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_i8 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_i8, MaskedArray[np.int64]) +assert_type(AR_LIKE_u - MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i - MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f - MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_c - MAR_i8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 - MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_i8, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_i8, Any) + +assert_type(MAR_f8 - AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_c - MAR_f8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o - MAR_f8, Any) + +assert_type(MAR_c16 - AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_u - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_i - MAR_c16, 
MaskedArray[np.complex128]) +assert_type(AR_LIKE_f - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_c - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_o - MAR_c16, Any) + +assert_type(MAR_td64 - AR_LIKE_b, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_u - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_i - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_td64 - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_td64, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_td64, Any) + +assert_type(MAR_dt64 - AR_LIKE_b, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_u, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_i, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_td64, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_dt64, MaskedArray[np.timedelta64]) +assert_type(MAR_dt64 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_dt64 - MAR_dt64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o - MAR_dt64, Any) + +assert_type(MAR_o - AR_LIKE_b, Any) +assert_type(MAR_o - AR_LIKE_u, Any) +assert_type(MAR_o - AR_LIKE_i, Any) +assert_type(MAR_o - AR_LIKE_f, Any) +assert_type(MAR_o - AR_LIKE_c, Any) +assert_type(MAR_o - AR_LIKE_td64, Any) +assert_type(MAR_o - AR_LIKE_dt64, Any) +assert_type(MAR_o - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_o, Any) +assert_type(AR_LIKE_u - MAR_o, Any) +assert_type(AR_LIKE_i - MAR_o, Any) +assert_type(AR_LIKE_f - MAR_o, Any) +assert_type(AR_LIKE_c - MAR_o, Any) +assert_type(AR_LIKE_td64 - MAR_o, Any) +assert_type(AR_LIKE_dt64 - MAR_o, Any) +assert_type(AR_LIKE_o - MAR_o, Any) From f9baafb4e0c529939e94218ec500ccc3ee5d5dd4 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Wed, 28 May 2025 22:18:28 +0200 Subject: [PATCH 0053/1018] review comments --- numpy/_core/tests/test_indexing.py | 3 ++- numpy/_core/tests/test_mem_overlap.py | 7 +++++-- numpy/_typing/_dtype_like.py | 2 +- numpy/lib/tests/test_histograms.py | 6 ++++-- ruff.toml | 8 ++++++-- 5 files changed, 18 insertions(+), 8 deletions(-) diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index 1d42cde48682..757b8d72782f 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -1352,7 +1352,8 @@ def test_boolean_indexing_fast_path(self): "size of axis is 3 but size of corresponding boolean axis is 1", lambda: a[idx1]) - # This used to incorrectly give a ValueError: operands could not be broadcast together # noqa: E501 + # This used to incorrectly give a ValueError: operands could not be + # broadcast together idx2 = np.array([[False] * 8 + [True]]) assert_raises_regex(IndexError, "boolean index did not match indexed array along axis 0; " diff --git a/numpy/_core/tests/test_mem_overlap.py b/numpy/_core/tests/test_mem_overlap.py index b437a7e14298..78b943854679 100644 --- a/numpy/_core/tests/test_mem_overlap.py +++ b/numpy/_core/tests/test_mem_overlap.py @@ -165,8 +165,9 @@ def check_may_share_memory_exact(a, b): err_msg = "" if got != exact: + base_delta = a.__array_interface__['data'][0] - b.__array_interface__['data'][0] err_msg = " " + "\n ".join([ - f"base_a - base_b = {a.__array_interface__['data'][0] - 
b.__array_interface__['data'][0]!r}", # noqa: E501 + f"base_a - base_b = {base_delta!r}", f"shape_a = {a.shape!r}", f"shape_b = {b.shape!r}", f"strides_a = {a.strides!r}", @@ -402,7 +403,9 @@ def check(A, U, exists=None): exists = (X is not None) if X is not None: - assert_(sum(a * x for a, x in zip(A, X)) == sum(a * u // 2 for a, u in zip(A, U))) # noqa: E501 + sum_ax = sum(a * x for a, x in zip(A, X)) + sum_au_half = sum(a * u // 2 for a, u in zip(A, U)) + assert_(sum_ax == sum_au_half) assert_(all(0 <= x <= u for x, u in zip(X, U))) assert_(any(x != u // 2 for x, u in zip(X, U))) diff --git a/numpy/_typing/_dtype_like.py b/numpy/_typing/_dtype_like.py index d341db5dc23a..c406b3098384 100644 --- a/numpy/_typing/_dtype_like.py +++ b/numpy/_typing/_dtype_like.py @@ -55,7 +55,7 @@ def dtype(self) -> _DTypeT_co: ... # A subset of `npt.DTypeLike` that can be parametrized w.r.t. `np.generic` -_DTypeLike: TypeAlias = type[_ScalarT] | np.dtype[_ScalarT] | _SupportsDType[np.dtype[_ScalarT]] # noqa: E501 +_DTypeLike: TypeAlias = type[_ScalarT] | np.dtype[_ScalarT] | _SupportsDType[np.dtype[_ScalarT]] # Would create a dtype[np.void] diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py index be5268d9813a..4ba953f462fc 100644 --- a/numpy/lib/tests/test_histograms.py +++ b/numpy/lib/tests/test_histograms.py @@ -554,7 +554,8 @@ def test_outlier(self): assert_equal(len(a), numbins) def test_scott_vs_stone(self): - """Verify that Scott's rule and Stone's rule converges for normally distributed data""" # noqa: E501 + # Verify that Scott's rule and Stone's rule converges for normally + # distributed data def nbins_ratio(seed, size): rng = np.random.RandomState(seed) @@ -565,7 +566,8 @@ def nbins_ratio(seed, size): geom_space = np.geomspace(start=10, stop=100, num=4).round().astype(int) ll = [[nbins_ratio(seed, size) for size in geom_space] for seed in range(10)] - # the average difference between the two methods decreases as the dataset size increases. # noqa: E501 + # the average difference between the two methods decreases as the dataset + # size increases. 
avg = abs(np.mean(ll, axis=0) - 0.5) assert_almost_equal(avg, [0.15, 0.09, 0.08, 0.03], decimal=2) diff --git a/ruff.toml b/ruff.toml index b4190fe41787..deb52e834df9 100644 --- a/ruff.toml +++ b/ruff.toml @@ -68,6 +68,7 @@ ignore = [ "_tempita.py" = ["B909"] "bench_*.py" = ["B015", "B018"] "test*.py" = ["B015", "B018", "E201", "E714"] + "benchmarks/benchmarks/bench_linalg.py" = ["E501"] "numpy/_core/tests/test_api.py" = ["E501"] "numpy/_core/tests/test_arrayprint.py" = ["E501"] @@ -90,15 +91,18 @@ ignore = [ "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] "numpy/_core/code_generators/generate_umath.py" = ["E501"] -"numpy/_typing/_array_like.py" = ["E501"] "numpy/lib/tests/test_function_base.py" = ["E501"] "numpy/lib/tests/test_format.py" = ["E501"] "numpy/lib/tests/test_io.py" = ["E501"] "numpy/lib/tests/test_polynomial.py" = ["E501"] "numpy/linalg/tests/test_linalg.py" = ["E501"] "numpy/tests/test_configtool.py" = ["E501"] -"numpy*pyi" = ["E501"] "numpy/f2py/*py" = ["E501"] +# for typing related files we follow https://typing.python.org/en/latest/guides/writing_stubs.html#maximum-line-length +"numpy/_typing/_array_like.py" = ["E501"] +"numpy/_typing/_dtype_like.py" = ["E501"] +"numpy*pyi" = ["E501"] + "__init__.py" = ["F401", "F403", "F405"] "__init__.pyi" = ["F401"] "numpy/_core/defchararray.py" = ["F403", "F405"] From 5ab56f649972b77daf20f7f5aa3e796232c05a00 Mon Sep 17 00:00:00 2001 From: Adam Turner <9087854+AA-Turner@users.noreply.github.com> Date: Thu, 29 May 2025 08:41:39 +0100 Subject: [PATCH 0054/1018] Convert pocketfft_umath to multi-phase init (PEP 489) (#29028) --- numpy/fft/_pocketfft_umath.cpp | 64 +++++++++++++++++++++------------- 1 file changed, 40 insertions(+), 24 deletions(-) diff --git a/numpy/fft/_pocketfft_umath.cpp b/numpy/fft/_pocketfft_umath.cpp index 525b5e5a23da..ab8af5aa522e 100644 --- a/numpy/fft/_pocketfft_umath.cpp +++ b/numpy/fft/_pocketfft_umath.cpp @@ -388,41 +388,57 @@ add_gufuncs(PyObject *dictionary) { return 0; } -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_multiarray_umath", - NULL, - -1, - NULL, - NULL, - NULL, - NULL, - NULL -}; +static int module_loaded = 0; -/* Initialization function for the module */ -PyMODINIT_FUNC PyInit__pocketfft_umath(void) +static int +_pocketfft_umath_exec(PyObject *m) { - PyObject *m = PyModule_Create(&moduledef); - if (m == NULL) { - return NULL; + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); + return -1; } + module_loaded = 1; /* Import the array and ufunc objects */ - import_array(); - import_ufunc(); + if (PyArray_ImportNumPyAPI() < 0) { + return -1; + } + if (PyUFunc_ImportUFuncAPI() < 0) { + return -1; + } PyObject *d = PyModule_GetDict(m); if (add_gufuncs(d) < 0) { Py_DECREF(d); - Py_DECREF(m); - return NULL; + return -1; } -#if Py_GIL_DISABLED - // signal this module supports running with the GIL disabled - PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); + return 0; +} + +static struct PyModuleDef_Slot _pocketfft_umath_slots[] = { + {Py_mod_exec, (void*)_pocketfft_umath_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, #endif 
+ {0, NULL}, +}; + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, /* m_base */ + "_pocketfft_umath", /* m_name */ + NULL, /* m_doc */ + 0, /* m_size */ + NULL, /* m_methods */ + _pocketfft_umath_slots, /* m_slots */ +}; - return m; +PyMODINIT_FUNC PyInit__pocketfft_umath(void) { + return PyModuleDef_Init(&moduledef); } From e107f24f32254d063249fc84e34aa707d826b054 Mon Sep 17 00:00:00 2001 From: Adam Turner <9087854+AA-Turner@users.noreply.github.com> Date: Thu, 29 May 2025 13:04:38 +0100 Subject: [PATCH 0055/1018] MAINT: Convert multiarray to multi-phase init (PEP 489) (#29022) --- numpy/_core/__init__.py | 4 + .../src/multiarray/_multiarray_tests.c.src | 59 ++++--- numpy/_core/src/multiarray/multiarraymodule.c | 166 +++++++++--------- 3 files changed, 125 insertions(+), 104 deletions(-) diff --git a/numpy/_core/__init__.py b/numpy/_core/__init__.py index 7b19cefb2f93..b0be8d1cbab6 100644 --- a/numpy/_core/__init__.py +++ b/numpy/_core/__init__.py @@ -23,6 +23,10 @@ except ImportError as exc: import sys + # Bypass for the module re-initialization opt-out + if exc.msg == "cannot load module more than once per process": + raise + # Basically always, the problem should be that the C module is wrong/missing... if ( isinstance(exc, ModuleNotFoundError) diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src index fc73a64b19a0..8012a32b070e 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -2413,41 +2413,56 @@ static PyMethodDef Multiarray_TestsMethods[] = { }; -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_multiarray_tests", - NULL, - -1, - Multiarray_TestsMethods, - NULL, - NULL, - NULL, - NULL -}; +static int module_loaded = 0; -PyMODINIT_FUNC PyInit__multiarray_tests(void) +static int +_multiarray_tests_exec(PyObject *m) { - PyObject *m; + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); + return -1; + } + module_loaded = 1; - m = PyModule_Create(&moduledef); - if (m == NULL) { - return m; + if (PyArray_ImportNumPyAPI() < 0) { + return -1; } - import_array(); if (init_argparse_mutex() < 0) { - return NULL; + return -1; } if (PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "cannot load _multiarray_tests module."); } -#if Py_GIL_DISABLED - // signal this module supports running with the GIL disabled - PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); + return 0; +} + +static struct PyModuleDef_Slot _multiarray_tests_slots[] = { + {Py_mod_exec, _multiarray_tests_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, #endif + {0, NULL}, +}; + +static struct PyModuleDef moduledef = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "_multiarray_tests", + .m_size = 0, + .m_methods = Multiarray_TestsMethods, + .m_slots = _multiarray_tests_slots, +}; - return m; +PyMODINIT_FUNC PyInit__multiarray_tests(void) +{ + return PyModuleDef_Init(&moduledef); } NPY_NO_EXPORT int diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 8ba38b555edb..022a54fe17da 
100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -4773,36 +4773,27 @@ initialize_thread_unsafe_state(void) { return 0; } -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_multiarray_umath", - NULL, - -1, - array_module_methods, - NULL, - NULL, - NULL, - NULL -}; +static int module_loaded = 0; -/* Initialization function for the module */ -PyMODINIT_FUNC PyInit__multiarray_umath(void) { - PyObject *m, *d, *s; - PyObject *c_api; +static int +_multiarray_umath_exec(PyObject *m) { + PyObject *d, *s, *c_api; - /* Create the module and add the functions */ - m = PyModule_Create(&moduledef); - if (!m) { - return NULL; + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); + return -1; } + module_loaded = 1; /* Initialize CPU features */ if (npy_cpu_init() < 0) { - goto err; + return -1; } /* Initialize CPU dispatch tracer */ if (npy_cpu_dispatch_tracer_init(m) < 0) { - goto err; + return -1; } #if defined(MS_WIN64) && defined(__GNUC__) @@ -4818,62 +4809,62 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { numpy_pydatetime_import(); if (PyErr_Occurred()) { - goto err; + return -1; } /* Add some symbolic constants to the module */ d = PyModule_GetDict(m); if (!d) { - goto err; + return -1; } if (intern_strings() < 0) { - goto err; + return -1; } if (initialize_static_globals() < 0) { - goto err; + return -1; } if (initialize_thread_unsafe_state() < 0) { - goto err; + return -1; } if (init_import_mutex() < 0) { - goto err; + return -1; } if (init_extobj() < 0) { - goto err; + return -1; } if (PyType_Ready(&PyUFunc_Type) < 0) { - goto err; + return -1; } PyArrayDTypeMeta_Type.tp_base = &PyType_Type; if (PyType_Ready(&PyArrayDTypeMeta_Type) < 0) { - goto err; + return -1; } PyArrayDescr_Type.tp_hash = PyArray_DescrHash; Py_SET_TYPE(&PyArrayDescr_Type, &PyArrayDTypeMeta_Type); if (PyType_Ready(&PyArrayDescr_Type) < 0) { - goto err; + return -1; } initialize_casting_tables(); initialize_numeric_types(); if (initscalarmath(m) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyArray_Type) < 0) { - goto err; + return -1; } if (setup_scalartypes(d) < 0) { - goto err; + return -1; } PyArrayIter_Type.tp_iter = PyObject_SelfIter; @@ -4881,28 +4872,28 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { PyArrayMultiIter_Type.tp_iter = PyObject_SelfIter; PyArrayMultiIter_Type.tp_free = PyArray_free; if (PyType_Ready(&PyArrayIter_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyArrayMapIter_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyArrayMultiIter_Type) < 0) { - goto err; + return -1; } PyArrayNeighborhoodIter_Type.tp_new = PyType_GenericNew; if (PyType_Ready(&PyArrayNeighborhoodIter_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&NpyIter_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyArrayFlags_Type) < 0) { - goto err; + return -1; } NpyBusDayCalendar_Type.tp_new = PyType_GenericNew; if (PyType_Ready(&NpyBusDayCalendar_Type) < 0) { - goto err; + return -1; } /* @@ -4923,43 +4914,43 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { s = npy_cpu_features_dict(); if (s == NULL) { - goto err; + return -1; } if (PyDict_SetItemString(d, "__cpu_features__", s) < 0) { Py_DECREF(s); - goto err; + return -1; } Py_DECREF(s); s = npy_cpu_baseline_list(); if (s == NULL) { - goto err; + return -1; } if 
(PyDict_SetItemString(d, "__cpu_baseline__", s) < 0) { Py_DECREF(s); - goto err; + return -1; } Py_DECREF(s); s = npy_cpu_dispatch_list(); if (s == NULL) { - goto err; + return -1; } if (PyDict_SetItemString(d, "__cpu_dispatch__", s) < 0) { Py_DECREF(s); - goto err; + return -1; } Py_DECREF(s); s = PyCapsule_New((void *)_datetime_strings, NULL, NULL); if (s == NULL) { - goto err; + return -1; } PyDict_SetItemString(d, "DATETIMEUNITS", s); Py_DECREF(s); #define ADDCONST(NAME) \ - s = PyLong_FromLong(NPY_##NAME); \ + s = PyLong_FromLong(NPY_##NAME); \ PyDict_SetItemString(d, #NAME, s); \ Py_DECREF(s) @@ -4999,39 +4990,39 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { /* Finalize scalar types and expose them via namespace or typeinfo dict */ if (set_typeinfo(d) != 0) { - goto err; + return -1; } if (PyType_Ready(&PyArrayFunctionDispatcher_Type) < 0) { - goto err; + return -1; } PyDict_SetItemString( d, "_ArrayFunctionDispatcher", (PyObject *)&PyArrayFunctionDispatcher_Type); if (PyType_Ready(&PyArrayArrayConverter_Type) < 0) { - goto err; + return -1; } PyDict_SetItemString( d, "_array_converter", (PyObject *)&PyArrayArrayConverter_Type); if (PyType_Ready(&PyArrayMethod_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyBoundArrayMethod_Type) < 0) { - goto err; + return -1; } if (initialize_and_map_pytypes_to_dtypes() < 0) { - goto err; + return -1; } if (PyArray_InitializeCasts() < 0) { - goto err; + return -1; } if (init_string_dtype() < 0) { - goto err; + return -1; } /* @@ -5040,7 +5031,7 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { PyDataMem_DefaultHandler = PyCapsule_New( &default_handler, MEM_HANDLER_CAPSULE_NAME, NULL); if (PyDataMem_DefaultHandler == NULL) { - goto err; + return -1; } /* @@ -5049,32 +5040,32 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { */ current_handler = PyContextVar_New("current_allocator", PyDataMem_DefaultHandler); if (current_handler == NULL) { - goto err; + return -1; } if (initumath(m) != 0) { - goto err; + return -1; } if (set_matmul_flags(d) < 0) { - goto err; + return -1; } // initialize static references to ndarray.__array_*__ special methods npy_static_pydata.ndarray_array_finalize = PyObject_GetAttrString( (PyObject *)&PyArray_Type, "__array_finalize__"); if (npy_static_pydata.ndarray_array_finalize == NULL) { - goto err; + return -1; } npy_static_pydata.ndarray_array_ufunc = PyObject_GetAttrString( (PyObject *)&PyArray_Type, "__array_ufunc__"); if (npy_static_pydata.ndarray_array_ufunc == NULL) { - goto err; + return -1; } npy_static_pydata.ndarray_array_function = PyObject_GetAttrString( (PyObject *)&PyArray_Type, "__array_function__"); if (npy_static_pydata.ndarray_array_function == NULL) { - goto err; + return -1; } /* @@ -5090,13 +5081,13 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { if (npy_cache_import_runtime( "numpy.dtypes", "_add_dtype_helper", &npy_runtime_imports._add_dtype_helper) == -1) { - goto err; + return -1; } if (PyObject_CallFunction( npy_runtime_imports._add_dtype_helper, "Os", (PyObject *)&PyArray_StringDType, NULL) == NULL) { - goto err; + return -1; } PyDict_SetItemString(d, "StringDType", (PyObject *)&PyArray_StringDType); @@ -5104,13 +5095,13 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { npy_static_pydata.zero_pyint_like_arr = PyArray_ZEROS( 0, NULL, NPY_DEFAULT_INT, NPY_FALSE); if (npy_static_pydata.zero_pyint_like_arr == NULL) { - goto err; + return -1; } ((PyArrayObject_fields *)npy_static_pydata.zero_pyint_like_arr)->flags |= (NPY_ARRAY_WAS_PYTHON_INT|NPY_ARRAY_WAS_INT_AND_REPLACED); if 
(verify_static_structs_initialized() < 0) { - goto err; + return -1; } /* @@ -5120,33 +5111,44 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { /* The dtype API is not auto-filled/generated via Python scripts: */ _fill_dtype_api(PyArray_API); if (c_api == NULL) { - goto err; + return -1; } PyDict_SetItemString(d, "_ARRAY_API", c_api); Py_DECREF(c_api); c_api = PyCapsule_New((void *)PyUFunc_API, NULL, NULL); if (c_api == NULL) { - goto err; + return -1; } PyDict_SetItemString(d, "_UFUNC_API", c_api); Py_DECREF(c_api); if (PyErr_Occurred()) { - goto err; + return -1; } -#if Py_GIL_DISABLED - // signal this module supports running with the GIL disabled - PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); + return 0; +} + +static struct PyModuleDef_Slot _multiarray_umath_slots[] = { + {Py_mod_exec, _multiarray_umath_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, #endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, +#endif + {0, NULL}, +}; - return m; +static struct PyModuleDef moduledef = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "_multiarray_umath", + .m_size = 0, + .m_methods = array_module_methods, + .m_slots = _multiarray_umath_slots, +}; - err: - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_RuntimeError, - "cannot load multiarray module."); - } - Py_DECREF(m); - return NULL; +PyMODINIT_FUNC PyInit__multiarray_umath(void) { + return PyModuleDef_Init(&moduledef); } From b37f7616879b8ce977deca81069c886a4096511d Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Thu, 29 May 2025 17:33:36 +0300 Subject: [PATCH 0056/1018] BLD: bump OpenBLAS version, use OpenBLAS for win-arm64 (#29039) * BLD: bump OpenBLAS version, use OpenBLAS for win-arm64 [wheel build] * Update requirements/ci_requirements.txt Co-authored-by: Sebastian Berg * use pip to install anaconda-client on win-arm64 [wheel build] * allow noblas in win32 wheels, use scipy-openblas32 on win-arm64 [wheel build] * improve runner arch detection logic [wheel build] * remove win_arm64 cibuildwheel override * remove 'strip' before calling delvewheel [wheel build] * use openblas 0.3.29.265 only on win-arm64 [wheel build] * add comment about lack of win-arm64 openblas64 wheels [wheel build] --------- Co-authored-by: Sebastian Berg Co-authored-by: Joe Rickerby --- .github/workflows/wheels.yml | 13 ++++++++++++- pyproject.toml | 9 ++------- requirements/ci32_requirements.txt | 3 ++- requirements/ci_requirements.txt | 6 ++++-- tools/wheels/cibw_before_build.sh | 26 ++++++++++++++++++-------- tools/wheels/repair_windows.sh | 23 ----------------------- 6 files changed, 38 insertions(+), 42 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index fa2c1cb5ae97..097efe8e7225 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -186,7 +186,8 @@ jobs: name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} path: ./wheelhouse/*.whl - - uses: mamba-org/setup-micromamba@0dea6379afdaffa5d528b3d1dabc45da37f443fc + - name: install micromamba + uses: mamba-org/setup-micromamba@0dea6379afdaffa5d528b3d1dabc45da37f443fc if: ${{ matrix.buildplat[1] != 'win_arm64' }} # unsupported platform at the moment with: # for installation of anaconda-client, required for upload to @@ -200,6 +201,16 @@ jobs: create-args: >- anaconda-client + - name: win-arm64 install anaconda client + if: 
${{ matrix.buildplat[1] == 'win_arm64' }} + run: | + # Rust installation needed for rpds-py. + Invoke-WebRequest https://static.rust-lang.org/rustup/dist/aarch64-pc-windows-msvc/rustup-init.exe -UseBasicParsing -Outfile rustup-init.exe + .\rustup-init.exe -y + $env:PATH="$env:PATH;$env:USERPROFILE\.cargo\bin" + pip install anaconda-client + + - name: Upload wheels if: success() && github.repository == 'numpy/numpy' shell: bash -el {0} diff --git a/pyproject.toml b/pyproject.toml index 5cf75b20a6b6..1e08544ced75 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -178,21 +178,16 @@ repair-wheel-command = [ ] [tool.cibuildwheel.windows] -# This does not work, use CIBW_ENVIRONMENT_WINDOWS -environment = {PKG_CONFIG_PATH="./.openblas"} config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=false build-dir=build" repair-wheel-command = "bash -el ./tools/wheels/repair_windows.sh {wheel} {dest_dir}" +# This does not work, use CIBW_ENVIRONMENT_WINDOWS +environment = {PKG_CONFIG_PATH="./.openblas"} [[tool.cibuildwheel.overrides]] select = "*-win32" config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=true build-dir=build" repair-wheel-command = "" -[[tool.cibuildwheel.overrides]] -select = "*-win_arm64" -config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=true build-dir=build" -repair-wheel-command = "" - [[tool.cibuildwheel.overrides]] select = "*pyodide*" before-test = "pip install -r {project}/requirements/emscripten_test_requirements.txt" diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index 5a7be719214a..74c9a51ec111 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,4 @@ spin==0.13 # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.29.0.0 +scipy-openblas32==0.3.29.0.0 ; sys_platform != 'win32' or platform_machine != 'ARM64' +scipy-openblas32==0.3.29.265.0 ; sys_platform == 'win32' and platform_machine == 'ARM64' diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index adf7d86558f0..b6ea06c812c8 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,6 @@ spin==0.13 # Keep this in sync with ci32_requirements.txt -scipy-openblas32==0.3.29.0.0 -scipy-openblas64==0.3.29.0.0 +scipy-openblas32==0.3.29.0.0 ; sys_platform != 'win32' or platform_machine != 'ARM64' +scipy-openblas32==0.3.29.265.0 ; sys_platform == 'win32' and platform_machine == 'ARM64' +# Note there is not yet a win-arm64 wheel, so we currently only exclude win-arm64 +scipy-openblas64==0.3.29.0.0 ; sys_platform != 'win32' or platform_machine != 'ARM64' diff --git a/tools/wheels/cibw_before_build.sh b/tools/wheels/cibw_before_build.sh index 3e1d4498fe7c..e41e5d37316b 100644 --- a/tools/wheels/cibw_before_build.sh +++ b/tools/wheels/cibw_before_build.sh @@ -22,9 +22,6 @@ fi if [[ $(python -c"import sys; print(sys.maxsize)") < $(python -c"import sys; print(2**33)") ]]; then echo "No BLAS used for 32-bit wheels" export INSTALL_OPENBLAS=false -elif [[ $(python -c"import sysconfig; print(sysconfig.get_platform())") == "win-arm64" ]]; then - echo "No BLAS used for ARM64 wheels" - export INSTALL_OPENBLAS=false elif [ -z $INSTALL_OPENBLAS ]; then # the macos_arm64 build might not set this variable export INSTALL_OPENBLAS=true @@ -32,20 +29,33 @@ fi # Install Openblas from scipy-openblas64 if [[ "$INSTALL_OPENBLAS" = "true" ]] ; then - echo PKG_CONFIG_PATH $PKG_CONFIG_PATH + # by default, use scipy-openblas64 + OPENBLAS=openblas64 + # Possible 
values for RUNNER_ARCH in github are + # X86, X64, ARM, or ARM64 + # TODO: should we detect a missing RUNNER_ARCH and use platform.machine() + # when wheel build is run outside github? + # On 32-bit platforms, use scipy_openblas32 + # On win-arm64 use scipy_openblas32 + if [[ $RUNNER_ARCH == "X86" || $RUNNER_ARCH == "ARM" ]] ; then + OPENBLAS=openblas32 + elif [[ $RUNNER_ARCH == "ARM64" && $RUNNER_OS == "Windows" ]] ; then + OPENBLAS=openblas32 + fi + echo PKG_CONFIG_PATH is $PKG_CONFIG_PATH, OPENBLAS is ${OPENBLAS} PKG_CONFIG_PATH=$PROJECT_DIR/.openblas rm -rf $PKG_CONFIG_PATH mkdir -p $PKG_CONFIG_PATH python -m pip install -r requirements/ci_requirements.txt - python -c "import scipy_openblas64; print(scipy_openblas64.get_pkg_config())" > $PKG_CONFIG_PATH/scipy-openblas.pc + python -c "import scipy_${OPENBLAS}; print(scipy_${OPENBLAS}.get_pkg_config())" > $PKG_CONFIG_PATH/scipy-openblas.pc # Copy the shared objects to a path under $PKG_CONFIG_PATH, the build # will point $LD_LIBRARY_PATH there and then auditwheel/delocate-wheel will # pull these into the wheel. Use python to avoid windows/posix problems python < Date: Thu, 29 May 2025 01:10:35 -0400 Subject: [PATCH 0057/1018] CI: bump to cibuildwheel 3.0.0b4 [wheel build] This bumps to cibuildwheel 3.0.0b4, which contains CPython 3.14.0b2, and removes the directory changing workaround. Signed-off-by: Henry Schreiner --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- tools/wheels/cibw_test_command.sh | 4 ---- 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index fea77068e128..86628f6882cd 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -49,7 +49,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@faf86a6ed7efa889faf6996aa23820831055001a # 2.23.3 + - uses: pypa/cibuildwheel@cf078b0954f3fd08b8445a7bf2c3fb83ab3bb971 # v3.0.0b4 env: CIBW_PLATFORM: pyodide diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 097efe8e7225..3736f28cbd8c 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -177,7 +177,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@90a0ddeff0f23eebc21630e65d66d0f4955e9b94 # v3.0.0b1 + uses: pypa/cibuildwheel@cf078b0954f3fd08b8445a7bf2c3fb83ab3bb971 # v3.0.0b4 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} diff --git a/tools/wheels/cibw_test_command.sh b/tools/wheels/cibw_test_command.sh index 2d39687a861b..60e90ef5beb6 100644 --- a/tools/wheels/cibw_test_command.sh +++ b/tools/wheels/cibw_test_command.sh @@ -4,10 +4,6 @@ set -xe PROJECT_DIR="$1" -if [ -d tools ]; then - cd tools -fi - python -m pip install threadpoolctl python -c "import numpy; numpy.show_config()" From bcf8a99e8ca8c068f9b51a7a086000b53632c3cd Mon Sep 17 00:00:00 2001 From: Henry Schreiner Date: Wed, 28 May 2025 13:02:47 -0400 Subject: [PATCH 0058/1018] CI: clean up cibuildwheel config a bit [wheel build] This simplifies the configuration a bit: * Combine pyodide blocks * Use tables/lists for config-settings and skip * Remove a few repeated lines * Use a list for select Signed-off-by: Henry Schreiner --- pyproject.toml | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 1e08544ced75..b0e58705ebd1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -142,14 +142,17 @@ tracker = "https://github.com/numpy/numpy/issues" # 
build wheels for in CI are controlled in `.github/workflows/wheels.yml` and # `tools/ci/cirrus_wheels.yml`. build-frontend = "build" -skip = "*_i686 *_ppc64le *_s390x *_universal2" +skip = ["*_i686", "*_ppc64le", "*_s390x", "*_universal2"] before-build = "bash {project}/tools/wheels/cibw_before_build.sh {project}" -# The build will use openblas64 everywhere, except on arm64 macOS >=14.0 (uses Accelerate) -config-settings = "setup-args=-Duse-ilp64=true setup-args=-Dallow-noblas=false build-dir=build" before-test = "pip install -r {project}/requirements/test_requirements.txt" test-command = "bash {project}/tools/wheels/cibw_test_command.sh {project}" enable = ["cpython-freethreading", "pypy", "cpython-prerelease"] +# The build will use openblas64 everywhere, except on arm64 macOS >=14.0 (uses Accelerate) +[tool.cibuildwheel.config-settings] +setup-args = ["-Duse-ilp64=true", "-Dallow-noblas=false"] +build-dir = "build" + [tool.cibuildwheel.linux] manylinux-x86_64-image = "manylinux_2_28" manylinux-aarch64-image = "manylinux_2_28" @@ -157,7 +160,14 @@ musllinux-x86_64-image = "musllinux_1_2" musllinux-aarch64-image = "musllinux_1_2" [tool.cibuildwheel.pyodide] -config-settings = "build-dir=build setup-args=--cross-file=$PWD/tools/ci/emscripten/emscripten.meson.cross setup-args=-Dblas=none setup-args=-Dlapack=none" +before-test = "pip install -r {project}/requirements/emscripten_test_requirements.txt" +# Pyodide ensures that the wheels are already repaired by auditwheel-emscripten +repair-wheel-command = "" +test-command = "python -m pytest --pyargs numpy -m 'not slow'" + +[tool.cibuildwheel.pyodide.config-settings] +build-dir = "build" +setup-args = ["--cross-file=$PWD/tools/ci/emscripten/emscripten.meson.cross", "-Dblas=none", "-Dlapack=none"] [tool.cibuildwheel.linux.environment] # RUNNER_OS is a GitHub Actions specific env var; define it here so it works on Cirrus CI too @@ -178,22 +188,16 @@ repair-wheel-command = [ ] [tool.cibuildwheel.windows] -config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=false build-dir=build" +config-settings = {setup-args = ["--vsenv", "-Dallow-noblas=false"], build-dir="build"} repair-wheel-command = "bash -el ./tools/wheels/repair_windows.sh {wheel} {dest_dir}" # This does not work, use CIBW_ENVIRONMENT_WINDOWS environment = {PKG_CONFIG_PATH="./.openblas"} [[tool.cibuildwheel.overrides]] -select = "*-win32" -config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=true build-dir=build" +select = ["*-win32"] +config-settings = {setup-args = ["--vsenv", "-Dallow-noblas=true"], build-dir="build"} repair-wheel-command = "" -[[tool.cibuildwheel.overrides]] -select = "*pyodide*" -before-test = "pip install -r {project}/requirements/emscripten_test_requirements.txt" -# Pyodide ensures that the wheels are already repaired by auditwheel-emscripten -repair-wheel-command = "" -test-command = "python -m pytest --pyargs numpy -m 'not slow'" [tool.meson-python] meson = 'vendored-meson/meson/meson.py' From 3318fbd3a137a44d8d54ab304a16fd359d80887b Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 29 May 2025 09:31:10 -0600 Subject: [PATCH 0059/1018] MNT: Avoid use of deprecated _PyDict_GetItemStringWithError in f2py --- numpy/f2py/src/fortranobject.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c index 4e2aa370b643..5c2b4bdf0931 100644 --- a/numpy/f2py/src/fortranobject.c +++ b/numpy/f2py/src/fortranobject.c @@ -363,6 +363,8 @@ fortran_getattr(PyFortranObject *fp, 
char *name) { int i, j, k, flag; if (fp->dict != NULL) { + // python 3.13 added PyDict_GetItemRef +#if PY_VERSION_HEX < 0x030D0000 PyObject *v = _PyDict_GetItemStringWithError(fp->dict, name); if (v == NULL && PyErr_Occurred()) { return NULL; @@ -371,6 +373,17 @@ fortran_getattr(PyFortranObject *fp, char *name) Py_INCREF(v); return v; } +#else + PyObject *v; + int result = PyDict_GetItemStringRef(fp->dict, name, &v); + if (result == -1) { + return NULL; + } + else if (result == 1) { + return v; + } +#endif + } for (i = 0, j = 1; i < fp->len && (j = strcmp(name, fp->defs[i].name)); i++) From 336d661e958065f8c95a559a24e143b5fc747c1d Mon Sep 17 00:00:00 2001 From: crusaderky Date: Thu, 29 May 2025 20:10:18 +0100 Subject: [PATCH 0060/1018] Ignore all build-* directories --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index df7f084e3645..c4de68c1a9a7 100644 --- a/.gitignore +++ b/.gitignore @@ -64,7 +64,7 @@ GTAGS ################ # meson build/installation directories build -build-install +build-* # meson python output .mesonpy-native-file.ini # sphinx build directory From d27f6618923f6b65841d7eaebcd21062ecb5a8aa Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 30 May 2025 18:18:11 +0200 Subject: [PATCH 0061/1018] TYP: fix `NDArray[integer]` inplace operator mypy issue --- numpy/__init__.pyi | 94 +++------------------ numpy/ma/core.pyi | 57 +++---------- numpy/typing/tests/data/fail/arithmetic.pyi | 2 - 3 files changed, 23 insertions(+), 130 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 0e8b4625e7d4..df72ce3d877a 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3367,13 +3367,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __iadd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __iadd__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iadd__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __iadd__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload @@ -3393,13 +3387,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__isub__` @overload - def __isub__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __isub__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __isub__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __isub__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload @@ -3414,15 +3402,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __imul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __imul__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
- @overload - def __imul__( - self: ndarray[Any, dtype[signedinteger | character] | dtypes.StringDType], - other: _ArrayLikeInt_co, - /, + self: ndarray[Any, dtype[integer | character] | dtypes.StringDType], other: _ArrayLikeInt_co, / ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __imul__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @@ -3433,13 +3413,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__ipow__` @overload - def __ipow__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ipow__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ipow__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __ipow__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload @@ -3457,13 +3431,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # keep in sync with `__imod__` and `MaskedArray.__ifloordiv__` @overload - def __ifloordiv__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ifloordiv__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ifloordiv__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __ifloordiv__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload @@ -3471,13 +3439,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # keep in sync with `__ifloordiv__` @overload - def __imod__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imod__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imod__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __imod__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload @@ -3491,25 +3453,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # keep in sync with `__irshift__` @overload - def __ilshift__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ilshift__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ilshift__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __ilshift__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... # keep in sync with `__ilshift__` @overload - def __irshift__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __irshift__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __irshift__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __irshift__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
@@ -3517,13 +3467,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __iand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __iand__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iand__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iand__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __iand__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @@ -3531,13 +3475,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __ixor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __ixor__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ixor__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ixor__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __ixor__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @@ -3545,13 +3483,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __ior__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __ior__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ior__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ior__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __ior__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @@ -3559,9 +3491,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __imatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __imatmul__(self: NDArray[unsignedinteger], other: _ArrayLikeUInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imatmul__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imatmul__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __imatmul__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 388619e1a654..da4ad3b333db 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -668,20 +668,13 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __pow__(self, other, mod: None = None, /): ... def __rpow__(self, other, mod: None = None, /): ... - # Keep in sync with `ndarray.__iadd__`, except that `_MaskedArray[unsignedinteger]` does not accept - # _IntLike_co for `other`. + # Keep in sync with `ndarray.__iadd__` @overload def __iadd__( self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, / ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... @overload - def __iadd__( - self: _MaskedArray[unsignedinteger], other: _ArrayLikeUInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... 
- @overload - def __iadd__( - self: _MaskedArray[signedinteger], other: _ArrayLikeInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __iadd__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... @overload def __iadd__( self: _MaskedArray[floating], other: _ArrayLikeFloat_co, / @@ -707,16 +700,9 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): self: _MaskedArray[object_], other: Any, / ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - # Keep in sync with `ndarray.__isub__`, except that `_MaskedArray[unsignedinteger]` does not accept - # _IntLike_co for `other`. - @overload - def __isub__( - self: _MaskedArray[unsignedinteger], other: _ArrayLikeUInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + # Keep in sync with `ndarray.__isub__` @overload - def __isub__( - self: _MaskedArray[signedinteger], other: _ArrayLikeInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __isub__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... @overload def __isub__( self: _MaskedArray[floating], other: _ArrayLikeFloat_co, / @@ -734,20 +720,14 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): self: _MaskedArray[object_], other: Any, / ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - # Keep in sync with `ndarray.__imul__`, except that `_MaskedArray[unsignedinteger]` does not accept - # _IntLike_co for `other`. + # Keep in sync with `ndarray.__imul__` @overload def __imul__( self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, / ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... @overload def __imul__( - self: _MaskedArray[unsignedinteger], other: _ArrayLikeUInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imul__( - self: MaskedArray[Any, dtype[signedinteger] | dtype[character] | dtypes.StringDType], - other: _ArrayLikeInt_co, / + self: MaskedArray[Any, dtype[integer] | dtype[character] | dtypes.StringDType], other: _ArrayLikeInt_co, / ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... @overload def __imul__( @@ -762,16 +742,9 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): self: _MaskedArray[object_], other: Any, / ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - # Keep in sync with `ndarray.__ifloordiv__`, except that `_MaskedArray[unsignedinteger]` does not accept - # _IntLike_co for `other`. - @overload - def __ifloordiv__( - self: _MaskedArray[unsignedinteger], other: _ArrayLikeUInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + # Keep in sync with `ndarray.__ifloordiv__` @overload - def __ifloordiv__( - self: _MaskedArray[signedinteger], other: _ArrayLikeInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __ifloordiv__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... @overload def __ifloordiv__( self: _MaskedArray[floating | timedelta64], other: _ArrayLikeFloat_co, / @@ -781,8 +754,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): self: _MaskedArray[object_], other: Any, / ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - # Keep in sync with `ndarray.__itruediv__`, except that `_MaskedArray[unsignedinteger]` does not accept - # _IntLike_co for `other`. + # Keep in sync with `ndarray.__itruediv__` @overload def __itruediv__( self: _MaskedArray[floating | timedelta64], other: _ArrayLikeFloat_co, / @@ -798,16 +770,9 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): self: _MaskedArray[object_], other: Any, / ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... 
- # Keep in sync with `ndarray.__ipow__`, except that `_MaskedArray[unsignedinteger]` does not accept - # _IntLike_co for `other`. + # Keep in sync with `ndarray.__ipow__` @overload - def __ipow__( - self: _MaskedArray[unsignedinteger], other: _ArrayLikeUInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ipow__( - self: _MaskedArray[signedinteger], other: _ArrayLikeInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __ipow__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... @overload def __ipow__( self: _MaskedArray[floating], other: _ArrayLikeFloat_co, / diff --git a/numpy/typing/tests/data/fail/arithmetic.pyi b/numpy/typing/tests/data/fail/arithmetic.pyi index e94861a3eba7..e696083b8614 100644 --- a/numpy/typing/tests/data/fail/arithmetic.pyi +++ b/numpy/typing/tests/data/fail/arithmetic.pyi @@ -85,7 +85,6 @@ AR_b *= AR_LIKE_f # type: ignore[arg-type] AR_b *= AR_LIKE_c # type: ignore[arg-type] AR_b *= AR_LIKE_m # type: ignore[arg-type] -AR_u *= AR_LIKE_i # type: ignore[arg-type] AR_u *= AR_LIKE_f # type: ignore[arg-type] AR_u *= AR_LIKE_c # type: ignore[arg-type] AR_u *= AR_LIKE_m # type: ignore[arg-type] @@ -105,7 +104,6 @@ AR_b **= AR_LIKE_i # type: ignore[misc] AR_b **= AR_LIKE_f # type: ignore[misc] AR_b **= AR_LIKE_c # type: ignore[misc] -AR_u **= AR_LIKE_i # type: ignore[arg-type] AR_u **= AR_LIKE_f # type: ignore[arg-type] AR_u **= AR_LIKE_c # type: ignore[arg-type] From 998f561a1af90083099e4410412a842c49b4f993 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 30 May 2025 18:32:14 +0200 Subject: [PATCH 0062/1018] TYP: regression tests for `NDArray[integer]` inplace ops --- numpy/typing/tests/data/pass/arithmetic.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/numpy/typing/tests/data/pass/arithmetic.py b/numpy/typing/tests/data/pass/arithmetic.py index b50d28e5fca5..3b2901cf2b51 100644 --- a/numpy/typing/tests/data/pass/arithmetic.py +++ b/numpy/typing/tests/data/pass/arithmetic.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Any +from typing import Any, cast import numpy as np import numpy.typing as npt import pytest @@ -61,6 +61,7 @@ def __rpow__(self, value: Any) -> Object: AR_b: npt.NDArray[np.bool] = np.array([True]) AR_u: npt.NDArray[np.uint32] = np.array([1], dtype=np.uint32) AR_i: npt.NDArray[np.int64] = np.array([1]) +AR_integer: npt.NDArray[np.integer] = cast(npt.NDArray[np.integer], AR_i) AR_f: npt.NDArray[np.float64] = np.array([1.0]) AR_c: npt.NDArray[np.complex128] = np.array([1j]) AR_m: npt.NDArray[np.timedelta64] = np.array([np.timedelta64(1, "D")]) @@ -282,6 +283,10 @@ def __rpow__(self, value: Any) -> Object: AR_i *= AR_LIKE_u AR_i *= AR_LIKE_i +AR_integer *= AR_LIKE_b +AR_integer *= AR_LIKE_u +AR_integer *= AR_LIKE_i + AR_f *= AR_LIKE_b AR_f *= AR_LIKE_u AR_f *= AR_LIKE_i @@ -314,6 +319,10 @@ def __rpow__(self, value: Any) -> Object: AR_i **= AR_LIKE_u AR_i **= AR_LIKE_i +AR_integer **= AR_LIKE_b +AR_integer **= AR_LIKE_u +AR_integer **= AR_LIKE_i + AR_f **= AR_LIKE_b AR_f **= AR_LIKE_u AR_f **= AR_LIKE_i From 8c35aaad673e64c8806ec13141124667eb450469 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 30 May 2025 17:14:40 +0000 Subject: [PATCH 0063/1018] MAINT: Bump ossf/scorecard-action from 2.4.1 to 2.4.2 Bumps [ossf/scorecard-action](https://github.com/ossf/scorecard-action) from 2.4.1 to 2.4.2. 
- [Release notes](https://github.com/ossf/scorecard-action/releases) - [Changelog](https://github.com/ossf/scorecard-action/blob/main/RELEASE.md) - [Commits](https://github.com/ossf/scorecard-action/compare/f49aabe0b5af0936a0987cfb85d86b75731b0186...05b42c624433fc40578a4040d5cf5e36ddca8cde) --- updated-dependencies: - dependency-name: ossf/scorecard-action dependency-version: 2.4.2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/scorecards.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 11a5be5f488a..f4e06677f804 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -30,7 +30,7 @@ jobs: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1 + uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2 with: results_file: results.sarif results_format: sarif From 32cbf09a31fc859f98b8ed7178f5ea8d95310b39 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 30 May 2025 22:13:07 +0200 Subject: [PATCH 0064/1018] MAINT: bump `mypy` to `1.16.0` --- environment.yml | 2 +- requirements/test_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index 91585a8dcb13..d2964bf78368 100644 --- a/environment.yml +++ b/environment.yml @@ -25,7 +25,7 @@ dependencies: - hypothesis # For type annotations - typing_extensions>=4.5.0 - - mypy=1.15.0 + - mypy=1.16.0 - orjson # makes mypy faster # For building docs - sphinx>=4.5.0 diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index a2a68f044a50..4fb1d47bf50d 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -13,7 +13,7 @@ pytest-timeout # For testing types. 
Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # NOTE: Keep mypy in sync with environment.yml -mypy==1.15.0; platform_python_implementation != "PyPy" +mypy==1.16.0; platform_python_implementation != "PyPy" typing_extensions>=4.5.0 # for optional f2py encoding detection charset-normalizer From 87e769a196336ca1427eb2215925bf987154b316 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 30 May 2025 23:22:31 +0200 Subject: [PATCH 0065/1018] TYP: run mypy in strict mode --- numpy/typing/tests/data/mypy.ini | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/numpy/typing/tests/data/mypy.ini b/numpy/typing/tests/data/mypy.ini index bca203260efa..4aa465ae087b 100644 --- a/numpy/typing/tests/data/mypy.ini +++ b/numpy/typing/tests/data/mypy.ini @@ -1,9 +1,8 @@ [mypy] +strict = True enable_error_code = deprecated, ignore-without-code, truthy-bool -strict_bytes = True -warn_unused_ignores = True -implicit_reexport = False disallow_any_unimported = True -disallow_any_generics = True +allow_redefinition_new = True +local_partial_types = True show_absolute_path = True pretty = True From 51f4ac8f6188a4cd7dbb81ee4fff0cf7aef34e3e Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 30 May 2025 23:23:22 +0200 Subject: [PATCH 0066/1018] TYP: disable mypy's `no-untyped-call` errors in the `MaskedArray` type-tests --- numpy/typing/tests/data/pass/ma.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py index e7915a583210..b9be2b2e4384 100644 --- a/numpy/typing/tests/data/pass/ma.py +++ b/numpy/typing/tests/data/pass/ma.py @@ -7,6 +7,8 @@ _ScalarT = TypeVar("_ScalarT", bound=np.generic) MaskedArray: TypeAlias = np.ma.MaskedArray[_Shape, np.dtype[_ScalarT]] +# mypy: disable-error-code=no-untyped-call + MAR_b: MaskedArray[np.bool] = np.ma.MaskedArray([True]) MAR_u: MaskedArray[np.uint32] = np.ma.MaskedArray([1], dtype=np.uint32) MAR_i: MaskedArray[np.int64] = np.ma.MaskedArray([1]) From 57296fbbcba96beaf923e5b7c45bfef547d1484a Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 30 May 2025 23:24:07 +0200 Subject: [PATCH 0067/1018] TYP: remove problematic runtime code from a `.pyi` test module --- numpy/typing/tests/data/reveal/nbit_base_example.pyi | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/numpy/typing/tests/data/reveal/nbit_base_example.pyi b/numpy/typing/tests/data/reveal/nbit_base_example.pyi index 33229660b6f8..66470b95bf15 100644 --- a/numpy/typing/tests/data/reveal/nbit_base_example.pyi +++ b/numpy/typing/tests/data/reveal/nbit_base_example.pyi @@ -7,8 +7,7 @@ from numpy._typing import _32Bit, _64Bit T1 = TypeVar("T1", bound=npt.NBitBase) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] T2 = TypeVar("T2", bound=npt.NBitBase) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] -def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: - return a + b +def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: ... 
i8: np.int64 i4: np.int32 From f925c0cefc05e4619bd7554eb6c301e7e5934685 Mon Sep 17 00:00:00 2001 From: Guido Imperiale Date: Sat, 31 May 2025 09:26:07 +0100 Subject: [PATCH 0068/1018] BUG: f2py: thread-safe forcomb (#29091) --- numpy/f2py/cfuncs.py | 39 ++++++++++++++++++++++----------------- numpy/f2py/rules.py | 5 +++-- 2 files changed, 25 insertions(+), 19 deletions(-) diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index 6c48c1ef0175..b2b1cad3d867 100644 --- a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -598,32 +598,37 @@ def errmess(s: str) -> None: return ii; }""" cfuncs['forcomb'] = """ -static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache; -static int initforcomb(npy_intp *dims,int nd,int tr) { +struct ForcombCache { int nd;npy_intp *d;int *i,*i_tr,tr; }; +static int initforcomb(struct ForcombCache *cache, npy_intp *dims,int nd,int tr) { int k; if (dims==NULL) return 0; if (nd<0) return 0; - forcombcache.nd = nd; - forcombcache.d = dims; - forcombcache.tr = tr; - if ((forcombcache.i = (int *)malloc(sizeof(int)*nd))==NULL) return 0; - if ((forcombcache.i_tr = (int *)malloc(sizeof(int)*nd))==NULL) return 0; + cache->nd = nd; + cache->d = dims; + cache->tr = tr; + + cache->i = (int *)malloc(sizeof(int)*nd); + if (cache->i==NULL) return 0; + cache->i_tr = (int *)malloc(sizeof(int)*nd); + if (cache->i_tr==NULL) {free(cache->i); return 0;}; + for (k=1;ki[k] = cache->i_tr[nd-k-1] = 0; } - forcombcache.i[0] = forcombcache.i_tr[nd-1] = -1; + cache->i[0] = cache->i_tr[nd-1] = -1; return 1; } -static int *nextforcomb(void) { +static int *nextforcomb(struct ForcombCache *cache) { + if (cache==NULL) return NULL; int j,*i,*i_tr,k; - int nd=forcombcache.nd; - if ((i=forcombcache.i) == NULL) return NULL; - if ((i_tr=forcombcache.i_tr) == NULL) return NULL; - if (forcombcache.d == NULL) return NULL; + int nd=cache->nd; + if ((i=cache->i) == NULL) return NULL; + if ((i_tr=cache->i_tr) == NULL) return NULL; + if (cache->d == NULL) return NULL; i[0]++; - if (i[0]==forcombcache.d[0]) { + if (i[0]==cache->d[0]) { j=1; - while ((jd[j]-1)) j++; if (j==nd) { free(i); free(i_tr); @@ -634,7 +639,7 @@ def errmess(s: str) -> None: i_tr[nd-j-1]++; } else i_tr[nd-1]++; - if (forcombcache.tr) return i_tr; + if (cache->tr) return i_tr; return i; }""" needs['try_pyarr_from_string'] = ['STRINGCOPYN', 'PRINTPYOBJERR', 'string'] diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index c10d2afdd097..667ef287f92b 100644 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -1184,9 +1184,10 @@ """\ int *_i,capi_i=0; CFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\"); - if (initforcomb(PyArray_DIMS(capi_#varname#_as_array), + struct ForcombCache cache; + if (initforcomb(&cache, PyArray_DIMS(capi_#varname#_as_array), PyArray_NDIM(capi_#varname#_as_array),1)) { - while ((_i = nextforcomb())) + while ((_i = nextforcomb(&cache))) #varname#[capi_i++] = #init#; /* fortran way */ } else { PyObject *exc, *val, *tb; From ecb2f40970fb28b997a3e781dd2733500ebf9a6f Mon Sep 17 00:00:00 2001 From: DWesl <22566757+DWesl@users.noreply.github.com> Date: Sat, 31 May 2025 04:32:42 -0400 Subject: [PATCH 0069/1018] PERF: Use dict instead of list to make NpzFile member existence checks constant time (#29098) Use dict instead of list to convert the passed key to the name used in the archive. 
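The idea: precompute a lookup table that maps both the bare member name and the
on-disk `.npy` name to the same archive entry, so membership tests and the key
translation in `__getitem__` become O(1) dict operations instead of O(n) list
scans. A minimal sketch of that mapping (illustrative only; `names` stands in for
the list returned by `ZipFile.namelist()`, and the helper name is not part of the
patch):

    def build_member_map(names):
        # strip ".npy" from array members; other entries keep their full name
        stripped = [n.removesuffix(".npy") for n in names]
        mapping = dict(zip(stripped, names))   # "arr_0"     -> "arr_0.npy"
        mapping.update(zip(names, names))      # "arr_0.npy" -> "arr_0.npy"
        return stripped, mapping

    files, _files = build_member_map(["arr_0.npy", "extra.txt"])
    assert "arr_0" in _files and "arr_0.npy" in _files    # constant-time checks
    assert _files["arr_0"] == _files["arr_0.npy"] == "arr_0.npy"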
--- numpy/lib/_npyio_impl.py | 64 ++++++++++++++++++---------------------- 1 file changed, 29 insertions(+), 35 deletions(-) diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index e588b8454b44..f284eeb74834 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -195,16 +195,13 @@ def __init__(self, fid, own_fid=False, allow_pickle=False, # Import is postponed to here since zipfile depends on gzip, an # optional component of the so-called standard library. _zip = zipfile_factory(fid) - self._files = _zip.namelist() - self.files = [] + _files = _zip.namelist() + self.files = [name.removesuffix(".npy") for name in _files] + self._files = dict(zip(self.files, _files)) + self._files.update(zip(_files, _files)) self.allow_pickle = allow_pickle self.max_header_size = max_header_size self.pickle_kwargs = pickle_kwargs - for x in self._files: - if x.endswith('.npy'): - self.files.append(x[:-4]) - else: - self.files.append(x) self.zip = _zip self.f = BagObj(self) if own_fid: @@ -240,37 +237,34 @@ def __len__(self): return len(self.files) def __getitem__(self, key): - # FIXME: This seems like it will copy strings around - # more than is strictly necessary. The zipfile - # will read the string and then - # the format.read_array will copy the string - # to another place in memory. - # It would be better if the zipfile could read - # (or at least uncompress) the data - # directly into the array memory. - member = False - if key in self._files: - member = True - elif key in self.files: - member = True - key += '.npy' - if member: - bytes = self.zip.open(key) - magic = bytes.read(len(format.MAGIC_PREFIX)) - bytes.close() - if magic == format.MAGIC_PREFIX: - bytes = self.zip.open(key) - return format.read_array(bytes, - allow_pickle=self.allow_pickle, - pickle_kwargs=self.pickle_kwargs, - max_header_size=self.max_header_size) - else: - return self.zip.read(key) + try: + key = self._files[key] + except KeyError: + raise KeyError(f"{key} is not a file in the archive") from None else: - raise KeyError(f"{key} is not a file in the archive") + with self.zip.open(key) as bytes: + magic = bytes.read(len(format.MAGIC_PREFIX)) + bytes.seek(0) + if magic == format.MAGIC_PREFIX: + # FIXME: This seems like it will copy strings around + # more than is strictly necessary. The zipfile + # will read the string and then + # the format.read_array will copy the string + # to another place in memory. + # It would be better if the zipfile could read + # (or at least uncompress) the data + # directly into the array memory. 
+ return format.read_array( + bytes, + allow_pickle=self.allow_pickle, + pickle_kwargs=self.pickle_kwargs, + max_header_size=self.max_header_size + ) + else: + return bytes.read(key) def __contains__(self, key): - return (key in self._files or key in self.files) + return (key in self._files) def __repr__(self): # Get filename or default to `object` From 358a13c861d728e11f1efc18e6c04b915c682ccf Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Sat, 31 May 2025 01:48:27 -0700 Subject: [PATCH 0070/1018] BENCH: Increase array sizes for ufunc and sort benchmarks (#29084) --- benchmarks/benchmarks/bench_function_base.py | 2 +- benchmarks/benchmarks/bench_ufunc_strides.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/benchmarks/benchmarks/bench_function_base.py b/benchmarks/benchmarks/bench_function_base.py index 9b770aeb60bf..f72d50eb74ce 100644 --- a/benchmarks/benchmarks/bench_function_base.py +++ b/benchmarks/benchmarks/bench_function_base.py @@ -236,7 +236,7 @@ class Sort(Benchmark): param_names = ['kind', 'dtype', 'array_type'] # The size of the benchmarked arrays. - ARRAY_SIZE = 10000 + ARRAY_SIZE = 1000000 def setup(self, kind, dtype, array_type): rnd = np.random.RandomState(507582308) diff --git a/benchmarks/benchmarks/bench_ufunc_strides.py b/benchmarks/benchmarks/bench_ufunc_strides.py index 95df16e2cb5e..0c80b1877b3a 100644 --- a/benchmarks/benchmarks/bench_ufunc_strides.py +++ b/benchmarks/benchmarks/bench_ufunc_strides.py @@ -10,7 +10,7 @@ class _AbstractBinary(Benchmark): params = [] param_names = ['ufunc', 'stride_in0', 'stride_in1', 'stride_out', 'dtype'] timeout = 10 - arrlen = 10000 + arrlen = 1000000 data_finite = True data_denormal = False data_zeros = False @@ -63,7 +63,7 @@ class _AbstractUnary(Benchmark): params = [] param_names = ['ufunc', 'stride_in', 'stride_out', 'dtype'] timeout = 10 - arrlen = 10000 + arrlen = 1000000 data_finite = True data_denormal = False data_zeros = False From 79ee0b208345696f77697c0fd6671e2763dfe51b Mon Sep 17 00:00:00 2001 From: Yuki Kobayashi Date: Sun, 1 Jun 2025 01:08:58 +0000 Subject: [PATCH 0071/1018] Fix some incorrect reST markups --- doc/changelog/1.21.5-changelog.rst | 2 +- doc/neps/nep-0021-advanced-indexing.rst | 6 +++--- doc/neps/nep-0030-duck-array-protocol.rst | 4 ++-- doc/source/dev/internals.code-explanations.rst | 2 +- doc/source/reference/c-api/types-and-structures.rst | 2 +- doc/source/reference/maskedarray.generic.rst | 4 ++-- doc/source/reference/simd/build-options.rst | 2 +- doc/source/release/1.11.0-notes.rst | 2 +- doc/source/release/1.13.0-notes.rst | 2 +- doc/source/release/1.14.0-notes.rst | 4 ++-- doc/source/release/1.15.0-notes.rst | 6 +++--- doc/source/release/1.18.0-notes.rst | 4 ++-- doc/source/release/1.21.5-notes.rst | 2 +- doc/source/user/c-info.ufunc-tutorial.rst | 12 ++++++------ doc/source/user/how-to-io.rst | 2 +- numpy/lib/_polynomial_impl.py | 4 ++-- numpy/lib/_version.py | 3 +-- 17 files changed, 31 insertions(+), 32 deletions(-) diff --git a/doc/changelog/1.21.5-changelog.rst b/doc/changelog/1.21.5-changelog.rst index acd3599d48ef..04ff638d42a3 100644 --- a/doc/changelog/1.21.5-changelog.rst +++ b/doc/changelog/1.21.5-changelog.rst @@ -22,7 +22,7 @@ A total of 11 pull requests were merged for this release. * `#20462 `__: BUG: Fix float16 einsum fastpaths using wrong tempvar * `#20463 `__: BUG, DIST: Print os error message when the executable not exist * `#20464 `__: BLD: Verify the ability to compile C++ sources before initiating... 
-* `#20465 `__: BUG: Force ``npymath` ` to respect ``npy_longdouble`` +* `#20465 `__: BUG: Force ``npymath`` to respect ``npy_longdouble`` * `#20466 `__: BUG: Fix failure to create aligned, empty structured dtype * `#20467 `__: ENH: provide a convenience function to replace npy_load_module * `#20495 `__: MAINT: update wheel to version that supports python3.10 diff --git a/doc/neps/nep-0021-advanced-indexing.rst b/doc/neps/nep-0021-advanced-indexing.rst index badd41875af2..11ef238d6179 100644 --- a/doc/neps/nep-0021-advanced-indexing.rst +++ b/doc/neps/nep-0021-advanced-indexing.rst @@ -123,7 +123,7 @@ with shape ``(1,)``, not a 2D sub-matrix with shape ``(1, 1)``. Mixed indexing seems so tricky that it is tempting to say that it never should be used. However, it is not easy to avoid, because NumPy implicitly adds full slices if there are fewer indices than the full dimensionality of the indexed -array. This means that indexing a 2D array like `x[[0, 1]]`` is equivalent to +array. This means that indexing a 2D array like ``x[[0, 1]]`` is equivalent to ``x[[0, 1], :]``. These cases are not surprising, but they constrain the behavior of mixed indexing. @@ -236,7 +236,7 @@ be deduced: For the beginning, this probably means cases where ``arr[ind]`` and ``arr.oindex[ind]`` return different results give deprecation warnings. This includes every use of vectorized indexing with multiple integer arrays. - Due to the transposing behaviour, this means that``arr[0, :, index_arr]`` + Due to the transposing behaviour, this means that ``arr[0, :, index_arr]`` will be deprecated, but ``arr[:, 0, index_arr]`` will not for the time being. 7. To ensure that existing subclasses of `ndarray` that override indexing @@ -285,7 +285,7 @@ Open Questions Copying always "fixes" this possible inconsistency. * The final state to morph plain indexing in is not fixed in this PEP. - It is for example possible that `arr[index]`` will be equivalent to + It is for example possible that ``arr[index]`` will be equivalent to ``arr.oindex`` at some point in the future. Since such a change will take years, it seems unnecessary to make specific decisions at this time. diff --git a/doc/neps/nep-0030-duck-array-protocol.rst b/doc/neps/nep-0030-duck-array-protocol.rst index 7fb8c9734900..4a3d268697a2 100644 --- a/doc/neps/nep-0030-duck-array-protocol.rst +++ b/doc/neps/nep-0030-duck-array-protocol.rst @@ -102,14 +102,14 @@ a complete implementation would look like the following: The implementation above exemplifies the simplest case, but the overall idea is that libraries will implement a ``__duckarray__`` method that returns the original object, and an ``__array__`` method that either creates and returns an -appropriate NumPy array, or raises a``TypeError`` to prevent unintentional use +appropriate NumPy array, or raises a ``TypeError`` to prevent unintentional use as an object in a NumPy array (if ``np.asarray`` is called on an arbitrary object that does not implement ``__array__``, it will create a NumPy array scalar). In case of existing libraries that don't already implement ``__array__`` but would like to use duck array typing, it is advised that they introduce -both ``__array__`` and``__duckarray__`` methods. +both ``__array__`` and ``__duckarray__`` methods. 
Usage ----- diff --git a/doc/source/dev/internals.code-explanations.rst b/doc/source/dev/internals.code-explanations.rst index b1ee9b114aa8..1bb8f60528c1 100644 --- a/doc/source/dev/internals.code-explanations.rst +++ b/doc/source/dev/internals.code-explanations.rst @@ -401,7 +401,7 @@ Iterators for the output arguments are then processed. Finally, the decision is made about how to execute the looping mechanism to ensure that all elements of the input arrays are combined to produce the output arrays of the correct type. The options for loop -execution are one-loop (for :term`contiguous`, aligned, and correct data +execution are one-loop (for :term:`contiguous`, aligned, and correct data type), strided-loop (for non-contiguous but still aligned and correct data type), and a buffered loop (for misaligned or incorrect data type situations). Depending on which execution method is called for, diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst index 1790c4f4d04d..3f16b5f4dbc4 100644 --- a/doc/source/reference/c-api/types-and-structures.rst +++ b/doc/source/reference/c-api/types-and-structures.rst @@ -1618,7 +1618,7 @@ NumPy C-API and C complex When you use the NumPy C-API, you will have access to complex real declarations ``npy_cdouble`` and ``npy_cfloat``, which are declared in terms of the C standard types from ``complex.h``. Unfortunately, ``complex.h`` contains -`#define I ...`` (where the actual definition depends on the compiler), which +``#define I ...`` (where the actual definition depends on the compiler), which means that any downstream user that does ``#include `` could get ``I`` defined, and using something like declaring ``double I;`` in their code will result in an obscure compiler error like diff --git a/doc/source/reference/maskedarray.generic.rst b/doc/source/reference/maskedarray.generic.rst index 9c44ebcbc589..3fbe25d5b03c 100644 --- a/doc/source/reference/maskedarray.generic.rst +++ b/doc/source/reference/maskedarray.generic.rst @@ -66,7 +66,7 @@ attributes and methods are described in more details in the .. try_examples:: -The :mod:`numpy.ma` module can be used as an addition to :mod:`numpy`: + The :mod:`numpy.ma` module can be used as an addition to :mod:`numpy`: >>> import numpy as np >>> import numpy.ma as ma @@ -521,7 +521,7 @@ Numerical operations -------------------- Numerical operations can be easily performed without worrying about missing -values, dividing by zero, square roots of negative numbers, etc.:: +values, dividing by zero, square roots of negative numbers, etc.: .. try_examples:: diff --git a/doc/source/reference/simd/build-options.rst b/doc/source/reference/simd/build-options.rst index 524a1532ca57..2b7039136e75 100644 --- a/doc/source/reference/simd/build-options.rst +++ b/doc/source/reference/simd/build-options.rst @@ -234,7 +234,7 @@ The need to align certain CPU features that are assured to be supported by successive generations of the same architecture, some cases: - On ppc64le ``VSX(ISA 2.06)`` and ``VSX2(ISA 2.07)`` both imply one another since the - first generation that supports little-endian mode is Power-8`(ISA 2.07)` + first generation that supports little-endian mode is ``Power-8(ISA 2.07)`` - On AArch64 ``NEON NEON_FP16 NEON_VFPV4 ASIMD`` implies each other since they are part of the hardware baseline. 
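The baseline/dispatch split described above can also be inspected at runtime in a
built NumPy. A small sketch, assuming a recent 2.x build (``np.show_runtime`` is
the supported entry point; the dunder attributes below are internal details and
may move):

    import numpy as np

    np.show_runtime()   # prints baseline and dispatched SIMD extensions, among other info

    # Internal counterparts of the same information (implementation detail):
    from numpy._core._multiarray_umath import (
        __cpu_baseline__,   # features every machine running this build must have
        __cpu_dispatch__,   # optional features compiled as runtime-dispatched paths
        __cpu_features__,   # dict: feature name -> detected on this machine
    )
    print(__cpu_baseline__)
    print([f for f in __cpu_dispatch__ if __cpu_features__.get(f)])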
diff --git a/doc/source/release/1.11.0-notes.rst b/doc/source/release/1.11.0-notes.rst index 36cd1d65a266..f6fe84a4b17f 100644 --- a/doc/source/release/1.11.0-notes.rst +++ b/doc/source/release/1.11.0-notes.rst @@ -205,7 +205,7 @@ New Features - ``np.int16``, ``np.uint16``, - ``np.int32``, ``np.uint32``, - ``np.int64``, ``np.uint64``, - - ``np.int_ ``, ``np.intp`` + - ``np.int_``, ``np.intp`` The specification is by precision rather than by C type. Hence, on some platforms ``np.int64`` may be a ``long`` instead of ``long long`` even if diff --git a/doc/source/release/1.13.0-notes.rst b/doc/source/release/1.13.0-notes.rst index 3bfaf1ea5169..400c9553fbd3 100644 --- a/doc/source/release/1.13.0-notes.rst +++ b/doc/source/release/1.13.0-notes.rst @@ -136,7 +136,7 @@ implement ``__*slice__`` on the derived class, as ``__*item__`` will intercept these calls correctly. Any code that did implement these will work exactly as before. Code that -invokes``ndarray.__getslice__`` (e.g. through ``super(...).__getslice__``) will +invokes ``ndarray.__getslice__`` (e.g. through ``super(...).__getslice__``) will now issue a DeprecationWarning - ``.__getitem__(slice(start, end))`` should be used instead. diff --git a/doc/source/release/1.14.0-notes.rst b/doc/source/release/1.14.0-notes.rst index 68040b470caa..055a933291b9 100644 --- a/doc/source/release/1.14.0-notes.rst +++ b/doc/source/release/1.14.0-notes.rst @@ -409,8 +409,8 @@ This new default changes the float output relative to numpy 1.13. The old behavior can be obtained in 1.13 "legacy" printing mode, see compatibility notes above. -``hermitian`` option added to``np.linalg.matrix_rank`` ------------------------------------------------------- +``hermitian`` option added to ``np.linalg.matrix_rank`` +------------------------------------------------------- The new ``hermitian`` option allows choosing between standard SVD based matrix rank calculation and the more efficient eigenvalue based method for symmetric/hermitian matrices. diff --git a/doc/source/release/1.15.0-notes.rst b/doc/source/release/1.15.0-notes.rst index e84386f0fa5d..7aa85d167d29 100644 --- a/doc/source/release/1.15.0-notes.rst +++ b/doc/source/release/1.15.0-notes.rst @@ -213,7 +213,7 @@ C API changes New functions ``npy_get_floatstatus_barrier`` and ``npy_clear_floatstatus_barrier`` ----------------------------------------------------------------------------------- Functions ``npy_get_floatstatus_barrier`` and ``npy_clear_floatstatus_barrier`` -have been added and should be used in place of the ``npy_get_floatstatus``and +have been added and should be used in place of the ``npy_get_floatstatus`` and ``npy_clear_status`` functions. Optimizing compilers like GCC 8.1 and Clang were rearranging the order of operations when the previous functions were used in the ufunc SIMD functions, resulting in the floatstatus flags being checked @@ -326,8 +326,8 @@ passed explicitly, and are not yet computed automatically. No longer does an IQR of 0 result in ``n_bins=1``, rather the number of bins chosen is related to the data size in this situation. 
-The edges returned by `histogram`` and ``histogramdd`` now match the data float type ------------------------------------------------------------------------------------- +The edges returned by ``histogram`` and ``histogramdd`` now match the data float type +------------------------------------------------------------------------------------- When passed ``np.float16``, ``np.float32``, or ``np.longdouble`` data, the returned edges are now of the same dtype. Previously, ``histogram`` would only return the same type if explicit bins were given, and ``histogram`` would diff --git a/doc/source/release/1.18.0-notes.rst b/doc/source/release/1.18.0-notes.rst index 15e0ad77f5d1..a90dbb7a67d9 100644 --- a/doc/source/release/1.18.0-notes.rst +++ b/doc/source/release/1.18.0-notes.rst @@ -202,9 +202,9 @@ exception will require adaptation, and code that mistakenly called Moved modules in ``numpy.random`` --------------------------------- As part of the API cleanup, the submodules in ``numpy.random`` -``bit_generator``, ``philox``, ``pcg64``, ``sfc64, ``common``, ``generator``, +``bit_generator``, ``philox``, ``pcg64``, ``sfc64``, ``common``, ``generator``, and ``bounded_integers`` were moved to ``_bit_generator``, ``_philox``, -``_pcg64``, ``_sfc64, ``_common``, ``_generator``, and ``_bounded_integers`` +``_pcg64``, ``_sfc64``, ``_common``, ``_generator``, and ``_bounded_integers`` respectively to indicate that they are not part of the public interface. (`gh-14608 `__) diff --git a/doc/source/release/1.21.5-notes.rst b/doc/source/release/1.21.5-notes.rst index c69d26771268..b3e810b51c06 100644 --- a/doc/source/release/1.21.5-notes.rst +++ b/doc/source/release/1.21.5-notes.rst @@ -33,7 +33,7 @@ A total of 11 pull requests were merged for this release. * `#20462 `__: BUG: Fix float16 einsum fastpaths using wrong tempvar * `#20463 `__: BUG, DIST: Print os error message when the executable not exist * `#20464 `__: BLD: Verify the ability to compile C++ sources before initiating... -* `#20465 `__: BUG: Force ``npymath` ` to respect ``npy_longdouble`` +* `#20465 `__: BUG: Force ``npymath`` to respect ``npy_longdouble`` * `#20466 `__: BUG: Fix failure to create aligned, empty structured dtype * `#20467 `__: ENH: provide a convenience function to replace npy_load_module * `#20495 `__: MAINT: update wheel to version that supports python3.10 diff --git a/doc/source/user/c-info.ufunc-tutorial.rst b/doc/source/user/c-info.ufunc-tutorial.rst index 6b1aca65ed00..76e8af63462f 100644 --- a/doc/source/user/c-info.ufunc-tutorial.rst +++ b/doc/source/user/c-info.ufunc-tutorial.rst @@ -157,7 +157,7 @@ the module. return m; } -To use the ``setup.py file``, place ``setup.py`` and ``spammodule.c`` +To use the ``setup.py`` file, place ``setup.py`` and ``spammodule.c`` in the same folder. Then ``python setup.py build`` will build the module to import, or ``python setup.py install`` will install the module to your site-packages directory. @@ -240,8 +240,8 @@ and then the ``setup.py`` file used to create the module containing the ufunc. The place in the code corresponding to the actual computations for -the ufunc are marked with ``/\* BEGIN main ufunc computation \*/`` and -``/\* END main ufunc computation \*/``. The code in between those lines is +the ufunc are marked with ``/* BEGIN main ufunc computation */`` and +``/* END main ufunc computation */``. The code in between those lines is the primary thing that must be changed to create your own ufunc. .. 
code-block:: c @@ -339,7 +339,7 @@ the primary thing that must be changed to create your own ufunc. return m; } -This is a ``setup.py file`` for the above code. As before, the module +This is a ``setup.py`` file for the above code. As before, the module can be build via calling ``python setup.py build`` at the command prompt, or installed to site-packages via ``python setup.py install``. The module can also be placed into a local folder e.g. ``npufunc_directory`` below @@ -408,8 +408,8 @@ sections we first give the ``.c`` file and then the corresponding ``setup.py`` file. The places in the code corresponding to the actual computations for -the ufunc are marked with ``/\* BEGIN main ufunc computation \*/`` and -``/\* END main ufunc computation \*/``. The code in between those lines +the ufunc are marked with ``/* BEGIN main ufunc computation */`` and +``/* END main ufunc computation */``. The code in between those lines is the primary thing that must be changed to create your own ufunc. diff --git a/doc/source/user/how-to-io.rst b/doc/source/user/how-to-io.rst index a90fbecfdec4..81055d42b9ac 100644 --- a/doc/source/user/how-to-io.rst +++ b/doc/source/user/how-to-io.rst @@ -206,7 +206,7 @@ Human-readable :func:`numpy.save` and :func:`numpy.savez` create binary files. To **write a human-readable file**, use :func:`numpy.savetxt`. The array can only be 1- or -2-dimensional, and there's no ` savetxtz` for multiple files. +2-dimensional, and there's no ``savetxtz`` for multiple files. Large arrays ------------ diff --git a/numpy/lib/_polynomial_impl.py b/numpy/lib/_polynomial_impl.py index a58ca76ec2b0..de4c01ecb95c 100644 --- a/numpy/lib/_polynomial_impl.py +++ b/numpy/lib/_polynomial_impl.py @@ -520,9 +520,9 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): - residuals -- sum of squared residuals of the least squares fit - rank -- the effective rank of the scaled Vandermonde - coefficient matrix + coefficient matrix - singular_values -- singular values of the scaled Vandermonde - coefficient matrix + coefficient matrix - rcond -- value of `rcond`. For more details, see `numpy.linalg.lstsq`. diff --git a/numpy/lib/_version.py b/numpy/lib/_version.py index f7a353868fd2..d70a61040a40 100644 --- a/numpy/lib/_version.py +++ b/numpy/lib/_version.py @@ -22,8 +22,7 @@ class NumpyVersion: - Release candidates: '1.8.0rc1', '1.8.0rc2', etc. - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended) - Development versions after a1: '1.8.0a1.dev-f1234afa', - '1.8.0b2.dev-f1234afa', - '1.8.1rc1.dev-f1234afa', etc. + '1.8.0b2.dev-f1234afa', '1.8.1rc1.dev-f1234afa', etc. - Development versions (no git hash available): '1.8.0.dev-Unknown' Comparing needs to be done against a valid version string or other From 5e3aa37b1af5207a7aee113cfc5153ace3c67238 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Mon, 2 Jun 2025 08:07:35 +0200 Subject: [PATCH 0072/1018] MAINT: fix SPDX license expressions for LAPACK, GCC runtime libs See https://github.com/scipy/scipy/issues/23061 for details. 
[skip ci] --- tools/wheels/LICENSE_linux.txt | 4 ++-- tools/wheels/LICENSE_osx.txt | 4 ++-- tools/wheels/LICENSE_win32.txt | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tools/wheels/LICENSE_linux.txt b/tools/wheels/LICENSE_linux.txt index 9e2d9053b8a7..db488c6cff47 100644 --- a/tools/wheels/LICENSE_linux.txt +++ b/tools/wheels/LICENSE_linux.txt @@ -44,7 +44,7 @@ Name: LAPACK Files: numpy.libs/libscipy_openblas*.so Description: bundled in OpenBLAS Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause-Attribution +License: BSD-3-Clause-Open-MPI Copyright (c) 1992-2013 The University of Tennessee and The University of Tennessee Research Foundation. All rights reserved. @@ -99,7 +99,7 @@ Name: GCC runtime library Files: numpy.libs/libgfortran*.so Description: dynamically linked to files compiled with gcc Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran -License: GPL-3.0-with-GCC-exception +License: GPL-3.0-or-later WITH GCC-exception-3.1 Copyright (C) 2002-2017 Free Software Foundation, Inc. Libgfortran is free software; you can redistribute it and/or modify diff --git a/tools/wheels/LICENSE_osx.txt b/tools/wheels/LICENSE_osx.txt index 7ef2e381874e..5cea18441b35 100644 --- a/tools/wheels/LICENSE_osx.txt +++ b/tools/wheels/LICENSE_osx.txt @@ -44,7 +44,7 @@ Name: LAPACK Files: numpy/.dylibs/libscipy_openblas*.so Description: bundled in OpenBLAS Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause-Attribution +License: BSD-3-Clause-Open-MPI Copyright (c) 1992-2013 The University of Tennessee and The University of Tennessee Research Foundation. All rights reserved. @@ -99,7 +99,7 @@ Name: GCC runtime library Files: numpy/.dylibs/libgfortran*, numpy/.dylibs/libgcc* Description: dynamically linked to files compiled with gcc Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran -License: GPL-3.0-with-GCC-exception +License: GPL-3.0-or-later WITH GCC-exception-3.1 Copyright (C) 2002-2017 Free Software Foundation, Inc. Libgfortran is free software; you can redistribute it and/or modify diff --git a/tools/wheels/LICENSE_win32.txt b/tools/wheels/LICENSE_win32.txt index c8277e7710a2..aed96845583b 100644 --- a/tools/wheels/LICENSE_win32.txt +++ b/tools/wheels/LICENSE_win32.txt @@ -44,7 +44,7 @@ Name: LAPACK Files: numpy.libs\libscipy_openblas*.dll Description: bundled in OpenBLAS Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause-Attribution +License: BSD-3-Clause-Open-MPI Copyright (c) 1992-2013 The University of Tennessee and The University of Tennessee Research Foundation. All rights reserved. @@ -99,7 +99,7 @@ Name: GCC runtime library Files: numpy.libs\libscipy_openblas*.dll Description: statically linked to files compiled with gcc Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran -License: GPL-3.0-with-GCC-exception +License: GPL-3.0-or-later WITH GCC-exception-3.1 Copyright (C) 2002-2017 Free Software Foundation, Inc. 
Libgfortran is free software; you can redistribute it and/or modify From 3b465b051616c2ffa299fb0313a9ef609862cf38 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Tue, 3 Jun 2025 14:04:23 +0300 Subject: [PATCH 0073/1018] MAINT: cleanup from finalized concatenate deprecation (#29115) --- numpy/_core/src/multiarray/multiarraymodule.c | 27 +++++-------------- 1 file changed, 6 insertions(+), 21 deletions(-) diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 022a54fe17da..7724756ba351 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -530,8 +530,7 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis, NPY_NO_EXPORT PyArrayObject * PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays, NPY_ORDER order, PyArrayObject *ret, - PyArray_Descr *dtype, NPY_CASTING casting, - npy_bool casting_not_passed) + PyArray_Descr *dtype, NPY_CASTING casting) { int iarrays; npy_intp shape = 0; @@ -647,12 +646,11 @@ PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays, * @param ret output array to fill * @param dtype Forced output array dtype (cannot be combined with ret) * @param casting Casting mode used - * @param casting_not_passed Deprecation helper */ NPY_NO_EXPORT PyObject * PyArray_ConcatenateInto(PyObject *op, int axis, PyArrayObject *ret, PyArray_Descr *dtype, - NPY_CASTING casting, npy_bool casting_not_passed) + NPY_CASTING casting) { int iarrays, narrays; PyArrayObject **arrays; @@ -698,7 +696,7 @@ PyArray_ConcatenateInto(PyObject *op, if (axis == NPY_RAVEL_AXIS) { ret = PyArray_ConcatenateFlattenedArrays( narrays, arrays, NPY_CORDER, ret, dtype, - casting, casting_not_passed); + casting); } else { ret = PyArray_ConcatenateArrays( @@ -743,7 +741,7 @@ PyArray_Concatenate(PyObject *op, int axis) casting = NPY_SAME_KIND_CASTING; } return PyArray_ConcatenateInto( - op, axis, NULL, NULL, casting, 0); + op, axis, NULL, NULL, casting); } static int @@ -2489,7 +2487,6 @@ array_concatenate(PyObject *NPY_UNUSED(dummy), PyObject *out = NULL; PyArray_Descr *dtype = NULL; NPY_CASTING casting = NPY_SAME_KIND_CASTING; - PyObject *casting_obj = NULL; PyObject *res; int axis = 0; @@ -2499,22 +2496,10 @@ array_concatenate(PyObject *NPY_UNUSED(dummy), "|axis", &PyArray_AxisConverter, &axis, "|out", NULL, &out, "$dtype", &PyArray_DescrConverter2, &dtype, - "$casting", NULL, &casting_obj, + "$casting", &PyArray_CastingConverter, &casting, NULL, NULL, NULL) < 0) { return NULL; } - int casting_not_passed = 0; - if (casting_obj == NULL) { - /* - * Casting was not passed in, needed for deprecation only. - * This should be simplified once the deprecation is finished. 
- */ - casting_not_passed = 1; - } - else if (!PyArray_CastingConverter(casting_obj, &casting)) { - Py_XDECREF(dtype); - return NULL; - } if (out != NULL) { if (out == Py_None) { out = NULL; @@ -2526,7 +2511,7 @@ array_concatenate(PyObject *NPY_UNUSED(dummy), } } res = PyArray_ConcatenateInto(a0, axis, (PyArrayObject *)out, dtype, - casting, casting_not_passed); + casting); Py_XDECREF(dtype); return res; } From d52bccb9b1ccc80b5dbd06f1b4bfeaeba901affd Mon Sep 17 00:00:00 2001 From: Danis <96629796+DanisNone@users.noreply.github.com> Date: Tue, 3 Jun 2025 19:11:17 +0500 Subject: [PATCH 0074/1018] TYP: minor ufunc alias fixes in ``__init__.pyi`` (#29120) --- numpy/__init__.pyi | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index df72ce3d877a..41d7411dfdd8 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -4784,19 +4784,17 @@ arctan: _UFunc_Nin1_Nout1[L['arctan'], L[8], None] arctanh: _UFunc_Nin1_Nout1[L['arctanh'], L[8], None] bitwise_and: _UFunc_Nin2_Nout1[L['bitwise_and'], L[12], L[-1]] bitwise_count: _UFunc_Nin1_Nout1[L['bitwise_count'], L[11], None] -bitwise_not: _UFunc_Nin1_Nout1[L['invert'], L[12], None] bitwise_or: _UFunc_Nin2_Nout1[L['bitwise_or'], L[12], L[0]] bitwise_xor: _UFunc_Nin2_Nout1[L['bitwise_xor'], L[12], L[0]] cbrt: _UFunc_Nin1_Nout1[L['cbrt'], L[5], None] ceil: _UFunc_Nin1_Nout1[L['ceil'], L[7], None] -conj: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None] conjugate: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None] copysign: _UFunc_Nin2_Nout1[L['copysign'], L[4], None] cos: _UFunc_Nin1_Nout1[L['cos'], L[9], None] cosh: _UFunc_Nin1_Nout1[L['cosh'], L[8], None] deg2rad: _UFunc_Nin1_Nout1[L['deg2rad'], L[5], None] degrees: _UFunc_Nin1_Nout1[L['degrees'], L[5], None] -divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None] +divide: _UFunc_Nin2_Nout1[L['divide'], L[11], None] divmod: _UFunc_Nin2_Nout2[L['divmod'], L[15], None] equal: _UFunc_Nin2_Nout1[L['equal'], L[23], None] exp2: _UFunc_Nin1_Nout1[L['exp2'], L[8], None] @@ -4839,7 +4837,6 @@ matmul: _GUFunc_Nin2_Nout1[L['matmul'], L[19], None, L["(n?,k),(k,m?)->(n?,m?)"] matvec: _GUFunc_Nin2_Nout1[L['matvec'], L[19], None, L["(m,n),(n)->(m)"]] maximum: _UFunc_Nin2_Nout1[L['maximum'], L[21], None] minimum: _UFunc_Nin2_Nout1[L['minimum'], L[21], None] -mod: _UFunc_Nin2_Nout1[L['remainder'], L[16], None] modf: _UFunc_Nin1_Nout2[L['modf'], L[4], None] multiply: _UFunc_Nin2_Nout1[L['multiply'], L[23], L[1]] negative: _UFunc_Nin1_Nout1[L['negative'], L[19], None] @@ -4863,7 +4860,6 @@ square: _UFunc_Nin1_Nout1[L['square'], L[18], None] subtract: _UFunc_Nin2_Nout1[L['subtract'], L[21], None] tan: _UFunc_Nin1_Nout1[L['tan'], L[8], None] tanh: _UFunc_Nin1_Nout1[L['tanh'], L[8], None] -true_divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None] trunc: _UFunc_Nin1_Nout1[L['trunc'], L[7], None] vecdot: _GUFunc_Nin2_Nout1[L['vecdot'], L[19], None, L["(n),(n)->()"]] vecmat: _GUFunc_Nin2_Nout1[L['vecmat'], L[19], None, L["(n),(n,m)->(m)"]] @@ -4878,10 +4874,14 @@ atanh = arctanh atan2 = arctan2 concat = concatenate bitwise_left_shift = left_shift +bitwise_not = invert bitwise_invert = invert bitwise_right_shift = right_shift +conj = conjugate +mod = remainder permute_dims = transpose pow = power +true_divide = divide class errstate: def __init__( From 2306bced4d3234e7cc8be8631b2b72773eabf7f3 Mon Sep 17 00:00:00 2001 From: abhishek-fujitsu Date: Wed, 14 May 2025 15:17:51 +0530 Subject: [PATCH 0075/1018] update windows-2019 to windows-2022 and meson flag[wheel 
build] Co-authored-by: Charles Harris --- .github/workflows/wheels.yml | 6 +++--- .github/workflows/windows.yml | 4 ++-- .github/workflows/windows_arm64.yml | 2 +- azure-pipelines.yml | 2 +- meson_cpu/x86/meson.build | 2 ++ numpy/_core/tests/test_umath.py | 7 +++++++ 6 files changed, 16 insertions(+), 7 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 3736f28cbd8c..7e034779fd0b 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -89,13 +89,13 @@ jobs: # targeting macos >= 14. Could probably build on macos-14, but it would be a cross-compile - [macos-13, macosx_x86_64, accelerate] - [macos-14, macosx_arm64, accelerate] # always use accelerate - - [windows-2019, win_amd64, ""] - - [windows-2019, win32, ""] + - [windows-2022, win_amd64, ""] + - [windows-2022, win32, ""] - [windows-11-arm, win_arm64, ""] python: ["cp311", "cp312", "cp313", "cp313t", "cp314", "cp314t", "pp311"] exclude: # Don't build PyPy 32-bit windows - - buildplat: [windows-2019, win32, ""] + - buildplat: [windows-2022, win32, ""] python: "pp311" # Don't build PyPy arm64 windows - buildplat: [windows-11-arm, win_arm64, ""] diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 6c02563150da..e760e37780a7 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -16,7 +16,7 @@ permissions: jobs: python64bit_openblas: name: x86-64, LP64 OpenBLAS - runs-on: windows-2019 + runs-on: windows-2022 # To enable this job on a fork, comment out: if: github.repository == 'numpy/numpy' strategy: @@ -92,7 +92,7 @@ jobs: fail-fast: false matrix: include: - - os: windows-2019 + - os: windows-2022 architecture: x86 - os: windows-11-arm architecture: arm64 diff --git a/.github/workflows/windows_arm64.yml b/.github/workflows/windows_arm64.yml index 0a691bff9b21..71fa9dd88d3b 100644 --- a/.github/workflows/windows_arm64.yml +++ b/.github/workflows/windows_arm64.yml @@ -15,7 +15,7 @@ permissions: jobs: windows_arm: - runs-on: windows-2019 + runs-on: windows-2022 # To enable this job on a fork, comment out: if: github.repository == 'numpy/numpy' diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 36362f6cacc7..af6e5cf52ac4 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -75,7 +75,7 @@ stages: - job: Windows timeoutInMinutes: 120 pool: - vmImage: 'windows-2019' + vmImage: 'windows-2022' strategy: maxParallel: 3 matrix: diff --git a/meson_cpu/x86/meson.build b/meson_cpu/x86/meson.build index 8c7a0fb59a57..1276e922ff2a 100644 --- a/meson_cpu/x86/meson.build +++ b/meson_cpu/x86/meson.build @@ -212,6 +212,8 @@ if compiler_id == 'msvc' endif endforeach FMA3.update(args: {'val': '/arch:AVX2', 'match': clear_arch}) + # Add floating-point contract flag to fixes transcendental function accuracy on Windows Server 2022 + FMA3.update(args: {'val': '/fp:contract'}) AVX2.update(args: {'val': '/arch:AVX2', 'match': clear_arch}) AVX512_SKX.update(args: {'val': '/arch:AVX512', 'match': clear_arch}) endif diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 4b698ce82bc6..001a7bffbcc8 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -1879,8 +1879,15 @@ def test_unary_spurious_fpexception(self, ufunc, dtype, data, escape): # FIXME: NAN raises FP invalid exception: # - ceil/float16 on MSVC:32-bit # - spacing/float16 on almost all platforms + # - spacing/float32,float64 on Windows MSVC with VS2022 if ufunc in (np.spacing, np.ceil) and dtype == 'e': return + # 
Skip spacing tests with NaN on Windows MSVC (all dtypes) + import platform + if (ufunc == np.spacing and + platform.system() == 'Windows' and + any(np.isnan(d) if isinstance(d, (int, float)) else False for d in data)): + pytest.skip("spacing with NaN generates warnings on Windows/VS2022") array = np.array(data, dtype=dtype) with assert_no_warnings(): ufunc(array) From 9328c192239e8f288b54ecae98210440f11a7b60 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Jun 2025 17:13:25 +0000 Subject: [PATCH 0076/1018] MAINT: Bump github/codeql-action from 3.28.18 to 3.28.19 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.18 to 3.28.19. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/ff0a06e83cb2de871e5a09832bc6a81e7276941f...fca7ace96b7d713c7035871441bd52efbe39e27e) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.28.19 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index fb0dd766a1d8..e0318652d2af 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18 + uses: github/codeql-action/init@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18 + uses: github/codeql-action/autobuild@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18 + uses: github/codeql-action/analyze@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index f4e06677f804..9e21251f87c8 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v2.1.27 + uses: github/codeql-action/upload-sarif@fca7ace96b7d713c7035871441bd52efbe39e27e # v2.1.27 with: sarif_file: results.sarif From 69948f351fcdf7a873fdb360eeaa4d84673ea90f Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 3 Jun 2025 19:28:34 +0200 Subject: [PATCH 0077/1018] DOC: remove very outdated info on ATLAS (#29119) * DOC: remove very outdated info on ATLAS ATLAS hasn't been developed for years, there is no reason to ever use it instead of OpenBLAS, BLIS, or MKL. So remove mentions of it. The troubleshooting instructions haven't been relevant in quite a while either. Addresses a comment on gh-29108 [skip cirrus] [skip github] [skip azp] * accept review suggestion [skip ci] Co-authored-by: Matti Picus --------- Co-authored-by: Matti Picus --- INSTALL.rst | 12 ++-------- .../user/troubleshooting-importerror.rst | 22 ------------------- 2 files changed, 2 insertions(+), 32 deletions(-) diff --git a/INSTALL.rst b/INSTALL.rst index 017e4de8c9d4..6e9d2cd242f5 100644 --- a/INSTALL.rst +++ b/INSTALL.rst @@ -135,12 +135,8 @@ For best performance, a development package providing BLAS and CBLAS should be installed. Some of the options available are: - ``libblas-dev``: reference BLAS (not very optimized) -- ``libatlas-base-dev``: generic tuned ATLAS, it is recommended to tune it to - the available hardware, see /usr/share/doc/libatlas3-base/README.Debian for - instructions -- ``libopenblas-base``: fast and runtime detected so no tuning required but a - very recent version is needed (>=0.2.15 is recommended). Older versions of - OpenBLAS suffered from correctness issues on some CPUs. +- ``libopenblas-base``: (recommended) OpenBLAS is performant, and used + in the NumPy wheels on PyPI except where Apple's Accelerate is tuned better for Apple hardware The package linked to when numpy is loaded can be chosen after installation via the alternatives mechanism:: @@ -148,10 +144,6 @@ the alternatives mechanism:: update-alternatives --config libblas.so.3 update-alternatives --config liblapack.so.3 -Or by preloading a specific BLAS library with:: - - LD_PRELOAD=/usr/lib/atlas-base/atlas/libblas.so.3 python ... - Build issues ============ diff --git a/doc/source/user/troubleshooting-importerror.rst b/doc/source/user/troubleshooting-importerror.rst index 6be8831d9c2a..da456dd17e36 100644 --- a/doc/source/user/troubleshooting-importerror.rst +++ b/doc/source/user/troubleshooting-importerror.rst @@ -83,28 +83,6 @@ on how to properly configure Eclipse/PyDev to use Anaconda Python with specific conda environments. -Raspberry Pi ------------- - -There are sometimes issues reported on Raspberry Pi setups when installing -using ``pip3 install`` (or ``pip`` install). These will typically mention:: - - libf77blas.so.3: cannot open shared object file: No such file or directory - - -The solution will be to either:: - - sudo apt-get install libatlas-base-dev - -to install the missing libraries expected by the self-compiled NumPy -(ATLAS is a possible provider of linear algebra). - -*Alternatively* use the NumPy provided by Raspbian. 
In which case run:: - - pip3 uninstall numpy # remove previously installed version - apt install python3-numpy - - Debug build on Windows ---------------------- From 8944b2b800f5bfcf48a6b0134a2588f3a5a54d66 Mon Sep 17 00:00:00 2001 From: Marc Redemske Date: Tue, 3 Jun 2025 23:21:09 +0200 Subject: [PATCH 0078/1018] DOC: fix typo in Numpy's module structure --- doc/source/reference/module_structure.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/module_structure.rst b/doc/source/reference/module_structure.rst index 01a5bcff7fbc..83e697ac04fe 100644 --- a/doc/source/reference/module_structure.rst +++ b/doc/source/reference/module_structure.rst @@ -5,7 +5,7 @@ NumPy's module structure ************************ NumPy has a large number of submodules. Most regular usage of NumPy requires -only the main namespace and a smaller set of submodules. The rest either either +only the main namespace and a smaller set of submodules. The rest either has special-purpose or niche namespaces. Main namespaces From 6822a7b10f21c895ca0037a9b9060bb944fb7f96 Mon Sep 17 00:00:00 2001 From: Marc Redemske Date: Wed, 4 Jun 2025 07:16:33 +0200 Subject: [PATCH 0079/1018] update according to review --- doc/source/reference/module_structure.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/module_structure.rst b/doc/source/reference/module_structure.rst index 83e697ac04fe..98e3dda54e7b 100644 --- a/doc/source/reference/module_structure.rst +++ b/doc/source/reference/module_structure.rst @@ -5,7 +5,7 @@ NumPy's module structure ************************ NumPy has a large number of submodules. Most regular usage of NumPy requires -only the main namespace and a smaller set of submodules. The rest either has +only the main namespace and a smaller set of submodules. The rest either have special-purpose or niche namespaces. Main namespaces From 6ab549012852157727097c52328bad56cfd85d46 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Jun 2025 18:00:32 +0000 Subject: [PATCH 0080/1018] MAINT: Bump conda-incubator/setup-miniconda from 3.1.1 to 3.2.0 Bumps [conda-incubator/setup-miniconda](https://github.com/conda-incubator/setup-miniconda) from 3.1.1 to 3.2.0. - [Release notes](https://github.com/conda-incubator/setup-miniconda/releases) - [Changelog](https://github.com/conda-incubator/setup-miniconda/blob/main/CHANGELOG.md) - [Commits](https://github.com/conda-incubator/setup-miniconda/compare/505e6394dae86d6a5c7fbb6e3fb8938e3e863830...835234971496cad1653abb28a638a281cf32541f) --- updated-dependencies: - dependency-name: conda-incubator/setup-miniconda dependency-version: 3.2.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/macos.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 1388a756d216..418dc7d52fc1 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -55,7 +55,7 @@ jobs: ${{ github.workflow }}-${{ matrix.python-version }}-ccache-macos- - name: Setup Miniforge - uses: conda-incubator/setup-miniconda@505e6394dae86d6a5c7fbb6e3fb8938e3e863830 # v3.1.1 + uses: conda-incubator/setup-miniconda@835234971496cad1653abb28a638a281cf32541f # v3.2.0 with: python-version: ${{ matrix.python-version }} channels: conda-forge diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 7e034779fd0b..df0c779241f3 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -280,7 +280,7 @@ jobs: name: sdist path: ./dist/* - - uses: conda-incubator/setup-miniconda@505e6394dae86d6a5c7fbb6e3fb8938e3e863830 # v3.1.1 + - uses: conda-incubator/setup-miniconda@835234971496cad1653abb28a638a281cf32541f # v3.2.0 with: # for installation of anaconda-client, required for upload to # anaconda.org From a88d0149ae91b301696f507f350499207d11f442 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Thu, 5 Jun 2025 11:00:59 +0200 Subject: [PATCH 0081/1018] MAINT: Bump ``scipy-doctest`` to 1.8 (#29085) * MAINT: check-docs: require scipy-doctest >= 1.8.0 See for the discussion https://discuss.scientific-python.org/t/scipy-doctest-select-only-doctests-or-both-doctests-and-unit-tests/1950 [docs only] * CI: bump scipy-doctest version on CI [skip azp] [skip actions] [skip cirrus] * MAINT: tweak doctests to not fail --- .github/workflows/linux.yml | 2 +- .spin/cmds.py | 3 +++ numpy/_core/_add_newdocs.py | 2 +- numpy/lib/introspect.py | 2 +- requirements/doc_requirements.txt | 2 +- 5 files changed, 7 insertions(+), 4 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 742ca5c34144..668c1191d055 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -271,7 +271,7 @@ jobs: # - name: Check docstests # shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' # run: | - # pip install scipy-doctest==1.6.0 hypothesis==6.104.1 matplotlib scipy pytz pandas + # pip install scipy-doctest>=1.8.0 hypothesis==6.104.1 matplotlib scipy pytz pandas # spin check-docs -v # spin check-tutorials -v diff --git a/.spin/cmds.py b/.spin/cmds.py index e5ae29d4a6a2..66885de630e0 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -196,6 +196,8 @@ def check_docs(*, parent_callback, pytest_args, **kwargs): import scipy_doctest # noqa: F401 except ModuleNotFoundError as e: raise ModuleNotFoundError("scipy-doctest not installed") from e + if scipy_doctest.__version__ < '1.8.0': + raise ModuleNotFoundError("please update scipy_doctests to >= 1.8.0") if (not pytest_args): pytest_args = ('--pyargs', 'numpy') @@ -203,6 +205,7 @@ def check_docs(*, parent_callback, pytest_args, **kwargs): # turn doctesting on: doctest_args = ( '--doctest-modules', + '--doctest-only-doctests=true', '--doctest-collect=api' ) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 8f5de4b7bd89..597d5c6deaf3 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -5944,7 +5944,7 @@ >>> import numpy as np >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) >>> print(dt.fields) - {'name': (dtype('|S16'), 0), 'grades': 
(dtype(('float64',(2,))), 16)} + {'name': (dtype('<U16'), 0), 'grades': (dtype(('float64',(2,))), 16)} diff --git a/numpy/lib/introspect.py b/numpy/lib/introspect.py --- a/numpy/lib/introspect.py +++ b/numpy/lib/introspect.py >>> import json - >>> print(json.dumps(dict, indent=2)) + >>> print(json.dumps(dict, indent=2)) # may vary (architecture) { "absolute": { "dd": { diff --git a/requirements/doc_requirements.txt b/requirements/doc_requirements.txt index 330f0f7ac8b9..23a0e6deb60f 100644 --- a/requirements/doc_requirements.txt +++ b/requirements/doc_requirements.txt @@ -19,7 +19,7 @@ toml # for doctests, also needs pytz which is in test_requirements -scipy-doctest==1.6.0 +scipy-doctest>=1.8.0 # interactive documentation utilities # see https://github.com/jupyterlite/pyodide-kernel#compatibility From 7fbc71c354c21c2264d79e3657829eb2608f4dbf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Jun 2025 17:05:30 +0000 Subject: [PATCH 0082/1018] MAINT: Bump mamba-org/setup-micromamba from 2.0.4 to 2.0.5 Bumps [mamba-org/setup-micromamba](https://github.com/mamba-org/setup-micromamba) from 2.0.4 to 2.0.5. - [Release notes](https://github.com/mamba-org/setup-micromamba/releases) - [Commits](https://github.com/mamba-org/setup-micromamba/compare/0dea6379afdaffa5d528b3d1dabc45da37f443fc...b09ef9b599704322748535812ca03efb2625677b) --- updated-dependencies: - dependency-name: mamba-org/setup-micromamba dependency-version: 2.0.5 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/wheels.yml | 2 +- .github/workflows/windows_arm64.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index df0c779241f3..f74be5f4a455 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -187,7 +187,7 @@ jobs: path: ./wheelhouse/*.whl - name: install micromamba - uses: mamba-org/setup-micromamba@0dea6379afdaffa5d528b3d1dabc45da37f443fc + uses: mamba-org/setup-micromamba@b09ef9b599704322748535812ca03efb2625677b if: ${{ matrix.buildplat[1] != 'win_arm64' }} # unsupported platform at the moment with: # for installation of anaconda-client, required for upload to diff --git a/.github/workflows/windows_arm64.yml b/.github/workflows/windows_arm64.yml index 71fa9dd88d3b..3eaf02eb062c 100644 --- a/.github/workflows/windows_arm64.yml +++ b/.github/workflows/windows_arm64.yml @@ -174,7 +174,7 @@ jobs: path: ./*.whl - name: Setup Mamba - uses: mamba-org/setup-micromamba@0dea6379afdaffa5d528b3d1dabc45da37f443fc + uses: mamba-org/setup-micromamba@b09ef9b599704322748535812ca03efb2625677b with: # for installation of anaconda-client, required for upload to # anaconda.org From e81abf7f0ab8c0eba1541e35d048469731c4c6a2 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 7 Jun 2025 10:01:04 -0600 Subject: [PATCH 0083/1018] MAINT: Update main after 2.3.0 release.
- Forwardport 2.3.0-changelog.rst - Forwardport 2.3.0-notes.rst - Forwardport .mailmap [skip azp] [skip cirrus] [skip actions] --- .mailmap | 39 +- doc/changelog/2.3.0-changelog.rst | 704 +++++++++++++++++++++++++++++ doc/source/release/2.3.0-notes.rst | 517 ++++++++++++++++++++- 3 files changed, 1254 insertions(+), 6 deletions(-) create mode 100644 doc/changelog/2.3.0-changelog.rst diff --git a/.mailmap b/.mailmap index f33dfddb6492..e3e3bb56ecdf 100644 --- a/.mailmap +++ b/.mailmap @@ -11,6 +11,7 @@ !Algorithmist-Girl <36552319+Algorithmist-Girl@users.noreply.github.com> !DWesl <22566757+DWesl@users.noreply.github.com> !Dreamge +!EarlMilktea <66886825+EarlMilktea@users.noreply.github.com> !Endolith !GalaxySnail !Illviljan <14371165+Illviljan@users.noreply.github.com> @@ -21,6 +22,7 @@ !Scian <65375075+hoony6134@users.noreply.github.com> !Searchingdays !amagicmuffin <2014wcheng@gmail.com> +!amotzop !bersbersbers <12128514+bersbersbers@users.noreply.github.com> !code-review-doctor !cook-1229 <70235336+cook-1229@users.noreply.github.com> @@ -34,6 +36,7 @@ !hutauf !jbCodeHub !juztamau5 +!karl3wm !legoffant <58195095+legoffant@users.noreply.github.com> !liang3zy22 <35164941+liang3zy22@users.noreply.github.com> !luzpaz @@ -51,6 +54,7 @@ !pmvz !pojaghi <36278217+pojaghi@users.noreply.github.com> !pratiklp00 +!samir539 !sfolje0 !spacescientist !stefan6419846 @@ -59,12 +63,15 @@ !tautaus !undermyumbrella1 !vahidmech +!wenlong2 !xoviat <49173759+xoviat@users.noreply.github.com> !xoviat <49173759+xoviat@users.noreply.github.com> !yan-wyb !yetanothercheer Aaron Baecker Adrin Jalali +Abhishek Kumar +Abhishek Kumar <142383124+abhishek-iitmadras@users.noreply.github.com> Abraham Medina Arun Kota Arun Kota Arun Kota @@ -140,6 +147,8 @@ Anton Prosekin Anže Starič Arfy Slowy Arnaud Ma +Arnaud Tremblay +Arnaud Tremblay <59627629+Msa360@users.noreply.github.com> Aron Ahmadia Arun Kota Arun Kota @@ -190,6 +199,8 @@ Carl Kleffner Carl Leake Carlos Henrique Hermanny Moreira da Silva Carlos Henrique Hermanny Moreira da Silva <121122527+carlosilva10260@users.noreply.github.com> +Carlos Martin +Carlos Martin Cédric Hannotier Charles Stern <62192187+cisaacstern@users.noreply.github.com> Chiara Marmo @@ -200,6 +211,8 @@ Chris Burns Chris Fu (傅立业) <17433201@qq.com> Chris Holland <41524756+ChrisAHolland@users.noreply.github.com> Chris Kerr +Chris Navarro +Chris Navarro <24905907+lvllvl@users.noreply.github.com> Chris Vavaliaris Christian Clauss Christopher Dahlin @@ -270,6 +283,7 @@ Eric Fode Eric Fode Eric Quintero Eric Xie <161030123+EngineerEricXie@users.noreply.github.com> Ernest N. Mamikonyan +Ernst Peng Eskild Eriksen Eskild Eriksen <42120229+iameskild@users.noreply.github.com> Eskild Eriksen @@ -300,8 +314,11 @@ Gregory R. Lee Gregory R. Lee Guo Ci guoci Guo Shuai +Gyeongjae Choi Habiba Hye Habiba Hye <145866308+HabibiHye@users.noreply.github.com> +Halle Loveday +Halle Loveday Hameer Abbasi Hannah Aizenman Han Genuit @@ -311,6 +328,10 @@ Helder Oliveira Hemil Desai Himanshu Hiroyuki V. 
Yamazaki +Daniel Hrisca +Daniel Hrisca +François de Coatpont +François de Coatpont <93073405+Chevali2004@users.noreply.github.com> Hugo van Kemenade Iantra Solari I-Shen Leong @@ -363,6 +384,7 @@ Jessé Pires Jessi J Zhao <35235453+jessijzhao@users.noreply.github.com> Jhong-Ken Chen (陳仲肯) Jhong-Ken Chen (陳仲肯) <37182101+kennychenfs@users.noreply.github.com> +Jiuding Tan (谭九鼎) <109224573@qq.com> Johann Faouzi Johann Rohwer Johann Rohwer jmrohwer @@ -447,10 +469,13 @@ Luke Zoltan Kelley Madhulika Jain Chambers <53166646+madhulikajc@users.noreply.github.com> Magdalena Proszewska Magdalena Proszewska <38814059+mproszewska@users.noreply.github.com> +Makima C. Yang Malik Idrees Hasan Khan <77000356+MalikIdreesHasanKhan@users.noreply.github.com>C Manoj Kumar Marcel Loose Marcin Podhajski <36967358+m-podhajski@users.noreply.github.com> +Marco Edward Gorelli +Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Margret Pax Margret Pax <13646646+paxcodes@users.noreply.github.com> Mark DePristo @@ -508,6 +533,7 @@ Michael Schnaitter Michael Seifert Michel Fruchart +Mike O'Brien Mike Toews Miki Watanabe (渡邉 美希) Miles Cranmer @@ -516,9 +542,12 @@ Milica Dančuk love-bees <33499899+love-bees@users.noreply.g Mircea Akos Bruma Mircea Akos Bruma Mitchell Faas <35742861+Mitchell-Faas@users.noreply.github.com> +Mohammed Abdul Rahman +Mohammed Abdul Rahman <130785777+that-ar-guy@users.noreply.github.com> Mohaned Qunaibit Muhammad Kasim Muhammed Muhsin +Mugundan Selvanayagam Mukulika Pahari Mukulika Pahari <60316606+Mukulikaa@users.noreply.github.com> Munira Alduraibi @@ -571,6 +600,7 @@ Peter J Cock Peter Kämpf Peyton Murray Phil Elson +Filipe Laíns Pierre GM Pierre GM pierregm Piotr Gaiński @@ -591,6 +621,8 @@ Rehas Sachdeva Richard Howe <45905457+rmhowe425@users.noreply.github.com> Ritta Narita Riya Sharma +Rob Timpe +Rob Timpe Robert Kern Robert LU Robert T. McGibbon @@ -660,6 +692,7 @@ Steve Stagg Steven J Kern Stuart Archibald Stuart Archibald +SUMIT SRIMANI <2301109104@ptuniv.edu.in SuryaChand P Sylvain Ferriol Takanori Hirano @@ -696,6 +729,8 @@ Vinith Kishore <85550536+vinith2@users.noreply.github Vrinda Narayan Vrinda Narayan Vrinda Narayan <48102157+vrindaaa@users.noreply.github.com> +Wang Yang (杨旺) +Wang Yang (杨旺) <1113177880@qq.com> Wansoo Kim Warrick Ball Warrick Ball @@ -711,11 +746,11 @@ Xiangyi Wang Yamada Fuyuka Yang Hau Yang Hau -Yang Wang Yash Pethe Yash Pethe <83630710+patient74@users.noreply.github.com> Yashasvi Misra Yashasvi Misra <54177363+yashasvimisra2798@users.noreply.github.com> +Yichi Zhang Yogesh Raisinghani <46864533+raisinghanii@users.noreply.github.com> Younes Sandi Younes Sandi <65843206+Unessam@users.noreply.github.com> @@ -723,6 +758,8 @@ Yu Feng Yuji Kanagawa Yuki K Yury Kirienko +Yuvraj Pradhan +Yuvraj Pradhan Zac Hatfield-Dodds Zach Brugh <111941670+zachbrugh@users.noreply.github.com> Zé Vinícius diff --git a/doc/changelog/2.3.0-changelog.rst b/doc/changelog/2.3.0-changelog.rst new file mode 100644 index 000000000000..7ca672ba8dbf --- /dev/null +++ b/doc/changelog/2.3.0-changelog.rst @@ -0,0 +1,704 @@ + +Contributors +============ + +A total of 134 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !DWesl +* !EarlMilktea + +* !amotzop + +* !fengluoqiuwu +* !h-vetinari +* !karl3wm + +* !partev +* !samir539 + +* !wenlong2 + +* Aarni Koskela + +* Abdu Zoghbi + +* Abhishek Kumar +* Adam J. 
Stewart +* Aditi Juneja +* Adrin Jalali +* Agriya Khetarpal +* Alicia Boya García + +* Andrej Zhilenkov +* Andrew Nelson +* Angus Gibson + +* Antonio Rech Santos + +* Ari Gato + +* Arnaud Tremblay + +* Arvid Bessen + +* Baskar Gopinath + +* Carlos Martin + +* Charles Harris +* Chris Navarro + +* Chris Sidebottom +* Christian Lorentzen +* Christine P. Chai + +* Christopher Sidebottom +* Clément Robert +* Colin Gilgenbach + +* Craig Peters + +* Cédric Hannotier +* Daniel Hrisca +* Derek Homeier +* Diego Baldassar + +* Dimitri Papadopoulos Orfanos +* Eoghan O'Connell + +* Eric Larson +* Ernst Peng + +* Evgeni Burovski +* Filipe Laíns +* François Rozet + +* François de Coatpont + +* GUAN MING +* Giovanni Del Monte +* Guan Ming(Wesley) Chiu + +* Guido Imperiale + +* Gyeongjae Choi + +* Halle Loveday + +* Hannah Wheeler + +* Hao Chen + +* Harmen Stoppels + +* Hin-Tak Leung + +* Ian DesJardin + +* Ihar Hrachyshka + +* Ilhan Polat +* Inessa Pawson +* J. Steven Dodge + +* Jake VanderPlas +* Jiachen An + +* Jiuding Tan (谭九鼎) +* Joe Rickerby + +* John Kirkham +* John Stilley + +* Jonathan Albrecht + +* Joren Hammudoglu +* Kai Germaschewski + +* Krishna Bindumadhavan + +* Lucas Colley +* Luka Krmpotić + +* Lysandros Nikolaou +* Maanas Arora +* Makima C. Yang + +* Marco Barbosa + +* Marco Edward Gorelli + +* Mark Harfouche +* Marten van Kerkwijk +* Mateusz Sokół +* Matt Haberland +* Matthew Brett +* Matthew Goldsberry + +* Matthew Sterrett +* Matthias Diener +* Matthieu Darbois +* Matti Picus +* Melissa Weber Mendonça +* Michael Siebert +* Mike O'Brien + +* Mohammed Abdul Rahman + +* Mugundan Selvanayagam + +* Musharaf Aijaz Baba + +* Musharraffaijaz + +* Nathan Goldbaum +* Nicholas Christensen + +* Nitish Satyavolu + +* Omid Rajaei +* PTUsumit + +* Peter Hawkins +* Peyton Murray +* Pieter Eendebak +* Raghuveer Devulapalli +* Ralf Gommers +* Rob Timpe + +* Robert Kern +* Rohit Goswami +* Ross Barnowski +* Roy Smart +* Saransh Chopra +* Saraswathy Kalaiselvan + +* Sayed Adel +* Sebastian Berg +* Shantanu Jain + +* Shashwat Pandey + +* Shi Entong + +* Simon Altrogge +* Stan Ulbrych +* Thomas A Caswell +* Théotime Grohens + +* Tyler Reddy +* WANG Xuerui + +* WEN Hao + +* Wang Yang (杨旺) + +* Warren Weckesser +* Warrick Ball +* William Andrea +* Yakov Danishevsky + +* Yichi Zhang + +* Yuvraj Pradhan + +* dependabot[bot] +* hfloveday12 + + +Pull requests merged +==================== + +A total of 556 pull requests were merged for this release. + +* `#22718 `__: DOC: Add docs on using GitHub Codespaces for NumPy development +* `#25675 `__: ENH: add matvec and vecmat gufuncs +* `#25934 `__: ENH: Convert tanh from C universal intrinsics to C++ using Highway +* `#25991 `__: ENH: Optimize polyutils as_series +* `#26018 `__: ENH add hash based unique +* `#26745 `__: ENH, DOC: Add support for interactive examples for NumPy with... 
+* `#26958 `__: BUG: index overlap copy +* `#27288 `__: BUG: Scalar array comparison should return np.bool +* `#27300 `__: CI: pycodestyle → ruff +* `#27309 `__: MNT: Enforce ruff/Pyflakes rules (F) +* `#27324 `__: DOC: Removing module name from by-topic docs +* `#27343 `__: ENH: Add support for flat indexing on flat iterator +* `#27404 `__: DOC: document type promotion with Python types +* `#27522 `__: ENH: Cleanup npy_find_array_wrap +* `#27523 `__: ENH: Improve performance of np.count_nonzero for float arrays +* `#27648 `__: MAINT: Fix the code style to our C-Style-Guide +* `#27738 `__: DEP: testing: disable deprecated use of keywords x/y +* `#27784 `__: BUG: ``sinc``\ : fix underflow for float16 +* `#27789 `__: ENH: Implement np.strings.slice as a gufunc +* `#27819 `__: CI: add windows free-threaded CI +* `#27823 `__: BEG, MAINT: Begin NumPy 2.3.0 development. +* `#27824 `__: BUG: Fix mismatch in definition and declaration for a couple... +* `#27826 `__: CI: update circleci to python3.11.10, limit parallel builds. +* `#27827 `__: CI: skip ninja installation in linux_qemu workflows +* `#27830 `__: ENH: speedup evaluation of numpy.polynomial.legendre.legval. +* `#27839 `__: DOC: Correct version-added for mean arg for nanvar and nanstd +* `#27841 `__: BUG: Never negate strides in reductions (for now) +* `#27847 `__: MAINT: Bump pypa/cibuildwheel from 2.21.3 to 2.22.0 +* `#27848 `__: MAINT: Bump mamba-org/setup-micromamba from 2.0.1 to 2.0.2 +* `#27850 `__: DOC: Correct versionadded for vecmat and matvec. +* `#27854 `__: MAINT: Use mask_store instead of store for compiler bug workaround +* `#27856 `__: SIMD: add lsx optimization for loongarch, and add Qemu tests +* `#27858 `__: DOC: Fix typo +* `#27860 `__: MAINT: Add helper for static or heap allocated scratch space +* `#27862 `__: MAINT: Drop Python 3.10 support. +* `#27864 `__: ENH: stack custom multiarray import exception with the original... +* `#27868 `__: BUG: fix importing numpy in Python's optimized mode +* `#27869 `__: TYP: Fix ``np.interp`` signature for scalar types +* `#27875 `__: MAINT: Ensure correct handling for very large unicode strings +* `#27877 `__: ENH: Refactor ``__qualname__`` across API +* `#27878 `__: DOC: Fix double import in docs +* `#27879 `__: DEV: Add venv files to .gitignore +* `#27883 `__: MAINT,ENH: Reorganize buffered iteration setup +* `#27884 `__: ENH: Remove unnecessary list collection +* `#27886 `__: MAINT: Move uint aligned check to actual transfer function setup +* `#27887 `__: MAINT: A few other small nditer fixes +* `#27896 `__: PERF: improve multithreaded ufunc scaling +* `#27897 `__: MAINT: Bump github/codeql-action from 3.27.5 to 3.27.6 +* `#27898 `__: MAINT: Remove ``25675.new_feature.rst`` snippet. +* `#27899 `__: TST: add timeouts for github actions tests and wheel builds. +* `#27901 `__: MAINT: simplify power fast path logic +* `#27910 `__: MAINT: Make qualname tests more specific and fix code where needed +* `#27914 `__: DOC: Remove 27896-snippet. +* `#27915 `__: MAINT: Bump actions/cache from 4.1.2 to 4.2.0 +* `#27917 `__: CI: Use hashes in specifying some actions. +* `#27920 `__: DOC: Fix invalid URL in the index.rst file. +* `#27921 `__: MAINT: Bump actions/checkout from 4.1.1 to 4.2.2 +* `#27922 `__: MAINT: Move user pointers out of axisdata and simplify iternext +* `#27923 `__: ENH: Add cython wrappers for NpyString API +* `#27927 `__: DOC: Use internal/intersphinx links for neps. 
+* `#27930 `__: MAINT: Fix cirrus MacOs wheel builds [wheel build] +* `#27931 `__: CI: audit with zizmor +* `#27933 `__: BUG: fix building numpy on musl s390x +* `#27936 `__: MAINT: Update main after 2.2.0 release. +* `#27940 `__: BUG: Fix potential inconsistent behaviour for high-demnsional... +* `#27943 `__: TEST: cleanups +* `#27947 `__: BUG:fix compile error libatomic link test to meson.build +* `#27955 `__: BUG: fix use-after-free error in npy_hashtable.cpp +* `#27956 `__: BLD: add missing include to fix build with freethreading +* `#27962 `__: MAINT: Bump github/codeql-action from 3.27.6 to 3.27.7 +* `#27963 `__: MAINT: Bump mamba-org/setup-micromamba from 2.0.2 to 2.0.3 +* `#27967 `__: TYP: Fix falsely rejected value types in ``ndarray.__setitem__`` +* `#27973 `__: MAINT: Apply assorted ruff/flake8-pie rules (PIE) +* `#27974 `__: MAINT: Apply ruff/flake8-implicit-str-concat rules (ISC) +* `#27975 `__: MAINT: Apply ruff/flake8-comprehensions rules (C4) +* `#27976 `__: MAINT: Apply assorted ruff/flake8-pyi rules (PYI) +* `#27978 `__: MAINT: Apply assorted ruff/flake8-simplify rules (SIM) +* `#27981 `__: DOC: Document abi3 compat +* `#27992 `__: BUG: Fix segfault in stringdtype lexsort +* `#27996 `__: MAINT: Bump github/codeql-action from 3.27.7 to 3.27.9 +* `#27997 `__: MAINT: Remove unnecessary (and not safe in free-threaded) 1-D... +* `#27998 `__: API,MAINT: Make ``NpyIter_GetTransferFlags`` public and avoid... +* `#27999 `__: DOC, MAINT: Fix typos found by codespell +* `#28001 `__: DOC: Fix documentation for np.dtype.kind to include 'T' for StringDType +* `#28003 `__: MAINT: random: Tweak module code in mtrand.pyx to fix a Cython... +* `#28004 `__: DOC: fix several doctests in dtype method docstrings +* `#28005 `__: BUG: Cython API was missing NPY_UINTP. +* `#28008 `__: BUG: Fix handling of matrix class in np.unique. +* `#28009 `__: TST: lib: Test average with object dtype and no weights. +* `#28013 `__: DOC: Fixed typos in development_advanced_debugging.rst +* `#28015 `__: MAINT: run ruff from the repository root +* `#28020 `__: CI: pin scipy-doctest to 1.5.1 +* `#28022 `__: MAINT: Add all submodules to ruff exclusion list. +* `#28023 `__: DOC: update to scipy-doctest 1.6.0 and fix tests +* `#28029 `__: MAINT: Bump actions/upload-artifact from 4.4.3 to 4.5.0 +* `#28032 `__: BUG,MAINT: Fix size bug in new alloc helper and use it in one... +* `#28033 `__: MAINT: Use userpointers to avoid NPY_MAXARGS in iternext() +* `#28035 `__: MAINT: Move ``lib.format`` and ``ctypeslib`` to submodules/private... +* `#28036 `__: Replace Twitter with X +* `#28039 `__: TYP: allow ``None`` in operand sequence of nditer +* `#28043 `__: BUG: Ensure einsum uses chunking (now that nditer doesn't) +* `#28051 `__: MAINT: Update main after 2.2.1 release. 
+* `#28053 `__: TYP: fix ``void`` arrays not accepting ``str`` keys in ``__setitem__`` +* `#28056 `__: BUG: Fix f2py directives and ``--lower`` casing +* `#28058 `__: MAINT: Update ``spin lint`` command +* `#28060 `__: CI: fix check that GIL remains disabled for free-threaded wheels +* `#28065 `__: TYP: fix unnecessarily broad ``integer`` binop return types +* `#28069 `__: MAINT: update NEP 29 +* `#28073 `__: TYP: use mypy_primer to surface type checking regressions +* `#28074 `__: DOC: clarify np.gradient varargs requirement for axis parameter +* `#28075 `__: MAINT: Replace usage of fixed strides with inner strides in einsum +* `#28080 `__: ENH: Allow an arbitrary number of operands in nditer +* `#28081 `__: DOC: Add release snippets for iteration changes +* `#28083 `__: MAINT: Update LICENSE Copyright to 2025 +* `#28088 `__: BUG: update check for highway compiler support +* `#28089 `__: MAINT: bump ``mypy`` to ``1.14.1`` +* `#28090 `__: DOC:Fixed docstring with example use of np.select +* `#28091 `__: MAINT: Refactor stringdtype casts.c to use cpp templates +* `#28092 `__: MAINT: LoongArch: switch away from the __loongarch64 preprocessor... +* `#28094 `__: DOC: Fix documentation example for numpy.ma.masked +* `#28100 `__: DOC: Move linalg.outer from Decompositions to Matrix and vector... +* `#28101 `__: DOC: Fix sphinx markup in source/reference/random/extending.rst +* `#28102 `__: MAINT: update oldest supported GCC version from 8.4 to 9.3 +* `#28103 `__: MAINT: random: Call np.import_array() in _examples/cython/extending_distribution... +* `#28105 `__: ENH: support no-copy pickling for any array that can be transposed... +* `#28108 `__: TYP: Better ``ndarray`` binop return types for ``float64`` &... +* `#28109 `__: TYP: Fix the incorrect ``bool`` return type of ``issubdtype`` +* `#28110 `__: TYP: Always accept ``date[time]`` in the ``datetime64`` constructor +* `#28116 `__: MAINT: random: Explicitly cast RAND_INT_MAX to double to avoid... +* `#28118 `__: BUG: Fix auxdata initialization in ufunc slow path +* `#28121 `__: MAINT: Correct NumPy 2.3 C-API versioning and version information +* `#28123 `__: BUG: move reduction initialization to ufunc initialization +* `#28127 `__: DOC: Improve slice docstrings +* `#28128 `__: BUG: Don't use C99 construct in import_array +* `#28129 `__: DEP: Deprecate ``numpy.typing.mypy_plugin`` +* `#28130 `__: CI: Fix mypy_primer comment workflow +* `#28133 `__: BUG: call PyType_Ready in f2py to avoid data races +* `#28134 `__: DEP: Deprecate ``numpy.typing.mypy_plugin``\ : The sequel +* `#28141 `__: DOC: Add instructions to build NumPy on WoA +* `#28142 `__: ENH: inline UTF-8 byte counter and make it branchless +* `#28144 `__: BUG: remove unnecessary call to PyArray_UpdateFlags +* `#28148 `__: MAINT: Replace usage of outdated fixed strides with inner strides... +* `#28149 `__: BUG: Fix ``from_float_positional`` errors for huge pads +* `#28154 `__: BUG: Avoid data race in PyArray_CheckFromAny_int +* `#28161 `__: DOC: Clarify ``np.loadtxt`` encoding argument default value in... +* `#28163 `__: MAINT: Avoid a redundant copy on ``a[...] = b`` +* `#28167 `__: DOC: fix formatting typo in basics.copies.rst +* `#28168 `__: TYP: Fix overlapping overloads issue in "2 in, 1 out" ufuncs +* `#28169 `__: TYP: preserve shape-type in ``ndarray.astype()`` +* `#28170 `__: TYP: Fix missing and spurious top-level exports +* `#28172 `__: BUG: Include Python-including headers first +* `#28179 `__: DOC: Remove duplicate wishlist tab in NEPs. 
+* `#28180 `__: DOC: Update links in HOWTO_RELEASE.rst +* `#28181 `__: CI: replace quansight-labs/setup-python with astral-sh/setup-uv +* `#28183 `__: MAINT: testing: specify python executable to use in extbuild +* `#28186 `__: MAINT: Update main after 2.2.2 release. +* `#28189 `__: MAINT, DOC: Add sphinx extension to allow svg images in PDF docs... +* `#28202 `__: MAINT: Use VQSORT_COMPILER_COMPATIBLE to determine if we should... +* `#28203 `__: BUG: fix data race in ``np.repeat`` +* `#28206 `__: BUG: Remove unnecessary copying and casting from out array in... +* `#28210 `__: corrected the numpy logo visibility issues on darkmode with the... +* `#28211 `__: MAINT: Hide decorator from pytest traceback +* `#28214 `__: ENH: add pkg_config entrypoint +* `#28219 `__: DOC: Add versionadded directive for axis argument in trim_zeros... +* `#28221 `__: BUG: allclose does not warn for invalid value encountered in... +* `#28222 `__: MAINT: Update highway to latest +* `#28223 `__: MAINT: Add [[maybe_unused] to silence some warnings +* `#28226 `__: DOC: Clarify ``__array__`` protocol arguments +* `#28228 `__: BUG: handle case when StringDType na_object is nan in float to... +* `#28229 `__: DOC: Fix a typo in doc/source/dev/development_workflow.rst +* `#28230 `__: DOC: FIx a link in Roadmap +* `#28231 `__: DOC: Fix external links in the navbar of neps webpage +* `#28232 `__: BUG: Fix float128 FPE handling on ARM64 with Clang compiler +* `#28234 `__: BUG: Add cpp atomic support +* `#28235 `__: MAINT: Compile fix for clang-cl on WoA +* `#28241 `__: TYP: Avoid upcasting ``float64`` in the set-ops +* `#28242 `__: CI: Fix the github label for ``TYP:`` PR's and issues +* `#28246 `__: BLD: better fix for clang / ARM compiles +* `#28250 `__: dtype.__repr__: prefer __name__ for user-defined types. +* `#28252 `__: test_casting_unittests.py: remove tuple +* `#28254 `__: MAINT: expire deprecations +* `#28258 `__: DOC: Change the scientific page link in NumPy/MATLAB +* `#28259 `__: TYP: Fix ``timedelta64.__divmod__`` and ``timedelta64.__mod__``... +* `#28262 `__: TYP: expire deprecations +* `#28263 `__: ENH: Add ARM64 (aarch64) CI testing +* `#28264 `__: DOC: Remove an invalid link in f2py-examples.rst +* `#28270 `__: TYP: Fixed missing typing information of set_printoptions +* `#28273 `__: CI: update sanitizer CI to use python compiled with ASAN and... 
+* `#28276 `__: BUG: fix incorrect bytes to StringDType coercion +* `#28279 `__: TYP: Fix scalar constructors +* `#28280 `__: TYP: stub ``numpy.matlib`` +* `#28281 `__: TYP: stub the missing ``numpy.testing`` modules +* `#28288 `__: DOC: Correct a typo in Intel License URL +* `#28290 `__: BUG: fix race initializing legacy dtype casts +* `#28291 `__: BUG: Prevent class-bound attr mutation in ``lib._iotools.NameValidator`` +* `#28294 `__: MAINT: Enable building tanh on vector length agnostic architectures +* `#28295 `__: TYP: stub ``numpy._globals`` +* `#28296 `__: TYP: stub ``numpy._expired_attrs_2_0`` +* `#28297 `__: TYP: stub ``numpy._configtool`` and ``numpy._distributor_init`` +* `#28298 `__: TYP: stub ``numpy.lib._iotools`` +* `#28299 `__: TYP: stub ``lib.user_array`` and ``lib._user_array_impl`` +* `#28300 `__: TYP: stub ``lib.introspect`` +* `#28301 `__: TYP: stub ``lib.recfunctions`` +* `#28302 `__: TYP: fix and improve ``numpy._core.arrayprint`` +* `#28303 `__: TYP: stub ``lib._datasource`` and fix ``lib._npyio_impl`` +* `#28304 `__: DOC: Remove reference to python2 +* `#28307 `__: MAINT: bump ``mypy`` to ``1.15.0`` +* `#28312 `__: DOC: remove references to Python 2 +* `#28319 `__: BUG: numpy.loadtxt reads only 50000 lines when skip_rows >= max_rows +* `#28320 `__: MAINT: Update actions/cache and use hash. +* `#28323 `__: DOC: Correct a typo in Exception TooHardError +* `#28327 `__: TYP: fix positional- and keyword-only params in ``astype``\ ,... +* `#28328 `__: CI: Update FreeBSD base image in ``cirrus_arm.yml`` +* `#28330 `__: ENH: Ensure ``lib._format_impl.read_array`` handles file reading... +* `#28332 `__: BUG: avoid segfault in np._core.multiarray.scalar +* `#28335 `__: MAINT: Update main after 2.2.3 release. +* `#28336 `__: DOC: Update link to Anaconda Eclipse/PyDev documentation +* `#28338 `__: MAINT: use OpenBLAS 0.3.29 +* `#28339 `__: MAIN: Update c,c++ line length to 88 +* `#28343 `__: BUG: Fix ``linalg.norm`` to handle empty matrices correctly. +* `#28350 `__: DOC: fix typo +* `#28353 `__: DOC: Make numpy.fft a clickable link to module +* `#28355 `__: BUG: safer bincount casting +* `#28358 `__: MAINT: No need to check for check for FPEs in casts to/from object +* `#28359 `__: DOC: Make the first paragraph more concise in internals.rst +* `#28361 `__: BUG: Make np.nonzero threading safe +* `#28370 `__: DOC: Revise bullet point formatting in ``arrays.promotions.rst`` +* `#28382 `__: DOC: fix C API docs for ``PyArray_Size`` +* `#28383 `__: DOC: Added links to CTypes and CFFI in Numba +* `#28386 `__: MAINT: Extend the default ruff exclude files +* `#28387 `__: DOC: fix expected exception from StringDType without string coercion +* `#28390 `__: MAINT: speed up slow test under TSAN +* `#28391 `__: CI: use free-threaded build for ASAN tests +* `#28392 `__: CI: build Linux aarch64 wheels on GitHub Actions +* `#28393 `__: BUG: Fix building on s390x with clang +* `#28396 `__: BUG: Fix crackfortran parsing error when a division occurs within... 
+* `#28404 `__: MAINT: remove legacy ucsnarrow module +* `#28406 `__: BUG: Include Python.h first +* `#28407 `__: BUG: sanity check ``__array_interface__`` number of dimensions +* `#28408 `__: DOC: Update link to Nix in Cross Compilation +* `#28411 `__: CI: use QEMU 9.2.2 for Linux Qemu tests +* `#28413 `__: DOC: add scimath in np.lib submodules listing +* `#28414 `__: DOC: Add missing punctuation to the random sampling page +* `#28415 `__: BLD: update cibuildwheel and build PyPy 3.11 wheels [wheel build] +* `#28421 `__: BUG: skip legacy dtype multithreaded test on 32 bit runners +* `#28426 `__: BUG: Limit the maximal number of bins for automatic histogram... +* `#28427 `__: DOC: remove mention of Poly.nickname +* `#28431 `__: MAINT: PY_VERSION_HEX simplify +* `#28436 `__: BUILD: move to manylinux_2_28 wheel builds +* `#28437 `__: DOC: fix documentation for Flag checking functions and macros +* `#28442 `__: ENH: Check for floating point exceptions in dot +* `#28444 `__: DOC: fix URL redirects +* `#28447 `__: DOC: repositioned bitwise_count under bit-wise operations +* `#28451 `__: DOC: Add -avx512_spr to disable AVX512 in build options +* `#28452 `__: TYP: stub ``random._pickle`` +* `#28453 `__: BUG: Fix return type of NpyIter_GetIterNext in Cython declarations +* `#28455 `__: MAINT: Bump mamba-org/setup-micromamba from 2.0.3 to 2.0.4 +* `#28456 `__: MAINT: Bump actions/cache from 4.2.0 to 4.2.2 +* `#28458 `__: MAINT: Bump actions/upload-artifact from 4.5.0 to 4.6.1 +* `#28459 `__: MAINT: Bump github/codeql-action from 3.27.9 to 3.28.11 +* `#28460 `__: MAINT: Bump astral-sh/setup-uv from 5.2.1 to 5.3.1 +* `#28461 `__: MAINT: Update dependabot.yml file +* `#28462 `__: TYP: Add specializations to meshgrid stubs +* `#28464 `__: MAINT: Bump actions/setup-python from 5.3.0 to 5.4.0 +* `#28465 `__: MAINT: Bump ossf/scorecard-action from 2.4.0 to 2.4.1 +* `#28466 `__: MAINT: Bump actions/checkout from 4.1.1 to 4.2.2 +* `#28467 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.1.0 to 3.1.1 +* `#28468 `__: MAINT: Bump actions/download-artifact from 4.1.8 to 4.1.9 +* `#28473 `__: DOC: add typing badge to README +* `#28475 `__: MAINT: skip slow_pypy tests on pypy +* `#28477 `__: MAINT: fix typo in normal distribution functions docstrings +* `#28480 `__: ENH: Convert logical from C universal intrinsics to C++ using... +* `#28483 `__: DOC: only change tp_name on CPython +* `#28485 `__: MAINT: Bump actions/setup-python from 5.3.0 to 5.4.0 +* `#28488 `__: fix aarch64 CI run +* `#28489 `__: MAINT: Enable building loop_logical on vector length agnostic... +* `#28491 `__: TYP: fix typing errors in ``_core.shape_base`` +* `#28492 `__: TYP: fix typing errors in ``_core.strings`` +* `#28494 `__: TYP: fix typing errors in ``_core.records`` +* `#28495 `__: DOC: let docstring mention that unique_values is now unsorted +* `#28497 `__: TYP: don't use literals in shape-types +* `#28498 `__: TYP: accept non-integer shapes in array constructor without a... 
+* `#28499 `__: TYP: remove unneseccary cast +* `#28500 `__: TYP: stub ``numpy.random._bounded_integers`` +* `#28502 `__: TYP: stub ``numpy.random._common`` +* `#28503 `__: API: Always allow ``sorted=False`` and make a note about it +* `#28505 `__: TYP: stub ``numpy._core.umath`` +* `#28506 `__: TYP: fix typing errors in ``numpy.lib._arrayterator_impl`` +* `#28507 `__: MAINT: remove ``ma.timer_comparison`` +* `#28508 `__: TYP: fix signatures of ``ndarray.put`` and ``ndarray.view`` +* `#28509 `__: TYP: annotate the missing ``ufunc.resolve_dtypes`` method +* `#28511 `__: TYP: fix stubtest errors in ``numpy._core`` +* `#28513 `__: TYP: stub ``numpy._core.overrides`` +* `#28514 `__: TYP: stub ``numpy._utils`` +* `#28515 `__: TYP: stub ``numpy._core._dtype[_ctypes]`` +* `#28517 `__: TYP: stub the remaining ``numpy._core.\*`` modules +* `#28518 `__: TYP: stub the missing submodules of ``numpy.linalg`` +* `#28519 `__: TYP: stub ``numpy._pyinstaller`` +* `#28520 `__: TYP: stub ``numpy.fft.helper`` (deprecated) +* `#28522 `__: TYP: stub ``numpy.f2py`` +* `#28523 `__: TYP: annotate the missing deprecated ``row_stack`` function +* `#28524 `__: CI, TST: Bump to cibuildwheel 2.23 (Pyodide 0.27.0) for WASM... +* `#28525 `__: TYP: fix stubtest errors in ``numpy.dtype`` and ``numpy.dtypes.\*`` +* `#28526 `__: TYP: fix stubtest errors in ``timedelta64`` and ``object_`` +* `#28527 `__: TYP: fix stubtest errors in ``numpy.lib._function_base_impl`` +* `#28528 `__: TYP: fix stubtest errors in ``numpy.lib._arraysetops_impl`` +* `#28529 `__: TYP: fix stubtest errors in ``numpy.lib._index_tricks_impl`` +* `#28530 `__: TYP: fix stubtest errors in ``numpy.lib._twodim_base_impl`` +* `#28531 `__: ENH: Add Cygwin extensions to list to copy to CWD in f2py meson... +* `#28532 `__: DOC: minor editorial change +* `#28535 `__: TYP: fix stubtest errors in ``numpy._core`` +* `#28536 `__: TYP: fix stubtest errors in ``numpy._globals`` +* `#28537 `__: TYP: fix stubtest errors in ``numpy.mat[rix]lib`` +* `#28538 `__: TYP: fix stubtest errors in ``numpy.random`` +* `#28539 `__: TYP: fix stubtest errors in ``numpy.testing`` +* `#28540 `__: TYP: fix typing errors in ``numpy.ndarray`` +* `#28541 `__: TYP: fix stubtest error in ``numpy.ma`` +* `#28546 `__: MAINT: Update main after NumPy 2.2.4 release. +* `#28547 `__: MAINT: Bump pypa/cibuildwheel from 2.23.0 to 2.23.1 +* `#28555 `__: MAINT: Bump actions/download-artifact from 4.1.9 to 4.2.0 +* `#28556 `__: NEP 54: Change status to Accepted +* `#28560 `__: MAINT: Bump actions/download-artifact from 4.2.0 to 4.2.1 +* `#28561 `__: MAINT: Bump github/codeql-action from 3.28.11 to 3.28.12 +* `#28562 `__: MAINT: Bump actions/upload-artifact from 4.6.1 to 4.6.2 +* `#28563 `__: MAINT: Bump actions/cache from 4.2.2 to 4.2.3 +* `#28568 `__: MAINT: Bump astral-sh/setup-uv from 5.3.1 to 5.4.0 +* `#28569 `__: Fixing various spelling errors +* `#28571 `__: BLD: use ``manylinux_2_28:2025.03.23-1`` [wheel build] +* `#28576 `__: API,ENH: Allow forcing an array result in ufuncs +* `#28577 `__: BUG: avoid deadlocks with C++ shared mutex in dispatch cache +* `#28581 `__: MAINT: Bump github/codeql-action from 3.28.12 to 3.28.13 +* `#28586 `__: MAINT: Bump pypa/cibuildwheel from 2.23.1 to 2.23.2 +* `#28587 `__: MAINT: Bump actions/setup-python from 5.4.0 to 5.5.0 +* `#28591 `__: TYP: Type masked array shape, dtype, __int__, and __float__ +* `#28593 `__: TYP: Type ``numpy.ma.min`` +* `#28600 `__: BUG: Set writeable flag for writeable dlpacks. 
+* `#28601 `__: MAINT: Bump scientific-python/upload-nightly-action from 0.6.1... +* `#28607 `__: CI: fix cirrus config [wheel build] +* `#28611 `__: MAINT: Bump astral-sh/setup-uv from 5.4.0 to 5.4.1 +* `#28612 `__: TYP: Type ``ma.max`` and ``ma.ptp`` +* `#28615 `__: ENH: Upgrade Array API version to 2024.12 +* `#28616 `__: TYP: Type ``ma.MaskedArray.min`` +* `#28617 `__: MAINT: Bump actions/dependency-review-action from 4.5.0 to 4.6.0 +* `#28618 `__: MAINT, CI: Update Ubuntu to 22.04 in azure-pipelines +* `#28619 `__: ENH: Use openmp on x86-simd-sort to speed up np.sort and np.argsort +* `#28621 `__: DOC: Fix typo in ``numpy/typing/__init__.py`` +* `#28623 `__: TYP: Type ``ma.MaskedArray.max`` and ``ma.MaskedArray.ptp`` +* `#28624 `__: BUG: fix ``np.vectorize`` for object dtype +* `#28626 `__: DOC: update array API standard version in compatibility page +* `#28627 `__: MAINT: replace string.format() with f-strings +* `#28635 `__: BLD: Update vendor-meson to fix module_feature conflicts arguments... +* `#28637 `__: TYP: Fix overload for ``ma.MaskedArray.{min,max,ptp}`` and ``ma.{min,max,ptp}`` ... +* `#28638 `__: TYP: Type ``MaskedArray.{argmin, argmax}`` and ``np.ma.{argmin,``... +* `#28643 `__: BUG: fix deepcopying StringDType arrays +* `#28644 `__: TYP: fix ``ndarray.tolist()`` and ``.item()`` for unknown dtype +* `#28645 `__: DOC: fixes classes decorated with set_module not showing its... +* `#28647 `__: DOC: Fix typos found by codespell +* `#28649 `__: ENH: Improve np.linalg.det performance +* `#28653 `__: CI: Replace QEMU armhf with native (32-bit compatibility mode) +* `#28657 `__: TYP: simplified type-aliases in ``numpy._typing`` +* `#28660 `__: TYP: Accept objects that ``write()`` to ``str`` in ``savetxt`` +* `#28662 `__: MAINT: Remove distutils CPU dispatcher compatibility code +* `#28664 `__: TYP: Type ``MaskedArray.sort`` +* `#28666 `__: MAINT: Bump github/codeql-action from 3.28.13 to 3.28.14 +* `#28667 `__: TYP: replace ``_ScalarType`` with ``_SCT`` +* `#28668 `__: TYP: replace ``_ArrayType`` with ``_ArrayT`` +* `#28669 `__: TYP: default to ``dtype[Any]`` +* `#28671 `__: SIMD: Fix Highway QSort symbol linking error on aarch32/ASIMD +* `#28672 `__: MAINT: Bump github/codeql-action from 3.28.14 to 3.28.15 +* `#28674 `__: TYP: add missing ``"b1"`` literals for ``dtype[bool]`` +* `#28675 `__: TYP: fix and improve ``numpy.lib._type_check_impl`` +* `#28676 `__: TYP: fix mypy test failures +* `#28677 `__: TYP: Type ``MaskedArray.partition`` and ``MaskedArray.argpartition`` +* `#28678 `__: DEP: Deprecate ``.T`` property for non-2dim arrays and scalars +* `#28680 `__: TYP: Type ``MaskedArray.take`` and ``np.ma.take`` +* `#28684 `__: TYP: replace ``_DType`` with ``_DTypeT`` +* `#28688 `__: TYP: rename ``_ShapeType`` TypeVar to ``_ShapeT`` +* `#28689 `__: TYP: Type ``MaskedArray.__{ge,gt,le,lt}__`` +* `#28690 `__: TYP: replace ``_SCT`` with ``_ScalarT`` +* `#28693 `__: BLD: fix meson_version warning +* `#28695 `__: DOC: linalg.matrix_transpose: add alias note +* `#28699 `__: TYP: Fix false rejection of ``NDArray[object_].__abs__()`` +* `#28702 `__: TYP: Fix inconsistent ``NDArray[float64].__[r]truediv__`` return... 
+* `#28703 `__: MAINT: Improve float16 and float32 printing +* `#28710 `__: ENH: Improve performance for np.result_type +* `#28712 `__: MAINT: ``%i`` → ``%d`` +* `#28715 `__: TYP: Type ``np.ma.{is_masked,ndim,size,ids,iscontiguous}`` +* `#28717 `__: TYP: fix string-like ``ndarray`` rich comparison operators +* `#28719 `__: MAINT: switching from ``%i`` to ``fstrings`` +* `#28720 `__: TYP: drop py310 support +* `#28724 `__: STY: Apply assorted ruff rules (RUF) +* `#28725 `__: STY: Enforce ruff/pycodestyle warnings (W) +* `#28726 `__: STY: Apply assorted ruff/refurb rules (FURB) +* `#28728 `__: STY: Apply assorted ruff/pyupgrade rules (UP) +* `#28731 `__: BUG: Prevent nanmax/nanmin from copying memmap arrays +* `#28733 `__: TYP: remove ``_typing._UnknownType`` and ``_ArrayLikeUnknown`` +* `#28735 `__: TYP: Type ``MaskedArray.count`` and ``np.ma.count`` +* `#28738 `__: TYP: fix incorrect ``random.Generator.integers`` return type +* `#28739 `__: MNT: get rid of references to Python 3.10 +* `#28740 `__: MAINT: Bump astral-sh/setup-uv from 5.4.1 to 5.4.2 +* `#28741 `__: BUG: Re-enable overriding functions in the ``np.strings`` module. +* `#28742 `__: TYP: Type ``MaskedArray.filled`` and ``np.ma.filled`` +* `#28743 `__: MNT: Enforce ruff/pygrep-hooks rules (PGH) +* `#28744 `__: STY: Apply more ruff rules (RUF) +* `#28745 `__: TYP: Type ``MaskedArray.put``\ , ``np.ma.put``\ , ``np.ma.putmask`` +* `#28746 `__: TYP: ``numpy.ma`` squiggly line cleanup +* `#28747 `__: TYP: some ``[arg]partition`` fixes +* `#28748 `__: ENH: Support Python 3.14 +* `#28750 `__: TYP: fix ``count_nonzero`` signature +* `#28751 `__: MNT: discard Python 2 leftover +* `#28752 `__: MNT: Apply ruff/Pylint rule PLW0129 (assertions that never fail) +* `#28754 `__: MNT: Enforce ruff/Pylint Error rules (PLE) +* `#28755 `__: MNT: Apply assorted ruff/Pylint Refactor rules (PLR) +* `#28756 `__: MNT: Apply assorted ruff/Pylint Warning rules (PLW) +* `#28757 `__: BUG: Fix AVX512_SPR dispatching for SVML half-precision operations +* `#28760 `__: STY: Apply ruff/pyupgrade rule UP032 +* `#28763 `__: STY: Use f-string instead of ``format`` call +* `#28764 `__: MNT: Enforce ruff rules: Flynt (FLY) and flake8-pie (PIE) +* `#28765 `__: MNT: Enforce ruff/flake8-bugbear rules (B) +* `#28766 `__: TYP: Type ``MaskedArray.compressed`` and ``np.ma.compressed`` +* `#28768 `__: MAINT: getting rid of old ``%`` and ``.format(...)`` strings... 
+* `#28769 `__: ENH: Improve Floating Point Cast Performance on ARM +* `#28770 `__: MNT: Enforce ruff/pyupgrade rules (UP) +* `#28771 `__: ENH: Include offset in error message when fallocate() fails +* `#28775 `__: STY: Partially apply ruff/pycodestyle rules (E) +* `#28779 `__: MAINT: Update main after Numpy 2.2.5 release +* `#28789 `__: BUG: Re-enable GCC function-specific optimization attributes +* `#28793 `__: TYP: Type ``np.ma.allclose`` and ``np.ma.allequal`` +* `#28798 `__: TST: skip test if spawning threads triggers a RuntimeError +* `#28803 `__: MAINT: Bump github/codeql-action from 3.28.15 to 3.28.16 +* `#28804 `__: BUG: fix heap buffer overflow in np.strings.find +* `#28806 `__: BUG: Fix `` __array__(None)`` to preserve dtype +* `#28807 `__: TYP: fix ``NDArray[floating] + float`` return type +* `#28808 `__: CI: Make clang_TSAN CI job use cpython_sanity docker image +* `#28809 `__: TYP: write ``dtype[Any]`` as ``dtype`` +* `#28810 `__: TYP: replace ``_Self`` type parameters with ``typing.Self`` +* `#28811 `__: TYP: remove unnecessary scalar-type ``Any`` type-args +* `#28816 `__: MAINT: Bump actions/setup-python from 5.5.0 to 5.6.0 +* `#28817 `__: MAINT: Bump astral-sh/setup-uv from 5.4.2 to 6.0.0 +* `#28818 `__: MAINT: Bump actions/download-artifact from 4.2.1 to 4.3.0 +* `#28819 `__: TYP: simplify redundant unions of builtin scalar types +* `#28820 `__: TYP: ``None`` at the end of a union +* `#28821 `__: BUG: Use unrotated companion matrix in polynomial.polyroots. +* `#28831 `__: TYP: Fix type annotations for ``np.ma.nomask`` and ``np.ma.MaskType`` +* `#28832 `__: TYP: Type ``np.ma.getmask`` +* `#28833 `__: TYP: Type ``np.ma.is_mask`` +* `#28836 `__: ENH: Provide Windows 11 ARM64 wheels (#22530) +* `#28841 `__: BUG: Fix Clang warning in loops_half.dispatch.c.src +* `#28845 `__: TYP: Type ``MaskedArray.nonzero`` +* `#28847 `__: TYP: Use _Array1D alias in ``numpy.ma.core.pyi`` +* `#28848 `__: TYP: Type ``MaskedArray.ravel`` +* `#28849 `__: TYP: Type ``MaskedArray.repeat``\ , improve overloads for ``NDArray.repeat``\... +* `#28850 `__: TYP: Type ``MaskedArray.swapaxes`` +* `#28854 `__: MAINT: Bump pypa/cibuildwheel from 2.23.2 to 2.23.3 +* `#28855 `__: TYP: add missing ``mod`` params to ``__[r]pow__`` +* `#28856 `__: TYP: generic ``StringDType`` +* `#28857 `__: TYP: implicit ``linalg`` private submodule re-exports +* `#28858 `__: TYP: fix the ``set_module`` signature +* `#28859 `__: DOC: Replace http:// with https:// +* `#28860 `__: BLD: update vendored Meson: v1.6.1 and iOS support +* `#28862 `__: BUG: fix stringdtype singleton thread safety +* `#28863 `__: TYP: Improve consistency of (masked) array typing aliases +* `#28867 `__: TYP: Type ``MaskedArray.{__setmask__,mask,harden_mask,soften_mask,hardmask,unsha``... 
+* `#28868 `__: TYP: Type ``MaskedArray.{imag, real, baseclass, mT}`` +* `#28869 `__: MAINT: Bump astral-sh/setup-uv from 6.0.0 to 6.0.1 +* `#28870 `__: MNT: retire old script for SVN repositories +* `#28871 `__: MNT: retire script superseded by ruff rule W605 +* `#28872 `__: DOC: consistent and updated LICENSE files for wheels +* `#28874 `__: DOC: ``numpy.i`` will not be included as part of SWIG +* `#28876 `__: MNT: discard unused function using os.system() +* `#28877 `__: DOC: update content of cross compilation build docs +* `#28878 `__: STY: Enforce more ruff rules +* `#28879 `__: STY: Apply assorted ruff/refurb rules (FURB) +* `#28880 `__: TYP: Type ``MaskedArray.all`` and ``MaskedArray.any`` +* `#28882 `__: MAINT: address warning in SWIG tests +* `#28883 `__: MAINT: from_dlpack thread safety fixes +* `#28884 `__: DEP: deprecate ``numpy.typing.NBitBase`` +* `#28887 `__: MAINT: Bump github/codeql-action from 3.28.16 to 3.28.17 +* `#28888 `__: DOC: math mode x to \times in docstring for numpy.linalg.multi_dot +* `#28892 `__: MAINT: Avoid dereferencing/strict aliasing warnings +* `#28893 `__: TYP: remove non-existent extended-precision scalar types +* `#28898 `__: BUG: Fix missing check for PyErr_Occurred() in _pyarray_correlate. +* `#28904 `__: BLD: update vendored Meson to include iOS fix +* `#28905 `__: TYP: Test ``MaskedArray.transpose`` and ``MaskedArray.T``\ ,... +* `#28906 `__: TYP: np.argmin and np.argmax overload changes +* `#28908 `__: TYP: Fix non-existent ``CanIndex`` annotation in ``ndarray.setfield`` +* `#28912 `__: TYP: add ``float64`` overloads to ``{lin,log,geom}space`` +* `#28918 `__: DOC: Fixes absent line numbers on link to classes decorated with... +* `#28923 `__: BUG: Use string conversion defined on dtype for .str +* `#28927 `__: MAINT: Remove outdated ``MaskedArray.__div__`` and ``MaskedArray.__idiv__`` +* `#28928 `__: MNT: add support for 3.14.0b1 +* `#28929 `__: MAINT: remove py2 ``__div__`` methods from ``poly1d`` and ``ABCPolyBase`` +* `#28930 `__: MAINT: remove py2 ``__div__`` remnants from the tests +* `#28931 `__: MAINT: remove py2 ``__div__`` methods from ``lib.user_array.container`` +* `#28932 `__: MAINT: remove references to 256-bits extended precision types +* `#28933 `__: MAINT: Use consistent naming for ``numpy/typing/tests/data/fail/ma.pyi`` +* `#28934 `__: TYP, TST: improved type-testing +* `#28935 `__: MAINT: Enable ruff E251 +* `#28936 `__: TST: Prevent import error when tests are not included in the... 
+* `#28937 `__: CI: fix TSAN CI by using a different docker image +* `#28938 `__: MNT: clean up free-threaded CI configuration +* `#28939 `__: MAINT: Bump actions/dependency-review-action from 4.6.0 to 4.7.0 +* `#28940 `__: TYP: optional type parameters for ``ndarray`` and ``flatiter`` +* `#28941 `__: DOC: Fix titles in ``development_ghcodespaces.rst`` +* `#28945 `__: MAINT: Enable linting with ruff E501 +* `#28952 `__: MAINT: Bump actions/dependency-review-action from 4.7.0 to 4.7.1 +* `#28954 `__: MAINT: Enable linting with ruff E501 for numpy._core +* `#28956 `__: DOC: Remove references to Python 2/3 +* `#28958 `__: TYP: reject complex scalar types in ``ndarray.__ifloordiv__`` +* `#28959 `__: TYP: remove redundant ``ndarray`` inplace operator overloads +* `#28960 `__: TYP: fix mypy & pyright errors in ``np.matrix`` +* `#28961 `__: DEP: finalize removal of ``numpy.compat`` +* `#28962 `__: TYP: type-testing without the mypy plugin +* `#28963 `__: MAINT: Update ruff to 0.11.9 in linting requirements +* `#28969 `__: MNT: Enforce ruff/isort rules (I) +* `#28971 `__: MAINT: Enable linting with ruff E501 +* `#28972 `__: MNT: Get rif of ``# pylint: `` pragma controls +* `#28974 `__: MNT: Get rid of ``version: $Id`` CVS tags +* `#28975 `__: MNT: import numpy as np +* `#28976 `__: MNT: Get rid of Pyflakes / flake8 +* `#28977 `__: MNT: Enforce ruff/flake8-implicit-str-concat rules (ISC) +* `#28978 `__: MNT: Enforce ruff/pandas-vet rules (PD) +* `#28981 `__: STY: reformat the ``_typing`` imports without trailing commas +* `#28982 `__: TYP: Gradual shape type defaults +* `#28984 `__: MNT: Use isinstance() instead of comparing type() +* `#28986 `__: TYP: Type ``MaskedArray.__{iadd,isub,imul,itruediv,ifloordiv,ipow}__`` +* `#28987 `__: MNT: Align ruff pin between ``requirements/linter_requirements.txt``... +* `#28988 `__: TYP: add missing ``ndarray.__{add,mul}__`` ``character`` type... +* `#28989 `__: MAINT: Bump github/codeql-action from 3.28.17 to 3.28.18 +* `#28990 `__: Revert "DEP: Deprecate ``.T`` property for non-2dim arrays and... +* `#28993 `__: MAINT: update NPY_FEATURE_VERSION after dropping python 3.10 +* `#28994 `__: TYP: allow inplace division of ``NDArray[timedelta64]`` by floats +* `#28995 `__: TYP: remove ``from __future__ import annotations`` +* `#28998 `__: MAINT: Update main after 2.2.6 release. +* `#29002 `__: MAINT: Update download-wheels for multiple pages +* `#29006 `__: ENH: Disable the alloc cache under address and memory sanitizers +* `#29008 `__: MNT: fix CI issues on main +* `#29018 `__: MAINT: Bump larsoner/circleci-artifacts-redirector-action from... +* `#29033 `__: BUG: Fix workflow error +* `#29042 `__: MNT: constant string arrays instead of pointers in C +* `#29043 `__: BUG: Avoid compile errors in f2py modules +* `#29044 `__: BUG: Fix f2py derived types in modules +* `#29046 `__: BUG: Fix cache use regression +* `#29047 `__: REL: Prepare for the NumPy 2.3.0rc1 release [wheel build] +* `#29070 `__: TYP: Various typing fixes. +* `#29072 `__: MAINT: use pypy 3.11 nightly which has a fix for ctypeslib +* `#29073 `__: BLD: use sonoma image on Cirrus for wheel build +* `#29074 `__: BUG: add bounds-checking to in-place string multiply +* `#29082 `__: BLD: bump OpenBLAS version, use OpenBLAS for win-arm64 [wheel... +* `#29089 `__: MNT: Avoid use of deprecated _PyDict_GetItemStringWithError in... 
+* `#29099 `__: BUG: f2py: thread-safe forcomb (#29091) +* `#29100 `__: TYP: fix NDArray[integer] inplace operator mypy issue +* `#29101 `__: PERF: Make NpzFile member existence constant time +* `#29116 `__: MAINT: Update to vs2022 in NumPy 2.3.x [wheel build] +* `#29118 `__: MAINT: fix SPDX license expressions for LAPACK, GCC runtime libs +* `#29132 `__: MAINT: Fix for segfaults with GCC 15 + diff --git a/doc/source/release/2.3.0-notes.rst b/doc/source/release/2.3.0-notes.rst index 74f11a0b4537..faad9ffcc8eb 100644 --- a/doc/source/release/2.3.0-notes.rst +++ b/doc/source/release/2.3.0-notes.rst @@ -4,16 +4,523 @@ NumPy 2.3.0 Release Notes ========================== +The NumPy 2.3.0 release continues the work to improve free threaded Python +support and annotations together with the usual set of bug fixes. It is unusual +in the number of expired deprecations, code modernizations, and style cleanups. +The latter may not be visible to users, but is important for code maintenance +over the long term. Note that we have also upgraded from manylinux2014 to +manylinux_2_28. + +Users running on a Mac having an M4 cpu might see various warnings about +invalid values and such. The warnings are a known problem with Accelerate. +They are annoying, but otherwise harmless. Apple promises to fix them. + +This release supports Python versions 3.11-3.13, Python 3.14 will be supported +when it is released. + Highlights ========== -*We'll choose highlights for this release near the end of the release cycle.* +* Interactive examples in the NumPy documentation. +* Building NumPy with OpenMP Parallelization. +* Preliminary support for Windows on ARM. +* Improved support for free threaded Python. +* Improved annotations. + + +New functions +============= + +New function ``numpy.strings.slice`` +------------------------------------ +The new function ``numpy.strings.slice`` was added, which implements fast +native slicing of string arrays. It supports the full slicing API including +negative slice offsets and steps. + +(`gh-27789 `__) + + +Deprecations +============ + +* The ``numpy.typing.mypy_plugin`` has been deprecated in favor of platform-agnostic + static type inference. Please remove ``numpy.typing.mypy_plugin`` from the ``plugins`` + section of your mypy configuration. If this change results in new errors being + reported, kindly open an issue. + + (`gh-28129 `__) + +* The ``numpy.typing.NBitBase`` type has been deprecated and will be removed in + a future version. + + This type was previously intended to be used as a generic upper bound for + type-parameters, for example: + + .. code-block:: python + + import numpy as np + import numpy.typing as npt + + def f[NT: npt.NBitBase](x: np.complexfloating[NT]) -> np.floating[NT]: ... + + But in NumPy 2.2.0, ``float64`` and ``complex128`` were changed to concrete + subtypes, causing static type-checkers to reject ``x: np.float64 = + f(np.complex128(42j))``. + + So instead, the better approach is to use ``typing.overload``: + + .. code-block:: python + + import numpy as np + from typing import overload + + @overload + def f(x: np.complex64) -> np.float32: ... + @overload + def f(x: np.complex128) -> np.float64: ... + @overload + def f(x: np.clongdouble) -> np.longdouble: ... 
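A self-contained sketch of how such overloads pair with a runtime implementation (the ``np.abs``-based body is only illustrative)::

    from typing import overload

    import numpy as np

    @overload
    def f(x: np.complex64) -> np.float32: ...
    @overload
    def f(x: np.complex128) -> np.float64: ...
    def f(x):
        # Illustrative shared runtime body: np.abs maps complex64 -> float32
        # and complex128 -> float64, matching the overloads above.
        return np.abs(x)

    y: np.float64 = f(np.complex128(42j))  # accepted by static type-checkers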
+ + (`gh-28884 `__) + + +Expired deprecations +==================== + +* Remove deprecated macros like ``NPY_OWNDATA`` from Cython interfaces in favor + of ``NPY_ARRAY_OWNDATA`` (deprecated since 1.7) + + (`gh-28254 `__) + +* Remove ``numpy/npy_1_7_deprecated_api.h`` and C macros like ``NPY_OWNDATA`` + in favor of ``NPY_ARRAY_OWNDATA`` (deprecated since 1.7) + + (`gh-28254 `__) + +* Remove alias ``generate_divbyzero_error`` to + ``npy_set_floatstatus_divbyzero`` and ``generate_overflow_error`` to + ``npy_set_floatstatus_overflow`` (deprecated since 1.10) + + (`gh-28254 `__) + +* Remove ``np.tostring`` (deprecated since 1.19) + + (`gh-28254 `__) + +* Raise on ``np.conjugate`` of non-numeric types (deprecated since 1.13) + + (`gh-28254 `__) + +* Raise when using ``np.bincount(...minlength=None)``, use 0 instead + (deprecated since 1.14) + + (`gh-28254 `__) + +* Passing ``shape=None`` to functions with a non-optional shape argument + errors, use ``()`` instead (deprecated since 1.20) + + (`gh-28254 `__) + +* Inexact matches for ``mode`` and ``searchside`` raise (deprecated since 1.20) + + (`gh-28254 `__) + +* Setting ``__array_finalize__ = None`` errors (deprecated since 1.23) + + (`gh-28254 `__) + +* ``np.fromfile`` and ``np.fromstring`` error on bad data, previously they + would guess (deprecated since 1.18) + + (`gh-28254 `__) + +* ``datetime64`` and ``timedelta64`` construction with a tuple no longer + accepts an ``event`` value, either use a two-tuple of (unit, num) or a + 4-tuple of (unit, num, den, 1) (deprecated since 1.14) + + (`gh-28254 `__) + +* When constructing a ``dtype`` from a class with a ``dtype`` attribute, that + attribute must be a dtype-instance rather than a thing that can be parsed as + a dtype instance (deprecated in 1.19). At some point the whole construct of + using a dtype attribute will be deprecated (see #25306) + + (`gh-28254 `__) + +* Passing booleans as partition index errors (deprecated since 1.23) + + (`gh-28254 `__) + +* Out-of-bounds indexes error even on empty arrays (deprecated since 1.20) + + (`gh-28254 `__) + +* ``np.tostring`` has been removed, use ``tobytes`` instead (deprecated since 1.19) + + (`gh-28254 `__) + +* Disallow make a non-writeable array writeable for arrays with a base that do + not own their data (deprecated since 1.17) + + (`gh-28254 `__) + +* ``concatenate()`` with ``axis=None`` uses ``same-kind`` casting by default, + not ``unsafe`` (deprecated since 1.20) + + (`gh-28254 `__) + +* Unpickling a scalar with object dtype errors (deprecated since 1.20) + + (`gh-28254 `__) + +* The binary mode of ``fromstring`` now errors, use ``frombuffer`` instead + (deprecated since 1.14) + + (`gh-28254 `__) + +* Converting ``np.inexact`` or ``np.floating`` to a dtype errors (deprecated + since 1.19) + + (`gh-28254 `__) + +* Converting ``np.complex``, ``np.integer``, ``np.signedinteger``, + ``np.unsignedinteger``, ``np.generic`` to a dtype errors (deprecated since + 1.19) + + (`gh-28254 `__) + +* The Python built-in ``round`` errors for complex scalars. Use ``np.round`` or + ``scalar.round`` instead (deprecated since 1.19) + + (`gh-28254 `__) + +* 'np.bool' scalars can no longer be interpreted as an index (deprecated since 1.19) + + (`gh-28254 `__) + +* Parsing an integer via a float string is no longer supported. (deprecated + since 1.23) To avoid this error you can + * make sure the original data is stored as integers. + * use the ``converters=float`` keyword argument. 
+ * Use ``np.loadtxt(...).astype(np.int64)`` + + (`gh-28254 `__) + +* The use of a length 1 tuple for the ufunc ``signature`` errors. Use ``dtype`` + or fill the tuple with ``None`` (deprecated since 1.19) + + (`gh-28254 `__) + +* Special handling of matrix is in np.outer is removed. Convert to a ndarray + via ``matrix.A`` (deprecated since 1.20) + + (`gh-28254 `__) + +* Removed the ``np.compat`` package source code (removed in 2.0) + + (`gh-28961 `__) + + +C API changes +============= + +* ``NpyIter_GetTransferFlags`` is now available to check if + the iterator needs the Python API or if casts may cause floating point + errors (FPE). FPEs can for example be set when casting ``float64(1e300)`` + to ``float32`` (overflow to infinity) or a NaN to an integer (invalid value). + + (`gh-27883 `__) + +* ``NpyIter`` now has no limit on the number of operands it supports. + + (`gh-28080 `__) + +New ``NpyIter_GetTransferFlags`` and ``NpyIter_IterationNeedsAPI`` change +------------------------------------------------------------------------- +NumPy now has the new ``NpyIter_GetTransferFlags`` function as a more precise +way checking of iterator/buffering needs. I.e. whether the Python API/GIL is +required or floating point errors may occur. +This function is also faster if you already know your needs without buffering. + +The ``NpyIter_IterationNeedsAPI`` function now performs all the checks that were +previously performed at setup time. While it was never necessary to call it +multiple times, doing so will now have a larger cost. + +(`gh-27998 `__) + + +New Features +============ + +* The type parameter of ``np.dtype`` now defaults to ``typing.Any``. + This way, static type-checkers will infer ``dtype: np.dtype`` as + ``dtype: np.dtype[Any]``, without reporting an error. + + (`gh-28669 `__) + +* Static type-checkers now interpret: + + - ``_: np.ndarray`` as ``_: npt.NDArray[typing.Any]``. + - ``_: np.flatiter`` as ``_: np.flatiter[np.ndarray]``. + + This is because their type parameters now have default values. + + (`gh-28940 `__) + +NumPy now registers its pkg-config paths with the pkgconf_ PyPI package +----------------------------------------------------------------------- +The pkgconf_ PyPI package provides an interface for projects like NumPy to +register their own paths to be added to the pkg-config search path. This means +that when using pkgconf_ from PyPI, NumPy will be discoverable without needing +for any custom environment configuration. + +.. attention:: Attention + + This only applies when using the pkgconf_ package from PyPI_, or put another + way, this only applies when installing pkgconf_ via a Python package + manager. + + If you are using ``pkg-config`` or ``pkgconf`` provided by your system, or + any other source that does not use the pkgconf-pypi_ project, the NumPy + pkg-config directory will not be automatically added to the search path. In + these situations, you might want to use ``numpy-config``. + + +.. _pkgconf: https://github.com/pypackaging-native/pkgconf-pypi +.. _PyPI: https://pypi.org/ +.. _pkgconf-pypi: https://github.com/pypackaging-native/pkgconf-pypi + +(`gh-28214 `__) + +Allow ``out=...`` in ufuncs to ensure array result +-------------------------------------------------- +NumPy has the sometimes difficult behavior that it currently usually +returns scalars rather than 0-D arrays (even if the inputs were 0-D arrays). +This is especially problematic for non-numerical dtypes (e.g. ``object``). + +For ufuncs (i.e. 
most simple math functions) it is now possible to use +``out=...`` (literally \`...\`, e.g. ``out=Ellipsis``) which is identical in +behavior to ``out`` not being passed, but will ensure a non-scalar return. +This spelling is borrowed from ``arr1d[0, ...]`` where the ``...`` also ensures +a non-scalar return. + +Other functions with an ``out=`` kwarg should gain support eventually. +Downstream libraries that interoperate via ``__array_ufunc__`` or +``__array_function__`` may need to adapt to support this. + +(`gh-28576 `__) + +Building NumPy with OpenMP Parallelization +------------------------------------------ +NumPy now supports OpenMP parallel processing capabilities when built with the +``-Denable_openmp=true`` Meson build flag. This feature is disabled by default. +When enabled, ``np.sort`` and ``np.argsort`` functions can utilize OpenMP for +parallel thread execution, improving performance for these operations. + +(`gh-28619 `__) + +Interactive examples in the NumPy documentation +----------------------------------------------- +The NumPy documentation includes a number of examples that +can now be run interactively in your browser using WebAssembly +and Pyodide. + +Please note that the examples are currently experimental in +nature and may not work as expected for all methods in the +public API. + +(`gh-26745 `__) + + +Improvements +============ + +* Scalar comparisons between non-comparable dtypes such as + ``np.array(1) == np.array('s')`` now return a NumPy bool instead of + a Python bool. + + (`gh-27288 `__) + +* ``np.nditer`` now has no limit on the number of supported operands + (C-integer). + + (`gh-28080 `__) + +* No-copy pickling is now supported for any + array that can be transposed to a C-contiguous array. + + (`gh-28105 `__) + +* The ``__repr__`` for user-defined dtypes now prefers the ``__name__`` of the + custom dtype over a more generic name constructed from its ``kind`` and + ``itemsize``. + + (`gh-28250 `__) + +* ``np.dot`` now reports floating point exceptions. + + (`gh-28442 `__) + +* ``np.dtypes.StringDType`` is now a + `generic type `_ which + accepts a type argument for ``na_object`` that defaults to ``typing.Never``. + For example, ``StringDType(na_object=None)`` returns a ``StringDType[None]``, + and ``StringDType()`` returns a ``StringDType[typing.Never]``. + + (`gh-28856 `__) + +Added warnings to ``np.isclose`` +-------------------------------- +Added warning messages if at least one of atol or rtol are either ``np.nan`` or +``np.inf`` within ``np.isclose``. + +* Warnings follow the user's ``np.seterr`` settings + +(`gh-28205 `__) + + +Performance improvements and changes +==================================== + +Performance improvements to ``np.unique`` +----------------------------------------- +``np.unique`` now tries to use a hash table to find unique values instead of +sorting values before finding unique values. This is limited to certain dtypes +for now, and the function is now faster for those dtypes. The function now also +exposes a ``sorted`` parameter to allow returning unique values as they were +found, instead of sorting them afterwards. + +(`gh-26018 `__) + +Performance improvements to ``np.sort`` and ``np.argsort`` +---------------------------------------------------------- +``np.sort`` and ``np.argsort`` functions now can leverage OpenMP for parallel +thread execution, resulting in up to 3.5x speedups on x86 architectures with +AVX2 or AVX-512 instructions. This opt-in feature requires NumPy to be built +with the -Denable_openmp Meson flag. 
Users can control the number of threads +used by setting the OMP_NUM_THREADS environment variable. + +(`gh-28619 `__) + +Performance improvements for ``np.float16`` casts +------------------------------------------------- +Earlier, floating point casts to and from ``np.float16`` types +were emulated in software on all platforms. + +Now, on ARM devices that support Neon float16 intrinsics (such as +recent Apple Silicon), the native float16 path is used to achieve +the best performance. + +(`gh-28769 `__) + + +Changes +======= + +* The vector norm ``ord=inf`` and the matrix norms ``ord={1, 2, inf, 'nuc'}`` + now always returns zero for empty arrays. Empty arrays have at least one axis + of size zero. This affects ``np.linalg.norm``, ``np.linalg.vector_norm``, and + ``np.linalg.matrix_norm``. Previously, NumPy would raises errors or return + zero depending on the shape of the array. + + (`gh-28343 `__) + +* A spelling error in the error message returned when converting a string to a + float with the method ``np.format_float_positional`` has been fixed. + + (`gh-28569 `__) + +* NumPy's ``__array_api_version__`` was upgraded from ``2023.12`` to ``2024.12``. + +* ``numpy.count_nonzero`` for ``axis=None`` (default) now returns a NumPy scalar + instead of a Python integer. + +* The parameter ``axis`` in ``numpy.take_along_axis`` function has now a default + value of ``-1``. + + (`gh-28615 `__) + +* Printing of ``np.float16`` and ``np.float32`` scalars and arrays have been improved by + adjusting the transition to scientific notation based on the floating point precision. + A new legacy ``np.printoptions`` mode ``'2.2'`` has been added for backwards compatibility. + + (`gh-28703 `__) + +* Multiplication between a string and integer now raises OverflowError instead + of MemoryError if the result of the multiplication would create a string that + is too large to be represented. This follows Python's behavior. + + (`gh-29060 `__) + +``unique_values`` may return unsorted data +------------------------------------------ +The relatively new function (added in NumPy 2.0) ``unique_values`` may now +return unsorted results. Just as ``unique_counts`` and ``unique_all`` these +never guaranteed a sorted result, however, the result was sorted until now. In +cases where these do return a sorted result, this may change in future releases +to improve performance. + +(`gh-26018 `__) + +Changes to the main iterator and potential numerical changes +------------------------------------------------------------ +The main iterator, used in math functions and via ``np.nditer`` from Python and +``NpyIter`` in C, now behaves differently for some buffered iterations. This +means that: + +* The buffer size used will often be smaller than the maximum buffer sized + allowed by the ``buffersize`` parameter. + +* The "growinner" flag is now honored with buffered reductions when no operand + requires buffering. + +For ``np.sum()`` such changes in buffersize may slightly change numerical +results of floating point operations. Users who use "growinner" for custom +reductions could notice changes in precision (for example, in NumPy we removed +it from ``einsum`` to avoid most precision changes and improve precision for +some 64bit floating point inputs). 
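Returning to the ``unique_values`` note above: code that relied on the previously sorted output can simply sort the result itself. A minimal sketch::

    import numpy as np

    a = np.array([3, 1, 2, 1, 3])
    vals = np.unique_values(a)   # order of the unique values is unspecified
    vals = np.sort(vals)         # sort explicitly if ordered output is required
    print(vals)                  # [1 2 3]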
+ +(`gh-27883 `__) + +The minimum supported GCC version is now 9.3.0 +---------------------------------------------- +The minimum supported version was updated from 8.4.0 to 9.3.0, primarily in +order to reduce the chance of platform-specific bugs in old GCC versions from +causing issues. + +(`gh-28102 `__) + +Changes to automatic bin selection in numpy.histogram +----------------------------------------------------- +The automatic bin selection algorithm in ``numpy.histogram`` has been modified +to avoid out-of-memory errors for samples with low variation. For full control +over the selected bins the user can use set the ``bin`` or ``range`` parameters +of ``numpy.histogram``. + +(`gh-28426 `__) + +Build manylinux_2_28 wheels +--------------------------- +Wheels for linux systems will use the ``manylinux_2_28`` tag (instead of the +``manylinux2014`` tag), which means dropping support for redhat7/centos7, +amazonlinux2, debian9, ubuntu18.04, and other pre-glibc2.28 operating system +versions, as per the `PEP 600 support table`_. + +.. _`PEP 600 support table`: https://github.com/mayeut/pep600_compliance?tab=readme-ov-file#pep600-compliance-check + +(`gh-28436 `__) +Remove use of -Wl,-ld_classic on macOS +-------------------------------------- +Remove use of -Wl,-ld_classic on macOS. This hack is no longer needed by Spack, +and results in libraries that cannot link to other libraries built with ld +(new). -.. if release snippets have been incorporated already, uncomment the follow - line (leave the `.. include:: directive) +(`gh-28713 `__) -.. **Content from release note snippets in doc/release/upcoming_changes:** +Re-enable overriding functions in the ``numpy.strings`` +------------------------------------------------------- +Re-enable overriding functions in the ``numpy.strings`` module. -.. include:: notes-towncrier.rst +(`gh-28741 `__) From 3467b99ad3ff2d04e272acb2eb6cc62c78dcc7f7 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 7 Jun 2025 23:54:21 +0200 Subject: [PATCH 0084/1018] TYP: update typing stubs for ``_pyinstaller/hook-numpy.py`` --- numpy/_pyinstaller/hook-numpy.pyi | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/numpy/_pyinstaller/hook-numpy.pyi b/numpy/_pyinstaller/hook-numpy.pyi index 2642996dad7e..6da4914d7e5a 100644 --- a/numpy/_pyinstaller/hook-numpy.pyi +++ b/numpy/_pyinstaller/hook-numpy.pyi @@ -1,13 +1,6 @@ from typing import Final -# from `PyInstaller.compat` -is_conda: Final[bool] -is_pure_conda: Final[bool] +binaries: Final[list[tuple[str, str]]] = ... -# from `PyInstaller.utils.hooks` -def is_module_satisfies(requirements: str, version: None = None, version_attr: None = None) -> bool: ... - -binaries: Final[list[tuple[str, str]]] - -hiddenimports: Final[list[str]] -excludedimports: Final[list[str]] +hiddenimports: Final[list[str]] = ... +excludedimports: Final[list[str]] = ... From f8204f5e0f338ffad588e56704e596e6678dc4f4 Mon Sep 17 00:00:00 2001 From: Tim Hoffmann <2836374+timhoffm@users.noreply.github.com> Date: Sun, 8 Jun 2025 00:01:43 +0200 Subject: [PATCH 0085/1018] DOC: Document assertion comparison behavior between scalar and empty array Noted in #27457. The minimal thing we should do is document this behavior. While at it, I slightly homogenized the notes. 
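A small sketch of the behavior this change documents, assuming the current ``numpy.testing`` semantics::

    import numpy as np
    from numpy.testing import assert_array_equal

    # Each element of the array-like is compared against the scalar, so an
    # empty array passes the default (non-strict) check vacuously.
    assert_array_equal(np.array([]), 3.0)

    # With strict=True the shapes (and dtypes) must match, so this raises.
    try:
        assert_array_equal(np.array([]), 3.0, strict=True)
    except AssertionError:
        print("strict comparison rejects empty array vs. scalar")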
--- numpy/testing/_private/utils.py | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index d7ceaeab72cc..9bc60d28b160 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -293,9 +293,10 @@ def assert_equal(actual, desired, err_msg='', verbose=True, *, strict=False): Notes ----- - By default, when one of `actual` and `desired` is a scalar and the other is - an array, the function checks that each element of the array is equal to - the scalar. This behaviour can be disabled by setting ``strict==True``. + When one of `actual` and `desired` is a scalar and the other is array_like, the + function checks that each element of the array_like is equal to the scalar. + Note that empty arrays are therefore considered equal to scalars. + This behaviour can be disabled by setting ``strict==True``. Examples -------- @@ -980,9 +981,10 @@ def assert_array_equal(actual, desired, err_msg='', verbose=True, *, Notes ----- - When one of `actual` and `desired` is a scalar and the other is array_like, - the function checks that each element of the array_like object is equal to - the scalar. This behaviour can be disabled with the `strict` parameter. + When one of `actual` and `desired` is a scalar and the other is array_like, the + function checks that each element of the array_like is equal to the scalar. + Note that empty arrays are therefore considered equal to scalars. + This behaviour can be disabled by setting ``strict==True``. Examples -------- @@ -1651,10 +1653,10 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, Notes ----- - When one of `actual` and `desired` is a scalar and the other is - array_like, the function performs the comparison as if the scalar were - broadcasted to the shape of the array. - This behaviour can be disabled with the `strict` parameter. + When one of `actual` and `desired` is a scalar and the other is array_like, the + function performs the comparison as if the scalar were broadcasted to the shape + of the array. Note that empty arrays are therefore considered equal to scalars. + This behaviour can be disabled by setting ``strict==True``. Examples -------- From 7bf85704f2e52589ce18b5124e29778227a36feb Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 8 Jun 2025 02:40:31 +0200 Subject: [PATCH 0086/1018] TYP: add missing ``numpy.lib`` exports --- numpy/lib/__init__.pyi | 34 +++++++++++++++++++++++++--------- 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/numpy/lib/__init__.pyi b/numpy/lib/__init__.pyi index 8532ef8d9fb9..6185a494d035 100644 --- a/numpy/lib/__init__.pyi +++ b/numpy/lib/__init__.pyi @@ -1,15 +1,30 @@ from numpy._core.function_base import add_newdoc from numpy._core.multiarray import add_docstring, tracemalloc_domain -from . import ( # noqa: F401 - array_utils, - format, - introspect, - mixins, - npyio, - scimath, - stride_tricks, -) +# all submodules of `lib` are accessible at runtime through `__getattr__`, +# so we implicitly re-export them here +from . import _array_utils_impl as _array_utils_impl +from . import _arraypad_impl as _arraypad_impl +from . import _arraysetops_impl as _arraysetops_impl +from . import _arrayterator_impl as _arrayterator_impl +from . import _datasource as _datasource +from . import _format_impl as _format_impl +from . import _function_base_impl as _function_base_impl +from . import _histograms_impl as _histograms_impl +from . 
import _index_tricks_impl as _index_tricks_impl +from . import _iotools as _iotools +from . import _nanfunctions_impl as _nanfunctions_impl +from . import _npyio_impl as _npyio_impl +from . import _polynomial_impl as _polynomial_impl +from . import _scimath_impl as _scimath_impl +from . import _shape_base_impl as _shape_base_impl +from . import _stride_tricks_impl as _stride_tricks_impl +from . import _twodim_base_impl as _twodim_base_impl +from . import _type_check_impl as _type_check_impl +from . import _ufunclike_impl as _ufunclike_impl +from . import _utils_impl as _utils_impl +from . import _version as _version +from . import array_utils, format, introspect, mixins, npyio, scimath, stride_tricks from ._arrayterator_impl import Arrayterator from ._version import NumpyVersion @@ -18,6 +33,7 @@ __all__ = [ "add_docstring", "add_newdoc", "array_utils", + "format", "introspect", "mixins", "NumpyVersion", From 128ab7f72ae83735dca2f27d2682503915e9d578 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 8 Jun 2025 03:35:16 +0200 Subject: [PATCH 0087/1018] TYP: add ``containsderivedtypes`` to ``f2py.auxfuncs.__all__`` --- numpy/f2py/auxfuncs.pyi | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/f2py/auxfuncs.pyi b/numpy/f2py/auxfuncs.pyi index 1212f229c660..f2ff09faf33b 100644 --- a/numpy/f2py/auxfuncs.pyi +++ b/numpy/f2py/auxfuncs.pyi @@ -10,6 +10,7 @@ from .cfuncs import errmess __all__ = [ "applyrules", "containscommon", + "containsderivedtypes", "debugcapi", "dictappend", "errmess", @@ -200,7 +201,7 @@ def isintent_inplace(var: _Var) -> bool: ... def isintent_aux(var: _Var) -> bool: ... # -def containsderivedtypes(rout: _ROut) -> _Bool: ... +def containsderivedtypes(rout: _ROut) -> L[0, 1]: ... def containscommon(rout: _ROut) -> _Bool: ... def hasexternals(rout: _ROut) -> bool: ... def hasresultnote(rout: _ROut) -> _Bool: ... From cd538067a4c6747aeddd68e5b0df9543da49a21c Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 8 Jun 2025 03:37:24 +0200 Subject: [PATCH 0088/1018] TYP: remove ``run_command`` annotations in ``f2py.diagnose`` --- numpy/f2py/diagnose.pyi | 3 --- 1 file changed, 3 deletions(-) diff --git a/numpy/f2py/diagnose.pyi b/numpy/f2py/diagnose.pyi index 29cc2b4988b3..b88194ac6bff 100644 --- a/numpy/f2py/diagnose.pyi +++ b/numpy/f2py/diagnose.pyi @@ -1,4 +1 @@ -from _typeshed import StrOrBytesPath - -def run_command(cmd: StrOrBytesPath) -> None: ... def run() -> None: ... 
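On the ``numpy/lib/__init__.pyi`` change above: the ``from . import x as x`` spelling marks each submodule as an intentional re-export, so static checkers accept attribute access that the runtime resolves lazily through ``__getattr__``. A small sketch of the user-visible effect (assuming NumPy 2.x)::

    import numpy as np
    from numpy.lib import array_utils  # re-exported submodule, visible to type checkers

    print(np.lib.NumpyVersion(np.__version__) >= "2.0.0")  # True on 2.x
    print(array_utils.normalize_axis_index(-1, 3))         # -> 2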
From 61512bcef563f799dafff6266631985764386ffa Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 8 Jun 2025 04:10:59 +0200 Subject: [PATCH 0089/1018] BUG: Missing array-api ``capabilities()`` key --- numpy/_array_api_info.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/_array_api_info.py b/numpy/_array_api_info.py index 6ea9e13587f4..067e38798718 100644 --- a/numpy/_array_api_info.py +++ b/numpy/_array_api_info.py @@ -94,14 +94,14 @@ def capabilities(self): >>> info = np.__array_namespace_info__() >>> info.capabilities() {'boolean indexing': True, - 'data-dependent shapes': True} + 'data-dependent shapes': True, + 'max dimensions': 64} """ return { "boolean indexing": True, "data-dependent shapes": True, - # 'max rank' will be part of the 2024.12 standard - # "max rank": 64, + "max dimensions": 64, } def default_device(self): From 21c2e707dad89fb2498040c4badf231882f0404d Mon Sep 17 00:00:00 2001 From: V-R-S Date: Sun, 8 Jun 2025 17:02:03 +0000 Subject: [PATCH 0090/1018] TST: migrating from pytz to zoneinfo + tzdata (where needed) For migration from pytz to zoneinfo function get_tzoffset_from_pytzinfo from numpy/_core/src/multiarray/datetime.c is modified to use astimezone instead of fromutc. As the object ZoneInfo is not directly compatible to be used with datetime object. Hence, something like this would result in an exception. from datetime import datetime from zoneinfo import ZoneInfo a = datetime(2025, 6, 7, 10, 0, 0) zoneInfo = ZoneInfo("US/Central") b = zoneInfo.fromutc(a) ValueError: fromutc: dt.tzinfo is not self The function astimezone can be used with both pytz.timezone object and zoneinfo.ZoneInfo object But, if we want to use the datetime object consistently we cannot let it be a naive type i.e. without a timezone. As the default behaviour of astimezone would take the system timezone if the datetime object is not timezone aware. Hence, I had to also change the call to create datetime object to take UTC timezone. See #29064 --- environment.yml | 2 +- numpy/_core/src/multiarray/_datetime.h | 4 ++-- numpy/_core/src/multiarray/datetime.c | 10 +++++----- requirements/emscripten_test_requirements.txt | 2 +- requirements/test_requirements.txt | 3 ++- 5 files changed, 11 insertions(+), 10 deletions(-) diff --git a/environment.yml b/environment.yml index d2964bf78368..5f1ee5e81a5f 100644 --- a/environment.yml +++ b/environment.yml @@ -49,4 +49,4 @@ dependencies: - gitpython # Used in some tests - cffi - - pytz + - tzdata diff --git a/numpy/_core/src/multiarray/_datetime.h b/numpy/_core/src/multiarray/_datetime.h index dd25e1ffd6cc..112c57433094 100644 --- a/numpy/_core/src/multiarray/_datetime.h +++ b/numpy/_core/src/multiarray/_datetime.h @@ -174,8 +174,8 @@ convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple, npy_bool from_pickle); /* - * Gets a tzoffset in minutes by calling the fromutc() function on - * the Python datetime.tzinfo object. + * Gets a tzoffset in minutes by calling the astimezone() function on + * the Python datetime.datetime object. */ NPY_NO_EXPORT int get_tzoffset_from_pytzinfo(PyObject *timezone, npy_datetimestruct *dts); diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index 9c024dbcd91c..d820474532ca 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -2245,8 +2245,8 @@ NpyDatetime_ConvertPyDateTimeToDatetimeStruct( } /* - * Gets a tzoffset in minutes by calling the fromutc() function on - * the Python datetime.tzinfo object. 
+ * Gets a tzoffset in minutes by calling the astimezone() function on + * the Python datetime.datetime object. */ NPY_NO_EXPORT int get_tzoffset_from_pytzinfo(PyObject *timezone_obj, npy_datetimestruct *dts) @@ -2255,14 +2255,14 @@ get_tzoffset_from_pytzinfo(PyObject *timezone_obj, npy_datetimestruct *dts) npy_datetimestruct loc_dts; /* Create a Python datetime to give to the timezone object */ - dt = PyDateTime_FromDateAndTime((int)dts->year, dts->month, dts->day, - dts->hour, dts->min, 0, 0); + dt = PyDateTimeAPI->DateTime_FromDateAndTime((int)dts->year, dts->month, dts->day, + dts->hour, dts->min, 0, 0, PyDateTime_TimeZone_UTC, PyDateTimeAPI->DateTimeType); if (dt == NULL) { return -1; } /* Convert the datetime from UTC to local time */ - loc_dt = PyObject_CallMethod(timezone_obj, "fromutc", "O", dt); + loc_dt = PyObject_CallMethod(dt, "astimezone", "O", timezone_obj); Py_DECREF(dt); if (loc_dt == NULL) { return -1; diff --git a/requirements/emscripten_test_requirements.txt b/requirements/emscripten_test_requirements.txt index 18cfb219034d..019a69da687a 100644 --- a/requirements/emscripten_test_requirements.txt +++ b/requirements/emscripten_test_requirements.txt @@ -1,4 +1,4 @@ hypothesis==6.81.1 pytest==7.4.0 -pytz==2023.3.post1 +tzdata pytest-xdist diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 4fb1d47bf50d..17260753db4a 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -4,7 +4,6 @@ setuptools==65.5.1 ; python_version < '3.12' setuptools ; python_version >= '3.12' hypothesis==6.104.1 pytest==7.4.0 -pytz==2023.3.post1 pytest-cov==4.1.0 meson ninja; sys_platform != "emscripten" @@ -17,3 +16,5 @@ mypy==1.16.0; platform_python_implementation != "PyPy" typing_extensions>=4.5.0 # for optional f2py encoding detection charset-normalizer +tzdata + From e48fdef5d7bf8ba2f90972c531cdfa07481ae535 Mon Sep 17 00:00:00 2001 From: David Seifert Date: Sun, 8 Jun 2025 19:24:04 +0200 Subject: [PATCH 0091/1018] BUG: remove `NPY_ALIGNMENT_REQUIRED` * This machinery requires strict-aliasing UB and isn't needed anymore with any GCC from the last 15 years. This might also fix #25004. Fixes: #28991 --- numpy/_core/include/numpy/npy_cpu.h | 12 ------------ numpy/_core/src/multiarray/common.h | 15 ++++----------- numpy/_core/src/multiarray/compiled_base.c | 14 +++++--------- numpy/_core/src/multiarray/item_selection.c | 7 +++++-- .../src/multiarray/lowlevel_strided_loops.c.src | 6 +----- 5 files changed, 15 insertions(+), 39 deletions(-) diff --git a/numpy/_core/include/numpy/npy_cpu.h b/numpy/_core/include/numpy/npy_cpu.h index 72f7331a0267..52e9d5996bd1 100644 --- a/numpy/_core/include/numpy/npy_cpu.h +++ b/numpy/_core/include/numpy/npy_cpu.h @@ -120,16 +120,4 @@ information about your platform (OS, CPU and compiler) #endif -/* - * Except for the following architectures, memory access is limited to the natural - * alignment of data types otherwise it may lead to bus error or performance regression. - * For more details about unaligned access, see https://www.kernel.org/doc/Documentation/unaligned-memory-access.txt. 
-*/ -#if defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64) || defined(__aarch64__) || defined(__powerpc64__) - #define NPY_ALIGNMENT_REQUIRED 0 -#endif -#ifndef NPY_ALIGNMENT_REQUIRED - #define NPY_ALIGNMENT_REQUIRED 1 -#endif - #endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ */ diff --git a/numpy/_core/src/multiarray/common.h b/numpy/_core/src/multiarray/common.h index e356b8251931..a18f74bda71a 100644 --- a/numpy/_core/src/multiarray/common.h +++ b/numpy/_core/src/multiarray/common.h @@ -11,6 +11,7 @@ #include "npy_static_data.h" #include "npy_import.h" #include +#include #ifdef __cplusplus extern "C" { @@ -230,15 +231,6 @@ npy_uint_alignment(int itemsize) * compared to memchr it returns one stride past end instead of NULL if needle * is not found. */ -#ifdef __clang__ - /* - * The code below currently makes use of !NPY_ALIGNMENT_REQUIRED, which - * should be OK but causes the clang sanitizer to warn. It may make - * sense to modify the code to avoid this "unaligned" access but - * it would be good to carefully check the performance changes. - */ - __attribute__((no_sanitize("alignment"))) -#endif static inline char * npy_memchr(char * haystack, char needle, npy_intp stride, npy_intp size, npy_intp * psubloopsize, int invert) @@ -259,11 +251,12 @@ npy_memchr(char * haystack, char needle, } else { /* usually find elements to skip path */ - if (!NPY_ALIGNMENT_REQUIRED && needle == 0 && stride == 1) { + if (needle == 0 && stride == 1) { /* iterate until last multiple of 4 */ char * block_end = haystack + size - (size % sizeof(unsigned int)); while (p < block_end) { - unsigned int v = *(unsigned int*)p; + unsigned int v; + memcpy(&v, p, sizeof(v)); if (v != 0) { break; } diff --git a/numpy/_core/src/multiarray/compiled_base.c b/numpy/_core/src/multiarray/compiled_base.c index 86b60cf75944..fee0d4a61a78 100644 --- a/numpy/_core/src/multiarray/compiled_base.c +++ b/numpy/_core/src/multiarray/compiled_base.c @@ -1620,19 +1620,15 @@ pack_inner(const char *inptr, bb[1] = npyv_tobits_b8(npyv_cmpneq_u8(v1, v_zero)); bb[2] = npyv_tobits_b8(npyv_cmpneq_u8(v2, v_zero)); bb[3] = npyv_tobits_b8(npyv_cmpneq_u8(v3, v_zero)); - if(out_stride == 1 && - (!NPY_ALIGNMENT_REQUIRED || isAligned)) { - npy_uint64 *ptr64 = (npy_uint64*)outptr; + if(out_stride == 1 && isAligned) { #if NPY_SIMD_WIDTH == 16 - npy_uint64 bcomp = bb[0] | (bb[1] << 16) | (bb[2] << 32) | (bb[3] << 48); - ptr64[0] = bcomp; + npy_uint64 arr[1] = {bb[0] | (bb[1] << 16) | (bb[2] << 32) | (bb[3] << 48)}; #elif NPY_SIMD_WIDTH == 32 - ptr64[0] = bb[0] | (bb[1] << 32); - ptr64[1] = bb[2] | (bb[3] << 32); + npy_uint64 arr[2] = {bb[0] | (bb[1] << 32), bb[2] | (bb[3] << 32)}; #else - ptr64[0] = bb[0]; ptr64[1] = bb[1]; - ptr64[2] = bb[2]; ptr64[3] = bb[3]; + npy_uint64 arr[4] = {bb[0], bb[1], bb[2], bb[3]}; #endif + memcpy(outptr, arr, sizeof(arr)); outptr += vstepx4; } else { for(int i = 0; i < 4; i++) { diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index d2db10633810..5c036b704774 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -4,6 +4,7 @@ #define PY_SSIZE_T_CLEAN #include #include +#include #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" @@ -2525,11 +2526,13 @@ count_nonzero_u8(const char *data, npy_intp bstride, npy_uintp len) len -= len_m; count = len_m - zcount; #else - if (!NPY_ALIGNMENT_REQUIRED || npy_is_aligned(data, sizeof(npy_uint64))) { + if (npy_is_aligned(data, sizeof(npy_uint64))) { int step = 6 * 
sizeof(npy_uint64); int left_bytes = len % step; for (const char *end = data + len; data < end - left_bytes; data += step) { - count += count_nonzero_bytes_384((const npy_uint64 *)data); + npy_uint64 arr[6]; + memcpy(arr, data, step); + count += count_nonzero_bytes_384(arr); } len = left_bytes; } diff --git a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src index 01ffd225274f..0c4eb3dd9a8d 100644 --- a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src +++ b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src @@ -33,11 +33,7 @@ * instructions (16 byte). * So this flag can only be enabled if autovectorization is disabled. */ -#if NPY_ALIGNMENT_REQUIRED -# define NPY_USE_UNALIGNED_ACCESS 0 -#else -# define NPY_USE_UNALIGNED_ACCESS 0 -#endif +#define NPY_USE_UNALIGNED_ACCESS 0 #define _NPY_NOP1(x) (x) #define _NPY_NOP2(x) (x) From f0e082de06ee5ce1ad120a91a5d3053b1a6032d9 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 8 Jun 2025 19:24:05 +0200 Subject: [PATCH 0092/1018] DOC: Document the removal of the NPY_ALIGNMENT_REQUIRED macro. --- doc/release/upcoming_changes/29094.compatibility.rst | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 doc/release/upcoming_changes/29094.compatibility.rst diff --git a/doc/release/upcoming_changes/29094.compatibility.rst b/doc/release/upcoming_changes/29094.compatibility.rst new file mode 100644 index 000000000000..961ee6504dae --- /dev/null +++ b/doc/release/upcoming_changes/29094.compatibility.rst @@ -0,0 +1,7 @@ +The Macro NPY_ALIGNMENT_REQUIRED has been removed +------------------------------------------------- +The macro was defined in the `npy_cpu.h` file, so might be regarded as +semipublic. As it turns out, with modern compilers and hardware it is almost +always the case that alignment is required, so numpy no longer uses the macro. +It is unlikely anyone uses it, but you might want to compile with the `-Wundef` +flag or equivalent to be sure. 
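The ``memcpy``-based rewrites above avoid the strict-aliasing and alignment hazards of casting a byte pointer to a wider integer pointer. A minimal C sketch of the idiom (not NumPy code)::

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Aliasing- and alignment-safe 64-bit load from an arbitrary byte pointer.
     * Compilers typically lower the memcpy to a single move on targets that
     * allow unaligned access, so there is no extra cost. */
    static inline uint64_t
    load_u64(const char *p)
    {
        uint64_t v;
        memcpy(&v, p, sizeof(v));
        return v;
    }

    int main(void)
    {
        char buf[16] = {0};
        buf[5] = 1;  /* the load below starts at an odd (unaligned) offset */
        printf("%llu\n", (unsigned long long)load_u64(buf + 1));
        return 0;
    }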
From da3c2b43453786194f04b7c1253794f19e54256e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Melissa=20Weber=20Mendon=C3=A7a?= Date: Mon, 9 Jun 2025 12:02:26 -0300 Subject: [PATCH 0093/1018] DOC: Remove version switcher colors [skip actions][skip azp][skip cirrus] --- doc/source/_static/numpy.css | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/doc/source/_static/numpy.css b/doc/source/_static/numpy.css index 9df2f6c546c5..d086215dd638 100644 --- a/doc/source/_static/numpy.css +++ b/doc/source/_static/numpy.css @@ -22,18 +22,6 @@ body { /* Version switcher colors from PyData Sphinx Theme */ -.version-switcher__button[data-active-version-name*="devdocs"] { - background-color: var(--pst-color-warning); - border-color: var(--pst-color-warning); - opacity: 0.9; -} - -.version-switcher__button:not([data-active-version-name*="stable"]):not([data-active-version-name*="dev"]):not([data-active-version-name*="pull"]) { - background-color: var(--pst-color-danger); - border-color: var(--pst-color-danger); - opacity: 0.9; -} - .version-switcher__menu a.list-group-item { font-size: small; } From a8d35009a6a3294423448b63233870f128b7002a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Melissa=20Weber=20Mendon=C3=A7a?= Date: Mon, 9 Jun 2025 12:14:18 -0300 Subject: [PATCH 0094/1018] MAINT: Update comment for clarity --- doc/source/_static/numpy.css | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/_static/numpy.css b/doc/source/_static/numpy.css index d086215dd638..1555dafb5539 100644 --- a/doc/source/_static/numpy.css +++ b/doc/source/_static/numpy.css @@ -20,7 +20,7 @@ body { width: 15%; } -/* Version switcher colors from PyData Sphinx Theme */ +/* Version switcher from PyData Sphinx Theme */ .version-switcher__menu a.list-group-item { font-size: small; From aed7a608f579511e60bd036119990e4035cbe916 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 9 Jun 2025 17:14:19 +0200 Subject: [PATCH 0095/1018] CI: Run mypy with Python 3.13 --- .github/workflows/mypy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 36e89504def7..81fa57239b9b 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -46,9 +46,9 @@ jobs: fail-fast: false matrix: os_python: + - [macos-latest, '3.13'] - [ubuntu-latest, '3.12'] - [windows-latest, '3.11'] - - [macos-latest, '3.11'] steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: From 7d950ec5525fbdaf8b9752364bf52de3cd127436 Mon Sep 17 00:00:00 2001 From: mattip Date: Mon, 9 Jun 2025 22:18:29 +0300 Subject: [PATCH 0096/1018] DOC: tweak release walkthrough for numpy.org news blurb [skip actions][skip azp][skip cirrus] --- doc/RELEASE_WALKTHROUGH.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/RELEASE_WALKTHROUGH.rst b/doc/RELEASE_WALKTHROUGH.rst index 702803172477..6d2194b5c4e6 100644 --- a/doc/RELEASE_WALKTHROUGH.rst +++ b/doc/RELEASE_WALKTHROUGH.rst @@ -344,6 +344,8 @@ This assumes that you have forked ``_:: to the previous links for example. - For the ``*.0`` release in a cycle, add a new section at the top with a short description of the new features and point the news link to it. +- Edit the newsHeader and date fields at the top of news.md +- Also edit the butttonText on line 14 in content/en/config.yaml commit and push:: From 81b2a67fac4782498796d4012208593640faa5ec Mon Sep 17 00:00:00 2001 From: V-R-S Date: Tue, 10 Jun 2025 11:40:25 +0000 Subject: [PATCH 0097/1018] Adding refactored tests . . 
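A minimal sketch of the zoneinfo-based pattern the refactored tests below exercise (requires the ``tzdata`` package on platforms without a system zone database)::

    from datetime import datetime, timezone
    from zoneinfo import ZoneInfo

    import numpy as np

    d = np.datetime64("2010-03-15T06:30", "m")
    # A ZoneInfo object can be passed wherever a pytz timezone used to go:
    print(np.datetime_as_string(d, timezone=ZoneInfo("US/Central")))
    # -> 2010-03-15T01:30-0500

    # The C-level conversion now uses astimezone() on an aware UTC datetime:
    utc = datetime(2010, 3, 15, 6, 30, tzinfo=timezone.utc)
    print(utc.astimezone(ZoneInfo("US/Central")))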
--- numpy/_core/tests/test_datetime.py | 27 ++++++++++---------------- numpy/_core/tests/test_deprecations.py | 6 ------ 2 files changed, 10 insertions(+), 23 deletions(-) diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index 1cbacb8a26a8..a452259ec5c9 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -1,5 +1,6 @@ import datetime import pickle +from zoneinfo import ZoneInfo import pytest @@ -16,13 +17,6 @@ suppress_warnings, ) -# Use pytz to test out various time zones if available -try: - from pytz import timezone as tz - _has_pytz = True -except ImportError: - _has_pytz = False - try: RecursionError except NameError: @@ -1886,7 +1880,6 @@ def test_datetime_as_string(self): np.datetime64('2032-01-01T00:00:00', 'us'), unit='auto'), '2032-01-01') - @pytest.mark.skipif(not _has_pytz, reason="The pytz module is not available.") def test_datetime_as_string_timezone(self): # timezone='local' vs 'UTC' a = np.datetime64('2010-03-15T06:30', 'm') @@ -1901,29 +1894,29 @@ def test_datetime_as_string_timezone(self): b = np.datetime64('2010-02-15T06:30', 'm') - assert_equal(np.datetime_as_string(a, timezone=tz('US/Central')), + assert_equal(np.datetime_as_string(a, timezone=ZoneInfo('US/Central')), '2010-03-15T01:30-0500') - assert_equal(np.datetime_as_string(a, timezone=tz('US/Eastern')), + assert_equal(np.datetime_as_string(a, timezone=ZoneInfo('US/Eastern')), '2010-03-15T02:30-0400') - assert_equal(np.datetime_as_string(a, timezone=tz('US/Pacific')), + assert_equal(np.datetime_as_string(a, timezone=ZoneInfo('US/Pacific')), '2010-03-14T23:30-0700') - assert_equal(np.datetime_as_string(b, timezone=tz('US/Central')), + assert_equal(np.datetime_as_string(b, timezone=ZoneInfo('US/Central')), '2010-02-15T00:30-0600') - assert_equal(np.datetime_as_string(b, timezone=tz('US/Eastern')), + assert_equal(np.datetime_as_string(b, timezone=ZoneInfo('US/Eastern')), '2010-02-15T01:30-0500') - assert_equal(np.datetime_as_string(b, timezone=tz('US/Pacific')), + assert_equal(np.datetime_as_string(b, timezone=ZoneInfo('US/Pacific')), '2010-02-14T22:30-0800') # Dates to strings with a timezone attached is disabled by default assert_raises(TypeError, np.datetime_as_string, a, unit='D', - timezone=tz('US/Pacific')) + timezone=ZoneInfo('US/Pacific')) # Check that we can print out the date in the specified time zone assert_equal(np.datetime_as_string(a, unit='D', - timezone=tz('US/Pacific'), casting='unsafe'), + timezone=ZoneInfo('US/Pacific'), casting='unsafe'), '2010-03-14') assert_equal(np.datetime_as_string(b, unit='D', - timezone=tz('US/Central'), casting='unsafe'), + timezone=ZoneInfo('US/Central'), casting='unsafe'), '2010-02-15') def test_datetime_arange(self): diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index d90c15565c22..cb552357fc96 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -13,12 +13,6 @@ import numpy as np from numpy.testing import assert_raises, temppath -try: - import pytz # noqa: F401 - _has_pytz = True -except ImportError: - _has_pytz = False - class _DeprecationTestCase: # Just as warning: warnings uses re.match, so the start of this message From 0f1a6f81d3e2684802f59cd0d3da5b4ceac770f3 Mon Sep 17 00:00:00 2001 From: V-R-S Date: Tue, 10 Jun 2025 14:44:48 +0000 Subject: [PATCH 0098/1018] Removing additional references to pytz . . 
--- .github/workflows/linux.yml | 2 +- numpy/_core/multiarray.py | 6 +++--- numpy/_core/tests/test_datetime.py | 8 +++++++- requirements/doc_requirements.txt | 2 -- 4 files changed, 11 insertions(+), 7 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 668c1191d055..a0e549d86775 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -271,7 +271,7 @@ jobs: # - name: Check docstests # shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' # run: | - # pip install scipy-doctest>=1.8.0 hypothesis==6.104.1 matplotlib scipy pytz pandas + # pip install scipy-doctest>=1.8.0 hypothesis==6.104.1 matplotlib scipy pandas # spin check-docs -v # spin check-tutorials -v diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index 236ca7e7c9aa..5599494720b6 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -1723,7 +1723,7 @@ def datetime_as_string(arr, unit=None, timezone=None, casting=None): Examples -------- >>> import numpy as np - >>> import pytz + >>> from zoneinfo import ZoneInfo >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]') >>> d array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30', @@ -1736,9 +1736,9 @@ def datetime_as_string(arr, unit=None, timezone=None, casting=None): '2002-10-27T07:30Z'], dtype='>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern')) + >>> np.datetime_as_string(d, timezone=ZoneInfo('US/Eastern')) array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400', '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='=1.8.0 # interactive documentation utilities From 29c85f376bc41095cdc0c7060bb38605e34e040b Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 19:58:40 +0200 Subject: [PATCH 0099/1018] TYP: Accept dispatcher function with optional returns in ``_core.overrides`` (#29171) Co-authored-by: Sebastian Berg --- numpy/_core/overrides.pyi | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/numpy/_core/overrides.pyi b/numpy/_core/overrides.pyi index 05453190efd4..91d624203e81 100644 --- a/numpy/_core/overrides.pyi +++ b/numpy/_core/overrides.pyi @@ -1,11 +1,11 @@ from collections.abc import Callable, Iterable -from typing import Any, Final, NamedTuple, ParamSpec, TypeVar - -from numpy._typing import _SupportsArrayFunc +from typing import Any, Final, NamedTuple, ParamSpec, TypeAlias, TypeVar _T = TypeVar("_T") _Tss = ParamSpec("_Tss") -_FuncT = TypeVar("_FuncT", bound=Callable[..., object]) +_FuncLikeT = TypeVar("_FuncLikeT", bound=type | Callable[..., object]) + +_Dispatcher: TypeAlias = Callable[_Tss, Iterable[object]] ### @@ -18,14 +18,11 @@ class ArgSpec(NamedTuple): keywords: str | None defaults: tuple[Any, ...] -def get_array_function_like_doc(public_api: Callable[..., Any], docstring_template: str = "") -> str: ... -def finalize_array_function_like(public_api: _FuncT) -> _FuncT: ... +def get_array_function_like_doc(public_api: Callable[..., object], docstring_template: str = "") -> str: ... +def finalize_array_function_like(public_api: _FuncLikeT) -> _FuncLikeT: ... # -def verify_matching_signatures( - implementation: Callable[_Tss, object], - dispatcher: Callable[_Tss, Iterable[_SupportsArrayFunc]], -) -> None: ... +def verify_matching_signatures(implementation: Callable[_Tss, object], dispatcher: _Dispatcher[_Tss]) -> None: ... 
# NOTE: This actually returns a `_ArrayFunctionDispatcher` callable wrapper object, with # the original wrapped callable stored in the `._implementation` attribute. It checks @@ -33,11 +30,11 @@ def verify_matching_signatures( # specifies. Since the dispatcher only returns an iterable of passed array-like args, # this overridable behaviour is impossible to annotate. def array_function_dispatch( - dispatcher: Callable[_Tss, Iterable[_SupportsArrayFunc]] | None = None, + dispatcher: _Dispatcher[_Tss] | None = None, module: str | None = None, verify: bool = True, docs_from_dispatcher: bool = False, -) -> Callable[[_FuncT], _FuncT]: ... +) -> Callable[[_FuncLikeT], _FuncLikeT]: ... # def array_function_from_dispatcher( @@ -45,4 +42,4 @@ def array_function_from_dispatcher( module: str | None = None, verify: bool = True, docs_from_dispatcher: bool = True, -) -> Callable[[Callable[_Tss, Iterable[_SupportsArrayFunc]]], Callable[_Tss, _T]]: ... +) -> Callable[[_Dispatcher[_Tss]], Callable[_Tss, _T]]: ... From a17726a2d6ce58d137a68e13711b6fb3e98989bb Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 20:23:19 +0200 Subject: [PATCH 0100/1018] TYP: ``lib._iotools`` annotation improvements (#29177) --- numpy/lib/_iotools.pyi | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/numpy/lib/_iotools.pyi b/numpy/lib/_iotools.pyi index 21cfc3b19503..82275940e137 100644 --- a/numpy/lib/_iotools.pyi +++ b/numpy/lib/_iotools.pyi @@ -13,11 +13,12 @@ from typing import ( import numpy as np import numpy.typing as npt +from numpy._typing._dtype_like import _DTypeLikeNested _T = TypeVar("_T") @type_check_only -class _ValidationKwargs(TypedDict, total=False): +class _NameValidatorKwargs(TypedDict, total=False): excludelist: Iterable[str] | None deletechars: Iterable[str] | None case_sensitive: Literal["upper", "lower"] | bool | None @@ -25,7 +26,7 @@ class _ValidationKwargs(TypedDict, total=False): ### -__docformat__: Final[str] = "restructuredtext en" +__docformat__: Final = "restructuredtext en" class ConverterError(Exception): ... class ConverterLockError(ConverterError): ... @@ -98,17 +99,18 @@ class StringConverter: @classmethod def upgrade_mapper(cls, func: Callable[[str], Any], default: object | None = None) -> None: ... +def _decode_line(line: str | bytes, encoding: str) -> str: ... +def _is_string_like(obj: object) -> bool: ... +def _is_bytes_like(obj: object) -> bool: ... +def has_nested_fields(ndtype: np.dtype[np.void]) -> bool: ... +def flatten_dtype(ndtype: np.dtype[np.void], flatten_base: bool = False) -> type[np.dtype]: ... @overload def str2bool(value: Literal["false", "False", "FALSE"]) -> Literal[False]: ... @overload def str2bool(value: Literal["true", "True", "TRUE"]) -> Literal[True]: ... - -# -def has_nested_fields(ndtype: np.dtype[np.void]) -> bool: ... -def flatten_dtype(ndtype: np.dtype[np.void], flatten_base: bool = False) -> type[np.dtype]: ... def easy_dtype( - ndtype: npt.DTypeLike, - names: Iterable[str] | None = None, + ndtype: str | Sequence[_DTypeLikeNested], + names: str | Sequence[str] | None = None, defaultfmt: str = "f%i", - **validationargs: Unpack[_ValidationKwargs], + **validationargs: Unpack[_NameValidatorKwargs], ) -> np.dtype[np.void]: ... 
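With these stubs, a type checker can narrow ``str2bool`` to its ``Literal``
results and check the ``NameValidator`` keyword options that ``easy_dtype``
forwards. A minimal sketch of the annotated surface, assuming the private
module keeps its current runtime behavior:

    import numpy.lib._iotools as _iot

    flag = _iot.str2bool("TRUE")  # static type: Literal[True]
    dt = _iot.easy_dtype("f8, i4", names="a, b", case_sensitive="lower")
    # -> dtype([('a', '<f8'), ('b', '<i4')]) on little-endian machines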
From 9cbddda01babcc67b9aa72e707ea0320cf0ad22f Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 20:27:11 +0200 Subject: [PATCH 0101/1018] TYP: ``any(None)`` and ``all(None)`` (#29176) --- numpy/_core/fromnumeric.pyi | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 050eb9f75c40..f0f83093c3b1 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -819,9 +819,10 @@ def sum( where: _ArrayLikeBool_co = ..., ) -> _ArrayT: ... +# keep in sync with `any` @overload def all( - a: ArrayLike, + a: ArrayLike | None, axis: None = None, out: None = None, keepdims: Literal[False, 0] | _NoValueType = ..., @@ -830,7 +831,7 @@ def all( ) -> np.bool: ... @overload def all( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None = None, out: None = None, keepdims: _BoolLike_co | _NoValueType = ..., @@ -839,7 +840,7 @@ def all( ) -> Incomplete: ... @overload def all( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None, out: _ArrayT, keepdims: _BoolLike_co | _NoValueType = ..., @@ -848,7 +849,7 @@ def all( ) -> _ArrayT: ... @overload def all( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None = None, *, out: _ArrayT, @@ -856,9 +857,10 @@ def all( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... +# keep in sync with `all` @overload def any( - a: ArrayLike, + a: ArrayLike | None, axis: None = None, out: None = None, keepdims: Literal[False, 0] | _NoValueType = ..., @@ -867,7 +869,7 @@ def any( ) -> np.bool: ... @overload def any( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None = None, out: None = None, keepdims: _BoolLike_co | _NoValueType = ..., @@ -876,7 +878,7 @@ def any( ) -> Incomplete: ... @overload def any( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None, out: _ArrayT, keepdims: _BoolLike_co | _NoValueType = ..., @@ -885,7 +887,7 @@ def any( ) -> _ArrayT: ... @overload def any( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None = None, *, out: _ArrayT, @@ -893,6 +895,7 @@ def any( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... 
+# @overload def cumsum( a: _ArrayLike[_ScalarT], From 42b875f72a44beb6acf19b6b39f2628c0cf6599a Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 20:28:32 +0200 Subject: [PATCH 0102/1018] TYP: Fix invalid inline annotations in ``lib._function_base_impl`` (#29175) * TYP: Fix invalid inline annotations in ``lib._function_base_impl`` * TYP: ``ruff check --fix`` * TYP: prevent inline annotation from causing a circular import error --- numpy/lib/_function_base_impl.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 63346088b6e2..f217af64fb4b 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -4711,14 +4711,14 @@ def _inverted_cdf(n, quantiles): def _quantile_ureduce_func( - a: np.array, - q: np.array, - weights: np.array, - axis: int | None = None, - out=None, - overwrite_input: bool = False, - method="linear", -) -> np.array: + a: np.ndarray, + q: np.ndarray, + weights: np.ndarray, + axis: int | None = None, + out: np.ndarray | None = None, + overwrite_input: bool = False, + method: str = "linear", +) -> np.ndarray: if q.ndim > 2: # The code below works fine for nd, but it might not have useful # semantics. For now, keep the supported dimensions the same as it was @@ -4784,13 +4784,13 @@ def _get_indexes(arr, virtual_indexes, valid_values_count): def _quantile( - arr: np.array, - quantiles: np.array, - axis: int = -1, - method="linear", - out=None, - weights=None, -): + arr: "np.typing.ArrayLike", + quantiles: np.ndarray, + axis: int = -1, + method: str = "linear", + out: np.ndarray | None = None, + weights: "np.typing.ArrayLike | None" = None, +) -> np.ndarray: """ Private function that doesn't support extended axis or keepdims. These methods are extended to this function using _ureduce From 1512865bea80182f360318eb471335326897b707 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 20:31:11 +0200 Subject: [PATCH 0103/1018] TYP: ``numpy._NoValue`` (#29170) --- numpy/__init__.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 41d7411dfdd8..ae26ac2f19f8 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -432,6 +432,8 @@ from numpy._core.shape_base import ( ) from ._expired_attrs_2_0 import __expired_attributes__ as __expired_attributes__ +from ._globals import _CopyMode as _CopyMode +from ._globals import _NoValue as _NoValue from numpy.lib import ( scimath as emath, @@ -495,8 +497,6 @@ from numpy.lib._function_base_impl import ( quantile, ) -from numpy._globals import _CopyMode - from numpy.lib._histograms_impl import ( histogram_bin_edges, histogram, From 7d8b1d5d49b6be5bd47ce2a2016283ffc5b43f0d Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 20:33:36 +0200 Subject: [PATCH 0104/1018] TYP: ``out=...`` in ufuncs (#29169) --- numpy/_typing/_ufunc.pyi | 179 ++++++++++++++++++++++----------------- 1 file changed, 99 insertions(+), 80 deletions(-) diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi index 104307da89db..790149d9c7fb 100644 --- a/numpy/_typing/_ufunc.pyi +++ b/numpy/_typing/_ufunc.pyi @@ -4,8 +4,9 @@ The signatures of the ufuncs are too varied to reasonably type with a single class. So instead, `ufunc` has been expanded into four private subclasses, one for each combination of `~ufunc.nin` and `~ufunc.nout`. 
-""" +""" # noqa: PYI021 +from types import EllipsisType from typing import ( Any, Generic, @@ -102,8 +103,9 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: _ScalarLike_co, - out: None = ..., + x1: _ScalarLike_co, + /, + out: EllipsisType | None = ..., *, where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., @@ -115,8 +117,9 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: ArrayLike, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + x1: ArrayLike, + /, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., *, where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., @@ -128,8 +131,9 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: _SupportsArrayUFunc, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + x1: _SupportsArrayUFunc, + /, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., *, where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., @@ -176,7 +180,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i x1: _ScalarLike_co, x2: _ScalarLike_co, /, - out: None = None, + out: EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], @@ -185,9 +189,9 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def __call__( self, x1: ArrayLike, - x2: NDArray[np.generic], + x2: NDArray[Any], /, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], @@ -195,10 +199,10 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload # (array, array-like) -> array def __call__( self, - x1: NDArray[np.generic], + x1: NDArray[Any], x2: ArrayLike, /, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], @@ -209,7 +213,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i x1: ArrayLike, x2: ArrayLike, /, - out: NDArray[np.generic] | tuple[NDArray[np.generic]], + out: NDArray[Any] | tuple[NDArray[Any]], *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], @@ -220,7 +224,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i x1: ArrayLike, x2: ArrayLike, /, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], @@ -239,7 +243,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i array: ArrayLike, axis: _ShapeLike | None = ..., dtype: DTypeLike = ..., - out: NDArray[Any] | None = ..., + out: NDArray[Any] | EllipsisType | None = ..., keepdims: bool = ..., initial: Any = ..., where: _ArrayLikeBool_co = ..., @@ -250,7 +254,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i array: ArrayLike, axis: SupportsIndex = ..., dtype: DTypeLike = ..., - out: NDArray[Any] | None = ..., + out: NDArray[Any] | EllipsisType | None = ..., ) -> NDArray[Any]: ... 
def reduceat( @@ -259,7 +263,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i indices: _ArrayLikeInt_co, axis: SupportsIndex = ..., dtype: DTypeLike = ..., - out: NDArray[Any] | None = ..., + out: NDArray[Any] | EllipsisType | None = ..., ) -> NDArray[Any]: ... @overload # (scalar, scalar) -> scalar @@ -269,7 +273,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i B: _ScalarLike_co, /, *, - out: None = None, + out: EllipsisType | None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], ) -> Any: ... @@ -277,21 +281,21 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def outer( self, A: ArrayLike, - B: NDArray[np.generic], + B: NDArray[Any], /, *, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], ) -> NDArray[Any]: ... @overload # (array, array-like) -> array def outer( self, - A: NDArray[np.generic], + A: NDArray[Any], B: ArrayLike, /, *, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], ) -> NDArray[Any]: ... @@ -302,7 +306,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i B: ArrayLike, /, *, - out: NDArray[np.generic] | tuple[NDArray[np.generic]], + out: NDArray[Any] | tuple[NDArray[Any]], dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], ) -> NDArray[Any]: ... @@ -313,7 +317,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i B: ArrayLike, /, *, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], ) -> NDArray[Any] | Any: ... 
@@ -340,10 +344,12 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: _ScalarLike_co, - __out1: None = ..., - __out2: None = ..., + x1: _ScalarLike_co, + out1: EllipsisType | None = ..., + out2: None = ..., + /, *, + out: EllipsisType | None = ..., where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., @@ -354,11 +360,12 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: ArrayLike, - __out1: NDArray[Any] | None = ..., - __out2: NDArray[Any] | None = ..., + x1: ArrayLike, + out1: NDArray[Any] | EllipsisType | None = ..., + out2: NDArray[Any] | None = ..., + /, *, - out: _2Tuple[NDArray[Any]] = ..., + out: _2Tuple[NDArray[Any]] | EllipsisType = ..., where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., @@ -369,11 +376,12 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: _SupportsArrayUFunc, - __out1: NDArray[Any] | None = ..., - __out2: NDArray[Any] | None = ..., + x1: _SupportsArrayUFunc, + out1: NDArray[Any] | EllipsisType | None = ..., + out2: NDArray[Any] | None = ..., + /, *, - out: _2Tuple[NDArray[Any]] = ..., + out: _2Tuple[NDArray[Any]] | EllipsisType = ..., where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., @@ -410,11 +418,13 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: _ScalarLike_co, - __x2: _ScalarLike_co, - __out1: None = ..., - __out2: None = ..., + x1: _ScalarLike_co, + x2: _ScalarLike_co, + out1: EllipsisType | None = ..., + out2: None = ..., + /, *, + out: EllipsisType | None = ..., where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., @@ -425,12 +435,13 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: ArrayLike, - __x2: ArrayLike, - __out1: NDArray[Any] | None = ..., - __out2: NDArray[Any] | None = ..., + x1: ArrayLike, + x2: ArrayLike, + out1: NDArray[Any] | EllipsisType | None = ..., + out2: NDArray[Any] | None = ..., + /, *, - out: _2Tuple[NDArray[Any]] = ..., + out: _2Tuple[NDArray[Any]] | EllipsisType = ..., where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., @@ -468,9 +479,10 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] @overload def __call__( self, - __x1: ArrayLike, - __x2: ArrayLike, - out: None = ..., + x1: ArrayLike, + x2: ArrayLike, + /, + out: EllipsisType | None = ..., *, casting: _CastingKind = ..., order: _OrderKACF = ..., @@ -482,9 +494,10 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] @overload def __call__( self, - __x1: ArrayLike, - __x2: ArrayLike, - out: NDArray[Any] | tuple[NDArray[Any]], + x1: ArrayLike, + x2: ArrayLike, + /, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType, *, casting: _CastingKind = ..., order: _OrderKACF = ..., @@ -556,7 +569,7 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, x1: _ScalarLike_co, /, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], ) -> _ReturnType_co: ... 
@overload @@ -564,7 +577,7 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, x1: ArrayLike, /, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], ) -> _ReturnType_co | NDArray[np.object_]: ... @overload @@ -580,7 +593,7 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, x1: _SupportsArrayUFunc, /, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], ) -> Any: ... @@ -611,7 +624,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno x1: _ScalarLike_co, x2: _ScalarLike_co, /, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ReturnType_co: ... @overload @@ -620,7 +633,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno x1: ArrayLike, x2: ArrayLike, /, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ReturnType_co | NDArray[np.object_]: ... @overload @@ -638,7 +651,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno x1: _SupportsArrayUFunc, x2: _SupportsArrayUFunc | ArrayLike, /, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> Any: ... @overload @@ -647,7 +660,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno x1: ArrayLike, x2: _SupportsArrayUFunc, /, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> Any: ... @@ -656,11 +669,11 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno @overload def reduce( self, + /, array: ArrayLike, axis: _ShapeLike | None, dtype: DTypeLike, out: _ArrayT, - /, keepdims: bool = ..., initial: _ScalarLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -685,7 +698,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno array: ArrayLike, axis: _ShapeLike | None = ..., dtype: DTypeLike = ..., - out: None = ..., + out: EllipsisType | None = ..., *, keepdims: Literal[True], initial: _ScalarLike_co = ..., @@ -698,7 +711,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno array: ArrayLike, axis: _ShapeLike | None = ..., dtype: DTypeLike = ..., - out: None = ..., + out: EllipsisType | None = ..., keepdims: bool = ..., initial: _ScalarLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -707,12 +720,12 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno @overload def reduceat( self, + /, array: ArrayLike, indices: _ArrayLikeInt_co, axis: SupportsIndex, dtype: DTypeLike, out: _ArrayT, - /, ) -> _ArrayT: ... @overload def reduceat( @@ -733,7 +746,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno indices: _ArrayLikeInt_co, axis: SupportsIndex = ..., dtype: DTypeLike = ..., - out: None = ..., + out: EllipsisType | None = ..., ) -> NDArray[np.object_]: ... 
@overload def reduceat( @@ -743,21 +756,22 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno indices: _ArrayLikeInt_co, axis: SupportsIndex = ..., dtype: DTypeLike = ..., - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., ) -> Any: ... @overload def accumulate( self, + /, array: ArrayLike, axis: SupportsIndex, dtype: DTypeLike, out: _ArrayT, - /, ) -> _ArrayT: ... @overload def accumulate( self, + /, array: ArrayLike, axis: SupportsIndex = ..., dtype: DTypeLike = ..., @@ -771,7 +785,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno array: ArrayLike, axis: SupportsIndex = ..., dtype: DTypeLike = ..., - out: None = ..., + out: EllipsisType | None = ..., ) -> NDArray[np.object_]: ... @overload @@ -779,8 +793,9 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, A: _ScalarLike_co, B: _ScalarLike_co, - /, *, - out: None = ..., + /, + *, + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ReturnType_co: ... @overload @@ -788,8 +803,9 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, A: ArrayLike, B: ArrayLike, - /, *, - out: None = ..., + /, + *, + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ReturnType_co | NDArray[np.object_]: ... @overload @@ -797,7 +813,8 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, A: ArrayLike, B: ArrayLike, - /, *, + /, + *, out: _ArrayT, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ArrayT: ... @@ -806,8 +823,9 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, A: _SupportsArrayUFunc, B: _SupportsArrayUFunc | ArrayLike, - /, *, - out: None = ..., + /, + *, + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> Any: ... @overload @@ -815,8 +833,9 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, A: _ScalarLike_co, B: _SupportsArrayUFunc | ArrayLike, - /, *, - out: None = ..., + /, + *, + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> Any: ... @@ -841,7 +860,7 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ x3: _ScalarLike_co, /, *xs: _ScalarLike_co, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], ) -> _ReturnType_co: ... @overload @@ -852,7 +871,7 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ x3: ArrayLike, /, *xs: ArrayLike, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], ) -> _ReturnType_co | NDArray[np.object_]: ... @overload @@ -874,7 +893,7 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ x3: _SupportsArrayUFunc | ArrayLike, /, *xs: _SupportsArrayUFunc | ArrayLike, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], ) -> Any: ... @@ -903,7 +922,7 @@ class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]) x1: _ScalarLike_co, /, *xs: _ScalarLike_co, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], ) -> _2PTuple[_ReturnType_co]: ... 
@overload @@ -912,7 +931,7 @@ class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]) x1: ArrayLike, /, *xs: ArrayLike, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], ) -> _2PTuple[_ReturnType_co | NDArray[np.object_]]: ... @overload @@ -930,7 +949,7 @@ class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]) x1: _SupportsArrayUFunc | ArrayLike, /, *xs: _SupportsArrayUFunc | ArrayLike, - out: _2PTuple[NDArray[Any]] | None = ..., + out: _2PTuple[NDArray[Any]] | EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], ) -> Any: ... From 7bb70e10fa25d61c6328f6fecff41be647de7cf3 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 20:36:46 +0200 Subject: [PATCH 0105/1018] TYP: Simplified ``dtype.__new__`` overloads (#29168) --- numpy/__init__.pyi | 271 ++++++++++++----------- numpy/typing/tests/data/reveal/dtype.pyi | 19 +- 2 files changed, 151 insertions(+), 139 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index ae26ac2f19f8..0801e97b7061 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -866,8 +866,6 @@ _SignedIntegerCType: TypeAlias = type[ ] # fmt: skip _FloatingCType: TypeAlias = type[ct.c_float | ct.c_double | ct.c_longdouble] _IntegerCType: TypeAlias = _UnsignedIntegerCType | _SignedIntegerCType -_NumberCType: TypeAlias = _IntegerCType -_GenericCType: TypeAlias = _NumberCType | type[ct.c_bool | ct.c_char | ct.py_object[Any]] # some commonly used builtin types that are known to result in a # `dtype[object_]`, when their *type* is passed to the `dtype` constructor @@ -1067,10 +1065,6 @@ class _SupportsFileMethods(SupportsFlush, Protocol): @type_check_only class _SupportsFileMethodsRW(SupportsWrite[bytes], _SupportsFileMethods, Protocol): ... -@type_check_only -class _SupportsItem(Protocol[_T_co]): - def item(self, /) -> _T_co: ... - @type_check_only class _SupportsDLPack(Protocol[_T_contra]): def __dlpack__(self, /, *, stream: _T_contra | None = None) -> CapsuleType: ... @@ -1177,7 +1171,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[float64] | None, + dtype: type[float64 | ct.c_double] | _Float64Codes | _DoubleCodes | None, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ... @@ -1207,36 +1201,31 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[builtins.bool | np.bool], + dtype: type[builtins.bool | np.bool | ct.c_bool] | _BoolCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., ) -> dtype[np.bool]: ... - # NOTE: `_: type[int]` also accepts `type[int | bool]` @overload def __new__( cls, - dtype: type[int | int_ | np.bool], + dtype: type[int], # also accepts `type[builtins.bool]` align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., ) -> dtype[int_ | np.bool]: ... - # NOTE: `_: type[float]` also accepts `type[float | int | bool]` - # NOTE: `float64` inherits from `float` at runtime; but this isn't - # reflected in these stubs. So an explicit `float64` is required here. @overload def __new__( cls, - dtype: type[float | float64 | int_ | np.bool] | None, + dtype: type[float], # also accepts `type[int | bool]` align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., ) -> dtype[float64 | int_ | np.bool]: ... 
- # NOTE: `_: type[complex]` also accepts `type[complex | float | int | bool]` @overload def __new__( cls, - dtype: type[complex | complex128 | float64 | int_ | np.bool], + dtype: type[complex], # also accepts `type[float | int | bool]` align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., @@ -1244,7 +1233,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[bytes], # also includes `type[bytes_]` + dtype: type[bytes | ct.c_char] | _BytesCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., @@ -1252,7 +1241,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[str], # also includes `type[str_]` + dtype: type[str] | _StrCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., @@ -1266,7 +1255,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[memoryview | void], + dtype: type[void | memoryview] | _VoidDTypeLike | _VoidCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., @@ -1276,127 +1265,182 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[_BuiltinObjectLike | object_], + dtype: type[object_ | _BuiltinObjectLike | ct.py_object[Any]] | _ObjectCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., ) -> dtype[object_]: ... - # Unions of builtins. + # `unsignedinteger` string-based representations and ctypes @overload def __new__( cls, - dtype: type[bytes | str], + dtype: _UInt8Codes | _UByteCodes | type[ct.c_uint8 | ct.c_ubyte], align: builtins.bool = ..., copy: builtins.bool = ..., - metadata: dict[str, Any] = ..., - ) -> dtype[character]: ... + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uint8]: ... @overload def __new__( cls, - dtype: type[bytes | str | memoryview], + dtype: _UInt16Codes | _UShortCodes | type[ct.c_uint16 | ct.c_ushort], align: builtins.bool = ..., copy: builtins.bool = ..., - metadata: dict[str, Any] = ..., - ) -> dtype[flexible]: ... + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uint16]: ... @overload def __new__( cls, - dtype: type[complex | bytes | str | memoryview | _BuiltinObjectLike], + dtype: _UInt32Codes | _UIntCCodes | type[ct.c_uint32 | ct.c_uint], align: builtins.bool = ..., copy: builtins.bool = ..., - metadata: dict[str, Any] = ..., - ) -> dtype[np.bool | int_ | float64 | complex128 | flexible | object_]: ... - - # `unsignedinteger` string-based representations and ctypes - @overload - def __new__(cls, dtype: _UInt8Codes | type[ct.c_uint8], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint8]: ... - @overload - def __new__(cls, dtype: _UInt16Codes | type[ct.c_uint16], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint16]: ... - @overload - def __new__(cls, dtype: _UInt32Codes | type[ct.c_uint32], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint32]: ... - @overload - def __new__(cls, dtype: _UInt64Codes | type[ct.c_uint64], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint64]: ... 
- @overload - def __new__(cls, dtype: _UByteCodes | type[ct.c_ubyte], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ubyte]: ... - @overload - def __new__(cls, dtype: _UShortCodes | type[ct.c_ushort], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ushort]: ... - @overload - def __new__(cls, dtype: _UIntCCodes | type[ct.c_uint], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uintc]: ... - # NOTE: We're assuming here that `uint_ptr_t == size_t`, - # an assumption that does not hold in rare cases (same for `ssize_t`) + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uint32]: ... @overload - def __new__(cls, dtype: _UIntPCodes | type[ct.c_void_p] | type[ct.c_size_t], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uintp]: ... + def __new__( + cls, + dtype: _UInt64Codes | _ULongLongCodes | type[ct.c_uint64 | ct.c_ulonglong], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uint64]: ... @overload - def __new__(cls, dtype: _ULongCodes | type[ct.c_ulong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ulong]: ... + def __new__( + cls, + dtype: _UIntPCodes | type[ct.c_void_p | ct.c_size_t], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uintp]: ... @overload - def __new__(cls, dtype: _ULongLongCodes | type[ct.c_ulonglong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ulonglong]: ... + def __new__( + cls, + dtype: _ULongCodes | type[ct.c_ulong], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[ulong]: ... # `signedinteger` string-based representations and ctypes @overload - def __new__(cls, dtype: _Int8Codes | type[ct.c_int8], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int8]: ... - @overload - def __new__(cls, dtype: _Int16Codes | type[ct.c_int16], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int16]: ... - @overload - def __new__(cls, dtype: _Int32Codes | type[ct.c_int32], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int32]: ... - @overload - def __new__(cls, dtype: _Int64Codes | type[ct.c_int64], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int64]: ... - @overload - def __new__(cls, dtype: _ByteCodes | type[ct.c_byte], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[byte]: ... + def __new__( + cls, + dtype: _Int8Codes | _ByteCodes | type[ct.c_int8 | ct.c_byte], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[int8]: ... @overload - def __new__(cls, dtype: _ShortCodes | type[ct.c_short], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[short]: ... 
+ def __new__( + cls, + dtype: _Int16Codes | _ShortCodes | type[ct.c_int16 | ct.c_short], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[int16]: ... @overload - def __new__(cls, dtype: _IntCCodes | type[ct.c_int], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[intc]: ... + def __new__( + cls, + dtype: _Int32Codes | _IntCCodes | type[ct.c_int32 | ct.c_int], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[int32]: ... @overload - def __new__(cls, dtype: _IntPCodes | type[ct.c_ssize_t], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[intp]: ... + def __new__( + cls, + dtype: _Int64Codes | _LongLongCodes | type[ct.c_int64 | ct.c_longlong], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[int64]: ... @overload - def __new__(cls, dtype: _LongCodes | type[ct.c_long], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[long]: ... + def __new__( + cls, + dtype: _IntPCodes | type[intp | ct.c_ssize_t], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[intp]: ... @overload - def __new__(cls, dtype: _LongLongCodes | type[ct.c_longlong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[longlong]: ... + def __new__( + cls, + dtype: _LongCodes | type[ct.c_long], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[long]: ... # `floating` string-based representations and ctypes @overload - def __new__(cls, dtype: _Float16Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float16]: ... - @overload - def __new__(cls, dtype: _Float32Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float32]: ... - @overload - def __new__(cls, dtype: _Float64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float64]: ... - @overload - def __new__(cls, dtype: _HalfCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[half]: ... - @overload - def __new__(cls, dtype: _SingleCodes | type[ct.c_float], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[single]: ... + def __new__( + cls, + dtype: _Float16Codes | _HalfCodes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[float16]: ... @overload - def __new__(cls, dtype: _DoubleCodes | type[ct.c_double], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[double]: ... + def __new__( + cls, + dtype: _Float32Codes | _SingleCodes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[float32]: ... + # float64 codes are covered by overload 1 @overload - def __new__(cls, dtype: _LongDoubleCodes | type[ct.c_longdouble], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[longdouble]: ... 
+ def __new__( + cls, + dtype: _LongDoubleCodes | type[ct.c_longdouble], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[longdouble]: ... # `complexfloating` string-based representations @overload - def __new__(cls, dtype: _Complex64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[complex64]: ... - @overload - def __new__(cls, dtype: _Complex128Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[complex128]: ... - @overload - def __new__(cls, dtype: _CSingleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[csingle]: ... + def __new__( + cls, + dtype: _Complex64Codes | _CSingleCodes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[complex64]: ... @overload - def __new__(cls, dtype: _CDoubleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[cdouble]: ... + def __new__( + cls, + dtype: _Complex128Codes | _CDoubleCodes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[complex128]: ... @overload - def __new__(cls, dtype: _CLongDoubleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[clongdouble]: ... + def __new__( + cls, + dtype: _CLongDoubleCodes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[clongdouble]: ... # Miscellaneous string-based representations and ctypes @overload - def __new__(cls, dtype: _BoolCodes | type[ct.c_bool], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[np.bool]: ... - @overload - def __new__(cls, dtype: _TD64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[timedelta64]: ... - @overload - def __new__(cls, dtype: _DT64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[datetime64]: ... - @overload - def __new__(cls, dtype: _StrCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[str_]: ... - @overload - def __new__(cls, dtype: _BytesCodes | type[ct.c_char], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[bytes_]: ... - @overload - def __new__(cls, dtype: _VoidCodes | _VoidDTypeLike, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[void]: ... + def __new__( + cls, + dtype: _TD64Codes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[timedelta64]: ... @overload - def __new__(cls, dtype: _ObjectCodes | type[ct.py_object[Any]], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[object_]: ... + def __new__( + cls, + dtype: _DT64Codes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[datetime64]: ... 
# `StringDType` requires special treatment because it has no scalar type @overload @@ -1460,35 +1504,11 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: _NumberCodes | _NumberCType, - align: builtins.bool = ..., - copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[number]: ... - @overload - def __new__( - cls, - dtype: _CharacterCodes | type[ct.c_char], + dtype: _CharacterCodes | type[bytes | builtins.str | ct.c_char], align: builtins.bool = ..., copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[character]: ... - @overload - def __new__( - cls, - dtype: _FlexibleCodes | type[ct.c_char], - align: builtins.bool = ..., - copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[flexible]: ... - @overload - def __new__( - cls, - dtype: _GenericCodes | _GenericCType, - align: builtins.bool = ..., - copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[generic]: ... # Handle strings that can't be expressed as literals; i.e. "S1", "S2", ... @overload @@ -1501,10 +1521,11 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): ) -> dtype: ... # Catch-all overload for object-likes - # NOTE: `object_ | Any` is *not* equivalent to `Any` -- it describes some - # (static) type `T` s.t. `object_ <: T <: builtins.object` (`<:` denotes - # the subtyping relation, the (gradual) typing analogue of `issubclass()`). - # https://typing.readthedocs.io/en/latest/spec/concepts.html#union-types + # NOTE: `object_ | Any` is NOT equivalent to `Any`. It is specified to behave + # like a "sum type" (a.k.a. variant type, discriminated union, or tagged union). + # So the union of a type and `Any` is not the same "union type" that all other + # unions are (by definition). 
+ # https://typing.python.org/en/latest/spec/concepts.html#union-types @overload def __new__( cls, diff --git a/numpy/typing/tests/data/reveal/dtype.pyi b/numpy/typing/tests/data/reveal/dtype.pyi index 721d2708737f..1794c944b3ae 100644 --- a/numpy/typing/tests/data/reveal/dtype.pyi +++ b/numpy/typing/tests/data/reveal/dtype.pyi @@ -14,12 +14,8 @@ dtype_U: np.dtype[np.str_] dtype_V: np.dtype[np.void] dtype_i8: np.dtype[np.int64] -py_int_co: type[int] -py_float_co: type[float] -py_complex_co: type[complex] py_object: type[_PyObjectLike] py_character: type[str | bytes] -py_flexible: type[str | bytes | memoryview] ct_floating: type[ct.c_float | ct.c_double | ct.c_longdouble] ct_number: type[ct.c_uint8 | ct.c_float] @@ -48,19 +44,16 @@ assert_type(np.dtype("str"), np.dtype[np.str_]) # Python types assert_type(np.dtype(bool), np.dtype[np.bool]) -assert_type(np.dtype(py_int_co), np.dtype[np.int_ | np.bool]) assert_type(np.dtype(int), np.dtype[np.int_ | np.bool]) -assert_type(np.dtype(py_float_co), np.dtype[np.float64 | np.int_ | np.bool]) assert_type(np.dtype(float), np.dtype[np.float64 | np.int_ | np.bool]) -assert_type(np.dtype(py_complex_co), np.dtype[np.complex128 | np.float64 | np.int_ | np.bool]) assert_type(np.dtype(complex), np.dtype[np.complex128 | np.float64 | np.int_ | np.bool]) assert_type(np.dtype(py_object), np.dtype[np.object_]) assert_type(np.dtype(str), np.dtype[np.str_]) assert_type(np.dtype(bytes), np.dtype[np.bytes_]) -assert_type(np.dtype(py_character), np.dtype[np.character]) assert_type(np.dtype(memoryview), np.dtype[np.void]) -assert_type(np.dtype(py_flexible), np.dtype[np.flexible]) +assert_type(np.dtype(py_character), np.dtype[np.character]) +# object types assert_type(np.dtype(list), np.dtype[np.object_]) assert_type(np.dtype(dt.datetime), np.dtype[np.object_]) assert_type(np.dtype(dt.timedelta), np.dtype[np.object_]) @@ -75,12 +68,9 @@ assert_type(np.dtype("l"), np.dtype[np.long]) assert_type(np.dtype("longlong"), np.dtype[np.longlong]) assert_type(np.dtype(">g"), np.dtype[np.longdouble]) assert_type(np.dtype(cs_integer), np.dtype[np.integer]) -assert_type(np.dtype(cs_number), np.dtype[np.number]) -assert_type(np.dtype(cs_flex), np.dtype[np.flexible]) -assert_type(np.dtype(cs_generic), np.dtype[np.generic]) # ctypes -assert_type(np.dtype(ct.c_double), np.dtype[np.double]) +assert_type(np.dtype(ct.c_double), np.dtype[np.float64]) # see numpy/numpy#29155 assert_type(np.dtype(ct.c_longlong), np.dtype[np.longlong]) assert_type(np.dtype(ct.c_uint32), np.dtype[np.uint32]) assert_type(np.dtype(ct.c_bool), np.dtype[np.bool]) @@ -90,7 +80,7 @@ assert_type(np.dtype(ct.py_object), np.dtype[np.object_]) # Special case for None assert_type(np.dtype(None), np.dtype[np.float64]) -# Dypes of dtypes +# dtypes of dtypes assert_type(np.dtype(np.dtype(np.float64)), np.dtype[np.float64]) assert_type(np.dtype(dt_inexact), np.dtype[np.inexact]) @@ -99,6 +89,7 @@ assert_type(np.dtype("S8"), np.dtype) # Void assert_type(np.dtype(("U", 10)), np.dtype[np.void]) +assert_type(np.dtype({"formats": (int, "u8"), "names": ("n", "B")}), np.dtype[np.void]) # StringDType assert_type(np.dtype(dt_string), StringDType) From ceb858fe26aa39ddc973539d0271886e336198f1 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 20:44:30 +0200 Subject: [PATCH 0106/1018] TYP: fix and improve ``numpy.lib._utils_impl`` (#29181) --- numpy/lib/_utils_impl.pyi | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/numpy/lib/_utils_impl.pyi b/numpy/lib/_utils_impl.pyi index 
00ed47c9fb67..6fbd26f88084 100644 --- a/numpy/lib/_utils_impl.pyi +++ b/numpy/lib/_utils_impl.pyi @@ -1,10 +1,17 @@ +from typing import LiteralString + from _typeshed import SupportsWrite +from typing_extensions import TypeVar -from numpy._typing import DTypeLike +import numpy as np __all__ = ["get_include", "info", "show_runtime"] -def get_include() -> str: ... +_DTypeT = TypeVar("_DTypeT", bound=np.dtype) + +def get_include() -> LiteralString: ... def show_runtime() -> None: ... -def info(object: object = ..., maxwidth: int = ..., output: SupportsWrite[str] | None = ..., toplevel: str = ...) -> None: ... -def drop_metadata(dtype: DTypeLike, /) -> DTypeLike: ... +def info( + object: object = None, maxwidth: int = 76, output: SupportsWrite[str] | None = None, toplevel: str = "numpy" +) -> None: ... +def drop_metadata(dtype: _DTypeT, /) -> _DTypeT: ... From ea91f1847bc82cd7379102571bdb02bb0cd396bc Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 20:47:23 +0200 Subject: [PATCH 0107/1018] TYP: ``double = float64`` and ``cdouble = complex128`` (#29155) --- numpy/__init__.pyi | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 0801e97b7061..d71a76569ad7 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -4236,9 +4236,9 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __divmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... # type: ignore[override] def __rdivmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... # type: ignore[override] -half: TypeAlias = floating[_NBitHalf] -single: TypeAlias = floating[_NBitSingle] -double: TypeAlias = floating[_NBitDouble] +half: TypeAlias = float16 +single: TypeAlias = float32 +double: TypeAlias = float64 longdouble: TypeAlias = floating[_NBitLongDouble] # The main reason for `complexfloating` having two typevars is cosmetic. @@ -4336,7 +4336,7 @@ class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]): self, other: number[_NBit], mod: None = None, / ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... -complex64: TypeAlias = complexfloating[_32Bit, _32Bit] +complex64: TypeAlias = complexfloating[_32Bit] class complex128(complexfloating[_64Bit, _64Bit], complex): # type: ignore[misc] @overload @@ -4397,9 +4397,9 @@ class complex128(complexfloating[_64Bit, _64Bit], complex): # type: ignore[misc ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... def __rpow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... 
-csingle: TypeAlias = complexfloating[_NBitSingle, _NBitSingle] -cdouble: TypeAlias = complexfloating[_NBitDouble, _NBitDouble] -clongdouble: TypeAlias = complexfloating[_NBitLongDouble, _NBitLongDouble] +csingle: TypeAlias = complex64 +cdouble: TypeAlias = complex128 +clongdouble: TypeAlias = complexfloating[_NBitLongDouble] class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co]): @property From 18cbd6d1303b6c445ba451a707017be4723d28d0 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 21:25:53 +0200 Subject: [PATCH 0108/1018] TYP: Fix missing ``_core.numeric`` (re-)exports (#29166) * TYP: Sync `_core/numerictypes.pyi` with NumType's `numpy-stubs` * TYP: Fix missing ``_core.numeric`` (re-)exports * TYP: appease ``ruff`` --- numpy/_core/numeric.pyi | 784 +++++++++++++++++++++++++++-------- numpy/_core/numerictypes.pyi | 87 ++-- 2 files changed, 661 insertions(+), 210 deletions(-) diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index 919fe1917197..b54fa856b007 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -1,3 +1,4 @@ +from builtins import bool as py_bool from collections.abc import Callable, Sequence from typing import ( Any, @@ -20,32 +21,12 @@ from numpy import ( True_, _OrderCF, _OrderKACF, - # re-exports bitwise_not, - broadcast, - complexfloating, - dtype, - flatiter, - float64, - floating, - from_dlpack, - # other - generic, inf, - int_, - intp, little_endian, - matmul, nan, - ndarray, - nditer, newaxis, - object_, - signedinteger, - timedelta64, ufunc, - unsignedinteger, - vecdot, ) from numpy._typing import ( ArrayLike, @@ -67,38 +48,113 @@ from numpy._typing import ( _SupportsArrayFunc, _SupportsDType, ) +from numpy.lib._array_utils_impl import normalize_axis_tuple as normalize_axis_tuple -from .fromnumeric import all as all -from .fromnumeric import any as any -from .fromnumeric import argpartition as argpartition -from .fromnumeric import matrix_transpose as matrix_transpose -from .fromnumeric import mean as mean +from ._asarray import require +from ._ufunc_config import ( + errstate, + getbufsize, + geterr, + geterrcall, + setbufsize, + seterr, + seterrcall, +) +from .arrayprint import ( + array2string, + array_repr, + array_str, + format_float_positional, + format_float_scientific, + get_printoptions, + printoptions, + set_printoptions, +) +from .fromnumeric import ( + all, + amax, + amin, + any, + argmax, + argmin, + argpartition, + argsort, + around, + choose, + clip, + compress, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + diagonal, + matrix_transpose, + max, + mean, + min, + ndim, + nonzero, + partition, + prod, + ptp, + put, + ravel, + repeat, + reshape, + resize, + round, + searchsorted, + shape, + size, + sort, + squeeze, + std, + sum, + swapaxes, + take, + trace, + transpose, + var, +) +from .multiarray import ALLOW_THREADS as ALLOW_THREADS +from .multiarray import BUFSIZE as BUFSIZE +from .multiarray import CLIP as CLIP +from .multiarray import MAXDIMS as MAXDIMS +from .multiarray import MAY_SHARE_BOUNDS as MAY_SHARE_BOUNDS +from .multiarray import MAY_SHARE_EXACT as MAY_SHARE_EXACT +from .multiarray import RAISE as RAISE +from .multiarray import WRAP as WRAP from .multiarray import ( - # other _Array, _ConstructorEmpty, _KwargsEmpty, - # re-exports arange, array, asanyarray, asarray, ascontiguousarray, asfortranarray, + broadcast, can_cast, concatenate, copyto, dot, + dtype, empty, empty_like, + flatiter, + from_dlpack, frombuffer, fromfile, fromiter, fromstring, 
inner, lexsort, + matmul, may_share_memory, min_scalar_type, + ndarray, + nditer, nested_iters, promote_types, putmask, @@ -108,85 +164,473 @@ from .multiarray import ( where, zeros, ) +from .multiarray import normalize_axis_index as normalize_axis_index +from .numerictypes import ( + ScalarType, + bool, + bool_, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + cdouble, + character, + clongdouble, + complex64, + complex128, + complex192, + complex256, + complexfloating, + csingle, + datetime64, + datetime_as_string, + datetime_data, + double, + flexible, + float16, + float32, + float64, + float96, + float128, + floating, + generic, + half, + inexact, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + intp, + is_busday, + isdtype, + issubdtype, + long, + longdouble, + longlong, + number, + object_, + short, + signedinteger, + single, + str_, + timedelta64, + typecodes, + ubyte, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + unsignedinteger, + ushort, + void, +) +from .umath import ( + absolute, + add, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + bitwise_and, + bitwise_count, + bitwise_or, + bitwise_xor, + cbrt, + ceil, + conj, + conjugate, + copysign, + cos, + cosh, + deg2rad, + degrees, + divide, + divmod, + e, + equal, + euler_gamma, + exp, + exp2, + expm1, + fabs, + float_power, + floor, + floor_divide, + fmax, + fmin, + fmod, + frexp, + frompyfunc, + gcd, + greater, + greater_equal, + heaviside, + hypot, + invert, + isfinite, + isinf, + isnan, + isnat, + lcm, + ldexp, + left_shift, + less, + less_equal, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + matvec, + maximum, + minimum, + mod, + modf, + multiply, + negative, + nextafter, + not_equal, + pi, + positive, + power, + rad2deg, + radians, + reciprocal, + remainder, + right_shift, + rint, + sign, + signbit, + sin, + sinh, + spacing, + sqrt, + square, + subtract, + tan, + tanh, + true_divide, + trunc, + vecdot, + vecmat, +) __all__ = [ - "newaxis", - "ndarray", - "flatiter", - "nditer", - "nested_iters", - "ufunc", + "False_", + "ScalarType", + "True_", + "absolute", + "add", + "all", + "allclose", + "amax", + "amin", + "any", "arange", + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctan2", + "arctanh", + "argmax", + "argmin", + "argpartition", + "argsort", + "argwhere", + "around", "array", - "asarray", + "array2string", + "array_equal", + "array_equiv", + "array_repr", + "array_str", "asanyarray", + "asarray", "ascontiguousarray", "asfortranarray", - "zeros", - "count_nonzero", - "empty", - "broadcast", - "dtype", - "fromstring", - "fromfile", - "frombuffer", - "from_dlpack", - "where", - "argwhere", - "copyto", - "concatenate", - "lexsort", "astype", + "base_repr", + "binary_repr", + "bitwise_and", + "bitwise_count", + "bitwise_not", + "bitwise_or", + "bitwise_xor", + "bool", + "bool_", + "broadcast", + "busday_count", + "busday_offset", + "busdaycalendar", + "byte", + "bytes_", "can_cast", - "promote_types", - "min_scalar_type", - "result_type", - "isfortran", - "empty_like", - "zeros_like", - "ones_like", - "correlate", + "cbrt", + "cdouble", + "ceil", + "character", + "choose", + "clip", + "clongdouble", + "complex64", + "complex128", + "complex192", + "complex256", + "complexfloating", + "compress", + "concatenate", + "conj", + "conjugate", "convolve", - "inner", - "dot", - "outer", - "vdot", - "roll", - "rollaxis", - "moveaxis", + 
"copysign", + "copyto", + "correlate", + "cos", + "cosh", + "count_nonzero", "cross", - "tensordot", - "little_endian", + "csingle", + "cumprod", + "cumsum", + "cumulative_prod", + "cumulative_sum", + "datetime64", + "datetime_as_string", + "datetime_data", + "deg2rad", + "degrees", + "diagonal", + "divide", + "divmod", + "dot", + "double", + "dtype", + "e", + "empty", + "empty_like", + "equal", + "errstate", + "euler_gamma", + "exp", + "exp2", + "expm1", + "fabs", + "flatiter", + "flatnonzero", + "flexible", + "float16", + "float32", + "float64", + "float96", + "float128", + "float_power", + "floating", + "floor", + "floor_divide", + "fmax", + "fmin", + "fmod", + "format_float_positional", + "format_float_scientific", + "frexp", + "from_dlpack", + "frombuffer", + "fromfile", + "fromfunction", "fromiter", - "array_equal", - "array_equiv", + "frompyfunc", + "fromstring", + "full", + "full_like", + "gcd", + "generic", + "get_printoptions", + "getbufsize", + "geterr", + "geterrcall", + "greater", + "greater_equal", + "half", + "heaviside", + "hypot", + "identity", "indices", - "fromfunction", + "inexact", + "inf", + "inner", + "int8", + "int16", + "int32", + "int64", + "int_", + "intc", + "integer", + "intp", + "invert", + "is_busday", "isclose", + "isdtype", + "isfinite", + "isfortran", + "isinf", + "isnan", + "isnat", "isscalar", - "binary_repr", - "base_repr", + "issubdtype", + "lcm", + "ldexp", + "left_shift", + "less", + "less_equal", + "lexsort", + "little_endian", + "log", + "log1p", + "log2", + "log10", + "logaddexp", + "logaddexp2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "long", + "longdouble", + "longlong", + "matmul", + "matrix_transpose", + "matvec", + "max", + "maximum", + "may_share_memory", + "mean", + "min", + "min_scalar_type", + "minimum", + "mod", + "modf", + "moveaxis", + "multiply", + "nan", + "ndarray", + "ndim", + "nditer", + "negative", + "nested_iters", + "newaxis", + "nextafter", + "nonzero", + "not_equal", + "number", + "object_", "ones", - "identity", - "allclose", + "ones_like", + "outer", + "partition", + "pi", + "positive", + "power", + "printoptions", + "prod", + "promote_types", + "ptp", + "put", "putmask", - "flatnonzero", - "inf", - "nan", - "False_", - "True_", - "bitwise_not", - "full", - "full_like", - "matmul", - "vecdot", + "rad2deg", + "radians", + "ravel", + "reciprocal", + "remainder", + "repeat", + "require", + "reshape", + "resize", + "result_type", + "right_shift", + "rint", + "roll", + "rollaxis", + "round", + "searchsorted", + "set_printoptions", + "setbufsize", + "seterr", + "seterrcall", + "shape", "shares_memory", - "may_share_memory", + "short", + "sign", + "signbit", + "signedinteger", + "sin", + "single", + "sinh", + "size", + "sort", + "spacing", + "sqrt", + "square", + "squeeze", + "std", + "str_", + "subtract", + "sum", + "swapaxes", + "take", + "tan", + "tanh", + "tensordot", + "timedelta64", + "trace", + "transpose", + "true_divide", + "trunc", + "typecodes", + "ubyte", + "ufunc", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintc", + "uintp", + "ulong", + "ulonglong", + "unsignedinteger", + "ushort", + "var", + "vdot", + "vecdot", + "vecmat", + "void", + "where", + "zeros", + "zeros_like", ] _T = TypeVar("_T") _ScalarT = TypeVar("_ScalarT", bound=generic) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) +_DTypeT = TypeVar("_DTypeT", bound=dtype) _ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) _ShapeT = TypeVar("_ShapeT", bound=_Shape) _AnyShapeT = TypeVar( @@ -201,88 +645,90 @@ 
_AnyShapeT = TypeVar( _CorrelateMode: TypeAlias = L["valid", "same", "full"] +# keep in sync with `ones_like` @overload def zeros_like( a: _ArrayT, - dtype: None = ..., - order: _OrderKACF = ..., - subok: L[True] = ..., - shape: None = ..., + dtype: None = None, + order: _OrderKACF = "K", + subok: L[True] = True, + shape: None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> _ArrayT: ... @overload def zeros_like( a: _ArrayLike[_ScalarT], - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + dtype: None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... @overload def zeros_like( - a: Any, + a: object, dtype: _DTypeLike[_ScalarT], - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... @overload def zeros_like( - a: Any, - dtype: DTypeLike | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + a: object, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[Any]: ... ones: Final[_ConstructorEmpty] +# keep in sync with `zeros_like` @overload def ones_like( a: _ArrayT, - dtype: None = ..., - order: _OrderKACF = ..., - subok: L[True] = ..., - shape: None = ..., + dtype: None = None, + order: _OrderKACF = "K", + subok: L[True] = True, + shape: None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> _ArrayT: ... @overload def ones_like( a: _ArrayLike[_ScalarT], - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + dtype: None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... @overload def ones_like( - a: Any, + a: object, dtype: _DTypeLike[_ScalarT], - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... @overload def ones_like( - a: Any, - dtype: DTypeLike | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + a: object, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[Any]: ... # TODO: Add overloads for bool, int, float, complex, str, bytes, and memoryview @@ -389,46 +835,46 @@ def full( @overload def full_like( a: _ArrayT, - fill_value: Any, - dtype: None = ..., - order: _OrderKACF = ..., - subok: L[True] = ..., - shape: None = ..., + fill_value: object, + dtype: None = None, + order: _OrderKACF = "K", + subok: L[True] = True, + shape: None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> _ArrayT: ... 
@overload def full_like( a: _ArrayLike[_ScalarT], - fill_value: Any, - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + fill_value: object, + dtype: None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... @overload def full_like( - a: Any, - fill_value: Any, + a: object, + fill_value: object, dtype: _DTypeLike[_ScalarT], - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... @overload def full_like( - a: Any, - fill_value: Any, - dtype: DTypeLike | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + a: object, + fill_value: object, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[Any]: ... # @@ -441,10 +887,10 @@ def count_nonzero( a: NDArray[Any] | _NestedSequence[ArrayLike], axis: _ShapeLike | None = None, *, keepdims: L[True] ) -> NDArray[np.intp]: ... @overload -def count_nonzero(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: bool = False) -> Any: ... +def count_nonzero(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: py_bool = False) -> Any: ... # -def isfortran(a: NDArray[Any] | generic) -> bool: ... +def isfortran(a: NDArray[Any] | generic) -> py_bool: ... def argwhere(a: ArrayLike) -> NDArray[intp]: ... @@ -836,31 +1282,31 @@ def identity( def allclose( a: ArrayLike, b: ArrayLike, - rtol: ArrayLike = ..., - atol: ArrayLike = ..., - equal_nan: bool = ..., -) -> bool: ... + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> py_bool: ... @overload def isclose( a: _ScalarLike_co, b: _ScalarLike_co, - rtol: ArrayLike = ..., - atol: ArrayLike = ..., - equal_nan: bool = ..., + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, ) -> np.bool: ... @overload def isclose( a: ArrayLike, b: ArrayLike, - rtol: ArrayLike = ..., - atol: ArrayLike = ..., - equal_nan: bool = ..., + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, ) -> NDArray[np.bool]: ... -def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: bool = ...) -> bool: ... +def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: py_bool = False) -> py_bool: ... -def array_equiv(a1: ArrayLike, a2: ArrayLike) -> bool: ... +def array_equiv(a1: ArrayLike, a2: ArrayLike) -> py_bool: ... @overload def astype( @@ -868,8 +1314,8 @@ def astype( dtype: _DTypeLike[_ScalarT], /, *, - copy: bool = ..., - device: L["cpu"] | None = ..., + copy: py_bool = True, + device: L["cpu"] | None = None, ) -> ndarray[_ShapeT, dtype[_ScalarT]]: ... @overload def astype( @@ -877,6 +1323,6 @@ def astype( dtype: DTypeLike, /, *, - copy: bool = ..., - device: L["cpu"] | None = ..., + copy: py_bool = True, + device: L["cpu"] | None = None, ) -> ndarray[_ShapeT, dtype]: ... 
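A rough sketch of how the reworked overloads above are expected to resolve for a
static type checker; the commented result types are assumptions read off the
stubs, not verified checker output:

    import numpy as np
    import numpy.typing as npt

    a: npt.NDArray[np.float64] = np.zeros((2, 3))

    r1 = np.zeros_like(a)                 # first overload keeps the array type
    r2 = np.ones_like(a, dtype=np.int32)  # dtype overload -> NDArray[int32]
    r3 = np.full_like([1.0, 2.0], 9.9)    # catch-all      -> NDArray[Any]

Note that the literal defaults (``order="K"``, ``subok=True``, ``shape=None``)
now mirror the runtime signatures instead of ``...``, and the builtin ``bool``
is spelled ``py_bool`` so it cannot be confused with the ``np.bool`` scalar
type re-exported by these modules.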
diff --git a/numpy/_core/numerictypes.pyi b/numpy/_core/numerictypes.pyi index 753fe34800d5..b649b8f91cd1 100644 --- a/numpy/_core/numerictypes.pyi +++ b/numpy/_core/numerictypes.pyi @@ -1,5 +1,5 @@ -import builtins -from typing import Any, TypedDict, type_check_only +from builtins import bool as py_bool +from typing import Final, TypedDict, type_check_only from typing import Literal as L import numpy as np @@ -13,6 +13,8 @@ from numpy import ( clongdouble, complex64, complex128, + complex192, + complex256, complexfloating, csingle, datetime64, @@ -22,6 +24,8 @@ from numpy import ( float16, float32, float64, + float96, + float128, floating, generic, half, @@ -59,9 +63,8 @@ from numpy import ( void, ) from numpy._typing import DTypeLike -from numpy._typing._extended_precision import complex192, complex256, float96, float128 -from ._type_aliases import sctypeDict # noqa: F401 +from ._type_aliases import sctypeDict as sctypeDict from .multiarray import ( busday_count, busday_offset, @@ -152,41 +155,43 @@ class _TypeCodes(TypedDict): Datetime: L['Mm'] All: L['?bhilqnpBHILQNPefdgFDGSUVOMm'] -def isdtype(dtype: dtype | type[Any], kind: DTypeLike | tuple[DTypeLike, ...]) -> builtins.bool: ... +def isdtype(dtype: dtype | type, kind: DTypeLike | tuple[DTypeLike, ...]) -> py_bool: ... +def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> py_bool: ... -def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> builtins.bool: ... - -typecodes: _TypeCodes -ScalarType: tuple[ - type[int], - type[float], - type[complex], - type[builtins.bool], - type[bytes], - type[str], - type[memoryview], - type[np.bool], - type[csingle], - type[cdouble], - type[clongdouble], - type[half], - type[single], - type[double], - type[longdouble], - type[byte], - type[short], - type[intc], - type[long], - type[longlong], - type[timedelta64], - type[datetime64], - type[object_], - type[bytes_], - type[str_], - type[ubyte], - type[ushort], - type[uintc], - type[ulong], - type[ulonglong], - type[void], -] +typecodes: Final[_TypeCodes] = ... +ScalarType: Final[ + tuple[ + type[int], + type[float], + type[complex], + type[py_bool], + type[bytes], + type[str], + type[memoryview], + type[np.bool], + type[csingle], + type[cdouble], + type[clongdouble], + type[half], + type[single], + type[double], + type[longdouble], + type[byte], + type[short], + type[intc], + type[long], + type[longlong], + type[timedelta64], + type[datetime64], + type[object_], + type[bytes_], + type[str_], + type[ubyte], + type[ushort], + type[uintc], + type[ulong], + type[ulonglong], + type[void], + ] +] = ... +typeDict: Final = sctypeDict From 8ce860ffaeecf2a8d3939a6201994a44e58584e8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Jun 2025 13:56:41 -0600 Subject: [PATCH 0109/1018] MAINT: Bump pypa/cibuildwheel from 3.0.0b4 to 3.0.0 (#29180) Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 3.0.0b4 to 3.0.0. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/cf078b0954f3fd08b8445a7bf2c3fb83ab3bb971...5f22145df44122af0f5a201f93cf0207171beca7) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-version: 3.0.0 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 86628f6882cd..453a67088adf 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -49,7 +49,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@cf078b0954f3fd08b8445a7bf2c3fb83ab3bb971 # v3.0.0b4 + - uses: pypa/cibuildwheel@5f22145df44122af0f5a201f93cf0207171beca7 # v3.0.0 env: CIBW_PLATFORM: pyodide diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index f74be5f4a455..68352eb1fc7c 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -177,7 +177,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@cf078b0954f3fd08b8445a7bf2c3fb83ab3bb971 # v3.0.0b4 + uses: pypa/cibuildwheel@5f22145df44122af0f5a201f93cf0207171beca7 # v3.0.0 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From a3ec9af71069a88d511c71d56993b4cc24f22bdd Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 22:29:37 +0200 Subject: [PATCH 0110/1018] CI: Bump `array-api-tests` to `v2025.05.23` (#29149) * CI: Bump `array-api-tests` to `v2025.05.23` * TST: xfail new array-api test failures * TST: increase ``--max-examples`` from 100 to 500 for ``array-api-tests`` * CI: apply review suggestions Co-authored-by: Evgeni Burovski --------- Co-authored-by: Evgeni Burovski --- .github/workflows/linux.yml | 10 +++++----- tools/ci/array-api-xfails.txt | 15 +++++++++++++++ 2 files changed, 20 insertions(+), 5 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index a0e549d86775..3452724841c3 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -156,7 +156,7 @@ jobs: # TODO: gcov env: PYTHONOPTIMIZE: 2 - + aarch64_test: needs: [smoke_test] @@ -204,7 +204,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - + - name: Creates new container run: | docker run --name the_container --interactive \ @@ -221,7 +221,7 @@ jobs: docker run --rm -e "TERM=xterm-256color" \ -v $(pwd):/numpy the_container \ /bin/script -e -q -c "/bin/linux32 /bin/bash --noprofile --norc -eo pipefail -c ' - cd /numpy && spin build + cd /numpy && spin build '" - name: Meson Log @@ -324,7 +324,7 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: repository: data-apis/array-api-tests - ref: 'c48410f96fc58e02eea844e6b7f6cc01680f77ce' # Latest commit as of 2025-04-01 + ref: '3c273cd34d51c64ed893737306d36adab23a94a1' # v2025.05.23 submodules: 'true' path: 'array-api-tests' persist-credentials: false @@ -346,7 +346,7 @@ jobs: PYTHONWARNINGS: 'ignore::UserWarning::,ignore::DeprecationWarning::,ignore::RuntimeWarning::' run: | cd ${GITHUB_WORKSPACE}/array-api-tests - pytest array_api_tests -v -c pytest.ini --ci --max-examples=100 --derandomize --disable-deadline --xfails-file ${GITHUB_WORKSPACE}/tools/ci/array-api-xfails.txt + pytest array_api_tests -v -c pytest.ini -n 4 --max-examples=1000 --derandomize --disable-deadline --xfails-file ${GITHUB_WORKSPACE}/tools/ci/array-api-xfails.txt custom_checks: needs: [smoke_test] diff --git a/tools/ci/array-api-xfails.txt b/tools/ci/array-api-xfails.txt index 98c3895ced06..8370099015c5 100644 --- a/tools/ci/array-api-xfails.txt +++ 
b/tools/ci/array-api-xfails.txt @@ -1,5 +1,20 @@ # finfo return type misalignment array_api_tests/test_data_type_functions.py::test_finfo[float32] +array_api_tests/test_data_type_functions.py::test_finfo[complex64] + +# finfo: data type not inexact +array_api_tests/test_data_type_functions.py::test_finfo[float64] +array_api_tests/test_data_type_functions.py::test_finfo[complex128] + +# iinfo: Invalid integer data type 'O' +array_api_tests/test_data_type_functions.py::test_iinfo[int8] +array_api_tests/test_data_type_functions.py::test_iinfo[uint8] +array_api_tests/test_data_type_functions.py::test_iinfo[int16] +array_api_tests/test_data_type_functions.py::test_iinfo[uint16] +array_api_tests/test_data_type_functions.py::test_iinfo[int32] +array_api_tests/test_data_type_functions.py::test_iinfo[uint32] +array_api_tests/test_data_type_functions.py::test_iinfo[int64] +array_api_tests/test_data_type_functions.py::test_iinfo[uint64] # 'shape' arg is present. 'newshape' is retained for backward compat. array_api_tests/test_signatures.py::test_func_signature[reshape] From 3d1cdb476d12402f28548a32db3f35c4212bf3e2 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 22:53:25 +0200 Subject: [PATCH 0111/1018] TYP: fix ``ravel_multi_index`` false rejections (#29184) --- numpy/_core/multiarray.pyi | 38 ++++++++++++++++---------------------- 1 file changed, 16 insertions(+), 22 deletions(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index e4f869b9beae..13a3f0077ce0 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -499,34 +499,28 @@ def array( like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... +# @overload -def unravel_index( # type: ignore[misc] - indices: _IntLike_co, - shape: _ShapeLike, - order: _OrderCF = ..., -) -> tuple[intp, ...]: ... -@overload -def unravel_index( - indices: _ArrayLikeInt_co, - shape: _ShapeLike, - order: _OrderCF = ..., -) -> tuple[NDArray[intp], ...]: ... - -@overload -def ravel_multi_index( # type: ignore[misc] - multi_index: Sequence[_IntLike_co], - dims: Sequence[SupportsIndex], - mode: _ModeKind | tuple[_ModeKind, ...] = ..., - order: _OrderCF = ..., +def ravel_multi_index( + multi_index: SupportsLenAndGetItem[_IntLike_co], + dims: _ShapeLike, + mode: _ModeKind | tuple[_ModeKind, ...] = "raise", + order: _OrderCF = "C", ) -> intp: ... @overload def ravel_multi_index( - multi_index: Sequence[_ArrayLikeInt_co], - dims: Sequence[SupportsIndex], - mode: _ModeKind | tuple[_ModeKind, ...] = ..., - order: _OrderCF = ..., + multi_index: SupportsLenAndGetItem[_ArrayLikeInt_co], + dims: _ShapeLike, + mode: _ModeKind | tuple[_ModeKind, ...] = "raise", + order: _OrderCF = "C", ) -> NDArray[intp]: ... +# +@overload +def unravel_index(indices: _IntLike_co, shape: _ShapeLike, order: _OrderCF = "C") -> tuple[intp, ...]: ... +@overload +def unravel_index(indices: _ArrayLikeInt_co, shape: _ShapeLike, order: _OrderCF = "C") -> tuple[NDArray[intp], ...]: ... 
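# A hedged illustration of calls the ``SupportsLenAndGetItem`` widening above
# is meant to accept (result types are assumptions, not verified checker
# output):
#
#   np.ravel_multi_index((1, 2), (3, 4))                # -> intp
#   np.ravel_multi_index(np.array([[1], [2]]), (3, 4))  # -> NDArray[intp]
#
# The second call is the false rejection being fixed: an ndarray supports
# ``len()`` and ``__getitem__`` but is not a ``Sequence``, so the previous
# ``Sequence[...]``-typed parameter rejected it even though the runtime
# accepts it.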
+ # NOTE: Allow any sequence of array-like objects @overload def concatenate( # type: ignore[misc] From 2c190768902c4dcf6454bc41df1b0e378099d42c Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 23:07:11 +0200 Subject: [PATCH 0112/1018] STY: ruff/isort config tweaks (#29183) * DEV: import-related ruff config tweaks * STY: run ``ruff check --fix`` to fix the new ``I001`` errors --- numpy/_core/_dtype.pyi | 1 - numpy/_core/_internal.pyi | 1 - numpy/_core/_ufunc_config.pyi | 3 +-- numpy/_core/arrayprint.pyi | 1 - numpy/_core/defchararray.pyi | 1 - numpy/_core/fromnumeric.pyi | 3 +-- numpy/_core/function_base.pyi | 3 +-- numpy/_core/multiarray.pyi | 3 +-- numpy/_core/records.pyi | 3 +-- numpy/_core/tests/test_api.py | 2 +- numpy/_core/tests/test_argparse.py | 4 ++-- numpy/_core/tests/test_array_coercion.py | 4 ++-- numpy/_core/tests/test_arraymethod.py | 2 +- numpy/_core/tests/test_casting_unittests.py | 2 +- numpy/_core/tests/test_conversion_utils.py | 2 +- numpy/_core/tests/test_cpu_dispatcher.py | 3 +-- numpy/_core/tests/test_cpu_features.py | 1 + numpy/_core/tests/test_custom_dtypes.py | 4 ++-- numpy/_core/tests/test_deprecations.py | 4 ++-- numpy/_core/tests/test_dtype.py | 4 ++-- numpy/_core/tests/test_extint128.py | 2 +- numpy/_core/tests/test_hashtable.py | 1 + numpy/_core/tests/test_indexing.py | 2 +- numpy/_core/tests/test_mem_overlap.py | 2 +- numpy/_core/tests/test_multiarray.py | 4 ++-- numpy/_core/tests/test_nditer.py | 2 +- numpy/_core/tests/test_numeric.py | 2 +- numpy/_core/tests/test_scalarbuffer.py | 4 ++-- numpy/_core/tests/test_scalarmath.py | 2 +- numpy/_core/tests/test_simd.py | 2 +- numpy/_core/tests/test_ufunc.py | 6 +++--- numpy/_core/tests/test_umath_accuracy.py | 2 +- numpy/_core/tests/test_umath_complex.py | 5 +++-- numpy/_typing/_nbit_base.pyi | 1 - numpy/_utils/__init__.pyi | 3 +-- numpy/_utils/_inspect.pyi | 3 +-- numpy/_utils/_pep440.pyi | 1 - numpy/ctypeslib/_ctypeslib.pyi | 3 +-- numpy/dtypes.pyi | 1 - numpy/f2py/_backends/_meson.pyi | 1 - numpy/f2py/_src_pyf.pyi | 3 +-- numpy/f2py/auxfuncs.pyi | 3 +-- numpy/f2py/crackfortran.pyi | 3 +-- numpy/f2py/f2py2e.pyi | 1 - numpy/f2py/rules.pyi | 1 - numpy/f2py/symbolic.pyi | 1 - numpy/fft/helper.pyi | 1 - numpy/lib/_arraysetops_impl.pyi | 1 - numpy/lib/_arrayterator_impl.pyi | 1 - numpy/lib/_datasource.pyi | 3 +-- numpy/lib/_format_impl.pyi | 3 +-- numpy/lib/_function_base_impl.pyi | 3 +-- numpy/lib/_index_tricks_impl.pyi | 3 +-- numpy/lib/_npyio_impl.pyi | 15 +++++++-------- numpy/lib/_shape_base_impl.pyi | 1 - numpy/lib/_type_check_impl.pyi | 3 +-- numpy/lib/_user_array_impl.pyi | 3 +-- numpy/lib/_utils_impl.pyi | 3 +-- numpy/lib/recfunctions.pyi | 3 +-- numpy/ma/core.pyi | 3 +-- numpy/polynomial/_polybase.pyi | 1 - numpy/random/bit_generator.pyi | 3 +-- numpy/testing/_private/utils.pyi | 5 ++--- numpy/testing/overrides.pyi | 1 - numpy/testing/print_coercion_tables.pyi | 1 - ruff.toml | 15 +++++++++++++++ 66 files changed, 80 insertions(+), 104 deletions(-) diff --git a/numpy/_core/_dtype.pyi b/numpy/_core/_dtype.pyi index 6cdd77b22e07..adb38583783f 100644 --- a/numpy/_core/_dtype.pyi +++ b/numpy/_core/_dtype.pyi @@ -1,6 +1,5 @@ from typing import Final, TypeAlias, TypedDict, overload, type_check_only from typing import Literal as L - from typing_extensions import ReadOnly, TypeVar import numpy as np diff --git a/numpy/_core/_internal.pyi b/numpy/_core/_internal.pyi index 3038297b6328..04c26ca284db 100644 --- a/numpy/_core/_internal.pyi +++ b/numpy/_core/_internal.pyi @@ -2,7 +2,6 @@ import 
ctypes as ct import re from collections.abc import Callable, Iterable from typing import Any, Final, Generic, Self, overload - from typing_extensions import TypeVar, deprecated import numpy as np diff --git a/numpy/_core/_ufunc_config.pyi b/numpy/_core/_ufunc_config.pyi index 1a6613154072..86df9827d652 100644 --- a/numpy/_core/_ufunc_config.pyi +++ b/numpy/_core/_ufunc_config.pyi @@ -1,8 +1,7 @@ +from _typeshed import SupportsWrite from collections.abc import Callable from typing import Any, Literal, TypeAlias, TypedDict, type_check_only -from _typeshed import SupportsWrite - from numpy import errstate as errstate _ErrKind: TypeAlias = Literal["ignore", "warn", "raise", "call", "print", "log"] diff --git a/numpy/_core/arrayprint.pyi b/numpy/_core/arrayprint.pyi index fec03a6f265c..967cc09e6a25 100644 --- a/numpy/_core/arrayprint.pyi +++ b/numpy/_core/arrayprint.pyi @@ -13,7 +13,6 @@ from typing import ( overload, type_check_only, ) - from typing_extensions import deprecated import numpy as np diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 43005745bfab..26a5af432824 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -1,6 +1,5 @@ from typing import Any, Self, SupportsIndex, SupportsInt, TypeAlias, overload from typing import Literal as L - from typing_extensions import TypeVar import numpy as np diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index f0f83093c3b1..2aedc727e6dc 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -1,4 +1,5 @@ # ruff: noqa: ANN401 +from _typeshed import Incomplete from collections.abc import Sequence from typing import ( Any, @@ -11,8 +12,6 @@ from typing import ( overload, type_check_only, ) - -from _typeshed import Incomplete from typing_extensions import deprecated import numpy as np diff --git a/numpy/_core/function_base.pyi b/numpy/_core/function_base.pyi index 44d1311f5b44..600265b1fd0a 100644 --- a/numpy/_core/function_base.pyi +++ b/numpy/_core/function_base.pyi @@ -1,8 +1,7 @@ +from _typeshed import Incomplete from typing import Literal as L from typing import SupportsIndex, TypeAlias, TypeVar, overload -from _typeshed import Incomplete - import numpy as np from numpy._typing import ( DTypeLike, diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 13a3f0077ce0..91ff666688bb 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -1,5 +1,6 @@ # TODO: Sort out any and all missing functions in this namespace import datetime as dt +from _typeshed import StrOrBytesPath, SupportsLenAndGetItem from collections.abc import Callable, Iterable, Sequence from typing import ( Any, @@ -18,8 +19,6 @@ from typing import ( from typing import ( Literal as L, ) - -from _typeshed import StrOrBytesPath, SupportsLenAndGetItem from typing_extensions import CapsuleType import numpy as np diff --git a/numpy/_core/records.pyi b/numpy/_core/records.pyi index 93177b2d3f75..ead165918478 100644 --- a/numpy/_core/records.pyi +++ b/numpy/_core/records.pyi @@ -1,5 +1,6 @@ # ruff: noqa: ANN401 # pyright: reportSelfClsParameterName=false +from _typeshed import StrOrBytesPath from collections.abc import Iterable, Sequence from typing import ( Any, @@ -11,8 +12,6 @@ from typing import ( overload, type_check_only, ) - -from _typeshed import StrOrBytesPath from typing_extensions import TypeVar import numpy as np diff --git a/numpy/_core/tests/test_api.py b/numpy/_core/tests/test_api.py index 25990536809b..d427ac0399a2 100644 --- 
a/numpy/_core/tests/test_api.py +++ b/numpy/_core/tests/test_api.py @@ -1,10 +1,10 @@ import sys import pytest -from numpy._core._rational_tests import rational import numpy as np import numpy._core.umath as ncu +from numpy._core._rational_tests import rational from numpy.testing import ( HAS_REFCOUNT, assert_, diff --git a/numpy/_core/tests/test_argparse.py b/numpy/_core/tests/test_argparse.py index 7f949c1059eb..0c49ec00277e 100644 --- a/numpy/_core/tests/test_argparse.py +++ b/numpy/_core/tests/test_argparse.py @@ -14,14 +14,14 @@ def func(arg1, /, arg2, *, arg3): import threading import pytest + +import numpy as np from numpy._core._multiarray_tests import ( argparse_example_function as func, ) from numpy._core._multiarray_tests import ( threaded_argparse_example_function as thread_func, ) - -import numpy as np from numpy.testing import IS_WASM diff --git a/numpy/_core/tests/test_array_coercion.py b/numpy/_core/tests/test_array_coercion.py index 883aee63ac3a..a3939daa8904 100644 --- a/numpy/_core/tests/test_array_coercion.py +++ b/numpy/_core/tests/test_array_coercion.py @@ -6,12 +6,12 @@ from itertools import permutations, product -import numpy._core._multiarray_umath as ncu import pytest -from numpy._core._rational_tests import rational from pytest import param import numpy as np +import numpy._core._multiarray_umath as ncu +from numpy._core._rational_tests import rational from numpy.testing import IS_64BIT, IS_PYPY, assert_array_equal diff --git a/numpy/_core/tests/test_arraymethod.py b/numpy/_core/tests/test_arraymethod.py index d8baef7e7fbf..5b3d51585718 100644 --- a/numpy/_core/tests/test_arraymethod.py +++ b/numpy/_core/tests/test_arraymethod.py @@ -7,9 +7,9 @@ from typing import Any import pytest -from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl import numpy as np +from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl class TestResolveDescriptors: diff --git a/numpy/_core/tests/test_casting_unittests.py b/numpy/_core/tests/test_casting_unittests.py index f8441ea9d0d7..91ecc0dc75b0 100644 --- a/numpy/_core/tests/test_casting_unittests.py +++ b/numpy/_core/tests/test_casting_unittests.py @@ -12,9 +12,9 @@ import textwrap import pytest -from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl import numpy as np +from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl from numpy.lib.stride_tricks import as_strided from numpy.testing import assert_array_equal diff --git a/numpy/_core/tests/test_conversion_utils.py b/numpy/_core/tests/test_conversion_utils.py index 03ba33957821..d63ca9e58df5 100644 --- a/numpy/_core/tests/test_conversion_utils.py +++ b/numpy/_core/tests/test_conversion_utils.py @@ -3,9 +3,9 @@ """ import re -import numpy._core._multiarray_tests as mt import pytest +import numpy._core._multiarray_tests as mt from numpy._core.multiarray import CLIP, RAISE, WRAP from numpy.testing import assert_raises diff --git a/numpy/_core/tests/test_cpu_dispatcher.py b/numpy/_core/tests/test_cpu_dispatcher.py index 0a47685d0397..fc9d5e3147e0 100644 --- a/numpy/_core/tests/test_cpu_dispatcher.py +++ b/numpy/_core/tests/test_cpu_dispatcher.py @@ -1,10 +1,9 @@ +from numpy._core import _umath_tests from numpy._core._multiarray_umath import ( __cpu_baseline__, __cpu_dispatch__, __cpu_features__, ) - -from numpy._core import _umath_tests from numpy.testing import assert_equal diff --git a/numpy/_core/tests/test_cpu_features.py b/numpy/_core/tests/test_cpu_features.py index 
d1e3dc610d49..ecc806e9c0e5 100644 --- a/numpy/_core/tests/test_cpu_features.py +++ b/numpy/_core/tests/test_cpu_features.py @@ -6,6 +6,7 @@ import sys import pytest + from numpy._core._multiarray_umath import ( __cpu_baseline__, __cpu_dispatch__, diff --git a/numpy/_core/tests/test_custom_dtypes.py b/numpy/_core/tests/test_custom_dtypes.py index 66e6de35b427..3336286d8c98 100644 --- a/numpy/_core/tests/test_custom_dtypes.py +++ b/numpy/_core/tests/test_custom_dtypes.py @@ -1,12 +1,12 @@ from tempfile import NamedTemporaryFile import pytest + +import numpy as np from numpy._core._multiarray_umath import ( _discover_array_parameters as discover_array_params, ) from numpy._core._multiarray_umath import _get_sfloat_dtype - -import numpy as np from numpy.testing import assert_array_equal SF = _get_sfloat_dtype() diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index cb552357fc96..c4acbf9d2d69 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -6,11 +6,11 @@ import contextlib import warnings -import numpy._core._struct_ufunc_tests as struct_ufunc import pytest -from numpy._core._multiarray_tests import fromstring_null_term_c_api # noqa: F401 import numpy as np +import numpy._core._struct_ufunc_tests as struct_ufunc +from numpy._core._multiarray_tests import fromstring_null_term_c_api # noqa: F401 from numpy.testing import assert_raises, temppath diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 684672a9b71f..d9bd17c48434 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -11,11 +11,11 @@ import hypothesis import pytest from hypothesis.extra import numpy as hynp -from numpy._core._multiarray_tests import create_custom_field_dtype -from numpy._core._rational_tests import rational import numpy as np import numpy.dtypes +from numpy._core._multiarray_tests import create_custom_field_dtype +from numpy._core._rational_tests import rational from numpy.testing import ( HAS_REFCOUNT, IS_PYSTON, diff --git a/numpy/_core/tests/test_extint128.py b/numpy/_core/tests/test_extint128.py index 1a05151ac6be..6e4d74b81d39 100644 --- a/numpy/_core/tests/test_extint128.py +++ b/numpy/_core/tests/test_extint128.py @@ -2,10 +2,10 @@ import itertools import operator -import numpy._core._multiarray_tests as mt import pytest import numpy as np +import numpy._core._multiarray_tests as mt from numpy.testing import assert_equal, assert_raises INT64_MAX = np.iinfo(np.int64).max diff --git a/numpy/_core/tests/test_hashtable.py b/numpy/_core/tests/test_hashtable.py index 74be5219a287..25a7158aaf6f 100644 --- a/numpy/_core/tests/test_hashtable.py +++ b/numpy/_core/tests/test_hashtable.py @@ -1,6 +1,7 @@ import random import pytest + from numpy._core._multiarray_tests import identityhash_tester diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index 757b8d72782f..81ba85ea4648 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -5,9 +5,9 @@ from itertools import product import pytest -from numpy._core._multiarray_tests import array_indexing import numpy as np +from numpy._core._multiarray_tests import array_indexing from numpy.exceptions import ComplexWarning, VisibleDeprecationWarning from numpy.testing import ( HAS_REFCOUNT, diff --git a/numpy/_core/tests/test_mem_overlap.py b/numpy/_core/tests/test_mem_overlap.py index 78b943854679..240ea62850ee 100644 --- 
a/numpy/_core/tests/test_mem_overlap.py +++ b/numpy/_core/tests/test_mem_overlap.py @@ -1,10 +1,10 @@ import itertools import pytest -from numpy._core._multiarray_tests import internal_overlap, solve_diophantine import numpy as np from numpy._core import _umath_tests +from numpy._core._multiarray_tests import internal_overlap, solve_diophantine from numpy.lib.stride_tricks import as_strided from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 7603449ba28e..146431c7a7c0 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -21,11 +21,11 @@ from datetime import datetime, timedelta from decimal import Decimal -import numpy._core._multiarray_tests as _multiarray_tests import pytest -from numpy._core._rational_tests import rational import numpy as np +import numpy._core._multiarray_tests as _multiarray_tests +from numpy._core._rational_tests import rational from numpy._core.multiarray import _get_ndarray_c_version, dot from numpy._core.tests._locales import CommaDecimalPointLocale from numpy.exceptions import AxisError, ComplexWarning diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index ec28e48c5046..a29a49bfb71a 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -2,10 +2,10 @@ import sys import textwrap -import numpy._core._multiarray_tests as _multiarray_tests import pytest import numpy as np +import numpy._core._multiarray_tests as _multiarray_tests import numpy._core.umath as ncu from numpy import all, arange, array, nditer from numpy.testing import ( diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 8e786bf13d9e..8a72e4bfa65d 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -9,11 +9,11 @@ from hypothesis import given from hypothesis import strategies as st from hypothesis.extra import numpy as hynp -from numpy._core._rational_tests import rational import numpy as np from numpy import ma from numpy._core import sctypes +from numpy._core._rational_tests import rational from numpy._core.numerictypes import obj2sctype from numpy.exceptions import AxisError from numpy.random import rand, randint, randn diff --git a/numpy/_core/tests/test_scalarbuffer.py b/numpy/_core/tests/test_scalarbuffer.py index c957aec4f9b2..4d2744b85e53 100644 --- a/numpy/_core/tests/test_scalarbuffer.py +++ b/numpy/_core/tests/test_scalarbuffer.py @@ -2,10 +2,10 @@ Test scalar buffer interface adheres to PEP 3118 """ import pytest -from numpy._core._multiarray_tests import get_buffer_info -from numpy._core._rational_tests import rational import numpy as np +from numpy._core._multiarray_tests import get_buffer_info +from numpy._core._rational_tests import rational from numpy.testing import assert_, assert_equal, assert_raises # PEP3118 format strings for native (standard alignment and byteorder) types diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index fc37897bb7f7..746b410f79d2 100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -9,9 +9,9 @@ from hypothesis import given, settings from hypothesis.extra import numpy as hynp from hypothesis.strategies import sampled_from -from numpy._core._rational_tests import rational import numpy as np +from numpy._core._rational_tests import rational from numpy._utils import _pep440 from 
numpy.exceptions import ComplexWarning from numpy.testing import ( diff --git a/numpy/_core/tests/test_simd.py b/numpy/_core/tests/test_simd.py index 697d89bcc26c..acea4315e679 100644 --- a/numpy/_core/tests/test_simd.py +++ b/numpy/_core/tests/test_simd.py @@ -6,8 +6,8 @@ import re import pytest -from numpy._core._multiarray_umath import __cpu_baseline__ +from numpy._core._multiarray_umath import __cpu_baseline__ from numpy._core._simd import clear_floatstatus, get_floatstatus, targets diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index f2b3f5a35a37..21ebc02c2625 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -4,13 +4,13 @@ import sys import warnings -import numpy._core._operand_flag_tests as opflag_tests -import numpy._core._rational_tests as _rational_tests -import numpy._core._umath_tests as umt import pytest from pytest import param import numpy as np +import numpy._core._operand_flag_tests as opflag_tests +import numpy._core._rational_tests as _rational_tests +import numpy._core._umath_tests as umt import numpy._core.umath as ncu import numpy.linalg._umath_linalg as uml from numpy.exceptions import AxisError diff --git a/numpy/_core/tests/test_umath_accuracy.py b/numpy/_core/tests/test_umath_accuracy.py index 5707e9279d5b..da9419d63a8a 100644 --- a/numpy/_core/tests/test_umath_accuracy.py +++ b/numpy/_core/tests/test_umath_accuracy.py @@ -4,9 +4,9 @@ from os import path import pytest -from numpy._core._multiarray_umath import __cpu_features__ import numpy as np +from numpy._core._multiarray_umath import __cpu_features__ from numpy.testing import assert_array_max_ulp from numpy.testing._private.utils import _glibc_older_than diff --git a/numpy/_core/tests/test_umath_complex.py b/numpy/_core/tests/test_umath_complex.py index a97af475def4..8f6f5c682a91 100644 --- a/numpy/_core/tests/test_umath_complex.py +++ b/numpy/_core/tests/test_umath_complex.py @@ -1,11 +1,12 @@ import platform import sys -# import the c-extension module directly since _arg is not exported via umath -import numpy._core._multiarray_umath as ncu import pytest import numpy as np + +# import the c-extension module directly since _arg is not exported via umath +import numpy._core._multiarray_umath as ncu from numpy.testing import ( assert_almost_equal, assert_array_equal, diff --git a/numpy/_typing/_nbit_base.pyi b/numpy/_typing/_nbit_base.pyi index ccf8f5ceac45..d88c9f4d9fd9 100644 --- a/numpy/_typing/_nbit_base.pyi +++ b/numpy/_typing/_nbit_base.pyi @@ -3,7 +3,6 @@ # mypy: disable-error-code=misc from typing import final - from typing_extensions import deprecated # Deprecated in NumPy 2.3, 2025-05-01 diff --git a/numpy/_utils/__init__.pyi b/numpy/_utils/__init__.pyi index f3472df9a554..2ed4e88b3e32 100644 --- a/numpy/_utils/__init__.pyi +++ b/numpy/_utils/__init__.pyi @@ -1,8 +1,7 @@ +from _typeshed import IdentityFunction from collections.abc import Callable, Iterable from typing import Protocol, TypeVar, overload, type_check_only -from _typeshed import IdentityFunction - from ._convertions import asbytes as asbytes from ._convertions import asunicode as asunicode diff --git a/numpy/_utils/_inspect.pyi b/numpy/_utils/_inspect.pyi index d53c3c40fcf5..40546d2f4497 100644 --- a/numpy/_utils/_inspect.pyi +++ b/numpy/_utils/_inspect.pyi @@ -1,8 +1,7 @@ import types +from _typeshed import SupportsLenAndGetItem from collections.abc import Callable, Mapping from typing import Any, Final, TypeAlias, TypeVar, overload - -from _typeshed import 
SupportsLenAndGetItem from typing_extensions import TypeIs __all__ = ["formatargspec", "getargspec"] diff --git a/numpy/_utils/_pep440.pyi b/numpy/_utils/_pep440.pyi index 29dd4c912aa9..2c338d4e5b14 100644 --- a/numpy/_utils/_pep440.pyi +++ b/numpy/_utils/_pep440.pyi @@ -13,7 +13,6 @@ from typing import ( from typing import ( Literal as L, ) - from typing_extensions import TypeIs __all__ = ["VERSION_PATTERN", "InvalidVersion", "LegacyVersion", "Version", "parse"] diff --git a/numpy/ctypeslib/_ctypeslib.pyi b/numpy/ctypeslib/_ctypeslib.pyi index e26d6052eaae..aecb3899bdf5 100644 --- a/numpy/ctypeslib/_ctypeslib.pyi +++ b/numpy/ctypeslib/_ctypeslib.pyi @@ -1,6 +1,7 @@ # NOTE: Numpy's mypy plugin is used for importing the correct # platform-specific `ctypes._SimpleCData[int]` sub-type import ctypes +from _typeshed import StrOrBytesPath from collections.abc import Iterable, Sequence from ctypes import c_int64 as _c_intp from typing import ( @@ -13,8 +14,6 @@ from typing import ( ) from typing import Literal as L -from _typeshed import StrOrBytesPath - import numpy as np from numpy import ( byte, diff --git a/numpy/dtypes.pyi b/numpy/dtypes.pyi index 007dc643c0e3..f76b08fc28dc 100644 --- a/numpy/dtypes.pyi +++ b/numpy/dtypes.pyi @@ -12,7 +12,6 @@ from typing import ( type_check_only, ) from typing import Literal as L - from typing_extensions import TypeVar import numpy as np diff --git a/numpy/f2py/_backends/_meson.pyi b/numpy/f2py/_backends/_meson.pyi index b9f959537214..67baf9b76845 100644 --- a/numpy/f2py/_backends/_meson.pyi +++ b/numpy/f2py/_backends/_meson.pyi @@ -2,7 +2,6 @@ from collections.abc import Callable from pathlib import Path from typing import Final from typing import Literal as L - from typing_extensions import override from ._backend import Backend diff --git a/numpy/f2py/_src_pyf.pyi b/numpy/f2py/_src_pyf.pyi index f5aecbf1decd..50ddd07bf638 100644 --- a/numpy/f2py/_src_pyf.pyi +++ b/numpy/f2py/_src_pyf.pyi @@ -1,9 +1,8 @@ import re +from _typeshed import StrOrBytesPath from collections.abc import Mapping from typing import Final -from _typeshed import StrOrBytesPath - routine_start_re: Final[re.Pattern[str]] = ... routine_end_re: Final[re.Pattern[str]] = ... function_start_re: Final[re.Pattern[str]] = ... 
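Every stub hunk in this patch applies one and the same regrouping, driven by
the ``[lint.isort]`` table added to ``ruff.toml`` later in this diff:
``_typeshed`` and ``typing_extensions`` are declared ``extra-standard-library``,
so they sort into the stdlib import group rather than a separate third-party
group. A minimal before/after sketch on a hypothetical stub (the module names
are illustrative only):

    # before: _typeshed and typing_extensions form their own
    # third-party group below the standard library
    import os
    from typing import Any

    from _typeshed import StrPath
    from typing_extensions import TypeVar

    # after: treated as standard library, so the imports merge
    # into a single alphabetized group
    import os
    from _typeshed import StrPath
    from typing import Any
    from typing_extensions import TypeVar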
diff --git a/numpy/f2py/auxfuncs.pyi b/numpy/f2py/auxfuncs.pyi index f2ff09faf33b..dfbae5c7d94d 100644 --- a/numpy/f2py/auxfuncs.pyi +++ b/numpy/f2py/auxfuncs.pyi @@ -1,10 +1,9 @@ +from _typeshed import FileDescriptorOrPath from collections.abc import Callable, Mapping from pprint import pprint as show from typing import Any, Final, Never, TypeAlias, TypeVar, overload from typing import Literal as L -from _typeshed import FileDescriptorOrPath - from .cfuncs import errmess __all__ = [ diff --git a/numpy/f2py/crackfortran.pyi b/numpy/f2py/crackfortran.pyi index 6b08f8784f01..c5f4fd7585ba 100644 --- a/numpy/f2py/crackfortran.pyi +++ b/numpy/f2py/crackfortran.pyi @@ -1,10 +1,9 @@ import re +from _typeshed import StrOrBytesPath, StrPath from collections.abc import Callable, Iterable, Mapping from typing import IO, Any, Concatenate, Final, Never, ParamSpec, TypeAlias, overload from typing import Literal as L -from _typeshed import StrOrBytesPath, StrPath - from .__version__ import version from .auxfuncs import isintent_dict as isintent_dict diff --git a/numpy/f2py/f2py2e.pyi b/numpy/f2py/f2py2e.pyi index dd1d0c39e8a5..03aeffc5dcdd 100644 --- a/numpy/f2py/f2py2e.pyi +++ b/numpy/f2py/f2py2e.pyi @@ -3,7 +3,6 @@ import pprint from collections.abc import Hashable, Iterable, Mapping, MutableMapping, Sequence from types import ModuleType from typing import Any, Final, NotRequired, TypedDict, type_check_only - from typing_extensions import TypeVar, override from .__version__ import version diff --git a/numpy/f2py/rules.pyi b/numpy/f2py/rules.pyi index aa91e942698a..58614060ba87 100644 --- a/numpy/f2py/rules.pyi +++ b/numpy/f2py/rules.pyi @@ -1,7 +1,6 @@ from collections.abc import Callable, Iterable, Mapping from typing import Any, Final, TypeAlias from typing import Literal as L - from typing_extensions import TypeVar from .__version__ import version diff --git a/numpy/f2py/symbolic.pyi b/numpy/f2py/symbolic.pyi index 74e7a48ab327..e7b14f751dc3 100644 --- a/numpy/f2py/symbolic.pyi +++ b/numpy/f2py/symbolic.pyi @@ -2,7 +2,6 @@ from collections.abc import Callable, Mapping from enum import Enum from typing import Any, Generic, ParamSpec, Self, TypeAlias, overload from typing import Literal as L - from typing_extensions import TypeVar __all__ = ["Expr"] diff --git a/numpy/fft/helper.pyi b/numpy/fft/helper.pyi index 887cbe7e27c9..7cf391a12e1d 100644 --- a/numpy/fft/helper.pyi +++ b/numpy/fft/helper.pyi @@ -1,6 +1,5 @@ from typing import Any from typing import Literal as L - from typing_extensions import deprecated import numpy as np diff --git a/numpy/lib/_arraysetops_impl.pyi b/numpy/lib/_arraysetops_impl.pyi index a7ad5b9d91e7..4279b809f78e 100644 --- a/numpy/lib/_arraysetops_impl.pyi +++ b/numpy/lib/_arraysetops_impl.pyi @@ -1,6 +1,5 @@ from typing import Any, Generic, NamedTuple, SupportsIndex, TypeAlias, overload from typing import Literal as L - from typing_extensions import TypeVar, deprecated import numpy as np diff --git a/numpy/lib/_arrayterator_impl.pyi b/numpy/lib/_arrayterator_impl.pyi index e1a9e056a6e1..5fd589a3ac36 100644 --- a/numpy/lib/_arrayterator_impl.pyi +++ b/numpy/lib/_arrayterator_impl.pyi @@ -3,7 +3,6 @@ from collections.abc import Generator from types import EllipsisType from typing import Any, Final, TypeAlias, overload - from typing_extensions import TypeVar import numpy as np diff --git a/numpy/lib/_datasource.pyi b/numpy/lib/_datasource.pyi index 9f91fdf893a0..ad52b7f67af0 100644 --- a/numpy/lib/_datasource.pyi +++ b/numpy/lib/_datasource.pyi @@ -1,8 +1,7 @@ +from _typeshed 
import OpenBinaryMode, OpenTextMode from pathlib import Path from typing import IO, Any, TypeAlias -from _typeshed import OpenBinaryMode, OpenTextMode - _Mode: TypeAlias = OpenBinaryMode | OpenTextMode ### diff --git a/numpy/lib/_format_impl.pyi b/numpy/lib/_format_impl.pyi index 870c2d761bb0..b45df02796d7 100644 --- a/numpy/lib/_format_impl.pyi +++ b/numpy/lib/_format_impl.pyi @@ -1,7 +1,6 @@ import os -from typing import Any, BinaryIO, Final, TypeAlias, TypeGuard - from _typeshed import SupportsRead, SupportsWrite +from typing import Any, BinaryIO, Final, TypeAlias, TypeGuard import numpy as np import numpy.typing as npt diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 090fb233dde1..cb6e18b53fa4 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -1,4 +1,5 @@ # ruff: noqa: ANN401 +from _typeshed import Incomplete from collections.abc import Callable, Iterable, Sequence from typing import ( Any, @@ -13,8 +14,6 @@ from typing import ( type_check_only, ) from typing import Literal as L - -from _typeshed import Incomplete from typing_extensions import TypeIs, deprecated import numpy as np diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index 7ac2b3a093e0..c4509d9aa3ad 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -1,8 +1,7 @@ +from _typeshed import Incomplete from collections.abc import Sequence from typing import Any, ClassVar, Final, Generic, Self, SupportsIndex, final, overload from typing import Literal as L - -from _typeshed import Incomplete from typing_extensions import TypeVar, deprecated import numpy as np diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index 40369c55f63d..94f014ccd52d 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -1,5 +1,12 @@ import types import zipfile +from _typeshed import ( + StrOrBytesPath, + StrPath, + SupportsKeysAndGetItem, + SupportsRead, + SupportsWrite, +) from collections.abc import Callable, Collection, Iterable, Iterator, Mapping, Sequence from re import Pattern from typing import ( @@ -14,14 +21,6 @@ from typing import ( type_check_only, ) from typing import Literal as L - -from _typeshed import ( - StrOrBytesPath, - StrPath, - SupportsKeysAndGetItem, - SupportsRead, - SupportsWrite, -) from typing_extensions import TypeVar, deprecated, override import numpy as np diff --git a/numpy/lib/_shape_base_impl.pyi b/numpy/lib/_shape_base_impl.pyi index a50d372bb97e..0206d95109fa 100644 --- a/numpy/lib/_shape_base_impl.pyi +++ b/numpy/lib/_shape_base_impl.pyi @@ -9,7 +9,6 @@ from typing import ( overload, type_check_only, ) - from typing_extensions import deprecated import numpy as np diff --git a/numpy/lib/_type_check_impl.pyi b/numpy/lib/_type_check_impl.pyi index 944015e423bb..b9ab2a02f5f5 100644 --- a/numpy/lib/_type_check_impl.pyi +++ b/numpy/lib/_type_check_impl.pyi @@ -1,8 +1,7 @@ +from _typeshed import Incomplete from collections.abc import Container, Iterable from typing import Any, Protocol, TypeAlias, overload, type_check_only from typing import Literal as L - -from _typeshed import Incomplete from typing_extensions import TypeVar import numpy as np diff --git a/numpy/lib/_user_array_impl.pyi b/numpy/lib/_user_array_impl.pyi index 13c0a0163421..0aeec42129af 100644 --- a/numpy/lib/_user_array_impl.pyi +++ b/numpy/lib/_user_array_impl.pyi @@ -1,7 +1,6 @@ +from _typeshed import Incomplete from types import EllipsisType from typing import Any, Generic, 
Self, SupportsIndex, TypeAlias, overload - -from _typeshed import Incomplete from typing_extensions import TypeVar, override import numpy as np diff --git a/numpy/lib/_utils_impl.pyi b/numpy/lib/_utils_impl.pyi index 6fbd26f88084..7a34f273c423 100644 --- a/numpy/lib/_utils_impl.pyi +++ b/numpy/lib/_utils_impl.pyi @@ -1,6 +1,5 @@ -from typing import LiteralString - from _typeshed import SupportsWrite +from typing import LiteralString from typing_extensions import TypeVar import numpy as np diff --git a/numpy/lib/recfunctions.pyi b/numpy/lib/recfunctions.pyi index 073642918af3..a0b49ba1df00 100644 --- a/numpy/lib/recfunctions.pyi +++ b/numpy/lib/recfunctions.pyi @@ -1,7 +1,6 @@ +from _typeshed import Incomplete from collections.abc import Callable, Iterable, Mapping, Sequence from typing import Any, Literal, TypeAlias, overload - -from _typeshed import Incomplete from typing_extensions import TypeVar import numpy as np diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index da4ad3b333db..de6db7873faa 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1,10 +1,9 @@ # pyright: reportIncompatibleMethodOverride=false # ruff: noqa: ANN001, ANN002, ANN003, ANN201, ANN202 ANN204, ANN401 +from _typeshed import Incomplete from collections.abc import Sequence from typing import Any, Literal, NoReturn, Self, SupportsIndex, TypeAlias, overload - -from _typeshed import Incomplete from typing_extensions import TypeIs, TypeVar import numpy as np diff --git a/numpy/polynomial/_polybase.pyi b/numpy/polynomial/_polybase.pyi index 6d71a8cb8d2c..30c906fa3b4b 100644 --- a/numpy/polynomial/_polybase.pyi +++ b/numpy/polynomial/_polybase.pyi @@ -13,7 +13,6 @@ from typing import ( TypeAlias, overload, ) - from typing_extensions import TypeIs, TypeVar import numpy as np diff --git a/numpy/random/bit_generator.pyi b/numpy/random/bit_generator.pyi index 6ce4f4b9d6a1..ee4499dee1f3 100644 --- a/numpy/random/bit_generator.pyi +++ b/numpy/random/bit_generator.pyi @@ -1,4 +1,5 @@ import abc +from _typeshed import Incomplete from collections.abc import Callable, Mapping, Sequence from threading import Lock from typing import ( @@ -12,8 +13,6 @@ from typing import ( overload, type_check_only, ) - -from _typeshed import Incomplete from typing_extensions import CapsuleType import numpy as np diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index 4e3b60a0ef70..59a7539b69f1 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -3,6 +3,7 @@ import sys import types import unittest import warnings +from _typeshed import ConvertibleToFloat, GenericPath, StrOrBytesPath, StrPath from collections.abc import Callable, Iterable, Sequence from contextlib import _GeneratorContextManager from pathlib import Path @@ -23,10 +24,8 @@ from typing import ( type_check_only, ) from typing import Literal as L -from unittest.case import SkipTest - -from _typeshed import ConvertibleToFloat, GenericPath, StrOrBytesPath, StrPath from typing_extensions import TypeVar +from unittest.case import SkipTest import numpy as np from numpy._typing import ( diff --git a/numpy/testing/overrides.pyi b/numpy/testing/overrides.pyi index 3fefc3f350da..916154c155b1 100644 --- a/numpy/testing/overrides.pyi +++ b/numpy/testing/overrides.pyi @@ -1,6 +1,5 @@ from collections.abc import Callable, Hashable from typing import Any - from typing_extensions import TypeIs import numpy as np diff --git a/numpy/testing/print_coercion_tables.pyi b/numpy/testing/print_coercion_tables.pyi index 
c859305f2350..f463a18c05e4 100644 --- a/numpy/testing/print_coercion_tables.pyi +++ b/numpy/testing/print_coercion_tables.pyi @@ -1,6 +1,5 @@ from collections.abc import Iterable from typing import ClassVar, Generic, Self - from typing_extensions import TypeVar import numpy as np diff --git a/ruff.toml b/ruff.toml index deb52e834df9..7454c6c05e5b 100644 --- a/ruff.toml +++ b/ruff.toml @@ -16,6 +16,9 @@ extend-exclude = [ line-length = 88 +[format] +line-ending = "lf" + [lint] preview = true extend-select = [ @@ -115,3 +118,15 @@ ignore = [ "numpy/ma/core.pyi" = ["F403", "F405"] "numpy/matlib.py" = ["F405"] "numpy/matlib.pyi" = ["F811"] + +[lint.flake8-builtins] +builtins-allowed-modules = ["random", "typing"] + +[lint.flake8-import-conventions.extend-aliases] +"numpy" = "np" +"numpy.typing" = "npt" + +[lint.isort] +# these are treated as stdlib within .pyi stubs +extra-standard-library = ["_typeshed", "typing_extensions"] +known-first-party = ["numpy"] From 1ea01c57b9dfe7948eb05c13d9a379459e0896b4 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 12 Jun 2025 02:25:09 +0200 Subject: [PATCH 0113/1018] MAINT: bump ``ruff`` to ``0.11.13`` (#29186) --- environment.yml | 2 +- requirements/linter_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index 5f1ee5e81a5f..770a83218133 100644 --- a/environment.yml +++ b/environment.yml @@ -45,7 +45,7 @@ dependencies: # NOTE: breathe 4.33.0 collides with sphinx.ext.graphviz - breathe>4.33.0 # For linting - - ruff=0.11.9 + - ruff=0.11.13 - gitpython # Used in some tests - cffi diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index 0716b235ec9c..45319571b561 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,3 +1,3 @@ # keep in sync with `environment.yml` -ruff==0.11.9 +ruff==0.11.13 GitPython>=3.1.30 From 303095ed53a46110b14ccb36e0f683cb961a1746 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Thu, 12 Jun 2025 09:21:18 +0300 Subject: [PATCH 0114/1018] BUG: fix matmul with transposed out arg (#29179) * BUG: fix matmul with transposed out arg * DOC: add release note * fixes from review --- doc/release/upcoming_changes/29179.change.rst | 4 ++++ doc/source/release/2.3.0-notes.rst | 6 ++++++ numpy/_core/src/umath/matmul.c.src | 2 +- numpy/_core/tests/test_multiarray.py | 6 ++++++ 4 files changed, 17 insertions(+), 1 deletion(-) create mode 100644 doc/release/upcoming_changes/29179.change.rst diff --git a/doc/release/upcoming_changes/29179.change.rst b/doc/release/upcoming_changes/29179.change.rst new file mode 100644 index 000000000000..12eb6804d3dd --- /dev/null +++ b/doc/release/upcoming_changes/29179.change.rst @@ -0,0 +1,4 @@ +Fix bug in ``matmul`` for non-contiguous out kwarg parameter +------------------------------------------------------------ +In some cases, if ``out`` was non-contiguous, ``np.matmul`` would cause +memory corruption or a c-level assert. This was new to v2.3.0 and fixed in v2.3.1. diff --git a/doc/source/release/2.3.0-notes.rst b/doc/source/release/2.3.0-notes.rst index faad9ffcc8eb..4c3c923b3b5e 100644 --- a/doc/source/release/2.3.0-notes.rst +++ b/doc/source/release/2.3.0-notes.rst @@ -414,6 +414,12 @@ the best performance. (`gh-28769 `__) +Performance improvements for ``np.matmul`` +------------------------------------------ +Enable using BLAS for ``matmul`` even when operands are non-contiguous by copying +if needed. 
+ +(`gh-23752 `__) Changes ======= diff --git a/numpy/_core/src/umath/matmul.c.src b/numpy/_core/src/umath/matmul.c.src index d9be7b1d6826..02c4fde56bf2 100644 --- a/numpy/_core/src/umath/matmul.c.src +++ b/numpy/_core/src/umath/matmul.c.src @@ -596,7 +596,7 @@ NPY_NO_EXPORT void * Use transpose equivalence: * matmul(a, b, o) == matmul(b.T, a.T, o.T) */ - if (o_f_blasable) { + if (o_transpose) { @TYPE@_matmul_matrixmatrix( ip2_, is2_p_, is2_n_, ip1_, is1_n_, is1_m_, diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 146431c7a7c0..11bb80ac1985 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -7317,6 +7317,12 @@ def test_dot_equivalent(self, args): r3 = np.matmul(args[0].copy(), args[1].copy()) assert_equal(r1, r3) + # matrix matrix, issue 29164 + if [len(args[0].shape), len(args[1].shape)] == [2, 2]: + out_f = np.zeros((r2.shape[0] * 2, r2.shape[1] * 2), order='F') + r4 = np.matmul(*args, out=out_f[::2, ::2]) + assert_equal(r2, r4) + def test_matmul_object(self): import fractions From a811c05394a2a052a5b95b130f415bea4ccd542e Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 12 Jun 2025 14:36:00 +0200 Subject: [PATCH 0115/1018] TYP: add ``__all__`` in ``numpy._core.__init__`` (#29187) Ported from numpy/numtype#156 --- numpy/_core/__init__.pyi | 668 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 666 insertions(+), 2 deletions(-) diff --git a/numpy/_core/__init__.pyi b/numpy/_core/__init__.pyi index 40d9c411b97c..a8884917f34a 100644 --- a/numpy/_core/__init__.pyi +++ b/numpy/_core/__init__.pyi @@ -1,2 +1,666 @@ -# NOTE: The `np._core` namespace is deliberately kept empty due to it -# being private +# keep in sync with https://github.com/numpy/numtype/blob/main/src/numpy-stubs/_core/__init__.pyi + +from ._asarray import require +from ._ufunc_config import ( + errstate, + getbufsize, + geterr, + geterrcall, + setbufsize, + seterr, + seterrcall, +) +from .arrayprint import ( + array2string, + array_repr, + array_str, + format_float_positional, + format_float_scientific, + get_printoptions, + printoptions, + set_printoptions, +) +from .einsumfunc import einsum, einsum_path +from .fromnumeric import ( + all, + amax, + amin, + any, + argmax, + argmin, + argpartition, + argsort, + around, + choose, + clip, + compress, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + diagonal, + matrix_transpose, + max, + mean, + min, + ndim, + nonzero, + partition, + prod, + ptp, + put, + ravel, + repeat, + reshape, + resize, + round, + searchsorted, + shape, + size, + sort, + squeeze, + std, + sum, + swapaxes, + take, + trace, + transpose, + var, +) +from .fromnumeric import transpose as permute_dims +from .function_base import geomspace, linspace, logspace +from .getlimits import finfo, iinfo +from .memmap import memmap +from .numeric import ( + False_, + True_, + allclose, + arange, + argwhere, + array, + array_equal, + array_equiv, + asanyarray, + asarray, + ascontiguousarray, + asfortranarray, + astype, + base_repr, + binary_repr, + bitwise_not, + broadcast, + can_cast, + concatenate, + convolve, + copyto, + correlate, + count_nonzero, + cross, + dot, + dtype, + empty, + empty_like, + flatiter, + flatnonzero, + from_dlpack, + frombuffer, + fromfile, + fromfunction, + fromiter, + fromstring, + full, + full_like, + identity, + indices, + inf, + inner, + isclose, + isfortran, + isscalar, + lexsort, + little_endian, + matmul, + may_share_memory, + min_scalar_type, + moveaxis, + nan, + ndarray, + 
nditer, + nested_iters, + newaxis, + ones, + ones_like, + outer, + promote_types, + putmask, + result_type, + roll, + rollaxis, + shares_memory, + tensordot, + ufunc, + vdot, + vecdot, + where, + zeros, + zeros_like, +) +from .numeric import concatenate as concat +from .numerictypes import ( + ScalarType, + bool, + bool_, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + cdouble, + character, + clongdouble, + complex64, + complex128, + complex192, + complex256, + complexfloating, + csingle, + datetime64, + datetime_as_string, + datetime_data, + double, + flexible, + float16, + float32, + float64, + float96, + float128, + floating, + generic, + half, + inexact, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + intp, + is_busday, + isdtype, + issubdtype, + long, + longdouble, + longlong, + number, + object_, + sctypeDict, + short, + signedinteger, + single, + str_, + timedelta64, + typecodes, + ubyte, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + unsignedinteger, + ushort, + void, +) +from .records import recarray, record +from .shape_base import ( + atleast_1d, + atleast_2d, + atleast_3d, + block, + hstack, + stack, + unstack, + vstack, +) +from .umath import ( + absolute, + add, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + bitwise_and, + bitwise_count, + bitwise_or, + bitwise_xor, + cbrt, + ceil, + conj, + conjugate, + copysign, + cos, + cosh, + deg2rad, + degrees, + divide, + divmod, + e, + equal, + euler_gamma, + exp, + exp2, + expm1, + fabs, + float_power, + floor, + floor_divide, + fmax, + fmin, + fmod, + frexp, + frompyfunc, + gcd, + greater, + greater_equal, + heaviside, + hypot, + invert, + isfinite, + isinf, + isnan, + isnat, + lcm, + ldexp, + left_shift, + less, + less_equal, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + matvec, + maximum, + minimum, + mod, + modf, + multiply, + negative, + nextafter, + not_equal, + pi, + positive, + power, + rad2deg, + radians, + reciprocal, + remainder, + right_shift, + rint, + sign, + signbit, + sin, + sinh, + spacing, + sqrt, + square, + subtract, + tan, + tanh, + true_divide, + trunc, + vecmat, +) +from .umath import absolute as abs +from .umath import arccos as acos +from .umath import arccosh as acosh +from .umath import arcsin as asin +from .umath import arcsinh as asinh +from .umath import arctan as atan +from .umath import arctan2 as atan2 +from .umath import arctanh as atanh +from .umath import invert as bitwise_invert +from .umath import left_shift as bitwise_left_shift +from .umath import power as pow +from .umath import right_shift as bitwise_right_shift + +__all__ = [ + "False_", + "ScalarType", + "True_", + "abs", + "absolute", + "acos", + "acosh", + "add", + "all", + "allclose", + "amax", + "amin", + "any", + "arange", + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctan2", + "arctanh", + "argmax", + "argmin", + "argpartition", + "argsort", + "argwhere", + "around", + "array", + "array2string", + "array_equal", + "array_equiv", + "array_repr", + "array_str", + "asanyarray", + "asarray", + "ascontiguousarray", + "asfortranarray", + "asin", + "asinh", + "astype", + "atan", + "atan2", + "atanh", + "atleast_1d", + "atleast_2d", + "atleast_3d", + "base_repr", + "binary_repr", + "bitwise_and", + "bitwise_count", + "bitwise_invert", + "bitwise_left_shift", + "bitwise_not", + "bitwise_or", + "bitwise_right_shift", + "bitwise_xor", + 
"block", + "bool", + "bool_", + "broadcast", + "busday_count", + "busday_offset", + "busdaycalendar", + "byte", + "bytes_", + "can_cast", + "cbrt", + "cdouble", + "ceil", + "character", + "choose", + "clip", + "clongdouble", + "complex64", + "complex128", + "complex192", + "complex256", + "complexfloating", + "compress", + "concat", + "concatenate", + "conj", + "conjugate", + "convolve", + "copysign", + "copyto", + "correlate", + "cos", + "cosh", + "count_nonzero", + "cross", + "csingle", + "cumprod", + "cumsum", + "cumulative_prod", + "cumulative_sum", + "datetime64", + "datetime_as_string", + "datetime_data", + "deg2rad", + "degrees", + "diagonal", + "divide", + "divmod", + "dot", + "double", + "dtype", + "e", + "einsum", + "einsum_path", + "empty", + "empty_like", + "equal", + "errstate", + "euler_gamma", + "exp", + "exp2", + "expm1", + "fabs", + "finfo", + "flatiter", + "flatnonzero", + "flexible", + "float16", + "float32", + "float64", + "float96", + "float128", + "float_power", + "floating", + "floor", + "floor_divide", + "fmax", + "fmin", + "fmod", + "format_float_positional", + "format_float_scientific", + "frexp", + "from_dlpack", + "frombuffer", + "fromfile", + "fromfunction", + "fromiter", + "frompyfunc", + "fromstring", + "full", + "full_like", + "gcd", + "generic", + "geomspace", + "get_printoptions", + "getbufsize", + "geterr", + "geterrcall", + "greater", + "greater_equal", + "half", + "heaviside", + "hstack", + "hypot", + "identity", + "iinfo", + "indices", + "inexact", + "inf", + "inner", + "int8", + "int16", + "int32", + "int64", + "int_", + "intc", + "integer", + "intp", + "invert", + "is_busday", + "isclose", + "isdtype", + "isfinite", + "isfortran", + "isinf", + "isnan", + "isnat", + "isscalar", + "issubdtype", + "lcm", + "ldexp", + "left_shift", + "less", + "less_equal", + "lexsort", + "linspace", + "little_endian", + "log", + "log1p", + "log2", + "log10", + "logaddexp", + "logaddexp2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "logspace", + "long", + "longdouble", + "longlong", + "matmul", + "matrix_transpose", + "matvec", + "max", + "maximum", + "may_share_memory", + "mean", + "memmap", + "min", + "min_scalar_type", + "minimum", + "mod", + "modf", + "moveaxis", + "multiply", + "nan", + "ndarray", + "ndim", + "nditer", + "negative", + "nested_iters", + "newaxis", + "nextafter", + "nonzero", + "not_equal", + "number", + "object_", + "ones", + "ones_like", + "outer", + "partition", + "permute_dims", + "pi", + "positive", + "pow", + "power", + "printoptions", + "prod", + "promote_types", + "ptp", + "put", + "putmask", + "rad2deg", + "radians", + "ravel", + "recarray", + "reciprocal", + "record", + "remainder", + "repeat", + "require", + "reshape", + "resize", + "result_type", + "right_shift", + "rint", + "roll", + "rollaxis", + "round", + "sctypeDict", + "searchsorted", + "set_printoptions", + "setbufsize", + "seterr", + "seterrcall", + "shape", + "shares_memory", + "short", + "sign", + "signbit", + "signedinteger", + "sin", + "single", + "sinh", + "size", + "sort", + "spacing", + "sqrt", + "square", + "squeeze", + "stack", + "std", + "str_", + "subtract", + "sum", + "swapaxes", + "take", + "tan", + "tanh", + "tensordot", + "timedelta64", + "trace", + "transpose", + "true_divide", + "trunc", + "typecodes", + "ubyte", + "ufunc", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintc", + "uintp", + "ulong", + "ulonglong", + "unsignedinteger", + "unstack", + "ushort", + "var", + "vdot", + "vecdot", + "vecmat", + "void", + "vstack", + 
"where", + "zeros", + "zeros_like", +] From b88c248f92442a0df7ecd24ad887053501af979c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 12 Jun 2025 17:36:23 +0000 Subject: [PATCH 0116/1018] MAINT: Bump github/codeql-action from 3.28.19 to 3.29.0 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.19 to 3.29.0. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/fca7ace96b7d713c7035871441bd52efbe39e27e...ce28f5bb42b7a9f2c824e633a3f6ee835bab6858) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index e0318652d2af..0342fd92c924 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 + uses: github/codeql-action/init@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 + uses: github/codeql-action/autobuild@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 + uses: github/codeql-action/analyze@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 9e21251f87c8..e64789006e2c 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@fca7ace96b7d713c7035871441bd52efbe39e27e # v2.1.27 + uses: github/codeql-action/upload-sarif@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v2.1.27 with: sarif_file: results.sarif From 8c2dec1f824ecd366f88e423ef86a6507b1a329f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Rozet?= Date: Fri, 13 Jun 2025 11:44:29 +0200 Subject: [PATCH 0117/1018] BUG: Revert `np.vectorize` casting to legacy behavior (#29196) Revert use of no `dtype=object` to ensure correct cast behavior when the output dtype is discovered. 
Co-authored-by: Sebastian Berg
---
 numpy/lib/_function_base_impl.py      |  4 +++-
 numpy/lib/tests/test_function_base.py |  9 +++++++++
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py
index f217af64fb4b..096043e6316f 100644
--- a/numpy/lib/_function_base_impl.py
+++ b/numpy/lib/_function_base_impl.py
@@ -2573,6 +2573,7 @@ def _get_ufunc_and_otypes(self, func, args):
         # the subsequent call when the ufunc is evaluated.
         # Assumes that ufunc first evaluates the 0th elements in the input
         # arrays (the input values are not checked to ensure this)
+        args = [asarray(a) for a in args]
         if builtins.any(arg.size == 0 for arg in args):
             raise ValueError('cannot call `vectorize` on size 0 inputs '
                              'unless `otypes` is set')
@@ -2618,8 +2619,9 @@ def _vectorize_call(self, func, args):
         elif not args:
             res = func()
         else:
-            args = [asanyarray(a, dtype=object) for a in args]
             ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
+            # gh-29196: `dtype=object` should eventually be removed
+            args = [asanyarray(a, dtype=object) for a in args]
             outputs = ufunc(*args, out=...)

             if ufunc.nout == 1:
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 50c61e6e04fa..f2dba193c849 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -1732,6 +1732,15 @@ def test_string_ticket_1892(self):
         s = '0123456789' * 10
         assert_equal(s, f(s))

+    def test_dtype_promotion_gh_29189(self):
+        # dtype should not be silently promoted (int32 -> int64)
+        dtypes = [np.int16, np.int32, np.int64, np.float16, np.float32, np.float64]
+
+        for dtype in dtypes:
+            x = np.asarray([1, 2, 3], dtype=dtype)
+            y = np.vectorize(lambda x: x + x)(x)
+            assert x.dtype == y.dtype
+
     def test_cache(self):
         # Ensure that vectorized func called exactly once per argument.
         _calls = [0]

From 79deb8f21ebe83c0d5e0f4bf2fb53887a31ad5cf Mon Sep 17 00:00:00 2001
From: Ben Woodruff
Date: Fri, 13 Jun 2025 08:07:53 -0600
Subject: [PATCH 0118/1018] DOC: Suppress distutils doc build warnings for python 3.12+ (#29160)

Enables an error-free build of the docs for Python 3.12+. Excludes
related files, suppresses all warnings for excluded files, and ignores
case-by-case individual references.

Closes #29131.

[skip azp][skip cirrus][skip actions]

---
 doc/source/conf.py | 22 ++++++++++++++++++++--
 1 file changed, 20 insertions(+), 2 deletions(-)

diff --git a/doc/source/conf.py b/doc/source/conf.py
index e3146bf768c9..eba0bd014fb0 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -150,8 +150,26 @@ class PyTypeObject(ctypes.Structure):
 exclude_dirs = []

 exclude_patterns = []
+suppress_warnings = []
+nitpick_ignore = []
+
 if sys.version_info[:2] >= (3, 12):
-    exclude_patterns += ["reference/distutils.rst"]
+    exclude_patterns += [
+        "reference/distutils.rst",
+        "reference/distutils/misc_util.rst",
+    ]
+    suppress_warnings += [
+        'toc.excluded',  # Suppress warnings about excluded toctree entries
+    ]
+    nitpicky = True
+    nitpick_ignore += [
+        ('ref', 'numpy-distutils-refguide'),
+        # The first ignore is not captured without nitpicky = True.
+        # These three ignores are required once nitpicky = True is set.
+        ('py:mod', 'numpy.distutils'),
+        ('py:class', 'Extension'),
+        ('py:class', 'numpy.distutils.misc_util.Configuration'),
+    ]

 # If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False @@ -612,7 +630,7 @@ class NumPyLexer(CLexer): breathe_default_members = ("members", "undoc-members", "protected-members") # See https://github.com/breathe-doc/breathe/issues/696 -nitpick_ignore = [ +nitpick_ignore += [ ('c:identifier', 'FILE'), ('c:identifier', 'size_t'), ('c:identifier', 'PyHeapTypeObject'), From 0b74c624e2791d5ee9ae3bec59e5b101d7e2f864 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Fri, 13 Jun 2025 21:04:39 +0200 Subject: [PATCH 0119/1018] TYP: fix ``ndarray.__array__`` annotation for ``copy`` (#29204) --- numpy/__init__.pyi | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index d71a76569ad7..272e52f88e83 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2069,13 +2069,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... @overload - def __array__( - self, dtype: None = ..., /, *, copy: bool | None = ... - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __array__(self, dtype: None = None, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __array__( - self, dtype: _DTypeT, /, *, copy: bool | None = ... - ) -> ndarray[_ShapeT_co, _DTypeT]: ... + def __array__(self, dtype: _DTypeT, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, _DTypeT]: ... def __array_ufunc__( self, From 470943bf78824b12081df5b8c097c3511d42175f Mon Sep 17 00:00:00 2001 From: Michael Date: Fri, 13 Jun 2025 21:11:29 +0200 Subject: [PATCH 0120/1018] TST: additional tests for matmul with non-contiguous input and output (#29197) --- numpy/_core/tests/test_multiarray.py | 32 +++++++++++++++++++++++----- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 11bb80ac1985..f66109a3d8b5 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -7317,11 +7317,33 @@ def test_dot_equivalent(self, args): r3 = np.matmul(args[0].copy(), args[1].copy()) assert_equal(r1, r3) - # matrix matrix, issue 29164 - if [len(args[0].shape), len(args[1].shape)] == [2, 2]: - out_f = np.zeros((r2.shape[0] * 2, r2.shape[1] * 2), order='F') - r4 = np.matmul(*args, out=out_f[::2, ::2]) - assert_equal(r2, r4) + # issue 29164 with extra checks + @pytest.mark.parametrize('dtype', ( + np.float32, np.float64, np.complex64, np.complex128 + )) + def test_dot_equivalent_matrix_matrix_blastypes(self, dtype): + modes = list(itertools.product(['C', 'F'], [True, False])) + + def apply_mode(m, mode): + order, is_contiguous = mode + if is_contiguous: + return m.copy() if order == 'C' else m.T.copy().T + + retval = np.zeros( + (m.shape[0] * 2, m.shape[1] * 2), dtype=m.dtype, order=order + )[::2, ::2] + retval[...] 
= m + return retval + + is_complex = np.issubdtype(dtype, np.complexfloating) + m1 = self.m1.astype(dtype) + (1j if is_complex else 0) + m2 = self.m2.astype(dtype) + (1j if is_complex else 0) + dot_res = np.dot(m1, m2) + mo = np.zeros_like(dot_res) + + for mode in itertools.product(*[modes]*3): + m1_, m2_, mo_ = [apply_mode(*x) for x in zip([m1, m2, mo], mode)] + assert_equal(np.matmul(m1_, m2_, out=mo_), dot_res) def test_matmul_object(self): import fractions From 2f4ace73ee323932f2e5b9e32da696b47e7bda51 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Sun, 15 Jun 2025 15:21:03 +0300 Subject: [PATCH 0121/1018] BUG: fix linting (#29210) --- numpy/_core/tests/test_multiarray.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index f66109a3d8b5..b164f1dada3b 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -7333,7 +7333,7 @@ def apply_mode(m, mode): (m.shape[0] * 2, m.shape[1] * 2), dtype=m.dtype, order=order )[::2, ::2] retval[...] = m - return retval + return retval is_complex = np.issubdtype(dtype, np.complexfloating) m1 = self.m1.astype(dtype) + (1j if is_complex else 0) @@ -7341,7 +7341,7 @@ def apply_mode(m, mode): dot_res = np.dot(m1, m2) mo = np.zeros_like(dot_res) - for mode in itertools.product(*[modes]*3): + for mode in itertools.product(*[modes] * 3): m1_, m2_, mo_ = [apply_mode(*x) for x in zip([m1, m2, mo], mode)] assert_equal(np.matmul(m1_, m2_, out=mo_), dot_res) From aea8869549c110c46afbe75ed2e162b702d3d106 Mon Sep 17 00:00:00 2001 From: Mugundanmcw Date: Tue, 17 Jun 2025 16:09:16 +0530 Subject: [PATCH 0122/1018] CI: Add WoA validation setup to windows.yml --- .github/workflows/windows.yml | 74 +++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index e760e37780a7..18d02081fd67 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -85,6 +85,80 @@ jobs: run: | spin test -- --timeout=600 --durations=10 + python64bit_openblas_winarm64: + name: arm64, LPARM64 OpenBLAS + runs-on: windows-11-arm + # To enable this job on a fork, comment out: + if: github.repository == 'numpy/numpy' + strategy: + fail-fast: false + matrix: + compiler-pyversion: + - ["MSVC", "3.11"] + - ["Clang-cl", "3.14t-dev"] + + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + + - name: Setup Python + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + with: + python-version: ${{ matrix.compiler-pyversion[1] }} + architecture: arm64 + + - name: Setup MSVC + if: matrix.compiler-pyversion[0] == 'MSVC' + uses: bus1/cabuild/action/msdevshell@e22aba57d6e74891d059d66501b6b5aed8123c4d # v1 + with: + architecture: arm64 + + - name: Install build dependencies from PyPI + run: | + pip install -r requirements/build_requirements.txt + + - name: Install pkg-config + run: | + choco install -y --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite + echo "PKG_CONFIG_PATH=${{ github.workspace }}/.openblas" >> $env:GITHUB_ENV + + - name: Install Clang-cl + if: matrix.compiler-pyversion[0] == 'Clang-cl' + run: | + uses: ./.github/windows_arm64_steps + + - name: Install NumPy (MSVC) + if: matrix.compiler-pyversion[0] == 'MSVC' + run: | + pip install -r requirements/ci_requirements.txt + 
spin build --with-scipy-openblas=32 -j2 -- --vsenv + + - name: Install NumPy (Clang-cl) + if: matrix.compiler-pyversion[0] == 'Clang-cl' + run: | + "[binaries]","c = 'clang-cl'","cpp = 'clang-cl'","ar = 'llvm-lib'","c_ld = 'lld-link'","cpp_ld = 'lld-link'" | Out-File $PWD/clang-cl-arm64.ini -Encoding ascii + pip install -r requirements/ci_requirements.txt + spin build --with-scipy-openblas=32 -j2 -- --vsenv --native-file=$PWD/clang-cl-arm64.ini + + - name: Meson Log + shell: bash + if: ${{ failure() }} + run: | + cat build/meson-logs/meson-log.txt + + - name: Install test dependencies + run: | + python -m pip install -r requirements/test_requirements.txt + python -m pip install threadpoolctl + + - name: Run test suite + run: | + spin test -- --timeout=600 --durations=10 + msvc_python_no_openblas: name: MSVC, ${{ matrix.architecture }} Python , no BLAS runs-on: ${{ matrix.os }} From d8c4f2ba8604fbb0e7f22a5df8ea3a355a53385e Mon Sep 17 00:00:00 2001 From: Mugundanmcw Date: Tue, 17 Jun 2025 16:12:41 +0530 Subject: [PATCH 0123/1018] CI: Create action.yml for LLVM Win-ARM64 as reusable blocks --- .github/windows_arm64_steps /action.yml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 .github/windows_arm64_steps /action.yml diff --git a/.github/windows_arm64_steps /action.yml b/.github/windows_arm64_steps /action.yml new file mode 100644 index 000000000000..86517b6246e7 --- /dev/null +++ b/.github/windows_arm64_steps /action.yml @@ -0,0 +1,16 @@ +name: Build Dependencies(Win-ARM64) +description: "Setup LLVM for Win-ARM64 builds" + +runs: + using: "composite" + steps: + - name: Install LLVM + shell: pwsh + run: | + Invoke-WebRequest https://github.com/llvm/llvm-project/releases/download/llvmorg-20.1.6/LLVM-20.1.6-woa64.exe -UseBasicParsing -OutFile LLVM-woa64.exe + Start-Process -FilePath ".\LLVM-woa64.exe" -ArgumentList "/S" -Wait + echo "C:\Program Files\LLVM\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + echo "CC=clang-cl" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + echo "CXX=clang-cl" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + echo "FC=flang-new" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + From f02bb58cb634286739a1c5eb9dc35bc0e086efa7 Mon Sep 17 00:00:00 2001 From: Mugundanmcw Date: Tue, 17 Jun 2025 16:16:56 +0530 Subject: [PATCH 0124/1018] CI: Modify wheel.yml to use clang-cl for Win-ARM64 --- .github/workflows/wheels.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 68352eb1fc7c..ab6bbb618899 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -127,11 +127,9 @@ jobs: with: architecture: 'x86' - - name: Setup MSVC arm64 + - name: Setup LLVM for Windows ARM64 if: ${{ matrix.buildplat[1] == 'win_arm64' }} - uses: bus1/cabuild/action/msdevshell@e22aba57d6e74891d059d66501b6b5aed8123c4d # v1 - with: - architecture: 'arm64' + uses: ./.github/windows_arm64_steps - name: pkg-config-for-win run: | From fb0dff6061addfa3b7d2cba9ecc4dfedf5dad954 Mon Sep 17 00:00:00 2001 From: Mugundanmcw Date: Tue, 17 Jun 2025 16:32:03 +0530 Subject: [PATCH 0125/1018] CI: fix action.yml naming --- .github/{windows_arm64_steps => windows_arm64_steps}/action.yml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .github/{windows_arm64_steps => windows_arm64_steps}/action.yml (100%) diff --git a/.github/windows_arm64_steps /action.yml b/.github/windows_arm64_steps/action.yml similarity index 100% 
rename from .github/windows_arm64_steps /action.yml
rename to .github/windows_arm64_steps/action.yml

From f36c0daa423244ca6887ab426e3c6149a13a2667 Mon Sep 17 00:00:00 2001
From: Mugundanmcw
Date: Tue, 17 Jun 2025 16:40:39 +0530
Subject: [PATCH 0126/1018] CI: Fix reusable LLVM block

---
 .github/workflows/windows.yml | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml
index 18d02081fd67..e723b787a5de 100644
--- a/.github/workflows/windows.yml
+++ b/.github/workflows/windows.yml
@@ -128,8 +128,7 @@ jobs:

     - name: Install Clang-cl
       if: matrix.compiler-pyversion[0] == 'Clang-cl'
-      run: |
-        uses: ./.github/windows_arm64_steps
+      uses: ./.github/windows_arm64_steps

     - name: Install NumPy (MSVC)
       if: matrix.compiler-pyversion[0] == 'MSVC'

From 03d57303675386998ad005d28c7c3ca7177694d1 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Tue, 17 Jun 2025 17:49:15 +0200
Subject: [PATCH 0127/1018] MAINT: Fix some undef warnings (#29216)

As noted by Chuck in gh-29138, there are some undef warnings that seem
not nice; this should fix them.

The fact that `NPY_LONG`, etc. are not defined at macro expansion time
is a bit of a trap; maybe it would be nice to have CI fail for this...

---
 numpy/_core/src/_simd/_simd.c | 2 +-
 numpy/_core/src/multiarray/descriptor.c | 2 +-
 numpy/_core/src/umath/_operand_flag_tests.c | 2 +-
 numpy/_core/src/umath/_rational_tests.c | 2 +-
 numpy/_core/src/umath/_struct_ufunc_tests.c | 2 +-
 numpy/_core/src/umath/_umath_tests.c.src | 2 +-
 .../_core/tests/examples/limited_api/limited_api_latest.c | 8 ++++----
 numpy/f2py/rules.py | 2 +-
 numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c | 2 +-
 9 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/numpy/_core/src/_simd/_simd.c b/numpy/_core/src/_simd/_simd.c
index 2f0a5df6375c..d25d7bbf1c38 100644
--- a/numpy/_core/src/_simd/_simd.c
+++ b/numpy/_core/src/_simd/_simd.c
@@ -88,7 +88,7 @@ PyMODINIT_FUNC PyInit__simd(void)
     NPY_MTARGETS_CONF_DISPATCH(NPY_CPU_HAVE, ATTACH_MODULE, MAKE_MSVC_HAPPY)
     NPY_MTARGETS_CONF_BASELINE(ATTACH_BASELINE_MODULE, MAKE_MSVC_HAPPY)

-#if Py_GIL_DISABLED
+#ifdef Py_GIL_DISABLED
     // signal this module supports running with the GIL disabled
     PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED);
 #endif
diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c
index 5708e5c6ecb7..f520e3c4bceb 100644
--- a/numpy/_core/src/multiarray/descriptor.c
+++ b/numpy/_core/src/multiarray/descriptor.c
@@ -2095,7 +2095,7 @@ static PyMemberDef arraydescr_members[] = {
     {"alignment",
         T_PYSSIZET, offsetof(PyArray_Descr, alignment), READONLY, NULL},
     {"flags",
-#if NPY_ULONGLONG == NPY_UINT64
+#if NPY_SIZEOF_LONGLONG == 8
         T_ULONGLONG, offsetof(PyArray_Descr, flags), READONLY, NULL},
 #else
 #error Assuming long long is 64bit, if not replace with getter function.
diff --git a/numpy/_core/src/umath/_operand_flag_tests.c b/numpy/_core/src/umath/_operand_flag_tests.c index 9747b7946512..5cdff6220280 100644 --- a/numpy/_core/src/umath/_operand_flag_tests.c +++ b/numpy/_core/src/umath/_operand_flag_tests.c @@ -77,7 +77,7 @@ PyMODINIT_FUNC PyInit__operand_flag_tests(void) ((PyUFuncObject*)ufunc)->iter_flags = NPY_ITER_REDUCE_OK; PyModule_AddObject(m, "inplace_add", (PyObject*)ufunc); -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); #endif diff --git a/numpy/_core/src/umath/_rational_tests.c b/numpy/_core/src/umath/_rational_tests.c index a95c89b373df..d257bc22d051 100644 --- a/numpy/_core/src/umath/_rational_tests.c +++ b/numpy/_core/src/umath/_rational_tests.c @@ -1355,7 +1355,7 @@ PyMODINIT_FUNC PyInit__rational_tests(void) { GCD_LCM_UFUNC(gcd,NPY_INT64,"greatest common denominator of two integers"); GCD_LCM_UFUNC(lcm,NPY_INT64,"least common multiple of two integers"); -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); #endif diff --git a/numpy/_core/src/umath/_struct_ufunc_tests.c b/numpy/_core/src/umath/_struct_ufunc_tests.c index 8edbdc00b6f3..56c4be117e44 100644 --- a/numpy/_core/src/umath/_struct_ufunc_tests.c +++ b/numpy/_core/src/umath/_struct_ufunc_tests.c @@ -157,7 +157,7 @@ PyMODINIT_FUNC PyInit__struct_ufunc_tests(void) PyDict_SetItemString(d, "add_triplet", add_triplet); Py_DECREF(add_triplet); -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); #endif diff --git a/numpy/_core/src/umath/_umath_tests.c.src b/numpy/_core/src/umath/_umath_tests.c.src index 9f2818d14526..845f51ebc94f 100644 --- a/numpy/_core/src/umath/_umath_tests.c.src +++ b/numpy/_core/src/umath/_umath_tests.c.src @@ -944,7 +944,7 @@ PyMODINIT_FUNC PyInit__umath_tests(void) { // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); #endif diff --git a/numpy/_core/tests/examples/limited_api/limited_api_latest.c b/numpy/_core/tests/examples/limited_api/limited_api_latest.c index 13668f2f0ebf..92d83ea977a1 100644 --- a/numpy/_core/tests/examples/limited_api/limited_api_latest.c +++ b/numpy/_core/tests/examples/limited_api/limited_api_latest.c @@ -1,11 +1,11 @@ -#if Py_LIMITED_API != PY_VERSION_HEX & 0xffff0000 - # error "Py_LIMITED_API not defined to Python major+minor version" -#endif - #include #include #include +#if Py_LIMITED_API != PY_VERSION_HEX & 0xffff0000 + # error "Py_LIMITED_API not defined to Python major+minor version" +#endif + static PyModuleDef moduledef = { .m_base = PyModuleDef_HEAD_INIT, .m_name = "limited_api_latest" diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index 667ef287f92b..68c49e60028e 100644 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -286,7 +286,7 @@ #initcommonhooks# #interface_usercode# -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal whether this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m , #gil_used#); #endif diff --git a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c index b66672a43e21..25866f1a40ec 100644 --- 
a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c +++ b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c @@ -223,7 +223,7 @@ PyMODINIT_FUNC PyInit_test_array_from_pyobj_ext(void) { on_exit(f2py_report_on_exit,(void*)"array_from_pyobj.wrap.call"); #endif -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal whether this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); #endif From 3f99204758206967cce4af0f460de4946443f62d Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 17 Jun 2025 19:30:32 +0200 Subject: [PATCH 0128/1018] MAINT: bump `mypy` to `1.16.1` (#29219) --- environment.yml | 2 +- requirements/test_requirements.txt | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/environment.yml b/environment.yml index 770a83218133..75c6626abaf8 100644 --- a/environment.yml +++ b/environment.yml @@ -25,7 +25,7 @@ dependencies: - hypothesis # For type annotations - typing_extensions>=4.5.0 - - mypy=1.16.0 + - mypy=1.16.1 - orjson # makes mypy faster # For building docs - sphinx>=4.5.0 diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 17260753db4a..e50919ef4f7b 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -12,9 +12,8 @@ pytest-timeout # For testing types. Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # NOTE: Keep mypy in sync with environment.yml -mypy==1.16.0; platform_python_implementation != "PyPy" +mypy==1.16.1; platform_python_implementation != "PyPy" typing_extensions>=4.5.0 # for optional f2py encoding detection charset-normalizer tzdata - From f6a17f099ba3ae2acabc288bf2297fd821725ffc Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 17 Jun 2025 19:31:38 +0200 Subject: [PATCH 0129/1018] TYP: Workaround for a mypy issue in ``ndarray.__iter__`` (#29218) --- numpy/__init__.pyi | 12 ++++++++---- numpy/typing/tests/data/reveal/ndarray_misc.pyi | 10 ++++++++++ 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 272e52f88e83..ce879e208e49 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -807,6 +807,7 @@ _RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) _FloatingT_co = TypeVar("_FloatingT_co", bound=floating, default=floating, covariant=True) _IntegerT = TypeVar("_IntegerT", bound=integer) _IntegerT_co = TypeVar("_IntegerT_co", bound=integer, default=integer, covariant=True) +_NonObjectScalarT = TypeVar("_NonObjectScalarT", bound=np.bool | number | flexible | datetime64 | timedelta64) _NBit = TypeVar("_NBit", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] _NBit1 = TypeVar("_NBit1", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] @@ -2572,10 +2573,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __len__(self) -> int: ... def __contains__(self, value: object, /) -> builtins.bool: ... - @overload # == 1-d & object_ - def __iter__(self: ndarray[tuple[int], dtype[object_]], /) -> Iterator[Any]: ... - @overload # == 1-d - def __iter__(self: ndarray[tuple[int], dtype[_ScalarT]], /) -> Iterator[_ScalarT]: ... + # NOTE: This weird `Never` tuple works around a strange mypy issue where it assigns + # `tuple[int]` to `tuple[Never]` or `tuple[int, int]` to `tuple[Never, Never]`. + # This way the bug only occurs for 9-D arrays, which are probably not very common. 
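+    # Illustrative consequence: only a (rare) 9-D array falls into the `Never`
+    # overload below and degrades to `Iterator[Any]`; the common 1-D and >= 2-D
+    # cases still resolve to the precise overloads that follow.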
+ @overload + def __iter__(self: ndarray[tuple[Never, Never, Never, Never, Never, Never, Never, Never, Never]], /) -> Iterator[Any]: ... + @overload # == 1-d & dtype[T \ object_] + def __iter__(self: ndarray[tuple[int], dtype[_NonObjectScalarT]], /) -> Iterator[_NonObjectScalarT]: ... @overload # >= 2-d def __iter__(self: ndarray[tuple[int, int, *tuple[int, ...]], dtype[_ScalarT]], /) -> Iterator[NDArray[_ScalarT]]: ... @overload # ?-d diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index 465ce7679b49..4cbb90621ca9 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -6,6 +6,7 @@ function-based counterpart in `../from_numeric.py`. """ +from collections.abc import Iterator import ctypes as ct import operator from types import ModuleType @@ -29,6 +30,10 @@ AR_m: npt.NDArray[np.timedelta64] AR_U: npt.NDArray[np.str_] AR_V: npt.NDArray[np.void] +AR_f8_1d: np.ndarray[tuple[int], np.dtype[np.float64]] +AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] +AR_f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] + ctypes_obj = AR_f8.ctypes assert_type(AR_f8.__dlpack__(), CapsuleType) @@ -235,3 +240,8 @@ assert_type(AR_m.to_device("cpu"), npt.NDArray[np.timedelta64]) assert_type(f8.__array_namespace__(), ModuleType) assert_type(AR_f8.__array_namespace__(), ModuleType) + +assert_type(iter(AR_f8), Iterator[Any]) # any-D +assert_type(iter(AR_f8_1d), Iterator[np.float64]) # 1-D +assert_type(iter(AR_f8_2d), Iterator[npt.NDArray[np.float64]]) # 2-D +assert_type(iter(AR_f8_3d), Iterator[npt.NDArray[np.float64]]) # 3-D From 32f4afaad7cb035e1b46987285d34c27c57dbe57 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Moritz=20Gro=C3=9F?= Date: Tue, 17 Jun 2025 19:34:26 +0200 Subject: [PATCH 0130/1018] ENH: improve Timsort with powersort merge-policy (#29208) Implement the improved merge policy for Timsort, as developed by Munro and Wild. Benchmarks show a significant improvement in performance. --- numpy/_core/src/npysort/timsort.cpp | 110 +++++++++++++--------------- 1 file changed, 51 insertions(+), 59 deletions(-) diff --git a/numpy/_core/src/npysort/timsort.cpp b/numpy/_core/src/npysort/timsort.cpp index 0f0f5721e7cf..9ecf88c0aeb9 100644 --- a/numpy/_core/src/npysort/timsort.cpp +++ b/numpy/_core/src/npysort/timsort.cpp @@ -39,8 +39,9 @@ #include #include -/* enough for 32 * 1.618 ** 128 elements */ -#define TIMSORT_STACK_SIZE 128 +/* enough for 32 * 1.618 ** 128 elements. + If powersort was used in all cases, 90 would suffice, as 32 * 2 ** 90 >= 32 * 1.618 ** 128 */ +#define RUN_STACK_SIZE 128 static npy_intp compute_min_run(npy_intp num) @@ -58,6 +59,7 @@ compute_min_run(npy_intp num) typedef struct { npy_intp s; /* start pointer */ npy_intp l; /* length */ + int power; /* node "level" for powersort merge strategy */ } run; /* buffer for argsort. Declared here to avoid multiple declarations. */ @@ -383,60 +385,51 @@ merge_at_(type *arr, const run *stack, const npy_intp at, buffer_ *buffer) return 0; } -template +/* See https://github.com/python/cpython/blob/ea23c897cd25702e72a04e06664f6864f07a7c5d/Objects/listsort.txt +* for a detailed explanation. +* In CPython, *num* is called *n*, but we changed it for consistency with the NumPy implementation. 
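+* Rough intuition: normalize run boundaries to [0, 1) and take the midpoint
+* of each run; the "power" of the boundary between two adjacent runs is the
+* position of the first bit at which the binary expansions of the two
+* midpoints, (2*s1 + n1)/(2*num) and (2*(s1 + n1) + n2)/(2*num), differ.
+* found_new_run_() then merges while the run below the top of the stack has
+* a strictly greater power, which keeps the merge tree nearly optimal.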
+*/ static int -try_collapse_(type *arr, run *stack, npy_intp *stack_ptr, buffer_ *buffer) +powerloop(npy_intp s1, npy_intp n1, npy_intp n2, npy_intp num) { - int ret; - npy_intp A, B, C, top; - top = *stack_ptr; - - while (1 < top) { - B = stack[top - 2].l; - C = stack[top - 1].l; - - if ((2 < top && stack[top - 3].l <= B + C) || - (3 < top && stack[top - 4].l <= stack[top - 3].l + B)) { - A = stack[top - 3].l; - - if (A <= C) { - ret = merge_at_(arr, stack, top - 3, buffer); - - if (NPY_UNLIKELY(ret < 0)) { - return ret; - } - - stack[top - 3].l += B; - stack[top - 2] = stack[top - 1]; - --top; - } - else { - ret = merge_at_(arr, stack, top - 2, buffer); - - if (NPY_UNLIKELY(ret < 0)) { - return ret; - } - - stack[top - 2].l += C; - --top; - } + int result = 0; + npy_intp a = 2 * s1 + n1; /* 2*a */ + npy_intp b = a + n1 + n2; /* 2*b */ + for (;;) { + ++result; + if (a >= num) { /* both quotient bits are 1 */ + a -= num; + b -= num; } - else if (1 < top && B <= C) { - ret = merge_at_(arr, stack, top - 2, buffer); + else if (b >= num) { /* a/num bit is 0, b/num bit is 1 */ + break; + } + a <<= 1; + b <<= 1; + } + return result; +} +template +static int +found_new_run_(type *arr, run *stack, npy_intp *stack_ptr, npy_intp n2, + npy_intp num, buffer_ *buffer) +{ + int ret; + if (*stack_ptr > 0) { + npy_intp s1 = stack[*stack_ptr - 1].s; + npy_intp n1 = stack[*stack_ptr - 1].l; + int power = powerloop(s1, n1, n2, num); + while (*stack_ptr > 1 && stack[*stack_ptr - 2].power > power) { + ret = merge_at_(arr, stack, *stack_ptr - 2, buffer); if (NPY_UNLIKELY(ret < 0)) { return ret; } - - stack[top - 2].l += C; - --top; - } - else { - break; + stack[*stack_ptr - 2].l += stack[*stack_ptr - 1].l; + --(*stack_ptr); } + stack[*stack_ptr - 1].power = power; } - - *stack_ptr = top; return 0; } @@ -491,7 +484,7 @@ timsort_(void *start, npy_intp num) int ret; npy_intp l, n, stack_ptr, minrun; buffer_ buffer; - run stack[TIMSORT_STACK_SIZE]; + run stack[RUN_STACK_SIZE]; buffer.pw = NULL; buffer.size = 0; stack_ptr = 0; @@ -499,15 +492,14 @@ timsort_(void *start, npy_intp num) for (l = 0; l < num;) { n = count_run_((type *)start, l, num, minrun); + ret = found_new_run_((type *)start, stack, &stack_ptr, n, num, &buffer); + if (NPY_UNLIKELY(ret < 0)) + goto cleanup; + + // Push the new run onto the stack. 
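+        // (Its `power` field is filled in lazily by found_new_run_ once the
+        // next run is discovered.)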
         stack[stack_ptr].s = l;
         stack[stack_ptr].l = n;
         ++stack_ptr;
-        ret = try_collapse_((type *)start, stack, &stack_ptr, &buffer);
-
-        if (NPY_UNLIKELY(ret < 0)) {
-            goto cleanup;
-        }
-
         l += n;
     }

@@ -897,7 +889,7 @@ atimsort_(void *v, npy_intp *tosort, npy_intp num)
     int ret;
     npy_intp l, n, stack_ptr, minrun;
     buffer_intp buffer;
-    run stack[TIMSORT_STACK_SIZE];
+    run stack[RUN_STACK_SIZE];
     buffer.pw = NULL;
     buffer.size = 0;
     stack_ptr = 0;
@@ -1371,7 +1363,7 @@ string_timsort_(void *start, npy_intp num, void *varr)
     size_t len = elsize / sizeof(type);
     int ret;
     npy_intp l, n, stack_ptr, minrun;
-    run stack[TIMSORT_STACK_SIZE];
+    run stack[RUN_STACK_SIZE];
     string_buffer_ buffer;

     /* Items that have zero size don't make sense to sort */
@@ -1800,7 +1792,7 @@ string_atimsort_(void *start, npy_intp *tosort, npy_intp num, void *varr)
     size_t len = elsize / sizeof(type);
     int ret;
     npy_intp l, n, stack_ptr, minrun;
-    run stack[TIMSORT_STACK_SIZE];
+    run stack[RUN_STACK_SIZE];
     buffer_intp buffer;

     /* Items that have zero size don't make sense to sort */
@@ -2253,7 +2245,7 @@ npy_timsort(void *start, npy_intp num, void *varr)
     PyArray_CompareFunc *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare;
     int ret;
     npy_intp l, n, stack_ptr, minrun;
-    run stack[TIMSORT_STACK_SIZE];
+    run stack[RUN_STACK_SIZE];
     buffer_char buffer;

     /* Items that have zero size don't make sense to sort */
@@ -2689,7 +2681,7 @@ npy_atimsort(void *start, npy_intp *tosort, npy_intp num, void *varr)
     PyArray_CompareFunc *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare;
    int ret;
     npy_intp l, n, stack_ptr, minrun;
-    run stack[TIMSORT_STACK_SIZE];
+    run stack[RUN_STACK_SIZE];
     buffer_intp buffer;

     /* Items that have zero size don't make sense to sort */

From d1c67573569885087a253120c1a9f2caf3ccf084 Mon Sep 17 00:00:00 2001
From: Brad Smith
Date: Tue, 17 Jun 2025 14:56:21 -0400
Subject: [PATCH 0131/1018] ENH: Detect CPU features on OpenBSD ARM and PowerPC64

Also, while looking at this, I noticed via a compiler warning that
npy__cpu_init_features_linux() was not enabled on FreeBSD by the
original commit c47e9621ebf76f8085ff5ec8b01c07921d14f6a7, so that code
path was doing nothing on FreeBSD ARM.
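
For reference, OpenBSD exposes the ELF auxiliary vector through
elf_aux_info(3) just like FreeBSD, so the existing weak-symbol getauxval()
shim is reused unchanged. A minimal sketch of the underlying call (assuming
<sys/auxv.h> is available):

    unsigned long hwcap = 0;
    if (elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap)) != 0) {
        hwcap = 0;  /* aux vector not available */
    }
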
--- numpy/_core/src/common/npy_cpu_features.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c index f15f636cdb1e..617225ec9eff 100644 --- a/numpy/_core/src/common/npy_cpu_features.c +++ b/numpy/_core/src/common/npy_cpu_features.c @@ -562,7 +562,7 @@ npy__cpu_init_features(void) #elif defined(NPY_CPU_PPC64) || defined(NPY_CPU_PPC64LE) -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) #ifdef __FreeBSD__ #include // defines PPC_FEATURE_HAS_VSX #endif @@ -585,7 +585,7 @@ static void npy__cpu_init_features(void) { memset(npy__cpu_have, 0, sizeof(npy__cpu_have[0]) * NPY_CPU_FEATURE_MAX); -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) #ifdef __linux__ unsigned int hwcap = getauxval(AT_HWCAP); if ((hwcap & PPC_FEATURE_HAS_VSX) == 0) @@ -612,7 +612,7 @@ npy__cpu_init_features(void) npy__cpu_have[NPY_CPU_FEATURE_VSX2] = (hwcap & PPC_FEATURE2_ARCH_2_07) != 0; npy__cpu_have[NPY_CPU_FEATURE_VSX3] = (hwcap & PPC_FEATURE2_ARCH_3_00) != 0; npy__cpu_have[NPY_CPU_FEATURE_VSX4] = (hwcap & PPC_FEATURE2_ARCH_3_1) != 0; -// TODO: AIX, OpenBSD +// TODO: AIX #else npy__cpu_have[NPY_CPU_FEATURE_VSX] = 1; #if defined(NPY_CPU_PPC64LE) || defined(NPY_HAVE_VSX2) @@ -698,7 +698,7 @@ npy__cpu_init_features_arm8(void) npy__cpu_have[NPY_CPU_FEATURE_ASIMD] = 1; } -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) /* * we aren't sure of what kind kernel or clib we deal with * so we play it safe @@ -709,7 +709,7 @@ npy__cpu_init_features_arm8(void) #if defined(__linux__) __attribute__((weak)) unsigned long getauxval(unsigned long); // linker should handle it #endif -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) || defined(__OpenBSD__) __attribute__((weak)) int elf_aux_info(int, void *, int); // linker should handle it static unsigned long getauxval(unsigned long k) @@ -807,7 +807,7 @@ static void npy__cpu_init_features(void) { memset(npy__cpu_have, 0, sizeof(npy__cpu_have[0]) * NPY_CPU_FEATURE_MAX); -#ifdef __linux__ +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) if (npy__cpu_init_features_linux()) return; #endif From b6a6740e106277f4b1081210bbd158fed3e0d20c Mon Sep 17 00:00:00 2001 From: Mugundanmcw Date: Wed, 18 Jun 2025 18:16:30 +0530 Subject: [PATCH 0132/1018] CI: Add conditions to check hash of LLVM package --- .github/windows_arm64_steps/action.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/windows_arm64_steps/action.yml b/.github/windows_arm64_steps/action.yml index 86517b6246e7..8ecb3b8a0cdd 100644 --- a/.github/windows_arm64_steps/action.yml +++ b/.github/windows_arm64_steps/action.yml @@ -4,10 +4,16 @@ description: "Setup LLVM for Win-ARM64 builds" runs: using: "composite" steps: - - name: Install LLVM + - name: Install LLVM with checksum verification shell: pwsh run: | Invoke-WebRequest https://github.com/llvm/llvm-project/releases/download/llvmorg-20.1.6/LLVM-20.1.6-woa64.exe -UseBasicParsing -OutFile LLVM-woa64.exe + $expectedHash = "92f69a1134e32e54b07d51c6e24d9594852f6476f32c3d70471ae00fffc2d462" + $fileHash = (Get-FileHash -Path "LLVM-woa64.exe" -Algorithm SHA256).Hash + if ($fileHash -ne $expectedHash) { + Write-Error "Checksum verification failed. The downloaded file may be corrupted or tampered with." 
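+        # Abort before the unverified installer would be executed.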
+ exit 1 + } Start-Process -FilePath ".\LLVM-woa64.exe" -ArgumentList "/S" -Wait echo "C:\Program Files\LLVM\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append echo "CC=clang-cl" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append From d52b36ee0b8bf3e7ca9eec19d8c4b41ff2c04f59 Mon Sep 17 00:00:00 2001 From: Michael Date: Wed, 18 Jun 2025 19:32:32 +0200 Subject: [PATCH 0133/1018] strides comparison performance fix, compare discussion #29179 (#29188) --- numpy/_core/src/umath/matmul.c.src | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/umath/matmul.c.src b/numpy/_core/src/umath/matmul.c.src index 02c4fde56bf2..6f54aeb4d968 100644 --- a/numpy/_core/src/umath/matmul.c.src +++ b/numpy/_core/src/umath/matmul.c.src @@ -27,6 +27,8 @@ ***************************************************************************** */ +#define ABS(x) ((x) < 0 ? -(x) : (x)) + #if defined(HAVE_CBLAS) /* * -1 to be conservative, in case blas internally uses a for loop with an @@ -554,9 +556,9 @@ NPY_NO_EXPORT void } else { /* matrix @ matrix * copy if not blasable, see gh-12365 & gh-23588 */ - npy_bool i1_transpose = is1_m < is1_n, - i2_transpose = is2_n < is2_p, - o_transpose = os_m < os_p; + npy_bool i1_transpose = ABS(is1_m) < ABS(is1_n), + i2_transpose = ABS(is2_n) < ABS(is2_p), + o_transpose = ABS(os_m) < ABS(os_p); npy_intp tmp_is1_m = i1_transpose ? sz : sz*dn, tmp_is1_n = i1_transpose ? sz*dm : sz, From 2b0eda38f099203bb72572a8b640b37165fc7dc5 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 17 Jun 2025 17:49:17 +0200 Subject: [PATCH 0134/1018] TYP: Support iteration of ``StringDType`` arrays --- numpy/__init__.pyi | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index ce879e208e49..bcbe95accee6 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2577,11 +2577,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # `tuple[int]` to `tuple[Never]` or `tuple[int, int]` to `tuple[Never, Never]`. # This way the bug only occurs for 9-D arrays, which are probably not very common. @overload - def __iter__(self: ndarray[tuple[Never, Never, Never, Never, Never, Never, Never, Never, Never]], /) -> Iterator[Any]: ... + def __iter__( + self: ndarray[tuple[Never, Never, Never, Never, Never, Never, Never, Never, Never], Any], / + ) -> Iterator[Any]: ... @overload # == 1-d & dtype[T \ object_] def __iter__(self: ndarray[tuple[int], dtype[_NonObjectScalarT]], /) -> Iterator[_NonObjectScalarT]: ... + @overload # == 1-d & StringDType + def __iter__(self: ndarray[tuple[int], dtypes.StringDType], /) -> Iterator[str]: ... @overload # >= 2-d - def __iter__(self: ndarray[tuple[int, int, *tuple[int, ...]], dtype[_ScalarT]], /) -> Iterator[NDArray[_ScalarT]]: ... + def __iter__(self: ndarray[tuple[int, int, *tuple[int, ...]], _DTypeT], /) -> Iterator[ndarray[_AnyShape, _DTypeT]]: ... @overload # ?-d def __iter__(self, /) -> Iterator[Any]: ... From f15a116ee12ec539c7ed1fafb3a0abfb82005e0f Mon Sep 17 00:00:00 2001 From: Developer-Ecosystem-Engineering <65677710+Developer-Ecosystem-Engineering@users.noreply.github.com> Date: Wed, 18 Jun 2025 13:00:27 -0700 Subject: [PATCH 0135/1018] BUG: Address interaction between SME and FPSR (#29223) * BUG: Address interaction between SME and FPSR This is intended to resolve https://github.com/numpy/numpy/issues/28687 The root cause is an interaction between Arm Scalable Matrix Extension (SME) and the floating point status register (FPSR). 
As noted in Arm docs for FPSR, "On entry to or exit from Streaming SVE mode, FPSR.{IOC, DZC, OFC, UFC, IXC, IDC, QC} are set to 1 and the remaining bits are set to 0". This means that floating point status flags are all raised when SME is used, regardless of values or operations performed. These are manifesting now because Apple Silicon M4 supports SME and macOS 15.4 enables SME codepaths for Accelerate BLAS / LAPACK. However, SME / FPSR behavior is not specific to Apple Silicon M4 and will occur on non-Apple chips using SME as well. Changes add compile and runtime checks to determine whether BLAS / LAPACK might use SME (macOS / Accelerate only at the moment). If so, special handling of floating-point error (FPE) is added, which includes: - clearing FPE after some BLAS calls - short-circuiting FPE read after some BLAS calls All tests pass Performance is similar Another approach would have been to wrap all BLAS / LAPACK calls with save / restore FPE. However, it added a lot of overhead for the inner loops that utilize BLAS / LAPACK. Some benchmarks were 8x slower. * add blas_supports_fpe and ifdef check Address the linker & linter failures --- numpy/_core/meson.build | 1 + numpy/_core/src/common/blas_utils.c | 134 ++++++++++++++++++ numpy/_core/src/common/blas_utils.h | 30 ++++ numpy/_core/src/common/cblasfuncs.c | 3 +- numpy/_core/src/multiarray/multiarraymodule.c | 5 + numpy/_core/src/umath/matmul.c.src | 36 ++++- numpy/_core/tests/test_multiarray.py | 6 + numpy/testing/_private/utils.py | 10 ++ 8 files changed, 217 insertions(+), 8 deletions(-) create mode 100644 numpy/_core/src/common/blas_utils.c create mode 100644 numpy/_core/src/common/blas_utils.h diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index a4d2050122c6..cd46a20b0246 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1117,6 +1117,7 @@ src_multiarray_umath_common = [ ] if have_blas src_multiarray_umath_common += [ + 'src/common/blas_utils.c', 'src/common/cblasfuncs.c', 'src/common/python_xerbla.c', ] diff --git a/numpy/_core/src/common/blas_utils.c b/numpy/_core/src/common/blas_utils.c new file mode 100644 index 000000000000..aaf976ed70e4 --- /dev/null +++ b/numpy/_core/src/common/blas_utils.c @@ -0,0 +1,134 @@ +#include +#include +#include + +#ifdef __APPLE__ +#include +#endif + +#include "numpy/numpyconfig.h" // NPY_VISIBILITY_HIDDEN +#include "numpy/npy_math.h" // npy_get_floatstatus_barrier +#include "blas_utils.h" + +#if NPY_BLAS_CHECK_FPE_SUPPORT + +/* Return whether we're running on macOS 15.4 or later + */ +static inline bool +is_macOS_version_15_4_or_later(void){ +#if !defined(__APPLE__) + return false; +#else + char *osProductVersion = NULL; + size_t size = 0; + bool ret = false; + + // Query how large OS version string should be + if(-1 == sysctlbyname("kern.osproductversion", NULL, &size, NULL, 0)){ + goto cleanup; + } + + osProductVersion = malloc(size + 1); + + // Get the OS version string + if(-1 == sysctlbyname("kern.osproductversion", osProductVersion, &size, NULL, 0)){ + goto cleanup; + } + + osProductVersion[size] = '\0'; + + // Parse the version string + int major = 0, minor = 0; + if(2 > sscanf(osProductVersion, "%d.%d", &major, &minor)) { + goto cleanup; + } + + if(major >= 15 && minor >= 4){ + ret = true; + } + +cleanup: + if(osProductVersion){ + free(osProductVersion); + } + + return ret; +#endif +} + +/* ARM Scalable Matrix Extension (SME) raises all floating-point error flags + * when it's used regardless of values or operations. 
As a consequence, + * when SME is used, all FPE state is lost and special handling is needed. + * + * For NumPy, SME is not currently used directly, but can be used via + * BLAS / LAPACK libraries. This function does a runtime check for whether + * BLAS / LAPACK can use SME and special handling around FPE is required. + */ +static inline bool +BLAS_can_use_ARM_SME(void) +{ +#if defined(__APPLE__) && defined(__aarch64__) && defined(ACCELERATE_NEW_LAPACK) + // ARM SME can be used by Apple's Accelerate framework for BLAS / LAPACK + // - macOS 15.4+ + // - Apple silicon M4+ + + // Does OS / Accelerate support ARM SME? + if(!is_macOS_version_15_4_or_later()){ + return false; + } + + // Does hardware support SME? + int has_SME = 0; + size_t size = sizeof(has_SME); + if(-1 == sysctlbyname("hw.optional.arm.FEAT_SME", &has_SME, &size, NULL, 0)){ + return false; + } + + if(has_SME){ + return true; + } +#endif + + // default assume SME is not used + return false; +} + +/* Static variable to cache runtime check of BLAS FPE support. + */ +static bool blas_supports_fpe = true; + +#endif // NPY_BLAS_CHECK_FPE_SUPPORT + + +NPY_VISIBILITY_HIDDEN bool +npy_blas_supports_fpe(void) +{ +#if NPY_BLAS_CHECK_FPE_SUPPORT + return blas_supports_fpe; +#else + return true; +#endif +} + +NPY_VISIBILITY_HIDDEN void +npy_blas_init(void) +{ +#if NPY_BLAS_CHECK_FPE_SUPPORT + blas_supports_fpe = !BLAS_can_use_ARM_SME(); +#endif +} + +NPY_VISIBILITY_HIDDEN int +npy_get_floatstatus_after_blas(void) +{ +#if NPY_BLAS_CHECK_FPE_SUPPORT + if(!blas_supports_fpe){ + // BLAS does not support FPE and we need to return FPE state. + // Instead of clearing and then grabbing state, just return + // that no flags are set. + return 0; + } +#endif + char *param = NULL; + return npy_get_floatstatus_barrier(param); +} diff --git a/numpy/_core/src/common/blas_utils.h b/numpy/_core/src/common/blas_utils.h new file mode 100644 index 000000000000..8c1437f88899 --- /dev/null +++ b/numpy/_core/src/common/blas_utils.h @@ -0,0 +1,30 @@ +#include + +#include "numpy/numpyconfig.h" // for NPY_VISIBILITY_HIDDEN + +/* NPY_BLAS_CHECK_FPE_SUPPORT controls whether we need a runtime check + * for floating-point error (FPE) support in BLAS. + */ +#if defined(__APPLE__) && defined(__aarch64__) && defined(ACCELERATE_NEW_LAPACK) +#define NPY_BLAS_CHECK_FPE_SUPPORT 1 +#else +#define NPY_BLAS_CHECK_FPE_SUPPORT 0 +#endif + +/* Initialize BLAS environment, if needed + */ +NPY_VISIBILITY_HIDDEN void +npy_blas_init(void); + +/* Runtime check if BLAS supports floating-point errors. + * true - BLAS supports FPE and one can rely on them to indicate errors + * false - BLAS does not support FPE. Special handling needed for FPE state + */ +NPY_VISIBILITY_HIDDEN bool +npy_blas_supports_fpe(void); + +/* If BLAS supports FPE, exactly the same as npy_get_floatstatus_barrier(). + * Otherwise, we can't rely on FPE state and need special handling. 
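+ *
+ * Typical call pattern (as used in cblasfuncs.c in this patch):
+ *
+ *     int fpes = npy_get_floatstatus_after_blas();
+ *     if (fpes && PyUFunc_GiveFloatingpointErrors("dot", fpes) < 0) {
+ *         goto fail;
+ *     }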
+ */ +NPY_VISIBILITY_HIDDEN int +npy_get_floatstatus_after_blas(void); diff --git a/numpy/_core/src/common/cblasfuncs.c b/numpy/_core/src/common/cblasfuncs.c index f9d683d812d4..66a215dfeb64 100644 --- a/numpy/_core/src/common/cblasfuncs.c +++ b/numpy/_core/src/common/cblasfuncs.c @@ -12,6 +12,7 @@ #include "numpy/arrayobject.h" #include "numpy/npy_math.h" #include "numpy/ufuncobject.h" +#include "blas_utils.h" #include "npy_cblas.h" #include "arraytypes.h" #include "common.h" @@ -693,7 +694,7 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2, NPY_END_ALLOW_THREADS; } - int fpes = npy_get_floatstatus_barrier((char *) result); + int fpes = npy_get_floatstatus_after_blas(); if (fpes && PyUFunc_GiveFloatingpointErrors("dot", fpes) < 0) { goto fail; } diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 7724756ba351..dcfb1226a0ab 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -43,6 +43,7 @@ NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0; #include "arraytypes.h" #include "arrayobject.h" #include "array_converter.h" +#include "blas_utils.h" #include "hashdescr.h" #include "descriptor.h" #include "dragon4.h" @@ -4781,6 +4782,10 @@ _multiarray_umath_exec(PyObject *m) { return -1; } +#if NPY_BLAS_CHECK_FPE_SUPPORT + npy_blas_init(); +#endif + #if defined(MS_WIN64) && defined(__GNUC__) PyErr_WarnEx(PyExc_Warning, "Numpy built with MINGW-W64 on Windows 64 bits is experimental, " \ diff --git a/numpy/_core/src/umath/matmul.c.src b/numpy/_core/src/umath/matmul.c.src index 6f54aeb4d968..11e014acec7f 100644 --- a/numpy/_core/src/umath/matmul.c.src +++ b/numpy/_core/src/umath/matmul.c.src @@ -16,6 +16,7 @@ +#include "blas_utils.h" #include "npy_cblas.h" #include "arraytypes.h" /* For TYPE_dot functions */ @@ -122,7 +123,7 @@ static inline void } } -NPY_NO_EXPORT void +static void @name@_gemv(void *ip1, npy_intp is1_m, npy_intp is1_n, void *ip2, npy_intp is2_n, void *op, npy_intp op_m, @@ -158,7 +159,7 @@ NPY_NO_EXPORT void is2_n / sizeof(@typ@), @step0@, op, op_m / sizeof(@typ@)); } -NPY_NO_EXPORT void +static void @name@_matmul_matrixmatrix(void *ip1, npy_intp is1_m, npy_intp is1_n, void *ip2, npy_intp is2_n, npy_intp is2_p, void *op, npy_intp os_m, npy_intp os_p, @@ -262,7 +263,7 @@ NPY_NO_EXPORT void * #IS_HALF = 0, 0, 0, 1, 0*13# */ -NPY_NO_EXPORT void +static void @TYPE@_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n, void *_ip2, npy_intp is2_n, npy_intp is2_p, void *_op, npy_intp os_m, npy_intp os_p, @@ -320,7 +321,7 @@ NPY_NO_EXPORT void } /**end repeat**/ -NPY_NO_EXPORT void +static void BOOL_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n, void *_ip2, npy_intp is2_n, npy_intp is2_p, void *_op, npy_intp os_m, npy_intp os_p, @@ -359,7 +360,7 @@ BOOL_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n, } } -NPY_NO_EXPORT void +static void OBJECT_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n, void *_ip2, npy_intp is2_n, npy_intp is2_p, void *_op, npy_intp os_m, npy_intp os_p, @@ -631,6 +632,11 @@ NPY_NO_EXPORT void #endif } #if @USEBLAS@ && defined(HAVE_CBLAS) +#if NPY_BLAS_CHECK_FPE_SUPPORT + if (!npy_blas_supports_fpe()) { + npy_clear_floatstatus_barrier((char*)args); + } +#endif if (allocate_buffer) free(tmp_ip12op); #endif } @@ -655,7 +661,7 @@ NPY_NO_EXPORT void * #prefix = c, z, 0# * #USE_BLAS = 1, 1, 0# */ -NPY_NO_EXPORT void +static void @name@_dotc(char *ip1, npy_intp is1, char *ip2, npy_intp 
is2, char *op, npy_intp n, void *NPY_UNUSED(ignore)) { @@ -751,6 +757,7 @@ OBJECT_dotc(char *ip1, npy_intp is1, char *ip2, npy_intp is2, char *op, npy_intp * CFLOAT, CDOUBLE, CLONGDOUBLE, OBJECT# * #DOT = dot*15, dotc*4# * #CHECK_PYERR = 0*18, 1# + * #CHECK_BLAS = 1*2, 0*13, 1*2, 0*2# */ NPY_NO_EXPORT void @TYPE@_vecdot(char **args, npy_intp const *dimensions, npy_intp const *steps, @@ -774,6 +781,11 @@ NPY_NO_EXPORT void } #endif } +#if @CHECK_BLAS@ && NPY_BLAS_CHECK_FPE_SUPPORT + if (!npy_blas_supports_fpe()) { + npy_clear_floatstatus_barrier((char*)args); + } +#endif } /**end repeat**/ @@ -789,7 +801,7 @@ NPY_NO_EXPORT void * #step1 = &oneF, &oneD# * #step0 = &zeroF, &zeroD# */ -NPY_NO_EXPORT void +static void @name@_vecmat_via_gemm(void *ip1, npy_intp is1_n, void *ip2, npy_intp is2_n, npy_intp is2_m, void *op, npy_intp os_m, @@ -880,6 +892,11 @@ NPY_NO_EXPORT void #endif } } +#if @USEBLAS@ && NPY_BLAS_CHECK_FPE_SUPPORT + if (!npy_blas_supports_fpe()) { + npy_clear_floatstatus_barrier((char*)args); + } +#endif } /**end repeat**/ @@ -945,5 +962,10 @@ NPY_NO_EXPORT void #endif } } +#if @USEBLAS@ && NPY_BLAS_CHECK_FPE_SUPPORT + if (!npy_blas_supports_fpe()) { + npy_clear_floatstatus_barrier((char*)args); + } +#endif } /**end repeat**/ diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index b164f1dada3b..81e55deb3daf 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -31,6 +31,7 @@ from numpy.exceptions import AxisError, ComplexWarning from numpy.lib.recfunctions import repack_fields from numpy.testing import ( + BLAS_SUPPORTS_FPE, HAS_REFCOUNT, IS_64BIT, IS_PYPY, @@ -3363,6 +3364,11 @@ def test_dot(self): @pytest.mark.parametrize("dtype", [np.half, np.double, np.longdouble]) @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") def test_dot_errstate(self, dtype): + # Some dtypes use BLAS for 'dot' operation and + # not all BLAS support floating-point errors. + if not BLAS_SUPPORTS_FPE and dtype == np.double: + pytest.skip("BLAS does not support FPE") + a = np.array([1, 1], dtype=dtype) b = np.array([-np.inf, np.inf], dtype=dtype) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index d7ceaeab72cc..65f4059f98fd 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -42,6 +42,7 @@ 'assert_no_gc_cycles', 'break_cycles', 'HAS_LAPACK64', 'IS_PYSTON', 'IS_MUSL', 'check_support_sve', 'NOGIL_BUILD', 'IS_EDITABLE', 'IS_INSTALLED', 'NUMPY_ROOT', 'run_threaded', 'IS_64BIT', + 'BLAS_SUPPORTS_FPE', ] @@ -89,6 +90,15 @@ class KnownFailureException(Exception): IS_PYPY = sys.implementation.name == 'pypy' IS_PYSTON = hasattr(sys, "pyston_version_info") HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None and not IS_PYSTON +BLAS_SUPPORTS_FPE = True +if platform.system() == 'Darwin' or platform.machine() == 'arm64': + try: + blas = np.__config__.CONFIG['Build Dependencies']['blas'] + if blas['name'] == 'accelerate': + BLAS_SUPPORTS_FPE = False + except KeyError: + pass + HAS_LAPACK64 = numpy.linalg._umath_linalg._ilp64 IS_MUSL = False From 5cbc0bcebbe031140b63d2ffba06b73e471b43b2 Mon Sep 17 00:00:00 2001 From: Brad Smith Date: Tue, 17 Jun 2025 19:57:18 -0400 Subject: [PATCH 0136/1018] ENH: Detect CPU features on FreeBSD / OpenBSD RISC-V64. 
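On FreeBSD and OpenBSD the AT_HWCAP value is read with elf_aux_info(3)
instead of Linux's getauxval(3), hence the ifdef split in the diff below.
As a quick sanity check, the detected features can be inspected from
Python through the private ``__cpu_features__`` mapping that NumPy's own
test suite uses (a sketch only; this mapping is not public API):

    from numpy._core._multiarray_umath import __cpu_features__

    # True where runtime detection saw the RISC-V Vector extension (RVV)
    print(__cpu_features__.get("RVV", False))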
---
 numpy/_core/src/common/npy_cpu_features.c | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)

diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c
index f15f636cdb1e..06c82fe41e27 100644
--- a/numpy/_core/src/common/npy_cpu_features.c
+++ b/numpy/_core/src/common/npy_cpu_features.c
@@ -846,22 +846,30 @@ npy__cpu_init_features(void)
 
 #elif defined(__riscv) && __riscv_xlen == 64
 
-#include <sys/auxv.h>
+#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__)
+    #include <sys/auxv.h>
 
-#ifndef HWCAP_RVV
-    // https://github.com/torvalds/linux/blob/v6.8/arch/riscv/include/uapi/asm/hwcap.h#L24
-    #define COMPAT_HWCAP_ISA_V    (1 << ('V' - 'A'))
+    #ifndef HWCAP_RVV
+        // https://github.com/torvalds/linux/blob/v6.8/arch/riscv/include/uapi/asm/hwcap.h#L24
+        #define COMPAT_HWCAP_ISA_V    (1 << ('V' - 'A'))
+    #endif
 #endif
 
 static void
 npy__cpu_init_features(void)
 {
     memset(npy__cpu_have, 0, sizeof(npy__cpu_have[0]) * NPY_CPU_FEATURE_MAX);
-
+#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__)
+#ifdef __linux__
     unsigned int hwcap = getauxval(AT_HWCAP);
+#else
+    unsigned long hwcap;
+    elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap));
+#endif
     if (hwcap & COMPAT_HWCAP_ISA_V) {
         npy__cpu_have[NPY_CPU_FEATURE_RVV] = 1;
     }
+#endif
 }
 
 /*********** Unsupported ARCH ***********/

From 7a74e12558789ecf0252dad56a77560db7411d13 Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Thu, 19 Jun 2025 01:18:27 -0600
Subject: [PATCH 0137/1018] BUG: avoid negating unsigned integers in resize
 implementation (#29230)

The negation of an unsigned int underflows and creates a large positive
``repeats`` value, which leads to allocation failures and/or swapping.
---
 numpy/_core/fromnumeric.py        | 3 ++-
 numpy/_core/tests/test_numeric.py | 7 +++++++
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py
index 73dcd1ddc11d..e20d774d014d 100644
--- a/numpy/_core/fromnumeric.py
+++ b/numpy/_core/fromnumeric.py
@@ -1607,7 +1607,8 @@ def resize(a, new_shape):
         # First case must zero fill. The second would have repeats == 0.
         return np.zeros_like(a, shape=new_shape)
 
-    repeats = -(-new_size // a.size)  # ceil division
+    # ceiling division without negating new_size
+    repeats = (new_size + a.size - 1) // a.size
     a = concatenate((a,) * repeats)[:new_size]
 
     return reshape(a, new_shape)
diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py
index 8a72e4bfa65d..65da65ddc9f9 100644
--- a/numpy/_core/tests/test_numeric.py
+++ b/numpy/_core/tests/test_numeric.py
@@ -79,6 +79,13 @@ def test_negative_resize(self):
         with pytest.raises(ValueError, match=r"negative"):
             np.resize(A, new_shape=new_shape)
 
+    def test_unsigned_resize(self):
+        # ensure unsigned integer sizes don't lead to underflows
+        for dt_pair in [(np.int32, np.uint32), (np.int64, np.uint64)]:
+            arr = np.array([[23, 95], [66, 37]])
+            assert_array_equal(np.resize(arr, dt_pair[0](1)),
+                               np.resize(arr, dt_pair[1](1)))
+
     def test_subclass(self):
         class MyArray(np.ndarray):
             __array_priority__ = 1.

From 35079afa808ea0fc49bb13c878150ef20c4f64fa Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Thu, 19 Jun 2025 14:21:05 +0200
Subject: [PATCH 0138/1018] TST: Fix test that uses uninitialized memory
 (#29232)

Tests should avoid uninitialized memory in general; this one is worse,
since it can even fail due to warnings.
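For illustration, a minimal sketch of the failure mode that was fixed
(``MyArray`` is a hypothetical stand-in for the test's priority
subclasses):

    import numpy as np

    class MyArray(np.ndarray):
        pass

    # Calling an ndarray subclass constructor with a shape allocates
    # uninitialized memory: the garbage contents vary between runs and
    # can even trigger floating-point warnings when operated on.
    x = MyArray(2)

    # Viewing an initialized array as the subclass is deterministic.
    y = np.ones(2).view(MyArray)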
---
 numpy/_core/tests/test_ufunc.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py
index 21ebc02c2625..a1cd63aec523 100644
--- a/numpy/_core/tests/test_ufunc.py
+++ b/numpy/_core/tests/test_ufunc.py
@@ -2123,9 +2123,9 @@ class ArrayPriorityMinus1000b(ArrayPriorityBase):
         class ArrayPriorityMinus2000(ArrayPriorityBase):
             __array_priority__ = -2000
 
-        x = ArrayPriorityMinus1000(2)
-        xb = ArrayPriorityMinus1000b(2)
-        y = ArrayPriorityMinus2000(2)
+        x = np.ones(2).view(ArrayPriorityMinus1000)
+        xb = np.ones(2).view(ArrayPriorityMinus1000b)
+        y = np.ones(2).view(ArrayPriorityMinus2000)
 
         assert np.add(x, y) is ArrayPriorityMinus1000
         assert np.add(y, x) is ArrayPriorityMinus1000

From 4575abf725d9f14af73ee0442b05c01d0d46a7c1 Mon Sep 17 00:00:00 2001
From: Joren Hammudoglu
Date: Thu, 19 Jun 2025 16:43:25 +0200
Subject: [PATCH 0139/1018] MAINT: bump ``ruff`` to ``0.12.0`` (#29220)

---
 environment.yml                      | 2 +-
 requirements/linter_requirements.txt | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/environment.yml b/environment.yml
index 75c6626abaf8..17de8d3eeb5e 100644
--- a/environment.yml
+++ b/environment.yml
@@ -45,7 +45,7 @@ dependencies:
   # NOTE: breathe 4.33.0 collides with sphinx.ext.graphviz
   - breathe>4.33.0
   # For linting
-  - ruff=0.11.13
+  - ruff=0.12.0
   - gitpython
   # Used in some tests
   - cffi
diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt
index 45319571b561..05319a9bdb8a 100644
--- a/requirements/linter_requirements.txt
+++ b/requirements/linter_requirements.txt
@@ -1,3 +1,3 @@
 # keep in sync with `environment.yml`
-ruff==0.11.13
+ruff==0.12.0
 GitPython>=3.1.30

From 7b30ce7432d0b42bb805fbf5575b0d0ba5be98ab Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Thu, 19 Jun 2025 16:47:49 +0200
Subject: [PATCH 0140/1018] BUG: Enforce integer limitation in concatenate
 (#29231)

* BUG: Enforce integer limitation in concatenate

Concatenate internals only deal with an ``int``'s worth of arrays. That
should be fine in practice, but a SystemError (or, in principle, maybe
even a harder crash) is not.
* skip 32bit systems --- numpy/_core/src/multiarray/multiarraymodule.c | 11 +++++++++-- numpy/_core/tests/test_shape_base.py | 14 ++++++++++++++ 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index dcfb1226a0ab..d4766b5af7b4 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -669,10 +669,17 @@ PyArray_ConcatenateInto(PyObject *op, } /* Convert the input list into arrays */ - narrays = PySequence_Size(op); - if (narrays < 0) { + Py_ssize_t narrays_true = PySequence_Size(op); + if (narrays_true < 0) { return NULL; } + else if (narrays_true > NPY_MAX_INT) { + PyErr_Format(PyExc_ValueError, + "concatenate() only supports up to %d arrays but got %zd.", + NPY_MAX_INT, narrays_true); + return NULL; + } + narrays = (int)narrays_true; arrays = PyArray_malloc(narrays * sizeof(arrays[0])); if (arrays == NULL) { PyErr_NoMemory(); diff --git a/numpy/_core/tests/test_shape_base.py b/numpy/_core/tests/test_shape_base.py index f7b944be08b7..8de24278fc5d 100644 --- a/numpy/_core/tests/test_shape_base.py +++ b/numpy/_core/tests/test_shape_base.py @@ -1,3 +1,5 @@ +import sys + import pytest import numpy as np @@ -29,6 +31,7 @@ assert_raises, assert_raises_regex, ) +from numpy.testing._private.utils import requires_memory class TestAtleast1d: @@ -290,6 +293,17 @@ def test_exceptions(self): # No arrays to concatenate raises ValueError assert_raises(ValueError, concatenate, ()) + @pytest.mark.slow + @pytest.mark.skipif(sys.maxsize < 2**32, reason="only problematic on 64bit platforms") + @requires_memory(2 * np.iinfo(np.intc).max) + def test_huge_list_error(self): + a = np.array([1]) + max_int = np.iinfo(np.intc).max + arrs = (a,) * (max_int + 1) + msg = fr"concatenate\(\) only supports up to {max_int} arrays but got {max_int + 1}." + with pytest.raises(ValueError, match=msg): + np.concatenate(arrs) + def test_concatenate_axis_None(self): a = np.arange(4, dtype=np.float64).reshape((2, 2)) b = list(range(3)) From 1da34ea0daaedb94d2b673c70a47f4a57883a713 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Fri, 20 Jun 2025 07:43:13 +0200 Subject: [PATCH 0141/1018] DEP: Deprecate setting the strides attribute of a numpy array (#28925) Deprecate setting strides (mutating) on an array. --------- Co-authored-by: Charles Harris Co-authored-by: Sebastian Berg Co-authored-by: Joren Hammudoglu --- .../upcoming_changes/28925.deprecation.rst | 9 +++++ numpy/__init__.pyi | 3 +- numpy/_core/src/multiarray/getset.c | 22 ++++++++---- numpy/_core/tests/test_api.py | 3 +- numpy/_core/tests/test_deprecations.py | 7 ++++ numpy/_core/tests/test_half.py | 6 ++-- numpy/_core/tests/test_multiarray.py | 36 ++++++++++++------- numpy/_core/tests/test_nditer.py | 4 +-- numpy/_core/tests/test_regression.py | 23 ++++++------ numpy/lib/_npyio_impl.py | 2 +- 10 files changed, 75 insertions(+), 40 deletions(-) create mode 100644 doc/release/upcoming_changes/28925.deprecation.rst diff --git a/doc/release/upcoming_changes/28925.deprecation.rst b/doc/release/upcoming_changes/28925.deprecation.rst new file mode 100644 index 000000000000..a421839394fa --- /dev/null +++ b/doc/release/upcoming_changes/28925.deprecation.rst @@ -0,0 +1,9 @@ +Setting the ``strides`` attribute is deprecated +----------------------------------------------- +Setting the strides attribute is now deprecated since mutating +an array is unsafe if an array is shared, especially by multiple +threads. 
As an alternative, you can create a new view (no copy) via:
+* `np.lib.stride_tricks.sliding_window_view` if applicable,
+* `np.lib.stride_tricks.as_strided` for the general case,
+* or the `np.ndarray` constructor (``buffer`` is the original array) for a light-weight version.
+
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index bcbe95accee6..2e9dea06ce6a 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -217,7 +217,7 @@ from typing import (
 # library include `typing_extensions` stubs:
 # https://github.com/python/typeshed/blob/main/stdlib/typing_extensions.pyi
 from _typeshed import Incomplete, StrOrBytesPath, SupportsFlush, SupportsLenAndGetItem, SupportsWrite
-from typing_extensions import CapsuleType, TypeVar
+from typing_extensions import CapsuleType, TypeVar, deprecated
 
 from numpy import (
     char,
@@ -2169,6 +2169,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     def shape(self, value: _ShapeLike) -> None: ...
     @property
     def strides(self) -> _Shape: ...
+    @deprecated("Setting the strides on a NumPy array has been deprecated in NumPy 2.4")
     @strides.setter
     def strides(self, value: _ShapeLike) -> None: ...
     def byteswap(self, inplace: builtins.bool = ...) -> Self: ...
diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c
index 8482b6006e3e..48da52dd3178 100644
--- a/numpy/_core/src/multiarray/getset.c
+++ b/numpy/_core/src/multiarray/getset.c
@@ -85,7 +85,7 @@ array_shape_set(PyArrayObject *self, PyObject *val, void* NPY_UNUSED(ignored))
         /* Free old dimensions and strides */
         npy_free_cache_dim_array(self);
         ((PyArrayObject_fields *)self)->nd = nd;
-        ((PyArrayObject_fields *)self)->dimensions = _dimensions;
+        ((PyArrayObject_fields *)self)->dimensions = _dimensions;
         ((PyArrayObject_fields *)self)->strides = _dimensions + nd;
 
         if (nd) {
@@ -95,7 +95,7 @@ array_shape_set(PyArrayObject *self, PyObject *val, void* NPY_UNUSED(ignored))
     }
     else {
         /* Free old dimensions and strides */
-        npy_free_cache_dim_array(self);
+        npy_free_cache_dim_array(self);
         ((PyArrayObject_fields *)self)->nd = 0;
         ((PyArrayObject_fields *)self)->dimensions = NULL;
         ((PyArrayObject_fields *)self)->strides = NULL;
@@ -116,6 +116,19 @@ array_strides_get(PyArrayObject *self, void *NPY_UNUSED(ignored))
 static int
 array_strides_set(PyArrayObject *self, PyObject *obj, void *NPY_UNUSED(ignored))
 {
+    if (obj == NULL) {
+        PyErr_SetString(PyExc_AttributeError,
+                "Cannot delete array strides");
+        return -1;
+    }
+
+    /* Deprecated NumPy 2.4, 2025-05-11 */
+    if (DEPRECATE("Setting the strides on a NumPy array has been deprecated in NumPy 2.4.\n"
+                  "As an alternative, you can create a new view using np.lib.stride_tricks.as_strided."
+ ) < 0 ) { + return -1; + } + PyArray_Dims newstrides = {NULL, -1}; PyArrayObject *new; npy_intp numbytes = 0; @@ -124,11 +137,6 @@ array_strides_set(PyArrayObject *self, PyObject *obj, void *NPY_UNUSED(ignored)) npy_intp upper_offset = 0; Py_buffer view; - if (obj == NULL) { - PyErr_SetString(PyExc_AttributeError, - "Cannot delete array strides"); - return -1; - } if (!PyArray_OptionalIntpConverter(obj, &newstrides) || newstrides.len == -1) { PyErr_SetString(PyExc_TypeError, "invalid strides"); diff --git a/numpy/_core/tests/test_api.py b/numpy/_core/tests/test_api.py index d427ac0399a2..bb21d79c472d 100644 --- a/numpy/_core/tests/test_api.py +++ b/numpy/_core/tests/test_api.py @@ -5,6 +5,7 @@ import numpy as np import numpy._core.umath as ncu from numpy._core._rational_tests import rational +from numpy.lib import stride_tricks from numpy.testing import ( HAS_REFCOUNT, assert_, @@ -558,7 +559,7 @@ def check_copy_result(x, y, ccontig, fcontig, strides=False): def test_contiguous_flags(): a = np.ones((4, 4, 1))[::2, :, :] - a.strides = a.strides[:2] + (-123,) + a = stride_tricks.as_strided(a, strides=a.strides[:2] + (-123,)) b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4) def check_contig(a, ccontig, fcontig): diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index c4acbf9d2d69..7d4875d6d149 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -406,6 +406,13 @@ def __array_wrap__(self, arr): self.assert_deprecated(lambda: np.negative(test2)) assert test2.called +class TestDeprecatedArrayAttributeSetting(_DeprecationTestCase): + message = "Setting the .*on a NumPy array has been deprecated.*" + + def test_deprecated_strides_set(self): + x = np.eye(2) + self.assert_deprecated(setattr, args=(x, 'strides', x.strides)) + class TestDeprecatedDTypeParenthesizedRepeatCount(_DeprecationTestCase): message = "Passing in a parenthesized single number" diff --git a/numpy/_core/tests/test_half.py b/numpy/_core/tests/test_half.py index e2d6e6796db4..711c13655b7a 100644 --- a/numpy/_core/tests/test_half.py +++ b/numpy/_core/tests/test_half.py @@ -21,7 +21,7 @@ class TestHalf: def setup_method(self): # An array of all possible float16 values self.all_f16 = np.arange(0x10000, dtype=uint16) - self.all_f16.dtype = float16 + self.all_f16 = self.all_f16.view(float16) # NaN value can cause an invalid FP exception if HW is being used with np.errstate(invalid='ignore'): @@ -32,7 +32,7 @@ def setup_method(self): self.nonan_f16 = np.concatenate( (np.arange(0xfc00, 0x7fff, -1, dtype=uint16), np.arange(0x0000, 0x7c01, 1, dtype=uint16))) - self.nonan_f16.dtype = float16 + self.nonan_f16 = self.nonan_f16.view(float16) self.nonan_f32 = np.array(self.nonan_f16, dtype=float32) self.nonan_f64 = np.array(self.nonan_f16, dtype=float64) @@ -218,7 +218,7 @@ def test_half_values(self): 0x0001, 0x8001, 0x0000, 0x8000, 0x7c00, 0xfc00], dtype=uint16) - b.dtype = float16 + b = b.view(dtype=float16) assert_equal(a, b) def test_half_rounding(self): diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 81e55deb3daf..0faf35c64b98 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -29,6 +29,7 @@ from numpy._core.multiarray import _get_ndarray_c_version, dot from numpy._core.tests._locales import CommaDecimalPointLocale from numpy.exceptions import AxisError, ComplexWarning +from numpy.lib import stride_tricks from numpy.lib.recfunctions import repack_fields from 
numpy.testing import ( BLAS_SUPPORTS_FPE, @@ -382,7 +383,8 @@ def make_array(size, offset, strides): offset=offset * x.itemsize) except Exception as e: raise RuntimeError(e) - r.strides = strides = strides * x.itemsize + with pytest.warns(DeprecationWarning): + r.strides = strides * x.itemsize return r assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) @@ -392,24 +394,28 @@ def make_array(size, offset, strides): assert_raises(RuntimeError, make_array, 8, 3, 1) # Check that the true extent of the array is used. # Test relies on as_strided base not exposing a buffer. - x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0)) + x = stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0)) def set_strides(arr, strides): - arr.strides = strides + with pytest.warns(DeprecationWarning): + arr.strides = strides assert_raises(ValueError, set_strides, x, (10 * x.itemsize, x.itemsize)) # Test for offset calculations: - x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1], + x = stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1], shape=(10,), strides=(-1,)) assert_raises(ValueError, set_strides, x[::-1], -1) a = x[::-1] - a.strides = 1 - a[::2].strides = 2 + with pytest.warns(DeprecationWarning): + a.strides = 1 + with pytest.warns(DeprecationWarning): + a[::2].strides = 2 # test 0d arr_0d = np.array(0) - arr_0d.strides = () + with pytest.warns(DeprecationWarning): + arr_0d.strides = () assert_raises(TypeError, set_strides, arr_0d, None) def test_fill(self): @@ -3635,7 +3641,7 @@ def test_ravel(self): a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2) strides = list(a.strides) strides[1] = 123 - a.strides = strides + a = stride_tricks.as_strided(a, strides=strides) assert_(a.ravel(order='K').flags.owndata) assert_equal(a.ravel('K'), np.arange(0, 15, 2)) @@ -3644,7 +3650,7 @@ def test_ravel(self): a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2) strides = list(a.strides) strides[1] = 123 - a.strides = strides + a = stride_tricks.as_strided(a, strides=strides) assert_(np.may_share_memory(a.ravel(order='K'), a)) assert_equal(a.ravel(order='K'), np.arange(2**3)) @@ -3657,7 +3663,7 @@ def test_ravel(self): # 1-element tidy strides test: a = np.array([[1]]) - a.strides = (123, 432) + a = stride_tricks.as_strided(a, strides=(123, 432)) if np.ones(1).strides == (8,): assert_(np.may_share_memory(a.ravel('K'), a)) assert_equal(a.ravel('K').strides, (a.dtype.itemsize,)) @@ -4546,7 +4552,8 @@ def test_datetime64_byteorder(self): original = np.array([['2015-02-24T00:00:00.000000000']], dtype='datetime64[ns]') original_byte_reversed = original.copy(order='K') - original_byte_reversed.dtype = original_byte_reversed.dtype.newbyteorder('S') + new_dtype = original_byte_reversed.dtype.newbyteorder('S') + original_byte_reversed = original_byte_reversed.view(dtype=new_dtype) original_byte_reversed.byteswap(inplace=True) new = pickle.loads(pickle.dumps(original_byte_reversed)) @@ -8366,10 +8373,13 @@ def test_padded_struct_array(self): self._check_roundtrip(x3) @pytest.mark.valgrind_error(reason="leaks buffer info cache temporarily.") - def test_relaxed_strides(self, c=np.ones((1, 10, 10), dtype='i8')): # noqa: B008 + def test_relaxed_strides(self, c=stride_tricks.as_strided( # noqa: B008 + np.ones((1, 10, 10), dtype='i8'), # noqa: B008 + strides=(-1, 80, 8) + ) + ): # Note: c defined as parameter so that it is persistent and leak # checks will notice gh-16934 (buffer info cache leak). 
- c.strides = (-1, 80, 8) # strides need to be fixed at export assert_(memoryview(c).strides == (800, 80, 8)) diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index a29a49bfb71a..f71130f16331 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -858,7 +858,7 @@ def test_iter_nbo_align_contig(): # Unaligned input a = np.zeros((6 * 4 + 1,), dtype='i1')[1:] - a.dtype = 'f4' + a = a.view('f4') a[:] = np.arange(6, dtype='f4') assert_(not a.flags.aligned) # Without 'aligned', shouldn't copy @@ -1803,7 +1803,7 @@ def test_iter_buffering(): arrays.append(np.arange(10, dtype='f4')) # Unaligned array a = np.zeros((4 * 16 + 1,), dtype='i1')[1:] - a.dtype = 'i4' + a = a.view('i4') a[:] = np.arange(16, dtype='i4') arrays.append(a) # 4-D F-order array diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index fbfa9311a1dc..3d44728aafaa 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -12,6 +12,7 @@ import numpy as np from numpy._utils import asbytes, asunicode from numpy.exceptions import AxisError, ComplexWarning +from numpy.lib.stride_tricks import as_strided from numpy.testing import ( HAS_REFCOUNT, IS_64BIT, @@ -208,7 +209,7 @@ def test_mem_dot(self): # Dummy array to detect bad memory access: _z = np.ones(10) _dummy = np.empty((0, 10)) - z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides) + z = as_strided(_z, _dummy.shape, _dummy.strides) np.dot(x, np.transpose(y), out=z) assert_equal(_z, np.ones(10)) # Do the same for the built-in dot: @@ -438,19 +439,16 @@ def test_lexsort_zerolen_custom_strides(self): xs = np.array([], dtype='i8') assert np.lexsort((xs,)).shape[0] == 0 # Works - xs.strides = (16,) + xs = as_strided(xs, strides=(16,)) assert np.lexsort((xs,)).shape[0] == 0 # Was: MemoryError def test_lexsort_zerolen_custom_strides_2d(self): xs = np.array([], dtype='i8') + xt = as_strided(xs, shape=(0, 2), strides=(16, 16)) + assert np.lexsort((xt,), axis=0).shape[0] == 0 - xs.shape = (0, 2) - xs.strides = (16, 16) - assert np.lexsort((xs,), axis=0).shape[0] == 0 - - xs.shape = (2, 0) - xs.strides = (16, 16) - assert np.lexsort((xs,), axis=0).shape[0] == 2 + xt = as_strided(xs, shape=(2, 0), strides=(16, 16)) + assert np.lexsort((xt,), axis=0).shape[0] == 2 def test_lexsort_invalid_axis(self): assert_raises(AxisError, np.lexsort, (np.arange(1),), axis=2) @@ -644,7 +642,7 @@ def test_reshape_order(self): def test_reshape_zero_strides(self): # Issue #380, test reshaping of zero strided arrays a = np.ones(1) - a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,)) + a = as_strided(a, shape=(5,), strides=(0,)) assert_(a.reshape(5, 1).strides[0] == 0) def test_reshape_zero_size(self): @@ -1654,7 +1652,7 @@ def test_eq_string_and_object_array(self): def test_nonzero_byteswap(self): a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32) - a.dtype = np.float32 + a = a.view(np.float32) assert_equal(a.nonzero()[0], [1]) a = a.byteswap() a = a.view(a.dtype.newbyteorder()) @@ -1878,7 +1876,8 @@ def test_alignment_update(self): # Check that alignment flag is updated on stride setting a = np.arange(10) assert_(a.flags.aligned) - a.strides = 3 + with pytest.warns(DeprecationWarning): + a.strides = 3 assert_(not a.flags.aligned) def test_ticket_1770(self): diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index f284eeb74834..36ead97a1aae 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -1730,7 
+1730,7 @@ def fromregex(file, regexp, dtype, encoding=None): # re-interpret as a single-field structured array. newdtype = np.dtype(dtype[dtype.names[0]]) output = np.array(seq, dtype=newdtype) - output.dtype = dtype + output = output.view(dtype) else: output = np.array(seq, dtype=dtype) From 20d034fff6931b97780e9f22172309d81a5e8322 Mon Sep 17 00:00:00 2001 From: Koki Watanabe <56009584+math-hiyoko@users.noreply.github.com> Date: Fri, 20 Jun 2025 22:56:31 +0900 Subject: [PATCH 0142/1018] ENH: np.unique: support hash based unique for string dtype (#28767) * Support NPY_STRING, NPY_UNICODE * unique for NPY_STRING and NPY_UNICODE * fix construct array * remove unneccessary include * refactor * refactoring * comment * feature: unique for NPY_VSTRING * refactoring * remove unneccessary include * add test * add error message * linter * linter * reserve bucket * remove emoji from testcase * fix testcase * remove error * fix testcase * fix testcase name * use basic_string * fix testcase * add ValueError * fix testcase * fix memory error * remove multibyte char * refactoring * add multibyte char * refactoring * fix memory error * fix GIL * fix strlen * remove PyArray_GETPTR1 * refactoring * refactoring * use optional * refactoring * refactoring * refactoring * refactoring * fix comment * linter * add doc * DOC: fix * DOC: fix format * MNT: refactoring * MNT: refactoring * ENH: Store pointers to strings in the set instead of the strings themselves. * FIX: length in memcmp * ENH: refactoring * DOC: 49sec -> 34sec * Update numpy/lib/_arraysetops_impl.py Co-authored-by: Nathan Goldbaum * DOC: Mention that hash-based np.unique returns unsorted strings * ENH: support medium and long vstrings * FIX: comment * ENH: use RAII wrapper * FIX: error handling of string packing * FIX: error handling of string packing * FIX: change default bucket size * FIX: include * FIX: cast * ENH: support equal_nan=False * FIX: function equal * FIX: check the case if pack_status douesn't return NULL * FIX: check the case if pack_status douesn't return NULL * FIX: stderr * ENH: METH_VARARGS -> METH_FASTCALL * FIX: log * FIX: release allocator * FIX: comment * FIX: delete log * ENH: implemented FNV-1a as hash function * bool -> npy_bool * FIX: cast * 34sec -> 35.1sec * fix: lint * fix: cast using const void * * fix: fix fnv1a hash * fix: lint * 35.1sec -> 33.5sec * enh: define macro HASH_TABLE_INITIAL_BUCKETS * enh: error handling of NpyString_load * enh: delete comments on GIL * fix: PyErr_SetString when NpyString_load failed * fix: PyErr_SetString -> npy_gil_error --------- Co-authored-by: Nathan Goldbaum --- doc/release/upcoming_changes/28767.change.rst | 10 + .../upcoming_changes/28767.performance.rst | 10 + numpy/_core/meson.build | 1 + numpy/_core/src/multiarray/fnv.c | 85 ++++ numpy/_core/src/multiarray/fnv.h | 26 ++ numpy/_core/src/multiarray/multiarraymodule.c | 2 +- numpy/_core/src/multiarray/unique.cpp | 368 ++++++++++++++---- numpy/_core/src/multiarray/unique.h | 3 +- numpy/lib/_arraysetops_impl.py | 3 +- numpy/lib/tests/test_arraysetops.py | 186 ++++++++- 10 files changed, 605 insertions(+), 89 deletions(-) create mode 100644 doc/release/upcoming_changes/28767.change.rst create mode 100644 doc/release/upcoming_changes/28767.performance.rst create mode 100644 numpy/_core/src/multiarray/fnv.c create mode 100644 numpy/_core/src/multiarray/fnv.h diff --git a/doc/release/upcoming_changes/28767.change.rst b/doc/release/upcoming_changes/28767.change.rst new file mode 100644 index 000000000000..ec173c3672b0 --- /dev/null 
+++ b/doc/release/upcoming_changes/28767.change.rst @@ -0,0 +1,10 @@ +``unique_values`` for string dtypes may return unsorted data +------------------------------------------------------------ +np.unique now supports hash‐based duplicate removal for string dtypes. +This enhancement extends the hash-table algorithm to byte strings ('S'), +Unicode strings ('U'), and the experimental string dtype ('T', StringDType). +As a result, calling np.unique() on an array of strings will use +the faster hash-based method to obtain unique values. +Note that this hash-based method does not guarantee that the returned unique values will be sorted. +This also works for StringDType arrays containing None (missing values) +when using equal_nan=True (treating missing values as equal). diff --git a/doc/release/upcoming_changes/28767.performance.rst b/doc/release/upcoming_changes/28767.performance.rst new file mode 100644 index 000000000000..ef8ac1c3a45d --- /dev/null +++ b/doc/release/upcoming_changes/28767.performance.rst @@ -0,0 +1,10 @@ +Performance improvements to ``np.unique`` for string dtypes +----------------------------------------------------------- +The hash-based algorithm for unique extraction provides +an order-of-magnitude speedup on large string arrays. +In an internal benchmark with about 1 billion string elements, +the hash-based np.unique completed in roughly 33.5 seconds, +compared to 498 seconds with the sort-based method +– about 15× faster for unsorted unique operations on strings. +This improvement greatly reduces the time to find unique values +in very large string datasets. diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index cd46a20b0246..6098986618e4 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1207,6 +1207,7 @@ src_multiarray = multiarray_gen_headers + [ # Remove this `arm64_exports.c` file once scipy macos arm64 build correctly # links to the arm64 npymath library, see gh-22673 'src/npymath/arm64_exports.c', + 'src/multiarray/fnv.c', ] src_umath = umath_gen_headers + [ diff --git a/numpy/_core/src/multiarray/fnv.c b/numpy/_core/src/multiarray/fnv.c new file mode 100644 index 000000000000..2b7848519e61 --- /dev/null +++ b/numpy/_core/src/multiarray/fnv.c @@ -0,0 +1,85 @@ +/* + FNV-1a hash algorithm implementation + Based on the implementation from: + https://github.com/lcn2/fnv +*/ + +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + +#include +#include "numpy/npy_common.h" +#include "fnv.h" + + +#define FNV1A_32_INIT ((npy_uint32)0x811c9dc5) +#define FNV1A_64_INIT ((npy_uint64)0xcbf29ce484222325ULL) + +/* + Compute a 32-bit FNV-1a hash of buffer + original implementation from: + https://github.com/lcn2/fnv/blob/b7fcbee95538ee6a15744e756e7e7f1c02862cb0/hash_32a.c +*/ +npy_uint32 +npy_fnv1a_32(const void *buf, size_t len, npy_uint32 hval) +{ + const unsigned char *bp = (const unsigned char *)buf; /* start of buffer */ + const unsigned char *be = bp + len; /* beyond end of buffer */ + + /* + FNV-1a hash each octet in the buffer + */ + while (bp < be) { + + /* xor the bottom with the current octet */ + hval ^= (npy_uint32)*bp++; + + /* multiply by the 32 bit FNV magic prime */ + /* hval *= 0x01000193; */ + hval += (hval<<1) + (hval<<4) + (hval<<7) + (hval<<8) + (hval<<24); + } + + return hval; +} + +/* + Compute a 64-bit FNV-1a hash of the given data + original implementation from: + https://github.com/lcn2/fnv/blob/b7fcbee95538ee6a15744e756e7e7f1c02862cb0/hash_64a.c +*/ +npy_uint64 +npy_fnv1a_64(const void *buf, size_t 
len, npy_uint64 hval) +{ + const unsigned char *bp = (const unsigned char *)buf; /* start of buffer */ + const unsigned char *be = bp + len; /* beyond end of buffer */ + + /* + FNV-1a hash each octet in the buffer + */ + while (bp < be) { + + /* xor the bottom with the current octet */ + hval ^= (npy_uint64)*bp++; + + /* multiply by the 64 bit FNV magic prime */ + /* hval *= 0x100000001b3ULL; */ + hval += (hval << 1) + (hval << 4) + (hval << 5) + + (hval << 7) + (hval << 8) + (hval << 40); + } + + return hval; +} + +/* + * Compute a size_t FNV-1a hash of the given data + * This will use 32-bit or 64-bit hash depending on the size of size_t + */ +size_t +npy_fnv1a(const void *buf, size_t len) +{ +#if NPY_SIZEOF_SIZE_T == 8 + return (size_t)npy_fnv1a_64(buf, len, FNV1A_64_INIT); +#else /* NPY_SIZEOF_SIZE_T == 4 */ + return (size_t)npy_fnv1a_32(buf, len, FNV1A_32_INIT); +#endif +} diff --git a/numpy/_core/src/multiarray/fnv.h b/numpy/_core/src/multiarray/fnv.h new file mode 100644 index 000000000000..c76f54a645b9 --- /dev/null +++ b/numpy/_core/src/multiarray/fnv.h @@ -0,0 +1,26 @@ +/* + FNV-1a hash algorithm implementation + Based on the implementation from: + https://github.com/lcn2/fnv +*/ + +#ifndef NUMPY_CORE_INCLUDE_NUMPY_MULTIARRAY_FNV_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_MULTIARRAY_FNV_H_ + + +/* + Compute a size_t FNV-1a hash of the given data + This will use 32-bit or 64-bit hash depending on the size of size_t + + Parameters: + ----------- + buf - pointer to the data to be hashed + len - length of the data in bytes + + Returns: + ----------- + size_t hash value +*/ +size_t npy_fnv1a(const void *buf, size_t len); + +#endif // NUMPY_CORE_INCLUDE_NUMPY_MULTIARRAY_FNV_H_ diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index d4766b5af7b4..e80c6c0cd45c 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -4579,7 +4579,7 @@ static struct PyMethodDef array_module_methods[] = { {"from_dlpack", (PyCFunction)from_dlpack, METH_FASTCALL | METH_KEYWORDS, NULL}, {"_unique_hash", (PyCFunction)array__unique_hash, - METH_O, "Collect unique values via a hash map."}, + METH_FASTCALL | METH_KEYWORDS, "Collect unique values via a hash map."}, {NULL, NULL, 0, NULL} /* sentinel */ }; diff --git a/numpy/_core/src/multiarray/unique.cpp b/numpy/_core/src/multiarray/unique.cpp index f36acfdef49a..636f1ef0137c 100644 --- a/numpy/_core/src/multiarray/unique.cpp +++ b/numpy/_core/src/multiarray/unique.cpp @@ -1,13 +1,21 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE +#define HASH_TABLE_INITIAL_BUCKETS 1024 #include -#include +#include +#include #include +#include #include #include "numpy/arrayobject.h" +#include "gil_utils.h" +extern "C" { + #include "fnv.h" + #include "npy_argparse.h" +} // This is to use RAII pattern to handle cpp exceptions while avoiding memory leaks. // Adapted from https://stackoverflow.com/a/25510879/2536294 @@ -18,77 +26,128 @@ struct FinalAction { private: F clean_; }; - template FinalAction finally(F f) { return FinalAction(f); } -template +template static PyObject* -unique(PyArrayObject *self) +unique_integer(PyArrayObject *self, npy_bool equal_nan) { - /* This function takes a numpy array and returns a numpy array containing - the unique values. - - It assumes the numpy array includes data that can be viewed as unsigned integers - of a certain size (sizeof(T)). 
- - It doesn't need to know the actual type, since it needs to find unique values - among binary representations of the input data. This means it won't apply to - custom or complicated dtypes or string values. + /* + * Returns a new NumPy array containing the unique values of the input array of integer. + * This function uses hashing to identify uniqueness efficiently. */ NPY_ALLOW_C_API_DEF; - std::unordered_set hashset; - - NpyIter *iter = NpyIter_New(self, NPY_ITER_READONLY | - NPY_ITER_EXTERNAL_LOOP | - NPY_ITER_REFS_OK | - NPY_ITER_ZEROSIZE_OK | - NPY_ITER_GROWINNER, - NPY_KEEPORDER, NPY_NO_CASTING, - NULL); - // Making sure the iterator is deallocated when the function returns, with - // or w/o an exception - auto iter_dealloc = finally([&]() { NpyIter_Deallocate(iter); }); - if (iter == NULL) { - return NULL; + NPY_ALLOW_C_API; + PyArray_Descr *descr = PyArray_DESCR(self); + Py_INCREF(descr); + NPY_DISABLE_C_API; + + PyThreadState *_save1 = PyEval_SaveThread(); + + // number of elements in the input array + npy_intp isize = PyArray_SIZE(self); + + // Reserve hashset capacity in advance to minimize reallocations and collisions. + // We use min(isize, HASH_TABLE_INITIAL_BUCKETS) as the initial bucket count: + // - Reserving for all elements (isize) may over-allocate when there are few unique values. + // - Using a moderate upper bound HASH_TABLE_INITIAL_BUCKETS(1024) keeps memory usage reasonable (4 KiB for pointers). + // See discussion: https://github.com/numpy/numpy/pull/28767#discussion_r2064267631 + std::unordered_set hashset(std::min(isize, (npy_intp)HASH_TABLE_INITIAL_BUCKETS)); + + // Input array is one-dimensional, enabling efficient iteration using strides. + char *idata = PyArray_BYTES(self); + npy_intp istride = PyArray_STRIDES(self)[0]; + for (npy_intp i = 0; i < isize; i++, idata += istride) { + hashset.insert(*(T *)idata); } - NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL); - if (iternext == NULL) { + npy_intp length = hashset.size(); + + PyEval_RestoreThread(_save1); + NPY_ALLOW_C_API; + PyObject *res_obj = PyArray_NewFromDescr( + &PyArray_Type, + descr, + 1, // ndim + &length, // shape + NULL, // strides + NULL, // data + // This flag is needed to be able to call .sort on it. + NPY_ARRAY_WRITEABLE, // flags + NULL // obj + ); + + if (res_obj == NULL) { return NULL; } - char **dataptr = NpyIter_GetDataPtrArray(iter); - npy_intp *strideptr = NpyIter_GetInnerStrideArray(iter); - npy_intp *innersizeptr = NpyIter_GetInnerLoopSizePtr(iter); - - // release the GIL - PyThreadState *_save; - _save = PyEval_SaveThread(); - // Making sure the GIL is re-acquired when the function returns, with - // or w/o an exception - auto grab_gil = finally([&]() { PyEval_RestoreThread(_save); }); - // first we put the data in a hash map - - if (NpyIter_GetIterSize(iter) > 0) { - do { - char* data = *dataptr; - npy_intp stride = *strideptr; - npy_intp count = *innersizeptr; - - while (count--) { - hashset.insert(*((T *) data)); - data += stride; - } - } while (iternext(iter)); + NPY_DISABLE_C_API; + PyThreadState *_save2 = PyEval_SaveThread(); + auto save2_dealloc = finally([&]() { + PyEval_RestoreThread(_save2); + }); + + char *odata = PyArray_BYTES((PyArrayObject *)res_obj); + npy_intp ostride = PyArray_STRIDES((PyArrayObject *)res_obj)[0]; + // Output array is one-dimensional, enabling efficient iteration using strides. 
+ for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { + *(T *)odata = *it; } - npy_intp length = hashset.size(); + return res_obj; +} +template +static PyObject* +unique_string(PyArrayObject *self, npy_bool equal_nan) +{ + /* + * Returns a new NumPy array containing the unique values of the input array of fixed size strings. + * This function uses hashing to identify uniqueness efficiently. + */ + NPY_ALLOW_C_API_DEF; NPY_ALLOW_C_API; PyArray_Descr *descr = PyArray_DESCR(self); Py_INCREF(descr); + NPY_DISABLE_C_API; + + PyThreadState *_save1 = PyEval_SaveThread(); + + // number of elements in the input array + npy_intp isize = PyArray_SIZE(self); + + // variables for the string + npy_intp itemsize = descr->elsize; + npy_intp num_chars = itemsize / sizeof(T); + auto hash = [num_chars](const T *value) -> size_t { + return npy_fnv1a(value, num_chars * sizeof(T)); + }; + auto equal = [itemsize](const T *lhs, const T *rhs) -> bool { + return std::memcmp(lhs, rhs, itemsize) == 0; + }; + + // Reserve hashset capacity in advance to minimize reallocations and collisions. + // We use min(isize, HASH_TABLE_INITIAL_BUCKETS) as the initial bucket count: + // - Reserving for all elements (isize) may over-allocate when there are few unique values. + // - Using a moderate upper bound HASH_TABLE_INITIAL_BUCKETS(1024) keeps memory usage reasonable (4 KiB for pointers). + // See discussion: https://github.com/numpy/numpy/pull/28767#discussion_r2064267631 + std::unordered_set hashset( + std::min(isize, (npy_intp)HASH_TABLE_INITIAL_BUCKETS), hash, equal + ); + + // Input array is one-dimensional, enabling efficient iteration using strides. + char *idata = PyArray_BYTES(self); + npy_intp istride = PyArray_STRIDES(self)[0]; + for (npy_intp i = 0; i < isize; i++, idata += istride) { + hashset.insert((T *)idata); + } + + npy_intp length = hashset.size(); + + PyEval_RestoreThread(_save1); + NPY_ALLOW_C_API; PyObject *res_obj = PyArray_NewFromDescr( &PyArray_Type, descr, @@ -100,18 +159,147 @@ unique(PyArrayObject *self) NPY_ARRAY_WRITEABLE, // flags NULL // obj ); + + if (res_obj == NULL) { + return NULL; + } NPY_DISABLE_C_API; + PyThreadState *_save2 = PyEval_SaveThread(); + auto save2_dealloc = finally([&]() { + PyEval_RestoreThread(_save2); + }); + + char *odata = PyArray_BYTES((PyArrayObject *)res_obj); + npy_intp ostride = PyArray_STRIDES((PyArrayObject *)res_obj)[0]; + // Output array is one-dimensional, enabling efficient iteration using strides. + for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { + std::memcpy(odata, *it, itemsize); + } + + return res_obj; +} + +static PyObject* +unique_vstring(PyArrayObject *self, npy_bool equal_nan) +{ + /* + * Returns a new NumPy array containing the unique values of the input array. + * This function uses hashing to identify uniqueness efficiently. 
+ */ + NPY_ALLOW_C_API_DEF; + NPY_ALLOW_C_API; + PyArray_Descr *descr = PyArray_DESCR(self); + Py_INCREF(descr); + NPY_DISABLE_C_API; + + PyThreadState *_save1 = PyEval_SaveThread(); + + // number of elements in the input array + npy_intp isize = PyArray_SIZE(self); + + // variables for the vstring + npy_string_allocator *in_allocator = NpyString_acquire_allocator((PyArray_StringDTypeObject *)descr); + auto hash = [equal_nan](const npy_static_string *value) -> size_t { + if (value->buf == NULL) { + if (equal_nan) { + return 0; + } else { + return std::hash{}(value); + } + } + return npy_fnv1a(value->buf, value->size * sizeof(char)); + }; + auto equal = [equal_nan](const npy_static_string *lhs, const npy_static_string *rhs) -> bool { + if (lhs->buf == NULL && rhs->buf == NULL) { + if (equal_nan) { + return true; + } else { + return lhs == rhs; + } + } + if (lhs->buf == NULL || rhs->buf == NULL) { + return false; + } + if (lhs->size != rhs->size) { + return false; + } + return std::memcmp(lhs->buf, rhs->buf, lhs->size) == 0; + }; + // Reserve hashset capacity in advance to minimize reallocations and collisions. + // We use min(isize, HASH_TABLE_INITIAL_BUCKETS) as the initial bucket count: + // - Reserving for all elements (isize) may over-allocate when there are few unique values. + // - Using a moderate upper bound HASH_TABLE_INITIAL_BUCKETS(1024) keeps memory usage reasonable (4 KiB for pointers). + // See discussion: https://github.com/numpy/numpy/pull/28767#discussion_r2064267631 + std::unordered_set hashset( + std::min(isize, (npy_intp)HASH_TABLE_INITIAL_BUCKETS), hash, equal + ); + + // Input array is one-dimensional, enabling efficient iteration using strides. + char *idata = PyArray_BYTES(self); + npy_intp istride = PyArray_STRIDES(self)[0]; + // unpacked_strings need to be allocated outside of the loop because of the lifetime problem. + std::vector unpacked_strings(isize, {0, NULL}); + for (npy_intp i = 0; i < isize; i++, idata += istride) { + npy_packed_static_string *packed_string = (npy_packed_static_string *)idata; + int is_null = NpyString_load(in_allocator, packed_string, &unpacked_strings[i]); + if (is_null == -1) { + npy_gil_error(PyExc_RuntimeError, + "Failed to load string from packed static string. "); + return NULL; + } + hashset.insert(&unpacked_strings[i]); + } + + NpyString_release_allocator(in_allocator); + + npy_intp length = hashset.size(); + + PyEval_RestoreThread(_save1); + NPY_ALLOW_C_API; + PyObject *res_obj = PyArray_NewFromDescr( + &PyArray_Type, + descr, + 1, // ndim + &length, // shape + NULL, // strides + NULL, // data + // This flag is needed to be able to call .sort on it. 
+        NPY_ARRAY_WRITEABLE, // flags
+        NULL // obj
+    );
     if (res_obj == NULL) {
         return NULL;
     }
+    PyArray_Descr *res_descr = PyArray_DESCR((PyArrayObject *)res_obj);
+    Py_INCREF(res_descr);
+    NPY_DISABLE_C_API;
+
+    PyThreadState *_save2 = PyEval_SaveThread();
+    auto save2_dealloc = finally([&]() {
+        PyEval_RestoreThread(_save2);
+    });
+
+    npy_string_allocator *out_allocator = NpyString_acquire_allocator((PyArray_StringDTypeObject *)res_descr);
+    auto out_allocator_dealloc = finally([&]() {
+        NpyString_release_allocator(out_allocator);
+    });
 
-    // then we iterate through the map's keys to get the unique values
-    T* data = (T *)PyArray_DATA((PyArrayObject *)res_obj);
-    auto it = hashset.begin();
-    size_t i = 0;
-    for (; it != hashset.end(); it++, i++) {
-        data[i] = *it;
+    char *odata = PyArray_BYTES((PyArrayObject *)res_obj);
+    npy_intp ostride = PyArray_STRIDES((PyArrayObject *)res_obj)[0];
+    // Output array is one-dimensional, enabling efficient iteration using strides.
+    for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) {
+        npy_packed_static_string *packed_string = (npy_packed_static_string *)odata;
+        int pack_status = 0;
+        if ((*it)->buf == NULL) {
+            pack_status = NpyString_pack_null(out_allocator, packed_string);
+        } else {
+            pack_status = NpyString_pack(out_allocator, packed_string, (*it)->buf, (*it)->size);
+        }
+        if (pack_status == -1) {
+            // string packing failed
+            return NULL;
+        }
     }
 
     return res_obj;
 }
@@ -119,27 +307,30 @@
 
 
 // this map contains the functions used for each item size.
-typedef std::function<PyObject *(PyArrayObject *)> function_type;
+typedef std::function<PyObject *(PyArrayObject *, npy_bool)> function_type;
 std::unordered_map<int, function_type> unique_funcs = {
-    {NPY_BYTE, unique<npy_byte>},
-    {NPY_UBYTE, unique<npy_ubyte>},
-    {NPY_SHORT, unique<npy_short>},
-    {NPY_USHORT, unique<npy_ushort>},
-    {NPY_INT, unique<npy_int>},
-    {NPY_UINT, unique<npy_uint>},
-    {NPY_LONG, unique<npy_long>},
-    {NPY_ULONG, unique<npy_ulong>},
-    {NPY_LONGLONG, unique<npy_longlong>},
-    {NPY_ULONGLONG, unique<npy_ulonglong>},
-    {NPY_INT8, unique<npy_int8>},
-    {NPY_INT16, unique<npy_int16>},
-    {NPY_INT32, unique<npy_int32>},
-    {NPY_INT64, unique<npy_int64>},
-    {NPY_UINT8, unique<npy_uint8>},
-    {NPY_UINT16, unique<npy_uint16>},
-    {NPY_UINT32, unique<npy_uint32>},
-    {NPY_UINT64, unique<npy_uint64>},
-    {NPY_DATETIME, unique<npy_datetime>},
+    {NPY_BYTE, unique_integer<npy_byte>},
+    {NPY_UBYTE, unique_integer<npy_ubyte>},
+    {NPY_SHORT, unique_integer<npy_short>},
+    {NPY_USHORT, unique_integer<npy_ushort>},
+    {NPY_INT, unique_integer<npy_int>},
+    {NPY_UINT, unique_integer<npy_uint>},
+    {NPY_LONG, unique_integer<npy_long>},
+    {NPY_ULONG, unique_integer<npy_ulong>},
+    {NPY_LONGLONG, unique_integer<npy_longlong>},
+    {NPY_ULONGLONG, unique_integer<npy_ulonglong>},
+    {NPY_INT8, unique_integer<npy_int8>},
+    {NPY_INT16, unique_integer<npy_int16>},
+    {NPY_INT32, unique_integer<npy_int32>},
+    {NPY_INT64, unique_integer<npy_int64>},
+    {NPY_UINT8, unique_integer<npy_uint8>},
+    {NPY_UINT16, unique_integer<npy_uint16>},
+    {NPY_UINT32, unique_integer<npy_uint32>},
+    {NPY_UINT64, unique_integer<npy_uint64>},
+    {NPY_DATETIME, unique_integer<npy_datetime>},
+    {NPY_STRING, unique_string<npy_byte>},
+    {NPY_UNICODE, unique_string<npy_ucs4>},
+    {NPY_VSTRING, unique_vstring},
 };
@@ -154,14 +345,21 @@ std::unordered_map<int, function_type> unique_funcs = {
  * type is unsupported or `NULL` with an error set.
*/ extern "C" NPY_NO_EXPORT PyObject * -array__unique_hash(PyObject *NPY_UNUSED(module), PyObject *arr_obj) +array__unique_hash(PyObject *NPY_UNUSED(module), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - if (!PyArray_Check(arr_obj)) { - PyErr_SetString(PyExc_TypeError, - "_unique_hash() requires a NumPy array input."); + PyArrayObject *arr = NULL; + npy_bool equal_nan = NPY_TRUE; // default to True + + NPY_PREPARE_ARGPARSER; + if (npy_parse_arguments("_unique_hash", args, len_args, kwnames, + "arr", &PyArray_Converter, &arr, + "|equal_nan", &PyArray_BoolConverter, &equal_nan, + NULL, NULL, NULL + ) < 0 + ) { return NULL; } - PyArrayObject *arr = (PyArrayObject *)arr_obj; try { auto type = PyArray_TYPE(arr); @@ -170,7 +368,7 @@ array__unique_hash(PyObject *NPY_UNUSED(module), PyObject *arr_obj) Py_RETURN_NOTIMPLEMENTED; } - return unique_funcs[type](arr); + return unique_funcs[type](arr, equal_nan); } catch (const std::bad_alloc &e) { PyErr_NoMemory(); diff --git a/numpy/_core/src/multiarray/unique.h b/numpy/_core/src/multiarray/unique.h index 3e258405e8f4..7b3fb143ada4 100644 --- a/numpy/_core/src/multiarray/unique.h +++ b/numpy/_core/src/multiarray/unique.h @@ -5,7 +5,8 @@ extern "C" { #endif -PyObject* array__unique_hash(PyObject *NPY_UNUSED(dummy), PyObject *args); +PyObject* array__unique_hash(PyObject *NPY_UNUSED(dummy), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames); #ifdef __cplusplus } diff --git a/numpy/lib/_arraysetops_impl.py b/numpy/lib/_arraysetops_impl.py index ef0739ba486f..c4788385b924 100644 --- a/numpy/lib/_arraysetops_impl.py +++ b/numpy/lib/_arraysetops_impl.py @@ -368,7 +368,8 @@ def _unique1d(ar, return_index=False, return_inverse=False, conv = _array_converter(ar) ar_, = conv - if (hash_unique := _unique_hash(ar_)) is not NotImplemented: + if (hash_unique := _unique_hash(ar_, equal_nan=equal_nan)) \ + is not NotImplemented: if sorted: hash_unique.sort() # We wrap the result back in case it was a subclass of numpy.ndarray. 
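For reference, a minimal usage sketch of the new fast path (the
hash-based route is taken for string dtypes and returns the unique
values in unspecified order):

    import numpy as np

    a = np.array(["pear", "apple", "pear", "banana"])

    # sorted=False lets np.unique use the hash-based implementation;
    # the order of the result is unspecified.
    u = np.unique(a, sorted=False)
    assert set(u.tolist()) == {"apple", "banana", "pear"}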
diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index 7865e1b16ee9..b3e2bfa279b0 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -5,6 +5,7 @@ import numpy as np from numpy import ediff1d, intersect1d, isin, setdiff1d, setxor1d, union1d, unique +from numpy.dtypes import StringDType from numpy.exceptions import AxisError from numpy.testing import ( assert_array_equal, @@ -813,7 +814,9 @@ def test_unique_1d(self): def test_unique_zero_sized(self): # test for zero-sized arrays - for dt in self.get_types(): + types = self.get_types() + types.extend('SU') + for dt in types: a = np.array([], dt) b = np.array([], dt) i1 = np.array([], np.int64) @@ -838,6 +841,187 @@ class Subclass(np.ndarray): bb = Subclass(b.shape, dtype=dt, buffer=b) self.check_all(aa, bb, i1, i2, c, dt) + def test_unique_byte_string_hash_based(self): + # test for byte string arrays + arr = ['apple', 'banana', 'apple', 'cherry', 'date', 'banana', 'fig', 'grape'] + unq_sorted = ['apple', 'banana', 'cherry', 'date', 'fig', 'grape'] + + a1 = unique(arr, sorted=False) + # the result varies depending on the impl of std::unordered_set, + # so we check them by sorting + assert_array_equal(sorted(a1.tolist()), unq_sorted) + + def test_unique_unicode_string_hash_based(self): + # test for unicode string arrays + arr = [ + 'café', 'cafe', 'café', 'naïve', 'naive', + 'résumé', 'naïve', 'resume', 'résumé', + ] + unq_sorted = ['cafe', 'café', 'naive', 'naïve', 'resume', 'résumé'] + + a1 = unique(arr, sorted=False) + # the result varies depending on the impl of std::unordered_set, + # so we check them by sorting + assert_array_equal(sorted(a1.tolist()), unq_sorted) + + def test_unique_vstring_hash_based_equal_nan(self): + # test for unicode and nullable string arrays (equal_nan=True) + a = np.array([ + # short strings + 'straße', + None, + 'strasse', + 'straße', + None, + 'niño', + 'nino', + 'élève', + 'eleve', + 'niño', + 'élève', + # medium strings + 'b' * 20, + 'ß' * 30, + None, + 'é' * 30, + 'e' * 20, + 'ß' * 30, + 'n' * 30, + 'ñ' * 20, + None, + 'e' * 20, + 'ñ' * 20, + # long strings + 'b' * 300, + 'ß' * 400, + None, + 'é' * 400, + 'e' * 300, + 'ß' * 400, + 'n' * 400, + 'ñ' * 300, + None, + 'e' * 300, + 'ñ' * 300, + ], + dtype=StringDType(na_object=None) + ) + unq_sorted_wo_none = [ + 'b' * 20, + 'b' * 300, + 'e' * 20, + 'e' * 300, + 'eleve', + 'nino', + 'niño', + 'n' * 30, + 'n' * 400, + 'strasse', + 'straße', + 'ß' * 30, + 'ß' * 400, + 'élève', + 'é' * 30, + 'é' * 400, + 'ñ' * 20, + 'ñ' * 300, + ] + + a1 = unique(a, sorted=False, equal_nan=True) + # the result varies depending on the impl of std::unordered_set, + # so we check them by sorting + + # a1 should have exactly one None + count_none = sum(x is None for x in a1) + assert_equal(count_none, 1) + + a1_wo_none = sorted(x for x in a1 if x is not None) + assert_array_equal(a1_wo_none, unq_sorted_wo_none) + + def test_unique_vstring_hash_based_not_equal_nan(self): + # test for unicode and nullable string arrays (equal_nan=False) + a = np.array([ + # short strings + 'straße', + None, + 'strasse', + 'straße', + None, + 'niño', + 'nino', + 'élève', + 'eleve', + 'niño', + 'élève', + # medium strings + 'b' * 20, + 'ß' * 30, + None, + 'é' * 30, + 'e' * 20, + 'ß' * 30, + 'n' * 30, + 'ñ' * 20, + None, + 'e' * 20, + 'ñ' * 20, + # long strings + 'b' * 300, + 'ß' * 400, + None, + 'é' * 400, + 'e' * 300, + 'ß' * 400, + 'n' * 400, + 'ñ' * 300, + None, + 'e' * 300, + 'ñ' * 300, + ], + 
dtype=StringDType(na_object=None)
+        )
+        unq_sorted_wo_none = [
+            'b' * 20,
+            'b' * 300,
+            'e' * 20,
+            'e' * 300,
+            'eleve',
+            'nino',
+            'niño',
+            'n' * 30,
+            'n' * 400,
+            'strasse',
+            'straße',
+            'ß' * 30,
+            'ß' * 400,
+            'élève',
+            'é' * 30,
+            'é' * 400,
+            'ñ' * 20,
+            'ñ' * 300,
+        ]
+
+        a1 = unique(a, sorted=False, equal_nan=False)
+        # the result varies depending on the impl of std::unordered_set,
+        # so we check them by sorting
+
+        # a1 should have exactly six Nones
+        count_none = sum(x is None for x in a1)
+        assert_equal(count_none, 6)
+
+        a1_wo_none = sorted(x for x in a1 if x is not None)
+        assert_array_equal(a1_wo_none, unq_sorted_wo_none)
+
+    def test_unique_vstring_errors(self):
+        a = np.array(
+            [
+                'apple', 'banana', 'apple', None, 'cherry',
+                'date', 'banana', 'fig', None, 'grape',
+            ] * 2,
+            dtype=StringDType(na_object=None)
+        )
+        assert_raises(ValueError, unique, a, equal_nan=False)
+
     @pytest.mark.parametrize("arg",
                              ["return_index", "return_inverse", "return_counts"])
     def test_unsupported_hash_based(self, arg):
         """These currently never use the hash-based solution. However,

From 7c91551b85650c5465d21956acf66a47d9d0d02b Mon Sep 17 00:00:00 2001
From: specsy
Date: Fri, 20 Jun 2025 20:29:10 +0530
Subject: [PATCH 0143/1018] DOC: Update CONTRIBUTING.rst (#28158)

* Update CONTRIBUTING.rst

fixes #19778
Updating the contributing section so that contributors avoid making mistakes while asking questions, and instead focus on contributing and working on the project right away.

* Update CONTRIBUTING.rst

Shortened the sentence.

* Update CONTRIBUTING.rst

---
 CONTRIBUTING.rst | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 6e019983a0a2..0919790c65d1 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -7,8 +7,9 @@ Whether you're new to open source or experienced, your contributions
 help us grow.
 
 Pull requests (PRs) are always welcome, but making a PR is just the
-start. Please respond to comments and requests for changes to help
-move the process forward. Please follow our
+start. Please respond to comments and requests for changes to help move the process forward.
+Skip asking for an issue to be assigned to you on GitHub—send in your PR, explain what you did and ask for a review. It makes collaboration and support much easier.
+Please follow our
 `Code of Conduct `__, which applies
 to all interactions, including issues and PRs.

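
Read together, the tests above pin down the user-visible contract for nullable string arrays: with ``equal_nan=False`` every NA sentinel stays distinct, which also means the unsorted code path has to be requested explicitly. A minimal sketch, assuming a build with this patch series applied (``sorted=False`` avoids the ``ValueError`` that ``test_unique_vstring_errors`` checks for):

>>> import numpy as np
>>> from numpy.dtypes import StringDType
>>> a = np.array(["apple", None, "apple", None],
...              dtype=StringDType(na_object=None))
>>> vals = np.unique(a, sorted=False, equal_nan=False)
>>> sum(x is None for x in vals)  # both NA entries survive
2
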
From e3e875ab478a855d2c4198df652f1dc34fc09f17 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Fri, 20 Jun 2025 21:28:38 +0200 Subject: [PATCH 0144/1018] MAIN: Enable linting with E501 --- numpy/_core/tests/test_api.py | 9 ++++--- numpy/_core/tests/test_shape_base.py | 9 ++++--- numpy/lib/tests/test_function_base.py | 36 ++++++++++++++++----------- ruff.toml | 4 +-- 4 files changed, 33 insertions(+), 25 deletions(-) diff --git a/numpy/_core/tests/test_api.py b/numpy/_core/tests/test_api.py index bb21d79c472d..da4a8f423bc5 100644 --- a/numpy/_core/tests/test_api.py +++ b/numpy/_core/tests/test_api.py @@ -591,11 +591,12 @@ def check_contig(a, ccontig, fcontig): def test_broadcast_arrays(): # Test user defined dtypes - a = np.array([(1, 2, 3)], dtype='u4,u4,u4') - b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4') + dtype = 'u4,u4,u4' + a = np.array([(1, 2, 3)], dtype=dtype) + b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype=dtype) result = np.broadcast_arrays(a, b) - assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4')) - assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')) + assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype=dtype)) + assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype=dtype)) @pytest.mark.parametrize(["shape", "fill_value", "expected_output"], [((2, 2), [5.0, 6.0], np.array([[5.0, 6.0], [5.0, 6.0]])), diff --git a/numpy/_core/tests/test_shape_base.py b/numpy/_core/tests/test_shape_base.py index 8de24278fc5d..9a6febaf51f0 100644 --- a/numpy/_core/tests/test_shape_base.py +++ b/numpy/_core/tests/test_shape_base.py @@ -294,13 +294,15 @@ def test_exceptions(self): assert_raises(ValueError, concatenate, ()) @pytest.mark.slow - @pytest.mark.skipif(sys.maxsize < 2**32, reason="only problematic on 64bit platforms") + @pytest.mark.skipif(sys.maxsize < 2**32, + reason="only problematic on 64bit platforms") @requires_memory(2 * np.iinfo(np.intc).max) def test_huge_list_error(self): a = np.array([1]) max_int = np.iinfo(np.intc).max arrs = (a,) * (max_int + 1) - msg = fr"concatenate\(\) only supports up to {max_int} arrays but got {max_int + 1}." 
+ msg = (fr"concatenate\(\) only supports up to {max_int} arrays" + f" but got {max_int + 1}.") with pytest.raises(ValueError, match=msg): np.concatenate(arrs) @@ -379,7 +381,8 @@ def test_concatenate(self): assert_(out is rout) assert_equal(res, rout) - @pytest.mark.skipif(IS_PYPY, reason="PYPY handles sq_concat, nb_add differently than cpython") + @pytest.mark.skipif(IS_PYPY, + reason="PYPY handles sq_concat, nb_add differently than cpython") def test_operator_concat(self): import operator a = array([1, 2]) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index f2dba193c849..8f79e3d88b8c 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -65,6 +65,7 @@ suppress_warnings, ) +np_floats = [np.half, np.single, np.double, np.longdouble] def get_mat(n): data = np.arange(n) @@ -309,7 +310,7 @@ def test_basic(self): def test_order(self): # It turns out that people rely on np.copy() preserving order by # default; changing this broke scikit-learn: - # github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783 + # github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783 # noqa: E501 a = np.array([[1, 2], [3, 4]]) assert_(a.flags.c_contiguous) assert_(not a.flags.f_contiguous) @@ -2520,7 +2521,7 @@ def test_extreme(self): assert_array_almost_equal(c, np.array([[1., -1.], [-1., 1.]])) assert_(np.all(np.abs(c) <= 1.0)) - @pytest.mark.parametrize("test_type", [np.half, np.single, np.double, np.longdouble]) + @pytest.mark.parametrize("test_type", np_floats) def test_corrcoef_dtype(self, test_type): cast_A = self.A.astype(test_type) res = corrcoef(cast_A, dtype=test_type) @@ -2626,7 +2627,7 @@ def test_unit_fweights_and_aweights(self): aweights=self.unit_weights), self.res1) - @pytest.mark.parametrize("test_type", [np.half, np.single, np.double, np.longdouble]) + @pytest.mark.parametrize("test_type", np_floats) def test_cov_dtype(self, test_type): cast_x1 = self.x1.astype(test_type) res = cov(cast_x1, dtype=test_type) @@ -2648,7 +2649,8 @@ def test_simple(self): # need at least one test above 8, as the implementation is piecewise A = np.array([0.49842636, 0.6969809, 0.22011976, 0.0155549, 10.0]) - expected = np.array([1.06307822, 1.12518299, 1.01214991, 1.00006049, 2815.71662847]) + expected = np.array([1.06307822, 1.12518299, 1.01214991, + 1.00006049, 2815.71662847]) assert_almost_equal(i0(A), expected) assert_almost_equal(i0(-A), expected) @@ -3141,23 +3143,27 @@ def test_non_finite_any_nan(self, sc): def test_non_finite_inf(self, sc): """ Test that interp between opposite infs gives nan """ - assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 0, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, 1], sc([-np.inf, +np.inf])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, 1], sc([+np.inf, -np.inf])), sc(np.nan)) + inf = np.inf + nan = np.nan + assert_equal(np.interp(0.5, [-inf, +inf], sc([ 0, 10])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([-inf, +inf])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([+inf, -inf])), sc(nan)) # unless the y values are equal assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 10, 10])), sc(10)) def test_non_finite_half_inf_xf(self, sc): """ Test that interp where both axes have a bound at inf gives nan """ - assert_equal(np.interp(0.5, [-np.inf, 1], sc([-np.inf, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [-np.inf, 1], sc([+np.inf, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, 
-np.inf])), sc(np.nan)) - assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, +np.inf])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, +np.inf], sc([-np.inf, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, +np.inf], sc([+np.inf, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, -np.inf])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, +np.inf])), sc(np.nan)) + inf = np.inf + nan = np.nan + assert_equal(np.interp(0.5, [-inf, 1], sc([-inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([+inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([ 0, -inf])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([ 0, +inf])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([-inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([+inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([ 0, -inf])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([ 0, +inf])), sc(nan)) def test_non_finite_half_inf_x(self, sc): """ Test interp where the x axis has a bound at inf """ diff --git a/ruff.toml b/ruff.toml index 7454c6c05e5b..70bef8fb095d 100644 --- a/ruff.toml +++ b/ruff.toml @@ -73,7 +73,6 @@ ignore = [ "test*.py" = ["B015", "B018", "E201", "E714"] "benchmarks/benchmarks/bench_linalg.py" = ["E501"] -"numpy/_core/tests/test_api.py" = ["E501"] "numpy/_core/tests/test_arrayprint.py" = ["E501"] "numpy/_core/tests/test_cpu_dispatcher.py" = ["E501"] "numpy/_core/tests/test_cpu_features.py" = ["E501"] @@ -88,13 +87,12 @@ ignore = [ "numpy/_core/tests/test_umath*py" = ["E501"] "numpy/_core/tests/test_numeric*.py" = ["E501"] "numpy/_core/tests/test_regression.py" = ["E501"] -"numpy/_core/tests/test_shape_base.py" = ["E501"] "numpy/_core/tests/test_simd*.py" = ["E501"] "numpy/_core/tests/test_strings.py" = ["E501"] "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] "numpy/_core/code_generators/generate_umath.py" = ["E501"] -"numpy/lib/tests/test_function_base.py" = ["E501"] +#"numpy/lib/tests/test_function_base.py" = ["E501"] "numpy/lib/tests/test_format.py" = ["E501"] "numpy/lib/tests/test_io.py" = ["E501"] "numpy/lib/tests/test_polynomial.py" = ["E501"] From dea4451c945f878f20402ebf2bc8261b9c000e59 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Fri, 20 Jun 2025 21:30:38 +0200 Subject: [PATCH 0145/1018] MAIN: Enable linting with E501 --- numpy/lib/tests/test_io.py | 9 ++++++--- ruff.toml | 3 +-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 79fca0dd690b..48f579d7ddc7 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -645,7 +645,8 @@ def check_large_zip(memoryerror_raised): raise MemoryError("Child process raised a MemoryError exception") # -9 indicates a SIGKILL, probably an OOM. 
if p.exitcode == -9: - pytest.xfail("subprocess got a SIGKILL, apparently free memory was not sufficient") + msg = "subprocess got a SIGKILL, apparently free memory was not sufficient" + pytest.xfail(msg) assert p.exitcode == 0 class LoadTxtBase: @@ -1673,7 +1674,8 @@ def test_dtype_with_converters_and_usecols(self): conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]} test = recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', names=None, converters=conv, encoding="bytes") - control = np.rec.array([(1, 5, -1, 0), (2, 8, -1, 1), (3, 3, -2, 3)], dtype=dtyp) + control = np.rec.array([(1, 5, -1, 0), (2, 8, -1, 1), (3, 3, -2, 3)], + dtype=dtyp) assert_equal(test, control) dtyp = [('e1', 'i4'), ('e2', 'i4'), ('n', 'i1')] test = recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', @@ -1886,7 +1888,8 @@ def test_user_missing_values(self): # basekwargs['dtype'] = mdtype test = np.genfromtxt(TextIO(data), - missing_values={0: -9, 1: -99, 2: -999j}, usemask=True, **basekwargs) + missing_values={0: -9, 1: -99, 2: -999j}, + usemask=True, **basekwargs) control = ma.array([(0, 0.0, 0j), (1, -999, 1j), (-9, 2.2, -999j), (3, -99, 3j)], mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)], diff --git a/ruff.toml b/ruff.toml index 70bef8fb095d..75a2f54a6734 100644 --- a/ruff.toml +++ b/ruff.toml @@ -92,9 +92,8 @@ ignore = [ "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] "numpy/_core/code_generators/generate_umath.py" = ["E501"] -#"numpy/lib/tests/test_function_base.py" = ["E501"] "numpy/lib/tests/test_format.py" = ["E501"] -"numpy/lib/tests/test_io.py" = ["E501"] +#"numpy/lib/tests/test_io.py" = ["E501"] "numpy/lib/tests/test_polynomial.py" = ["E501"] "numpy/linalg/tests/test_linalg.py" = ["E501"] "numpy/tests/test_configtool.py" = ["E501"] From 09eb6b78d6fbe017702d66c08057a5304dd38c9f Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Fri, 20 Jun 2025 21:33:43 +0200 Subject: [PATCH 0146/1018] MAIN: Enable linting with E501 --- numpy/lib/tests/test_polynomial.py | 15 ++++++++++----- ruff.toml | 2 -- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py index c173ac321d74..65ad5c854639 100644 --- a/numpy/lib/tests/test_polynomial.py +++ b/numpy/lib/tests/test_polynomial.py @@ -61,7 +61,8 @@ def test_poly1d_math(self): assert_equal(p * q, np.poly1d([3., 8., 14., 8., 3.])) assert_equal(p + q, np.poly1d([4., 4., 4.])) assert_equal(p - q, np.poly1d([-2., 0., 2.])) - assert_equal(p ** 4, np.poly1d([1., 8., 36., 104., 214., 312., 324., 216., 81.])) + assert_equal(p ** 4, np.poly1d([1., 8., 36., 104., 214., + 312., 324., 216., 81.])) assert_equal(p(q), np.poly1d([9., 12., 16., 8., 6.])) assert_equal(q(p), np.poly1d([3., 12., 32., 40., 34.])) assert_equal(p.deriv(), np.poly1d([2., 2.])) @@ -131,12 +132,16 @@ def test_roots(self): for i in np.logspace(10, 25, num=1000, base=10): tgt = np.array([-1, 1, i]) res = np.sort(np.roots(poly.polyfromroots(tgt)[::-1])) - assert_almost_equal(res, tgt, 14 - int(np.log10(i))) # Adapting the expected precision according to the root value, to take into account numerical calculation error + # Adapting the expected precision according to the root value, + # to take into account numerical calculation error + assert_almost_equal(res, tgt, 14 - int(np.log10(i))) for i in np.logspace(10, 25, num=1000, base=10): tgt = np.array([-1, 1.01, i]) res = np.sort(np.roots(poly.polyfromroots(tgt)[::-1])) - assert_almost_equal(res, tgt, 14 - int(np.log10(i))) # 
Adapting the expected precision according to the root value, to take into account numerical calculation error
+            # Adapting the expected precision according to the root value,
+            # to take into account numerical calculation error
+            assert_almost_equal(res, tgt, 14 - int(np.log10(i)))
 
     def test_str_leading_zeros(self):
         p = np.poly1d([4, 3, 2, 1])
@@ -249,8 +254,8 @@ def test_complex(self):
     def test_integ_coeffs(self):
         p = np.poly1d([3, 2, 1])
         p2 = p.integ(3, k=[9, 7, 6])
-        assert_(
-            (p2.coeffs == [1 / 4. / 5., 1 / 3. / 4., 1 / 2. / 3., 9 / 1. / 2., 7, 6]).all())
+        expected = [1 / 4. / 5., 1 / 3. / 4., 1 / 2. / 3., 9 / 1. / 2., 7, 6]
+        assert_( (p2.coeffs == expected).all())
 
     def test_zero_dims(self):
         try:
diff --git a/ruff.toml b/ruff.toml
index 75a2f54a6734..a50900937073 100644
--- a/ruff.toml
+++ b/ruff.toml
@@ -93,8 +93,6 @@ ignore = [
 "numpy/_core/_add_newdocs_scalars.py" = ["E501"]
 "numpy/_core/code_generators/generate_umath.py" = ["E501"]
 "numpy/lib/tests/test_format.py" = ["E501"]
-#"numpy/lib/tests/test_io.py" = ["E501"]
-"numpy/lib/tests/test_polynomial.py" = ["E501"]
 "numpy/linalg/tests/test_linalg.py" = ["E501"]
 "numpy/tests/test_configtool.py" = ["E501"]
 "numpy/f2py/*py" = ["E501"]

From 1b3a0d9385aa1ded43885d966bb8733da839762f Mon Sep 17 00:00:00 2001
From: Yuki Kobayashi
Date: Sat, 21 Jun 2025 00:32:00 +0000
Subject: [PATCH 0147/1018] DOC: Fix some markup errors

---
 doc/source/f2py/buildtools/distutils-to-meson.rst | 2 +-
 doc/source/reference/random/multithreading.rst    | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/doc/source/f2py/buildtools/distutils-to-meson.rst b/doc/source/f2py/buildtools/distutils-to-meson.rst
index bf5da973e9fa..b24638e62239 100644
--- a/doc/source/f2py/buildtools/distutils-to-meson.rst
+++ b/doc/source/f2py/buildtools/distutils-to-meson.rst
@@ -117,7 +117,7 @@ sample is included below.
    +------------------------------------+-------------------------------+
    | LDFLAGS                            | Linker options                |
    +------------------------------------+-------------------------------+
-   | LD\ :sub:`LIBRARY`\ \ :sub:`PATH`\ | Library file locations (Unix) |
+   | LD\_LIBRARY\_PATH                  | Library file locations (Unix) |
    +------------------------------------+-------------------------------+
    | LIBS                               | Libraries to link against     |
    +------------------------------------+-------------------------------+
diff --git a/doc/source/reference/random/multithreading.rst b/doc/source/reference/random/multithreading.rst
index 17c6a515cdbc..73d2fc9ee5ad 100644
--- a/doc/source/reference/random/multithreading.rst
+++ b/doc/source/reference/random/multithreading.rst
@@ -9,7 +9,7 @@ well-behaved (writable and aligned). Under normal circumstances, arrays created
 using the common constructors such as :meth:`numpy.empty` will satisfy these
 requirements.
 
-This example makes use of:mod:`concurrent.futures` to fill an array using
+This example makes use of :mod:`concurrent.futures` to fill an array using
 multiple threads. Threads are long-lived so that repeated calls do not require
 any additional overheads from thread creation.

From 124ac8d41f2d913cbac1a05452cdae3d80d7e789 Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Sat, 21 Jun 2025 07:40:15 -0600
Subject: [PATCH 0148/1018] MAINT: Update main after 2.3.1 release.

- Add 2.3.1-notes.rst
- Add 2.3.1-changelog.rst
- Update release.rst

[skip cirrus] [skip azp] [skip actions]
---
 doc/changelog/2.3.1-changelog.rst  | 34 +++++++++++++++++++
 doc/source/release.rst             |  1 +
 doc/source/release/2.3.1-notes.rst | 53 ++++++++++++++++++++++++++++++
 3 files changed, 88 insertions(+)
 create mode 100644 doc/changelog/2.3.1-changelog.rst
 create mode 100644 doc/source/release/2.3.1-notes.rst

diff --git a/doc/changelog/2.3.1-changelog.rst b/doc/changelog/2.3.1-changelog.rst
new file mode 100644
index 000000000000..a1c840f8beda
--- /dev/null
+++ b/doc/changelog/2.3.1-changelog.rst
@@ -0,0 +1,34 @@
+
+Contributors
+============
+
+A total of 9 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Brad Smith +
+* Charles Harris
+* Developer-Ecosystem-Engineering
+* François Rozet
+* Joren Hammudoglu
+* Matti Picus
+* Mugundan Selvanayagam
+* Nathan Goldbaum
+* Sebastian Berg
+
+Pull requests merged
+====================
+
+A total of 12 pull requests were merged for this release.
+
+* `#29140 `__: MAINT: Prepare 2.3.x for further development
+* `#29191 `__: BUG: fix matmul with transposed out arg (#29179)
+* `#29192 `__: TYP: Backport typing fixes and improvements.
+* `#29205 `__: BUG: Revert ``np.vectorize`` casting to legacy behavior (#29196)
+* `#29222 `__: TYP: Backport typing fixes
+* `#29233 `__: BUG: avoid negating unsigned integers in resize implementation...
+* `#29234 `__: TST: Fix test that uses uninitialized memory (#29232)
+* `#29235 `__: BUG: Address interaction between SME and FPSR (#29223)
+* `#29237 `__: BUG: Enforce integer limitation in concatenate (#29231)
+* `#29238 `__: CI: Add support for building NumPy with LLVM for Win-ARM64
+* `#29241 `__: ENH: Detect CPU features on OpenBSD ARM and PowerPC64
+* `#29242 `__: ENH: Detect CPU features on FreeBSD / OpenBSD RISC-V64.
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 6c6a853b06f5..59e6dd07b002 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -6,6 +6,7 @@ Release notes
     :maxdepth: 2
 
     2.4.0
+    2.3.1
     2.3.0
     2.2.6
     2.2.5
diff --git a/doc/source/release/2.3.1-notes.rst b/doc/source/release/2.3.1-notes.rst
new file mode 100644
index 000000000000..d8193f07671c
--- /dev/null
+++ b/doc/source/release/2.3.1-notes.rst
@@ -0,0 +1,53 @@
+.. currentmodule:: numpy
+
+=========================
+NumPy 2.3.1 Release Notes
+=========================
+
+The NumPy 2.3.1 release is a patch release with several bug fixes, annotation
+improvements, and better support for OpenBSD. Highlights are:
+
+- Fix bug in ``matmul`` for non-contiguous out kwarg parameter
+- Fix for Accelerate runtime warnings on M4 hardware
+- Fix ``np.vectorize`` casting errors introduced in NumPy 2.3.0
+- Improved support of CPU features for FreeBSD and OpenBSD
+
+This release supports Python versions 3.11-3.13. Python 3.14 will be supported
+when it is released.
+
+
+Contributors
+============
+
+A total of 9 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Brad Smith +
+* Charles Harris
+* Developer-Ecosystem-Engineering
+* François Rozet
+* Joren Hammudoglu
+* Matti Picus
+* Mugundan Selvanayagam
+* Nathan Goldbaum
+* Sebastian Berg
+
+
+Pull requests merged
+====================
+
+A total of 12 pull requests were merged for this release.
+ +* `#29140 `__: MAINT: Prepare 2.3.x for further development +* `#29191 `__: BUG: fix matmul with transposed out arg (#29179) +* `#29192 `__: TYP: Backport typing fixes and improvements. +* `#29205 `__: BUG: Revert ``np.vectorize`` casting to legacy behavior (#29196) +* `#29222 `__: TYP: Backport typing fixes +* `#29233 `__: BUG: avoid negating unsigned integers in resize implementation... +* `#29234 `__: TST: Fix test that uses uninitialized memory (#29232) +* `#29235 `__: BUG: Address interaction between SME and FPSR (#29223) +* `#29237 `__: BUG: Enforce integer limitation in concatenate (#29231) +* `#29238 `__: CI: Add support for building NumPy with LLVM for Win-ARM64 +* `#29241 `__: ENH: Detect CPU features on OpenBSD ARM and PowerPC64 +* `#29242 `__: ENH: Detect CPU features on FreeBSD / OpenBSD RISC-V64. + From 1fefc5c6b767e86c2642641e7ba7a22ab0cf554b Mon Sep 17 00:00:00 2001 From: Mohammed Abdul Rahman <130785777+that-ar-guy@users.noreply.github.com> Date: Mon, 23 Jun 2025 23:26:14 +0530 Subject: [PATCH 0149/1018] DOC: Clarify dtype argument for __array__ in custom container guide (#29254) * DOC: Clarify dtype argument for __array__ in custom container guide --- doc/source/user/basics.dispatch.rst | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/doc/source/user/basics.dispatch.rst b/doc/source/user/basics.dispatch.rst index ae53995a3917..117d60f85467 100644 --- a/doc/source/user/basics.dispatch.rst +++ b/doc/source/user/basics.dispatch.rst @@ -46,6 +46,21 @@ array([[1., 0., 0., 0., 0.], [0., 0., 0., 1., 0.], [0., 0., 0., 0., 1.]]) +The ``__array__`` method can optionally accept a `dtype` argument. If provided, +this argument specifies the desired data type for the resulting NumPy array. +Your implementation should attempt to convert the data to this `dtype` +if possible. If the conversion is not supported, it's generally best +to fall back to a default type or raise a `TypeError` or `ValueError`. + +Here's an example demonstrating its use with `dtype` specification: + +>>> np.asarray(arr, dtype=np.float32) +array([[1., 0., 0., 0., 0.], + [0., 1., 0., 0., 0.], + [0., 0., 1., 0., 0.], + [0., 0., 0., 1., 0.], + [0., 0., 0., 0., 1.]], dtype=float32) + If we operate on ``arr`` with a numpy function, numpy will again use the ``__array__`` interface to convert it to an array and then apply the function in the usual way. 
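
To make the new paragraph concrete, here is a minimal, hypothetical container in the spirit of the guide's ``DiagonalArray`` (only ``__array__`` is sketched; the guide's full class also implements the dispatch protocols). It honors the ``dtype`` request and accepts the ``copy`` argument that NumPy 2.x may additionally pass; since a fresh array is returned on every call, any requested copy semantics are satisfied:

>>> import numpy as np
>>> class DiagonalArray:
...     def __init__(self, n, value):
...         self._n = n
...         self._i = value
...     def __array__(self, dtype=None, copy=None):
...         # np.eye performs the dtype conversion for us;
...         # dtype=None falls back to the float64 default.
...         return self._i * np.eye(self._n, dtype=dtype)
...
>>> np.asarray(DiagonalArray(3, 2), dtype=np.float32)
array([[2., 0., 0.],
       [0., 2., 0.],
       [0., 0., 2.]], dtype=float32)
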
From 32b43214b4622c336258899744a9d5692c30c689 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Mon, 23 Jun 2025 22:23:50 +0200 Subject: [PATCH 0150/1018] Apply suggestions from code review Co-authored-by: Joren Hammudoglu --- numpy/_core/tests/test_shape_base.py | 12 ++++++++---- numpy/lib/tests/test_function_base.py | 24 ++++++++++++------------ numpy/lib/tests/test_polynomial.py | 4 ++-- 3 files changed, 22 insertions(+), 18 deletions(-) diff --git a/numpy/_core/tests/test_shape_base.py b/numpy/_core/tests/test_shape_base.py index 9a6febaf51f0..1b9728e5c006 100644 --- a/numpy/_core/tests/test_shape_base.py +++ b/numpy/_core/tests/test_shape_base.py @@ -294,8 +294,10 @@ def test_exceptions(self): assert_raises(ValueError, concatenate, ()) @pytest.mark.slow - @pytest.mark.skipif(sys.maxsize < 2**32, - reason="only problematic on 64bit platforms") + @pytest.mark.skipif( + sys.maxsize < 2**32, + reason="only problematic on 64bit platforms" + ) @requires_memory(2 * np.iinfo(np.intc).max) def test_huge_list_error(self): a = np.array([1]) @@ -381,8 +383,10 @@ def test_concatenate(self): assert_(out is rout) assert_equal(res, rout) - @pytest.mark.skipif(IS_PYPY, - reason="PYPY handles sq_concat, nb_add differently than cpython") + @pytest.mark.skipif( + IS_PYPY, + reason="PYPY handles sq_concat, nb_add differently than cpython" + ) def test_operator_concat(self): import operator a = array([1, 2]) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 8f79e3d88b8c..eccf4bcfb019 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -310,7 +310,7 @@ def test_basic(self): def test_order(self): # It turns out that people rely on np.copy() preserving order by # default; changing this broke scikit-learn: - # github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783 # noqa: E501 + # github.com/scikit-learn/scikit-learn/commit/7842748 a = np.array([[1, 2], [3, 4]]) assert_(a.flags.c_contiguous) assert_(not a.flags.f_contiguous) @@ -3145,9 +3145,9 @@ def test_non_finite_inf(self, sc): """ Test that interp between opposite infs gives nan """ inf = np.inf nan = np.nan - assert_equal(np.interp(0.5, [-inf, +inf], sc([ 0, 10])), sc(nan)) - assert_equal(np.interp(0.5, [ 0, 1], sc([-inf, +inf])), sc(nan)) - assert_equal(np.interp(0.5, [ 0, 1], sc([+inf, -inf])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, +inf], sc([ 0, 10])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([-inf, +inf])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([+inf, -inf])), sc(nan)) # unless the y values are equal assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 10, 10])), sc(10)) @@ -3156,14 +3156,14 @@ def test_non_finite_half_inf_xf(self, sc): """ Test that interp where both axes have a bound at inf gives nan """ inf = np.inf nan = np.nan - assert_equal(np.interp(0.5, [-inf, 1], sc([-inf, 10])), sc(nan)) - assert_equal(np.interp(0.5, [-inf, 1], sc([+inf, 10])), sc(nan)) - assert_equal(np.interp(0.5, [-inf, 1], sc([ 0, -inf])), sc(nan)) - assert_equal(np.interp(0.5, [-inf, 1], sc([ 0, +inf])), sc(nan)) - assert_equal(np.interp(0.5, [ 0, +inf], sc([-inf, 10])), sc(nan)) - assert_equal(np.interp(0.5, [ 0, +inf], sc([+inf, 10])), sc(nan)) - assert_equal(np.interp(0.5, [ 0, +inf], sc([ 0, -inf])), sc(nan)) - assert_equal(np.interp(0.5, [ 0, +inf], sc([ 0, +inf])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([-inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([+inf, 10])), sc(nan)) + 
assert_equal(np.interp(0.5, [-inf, 1], sc([ 0, -inf])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([ 0, +inf])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([-inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([+inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([ 0, -inf])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([ 0, +inf])), sc(nan)) def test_non_finite_half_inf_x(self, sc): """ Test interp where the x axis has a bound at inf """ diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py index 65ad5c854639..32547f8e6c18 100644 --- a/numpy/lib/tests/test_polynomial.py +++ b/numpy/lib/tests/test_polynomial.py @@ -254,8 +254,8 @@ def test_complex(self): def test_integ_coeffs(self): p = np.poly1d([3, 2, 1]) p2 = p.integ(3, k=[9, 7, 6]) - expected = [1 / 4. / 5., 1 / 3. / 4., 1 / 2. / 3., 9 / 1. / 2., 7, 6] - assert_( (p2.coeffs == expected).all()) + expected = [1 / 4 / 5, 1 / 3 / 4, 1 / 2 / 3, 9 / 1 / 2, 7, 6] + assert_((p2.coeffs == expected).all()) def test_zero_dims(self): try: From 2d6a4d76062e20e6220401a1609345d433bd6c8b Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 25 Jun 2025 14:35:57 +0100 Subject: [PATCH 0151/1018] TYP: Type ``MaskedArray.__{mul,rmul}__`` (#29265) --- numpy/__init__.pyi | 2 + numpy/ma/core.pyi | 86 +++++++++++++++++++++++- numpy/typing/tests/data/reveal/ma.pyi | 97 +++++++++++++++++++++++++++ 3 files changed, 183 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 2e9dea06ce6a..9fae513bdf39 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3007,6 +3007,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rsub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__mul__` @overload def __mul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload @@ -3048,6 +3049,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __mul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__rmul__` @overload # signature equivalent to __mul__ def __rmul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index de6db7873faa..6c0de39594dd 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -658,8 +658,90 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __rsub__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - def __mul__(self, other): ... - def __rmul__(self, other): ... + # Keep in sync with `ndarray.__mul__` + @overload + def __mul__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __mul__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... 
+ @overload + def __mul__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __mul__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __mul__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __mul__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __mul__(self: _MaskedArray[timedelta64], other: _ArrayLikeFloat_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __mul__(self: _MaskedArrayFloat_co, other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... + @overload + def __mul__( + self: MaskedArray[Any, dtype[character] | dtypes.StringDType], + other: _ArrayLikeInt, + /, + ) -> MaskedArray[tuple[Any, ...], _DTypeT_co]: ... + @overload + def __mul__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __mul__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rmul__` + @overload # signature equivalent to __mul__ + def __rmul__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rmul__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rmul__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rmul__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __rmul__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __rmul__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... 
# type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __rmul__(self: _MaskedArray[timedelta64], other: _ArrayLikeFloat_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __rmul__(self: _MaskedArrayFloat_co, other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... + @overload + def __rmul__( + self: MaskedArray[Any, dtype[character] | dtypes.StringDType], + other: _ArrayLikeInt, + /, + ) -> MaskedArray[tuple[Any, ...], _DTypeT_co]: ... + @overload + def __rmul__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rmul__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __truediv__(self, other): ... def __rtruediv__(self, other): ... def __floordiv__(self, other): ... diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 97f833b6a488..60ee04218489 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -623,3 +623,100 @@ assert_type(AR_LIKE_c - MAR_o, Any) assert_type(AR_LIKE_td64 - MAR_o, Any) assert_type(AR_LIKE_dt64 - MAR_o, Any) assert_type(AR_LIKE_o - MAR_o, Any) + +# Masked Array multiplication + +assert_type(MAR_b * AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b * AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b * AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b * AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b * AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_b * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_u * MAR_b, MaskedArray[np.uint32]) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_b, MaskedArray[np.signedinteger]) # type: ignore[assert-type] +assert_type(AR_LIKE_f * MAR_b, MaskedArray[np.floating]) # type: ignore[assert-type] +assert_type(AR_LIKE_c * MAR_b, MaskedArray[np.complexfloating]) # type: ignore[assert-type] +assert_type(AR_LIKE_td64 * MAR_b, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_b, Any) # type: ignore[assert-type] + +assert_type(MAR_u4 * AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 * AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 * AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 * AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 * AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 * AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_u4 * AR_LIKE_o, Any) + +assert_type(MAR_i8 * AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 * AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 * AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 * AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 * AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 * AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_i8 * AR_LIKE_o, Any) + +assert_type(MAR_f8 * AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 * AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 * AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 * AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 * AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_b * MAR_f8, MaskedArray[np.float64]) # type: ignore[assert-type] +assert_type(AR_LIKE_u * MAR_f8, 
MaskedArray[np.float64]) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_f8, MaskedArray[np.float64]) # type: ignore[assert-type] +assert_type(AR_LIKE_f * MAR_f8, MaskedArray[np.float64]) # type: ignore[assert-type] +assert_type(AR_LIKE_c * MAR_f8, MaskedArray[np.complexfloating]) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_f8, Any) # type: ignore[assert-type] + +assert_type(MAR_c16 * AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_b * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_u * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_f * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_c * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_c16, Any) # type: ignore[assert-type] + +assert_type(MAR_td64 * AR_LIKE_b, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 * AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 * AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_b * MAR_td64, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_u * MAR_td64, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_td64, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_td64 * MAR_td64, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_dt64 * MAR_td64, MaskedArray[np.datetime64]) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_td64, Any) # type: ignore[assert-type] + +assert_type(AR_LIKE_o * MAR_dt64, Any) # type: ignore[assert-type] + +assert_type(MAR_o * AR_LIKE_b, Any) +assert_type(MAR_o * AR_LIKE_u, Any) +assert_type(MAR_o * AR_LIKE_i, Any) +assert_type(MAR_o * AR_LIKE_f, Any) +assert_type(MAR_o * AR_LIKE_c, Any) +assert_type(MAR_o * AR_LIKE_td64, Any) +assert_type(MAR_o * AR_LIKE_dt64, Any) +assert_type(MAR_o * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_b * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_u * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_f * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_c * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_td64 * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_dt64 * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_o, Any) # type: ignore[assert-type] From f8281b92e527564c390f834e3d467651026b27b9 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 25 Jun 2025 11:39:05 -0600 Subject: [PATCH 0152/1018] BUG: fix fencepost error in StringDType internals (#29269) This makes all the comparisons with NPY_MEDIUM_STRING_MAX_SIZE use <= consistently: --- .../src/multiarray/stringdtype/static_string.c | 2 +- numpy/_core/tests/test_stringdtype.py | 18 ++++++++++++++++++ 2 files changed, 19 
insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/stringdtype/static_string.c b/numpy/_core/src/multiarray/stringdtype/static_string.c index 1c29bbb67f7e..89b53bcb8538 100644 --- a/numpy/_core/src/multiarray/stringdtype/static_string.c +++ b/numpy/_core/src/multiarray/stringdtype/static_string.c @@ -478,7 +478,7 @@ heap_or_arena_allocate(npy_string_allocator *allocator, if (*flags == 0) { // string isn't previously allocated, so add to existing arena allocation char *ret = arena_malloc(arena, allocator->realloc, sizeof(char) * size); - if (size < NPY_MEDIUM_STRING_MAX_SIZE) { + if (size <= NPY_MEDIUM_STRING_MAX_SIZE) { *flags = NPY_STRING_INITIALIZED; } else { diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 9bab810d4421..1c2b606dedf3 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -1193,6 +1193,24 @@ def test_growing_strings(dtype): assert_array_equal(arr, uarr) +def test_assign_medium_strings(): + # see gh-29261 + N = 9 + src = np.array( + ( + ['0' * 256] * 3 + ['0' * 255] + ['0' * 256] + ['0' * 255] + + ['0' * 256] * 2 + ['0' * 255] + ), dtype='T') + dst = np.array( + ( + ['0' * 255] + ['0' * 256] * 2 + ['0' * 255] + ['0' * 256] + + ['0' * 255] + [''] * 5 + ), dtype='T') + + dst[1:N + 1] = src + assert_array_equal(dst[1:N + 1], src) + + UFUNC_TEST_DATA = [ "hello" * 10, "Ae¢☃€ 😊" * 20, From b89320a514ed121f8b21959307fc778165e53323 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 25 Jun 2025 20:03:33 +0200 Subject: [PATCH 0153/1018] STY: ruff/isort config tweaks - episode 2 (#29185) * DEV: enable ruff/isort ``combine-as-imports`` and ``split-on-trailing-comma`` * STY: run ``ruff check --fix`` to fix the new ``I001`` errors --- numpy/__config__.pyi | 10 +- numpy/__init__.py | 3 +- numpy/_core/_add_newdocs_scalars.py | 3 +- numpy/_core/_asarray.py | 6 +- numpy/_core/_dtype.pyi | 3 +- numpy/_core/_methods.py | 4 +- numpy/_core/_type_aliases.pyi | 3 +- numpy/_core/defchararray.py | 14 +- numpy/_core/defchararray.pyi | 31 ++- numpy/_core/fromnumeric.py | 5 +- numpy/_core/function_base.pyi | 3 +- numpy/_core/getlimits.py | 3 +- numpy/_core/multiarray.pyi | 8 +- numpy/_core/numeric.py | 3 +- numpy/_core/numeric.pyi | 20 +- numpy/_core/numerictypes.pyi | 3 +- numpy/_core/records.py | 3 +- numpy/_core/shape_base.py | 4 +- numpy/_core/strings.py | 28 +- numpy/_core/strings.pyi | 16 +- numpy/_core/tests/test_argparse.py | 2 - numpy/_core/tests/test_custom_dtypes.py | 2 +- numpy/_core/tests/test_indexerrors.py | 5 +- numpy/_core/tests/test_numeric.py | 3 +- numpy/_core/tests/test_scalar_ctors.py | 6 +- numpy/_core/tests/test_stringdtype.py | 3 +- numpy/_core/tests/test_umath.py | 3 +- numpy/_typing/__init__.py | 262 +++++++++--------- numpy/_typing/_callable.pyi | 6 +- numpy/_typing/_dtype_like.py | 9 +- numpy/_utils/__init__.pyi | 3 +- numpy/_utils/_pep440.pyi | 4 +- numpy/ctypeslib/__init__.pyi | 18 -- numpy/ctypeslib/_ctypeslib.pyi | 10 +- numpy/dtypes.pyi | 2 +- numpy/f2py/__init__.pyi | 3 +- numpy/f2py/_backends/_meson.pyi | 3 +- numpy/f2py/auxfuncs.pyi | 3 +- numpy/f2py/crackfortran.pyi | 13 +- numpy/f2py/f2py2e.pyi | 3 +- numpy/f2py/rules.pyi | 3 +- numpy/f2py/symbolic.pyi | 3 +- numpy/f2py/tests/test_kind.py | 2 - numpy/fft/__init__.pyi | 7 +- numpy/fft/_helper.pyi | 3 +- numpy/fft/_pocketfft.pyi | 3 +- numpy/fft/helper.pyi | 3 +- numpy/lib/__init__.pyi | 52 ++-- numpy/lib/_arraypad_impl.pyi | 11 +- numpy/lib/_arraysetops_impl.pyi | 11 +- 
numpy/lib/_function_base_impl.py | 11 +- numpy/lib/_function_base_impl.pyi | 2 +- numpy/lib/_histograms_impl.pyi | 14 +- numpy/lib/_index_tricks_impl.pyi | 13 +- numpy/lib/_nanfunctions_impl.pyi | 6 +- numpy/lib/_npyio_impl.pyi | 2 +- numpy/lib/_polynomial_impl.pyi | 4 +- numpy/lib/_twodim_base_impl.pyi | 10 +- numpy/lib/_type_check_impl.pyi | 3 +- numpy/lib/array_utils.pyi | 6 - numpy/lib/format.pyi | 42 --- numpy/lib/introspect.py | 3 +- numpy/lib/mixins.pyi | 3 +- numpy/lib/npyio.pyi | 4 - numpy/lib/scimath.pyi | 18 -- numpy/lib/stride_tricks.pyi | 2 - numpy/lib/tests/test__iotools.py | 7 +- numpy/linalg/__init__.pyi | 4 +- numpy/linalg/_linalg.py | 30 +- numpy/linalg/_linalg.pyi | 2 +- numpy/linalg/_umath_linalg.pyi | 3 +- numpy/ma/core.py | 2 +- numpy/ma/extras.py | 3 +- numpy/ma/tests/test_mrecords.py | 14 +- numpy/ma/tests/test_old_ma.py | 6 +- numpy/matrixlib/tests/test_matrix_linalg.py | 2 +- numpy/polynomial/chebyshev.pyi | 3 +- numpy/polynomial/hermite.pyi | 3 +- numpy/polynomial/hermite_e.pyi | 3 +- numpy/polynomial/laguerre.pyi | 3 +- numpy/polynomial/legendre.pyi | 3 +- numpy/polynomial/polynomial.pyi | 3 +- numpy/polynomial/polyutils.pyi | 9 +- numpy/polynomial/tests/test_chebyshev.py | 7 +- numpy/polynomial/tests/test_classes.py | 7 +- numpy/polynomial/tests/test_hermite.py | 7 +- numpy/polynomial/tests/test_hermite_e.py | 7 +- numpy/polynomial/tests/test_laguerre.py | 7 +- numpy/polynomial/tests/test_legendre.py | 7 +- numpy/polynomial/tests/test_polyutils.py | 7 +- .../tests/test_randomstate_regression.py | 6 +- numpy/random/tests/test_regression.py | 6 +- numpy/testing/_private/utils.pyi | 2 +- numpy/tests/test_reloading.py | 8 +- numpy/tests/test_scripts.py | 3 +- ruff.toml | 2 + 96 files changed, 352 insertions(+), 601 deletions(-) diff --git a/numpy/__config__.pyi b/numpy/__config__.pyi index b59bdcd252b6..21e8b01fdd96 100644 --- a/numpy/__config__.pyi +++ b/numpy/__config__.pyi @@ -1,7 +1,13 @@ from enum import Enum from types import ModuleType -from typing import Final, NotRequired, TypedDict, overload, type_check_only -from typing import Literal as L +from typing import ( + Final, + Literal as L, + NotRequired, + TypedDict, + overload, + type_check_only, +) _CompilerConfigDictValue = TypedDict( "_CompilerConfigDictValue", diff --git a/numpy/__init__.py b/numpy/__init__.py index aadc1fab3407..d6702bba4622 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -454,8 +454,7 @@ pass del ta - from . import lib - from . import matrixlib as _mat + from . import lib, matrixlib as _mat from .lib import scimath as emath from .lib._arraypad_impl import pad from .lib._arraysetops_impl import ( diff --git a/numpy/_core/_add_newdocs_scalars.py b/numpy/_core/_add_newdocs_scalars.py index 96170d80c7c9..9d4cb48825e0 100644 --- a/numpy/_core/_add_newdocs_scalars.py +++ b/numpy/_core/_add_newdocs_scalars.py @@ -6,8 +6,7 @@ import os import sys -from numpy._core import dtype -from numpy._core import numerictypes as _numerictypes +from numpy._core import dtype, numerictypes as _numerictypes from numpy._core.function_base import add_newdoc ############################################################################## diff --git a/numpy/_core/_asarray.py b/numpy/_core/_asarray.py index 613c5cf57060..edaff5222f69 100644 --- a/numpy/_core/_asarray.py +++ b/numpy/_core/_asarray.py @@ -4,11 +4,7 @@ `require` fits this category despite its name not matching this pattern. 
""" from .multiarray import array, asanyarray -from .overrides import ( - array_function_dispatch, - finalize_array_function_like, - set_module, -) +from .overrides import array_function_dispatch, finalize_array_function_like, set_module __all__ = ["require"] diff --git a/numpy/_core/_dtype.pyi b/numpy/_core/_dtype.pyi index adb38583783f..28adecf4ad2f 100644 --- a/numpy/_core/_dtype.pyi +++ b/numpy/_core/_dtype.pyi @@ -1,5 +1,4 @@ -from typing import Final, TypeAlias, TypedDict, overload, type_check_only -from typing import Literal as L +from typing import Final, Literal as L, TypeAlias, TypedDict, overload, type_check_only from typing_extensions import ReadOnly, TypeVar import numpy as np diff --git a/numpy/_core/_methods.py b/numpy/_core/_methods.py index 21ad7900016b..a3d5b02a2a14 100644 --- a/numpy/_core/_methods.py +++ b/numpy/_core/_methods.py @@ -9,9 +9,7 @@ from contextlib import nullcontext import numpy as np -from numpy._core import multiarray as mu -from numpy._core import numerictypes as nt -from numpy._core import umath as um +from numpy._core import multiarray as mu, numerictypes as nt, umath as um from numpy._core.multiarray import asanyarray from numpy._globals import _NoValue diff --git a/numpy/_core/_type_aliases.pyi b/numpy/_core/_type_aliases.pyi index 3c9dac7a1202..3d57e8135378 100644 --- a/numpy/_core/_type_aliases.pyi +++ b/numpy/_core/_type_aliases.pyi @@ -1,6 +1,5 @@ from collections.abc import Collection -from typing import Final, TypeAlias, TypedDict, type_check_only -from typing import Literal as L +from typing import Final, Literal as L, TypeAlias, TypedDict, type_check_only import numpy as np diff --git a/numpy/_core/defchararray.py b/numpy/_core/defchararray.py index bde8921f5504..0378e976254d 100644 --- a/numpy/_core/defchararray.py +++ b/numpy/_core/defchararray.py @@ -22,31 +22,19 @@ from numpy._core.multiarray import compare_chararrays from numpy._core.strings import ( _join as join, -) -from numpy._core.strings import ( _rsplit as rsplit, -) -from numpy._core.strings import ( _split as split, -) -from numpy._core.strings import ( _splitlines as splitlines, ) from numpy._utils import set_module from numpy.strings import * from numpy.strings import ( multiply as strings_multiply, -) -from numpy.strings import ( partition as strings_partition, -) -from numpy.strings import ( rpartition as strings_rpartition, ) -from .numeric import array as narray -from .numeric import asarray as asnarray -from .numeric import ndarray +from .numeric import array as narray, asarray as asnarray, ndarray from .numerictypes import bytes_, character, str_ __all__ = [ diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 26a5af432824..25754965c46a 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -1,5 +1,12 @@ -from typing import Any, Self, SupportsIndex, SupportsInt, TypeAlias, overload -from typing import Literal as L +from typing import ( + Any, + Literal as L, + Self, + SupportsIndex, + SupportsInt, + TypeAlias, + overload, +) from typing_extensions import TypeVar import numpy as np @@ -14,13 +21,19 @@ from numpy import ( str_, ) from numpy._core.multiarray import compare_chararrays -from numpy._typing import NDArray, _AnyShape, _Shape, _ShapeLike, _SupportsArray -from numpy._typing import _ArrayLikeAnyString_co as UST_co -from numpy._typing import _ArrayLikeBool_co as b_co -from numpy._typing import _ArrayLikeBytes_co as S_co -from numpy._typing import _ArrayLikeInt_co as i_co -from numpy._typing import _ArrayLikeStr_co 
as U_co -from numpy._typing import _ArrayLikeString_co as T_co +from numpy._typing import ( + NDArray, + _AnyShape, + _ArrayLikeAnyString_co as UST_co, + _ArrayLikeBool_co as b_co, + _ArrayLikeBytes_co as S_co, + _ArrayLikeInt_co as i_co, + _ArrayLikeStr_co as U_co, + _ArrayLikeString_co as T_co, + _Shape, + _ShapeLike, + _SupportsArray, +) __all__ = [ "equal", diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index e20d774d014d..34fe1798f45e 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -8,10 +8,7 @@ import numpy as np from numpy._utils import set_module -from . import _methods, overrides -from . import multiarray as mu -from . import numerictypes as nt -from . import umath as um +from . import _methods, multiarray as mu, numerictypes as nt, overrides, umath as um from ._multiarray_umath import _array_converter from .multiarray import asanyarray, asarray, concatenate diff --git a/numpy/_core/function_base.pyi b/numpy/_core/function_base.pyi index 600265b1fd0a..19e1238c4e15 100644 --- a/numpy/_core/function_base.pyi +++ b/numpy/_core/function_base.pyi @@ -1,6 +1,5 @@ from _typeshed import Incomplete -from typing import Literal as L -from typing import SupportsIndex, TypeAlias, TypeVar, overload +from typing import Literal as L, SupportsIndex, TypeAlias, TypeVar, overload import numpy as np from numpy._typing import ( diff --git a/numpy/_core/getlimits.py b/numpy/_core/getlimits.py index afa2ccebcfd2..a3d0086974b1 100644 --- a/numpy/_core/getlimits.py +++ b/numpy/_core/getlimits.py @@ -8,8 +8,7 @@ from numpy._utils import set_module -from . import numeric -from . import numerictypes as ntypes +from . import numeric, numerictypes as ntypes from ._machar import MachAr from .numeric import array, inf, nan from .umath import exp2, isnan, log10, nextafter diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 91ff666688bb..aa9e796da10b 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -6,6 +6,7 @@ from typing import ( Any, ClassVar, Final, + Literal as L, Protocol, SupportsIndex, TypeAlias, @@ -16,9 +17,6 @@ from typing import ( overload, type_check_only, ) -from typing import ( - Literal as L, -) from typing_extensions import CapsuleType import numpy as np @@ -41,6 +39,7 @@ from numpy import ( # type: ignore[attr-defined] count_nonzero, datetime64, dtype, + einsum as c_einsum, flatiter, float64, floating, @@ -61,9 +60,6 @@ from numpy import ( # type: ignore[attr-defined] unsignedinteger, vecdot, ) -from numpy import ( - einsum as c_einsum, -) from numpy._typing import ( ArrayLike, # DTypes diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index 964447fa0d8a..4886468f0864 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -10,8 +10,7 @@ import numpy as np from numpy.exceptions import AxisError -from . import multiarray, numerictypes, overrides, shape_base, umath -from . import numerictypes as nt +from . 
import multiarray, numerictypes, numerictypes as nt, overrides, shape_base, umath from ._ufunc_config import errstate from .multiarray import ( # noqa: F401 ALLOW_THREADS, diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index b54fa856b007..c73a2694df3c 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -3,6 +3,7 @@ from collections.abc import Callable, Sequence from typing import ( Any, Final, + Literal as L, Never, NoReturn, SupportsAbs, @@ -13,7 +14,6 @@ from typing import ( Unpack, overload, ) -from typing import Literal as L import numpy as np from numpy import ( @@ -116,15 +116,15 @@ from .fromnumeric import ( transpose, var, ) -from .multiarray import ALLOW_THREADS as ALLOW_THREADS -from .multiarray import BUFSIZE as BUFSIZE -from .multiarray import CLIP as CLIP -from .multiarray import MAXDIMS as MAXDIMS -from .multiarray import MAY_SHARE_BOUNDS as MAY_SHARE_BOUNDS -from .multiarray import MAY_SHARE_EXACT as MAY_SHARE_EXACT -from .multiarray import RAISE as RAISE -from .multiarray import WRAP as WRAP from .multiarray import ( + ALLOW_THREADS as ALLOW_THREADS, + BUFSIZE as BUFSIZE, + CLIP as CLIP, + MAXDIMS as MAXDIMS, + MAY_SHARE_BOUNDS as MAY_SHARE_BOUNDS, + MAY_SHARE_EXACT as MAY_SHARE_EXACT, + RAISE as RAISE, + WRAP as WRAP, _Array, _ConstructorEmpty, _KwargsEmpty, @@ -156,6 +156,7 @@ from .multiarray import ( ndarray, nditer, nested_iters, + normalize_axis_index as normalize_axis_index, promote_types, putmask, result_type, @@ -164,7 +165,6 @@ from .multiarray import ( where, zeros, ) -from .multiarray import normalize_axis_index as normalize_axis_index from .numerictypes import ( ScalarType, bool, diff --git a/numpy/_core/numerictypes.pyi b/numpy/_core/numerictypes.pyi index b649b8f91cd1..4f810da6904b 100644 --- a/numpy/_core/numerictypes.pyi +++ b/numpy/_core/numerictypes.pyi @@ -1,6 +1,5 @@ from builtins import bool as py_bool -from typing import Final, TypedDict, type_check_only -from typing import Literal as L +from typing import Final, Literal as L, TypedDict, type_check_only import numpy as np from numpy import ( diff --git a/numpy/_core/records.py b/numpy/_core/records.py index 39bcf4ba6294..9a6af16e3b23 100644 --- a/numpy/_core/records.py +++ b/numpy/_core/records.py @@ -8,8 +8,7 @@ from numpy._utils import set_module -from . import numeric as sb -from . import numerictypes as nt +from . import numeric as sb, numerictypes as nt from .arrayprint import _get_legacy_print_mode # All of the functions allow formats to be a dtype diff --git a/numpy/_core/shape_base.py b/numpy/_core/shape_base.py index c2a0f0dae789..39de8739db0e 100644 --- a/numpy/_core/shape_base.py +++ b/numpy/_core/shape_base.py @@ -5,9 +5,7 @@ import itertools import operator -from . import fromnumeric as _from_nx -from . import numeric as _nx -from . import overrides +from . 
import fromnumeric as _from_nx, numeric as _nx, overrides from .multiarray import array, asanyarray, normalize_axis_index array_function_dispatch = functools.partial( diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index b4dc1656024f..4d56f1e0c779 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -14,10 +14,8 @@ greater_equal, less, less_equal, - not_equal, -) -from numpy import ( multiply as _multiply_ufunc, + not_equal, ) from numpy._core.multiarray import _vec_string from numpy._core.overrides import array_function_dispatch, set_module @@ -40,6 +38,10 @@ _strip_chars, _strip_whitespace, _zfill, + count as _count_ufunc, + endswith as _endswith_ufunc, + find as _find_ufunc, + index as _index_ufunc, isalnum, isalpha, isdecimal, @@ -49,28 +51,10 @@ isspace, istitle, isupper, - str_len, -) -from numpy._core.umath import ( - count as _count_ufunc, -) -from numpy._core.umath import ( - endswith as _endswith_ufunc, -) -from numpy._core.umath import ( - find as _find_ufunc, -) -from numpy._core.umath import ( - index as _index_ufunc, -) -from numpy._core.umath import ( rfind as _rfind_ufunc, -) -from numpy._core.umath import ( rindex as _rindex_ufunc, -) -from numpy._core.umath import ( startswith as _startswith_ufunc, + str_len, ) diff --git a/numpy/_core/strings.pyi b/numpy/_core/strings.pyi index b187ce71d25c..1b8bfd84cc3d 100644 --- a/numpy/_core/strings.pyi +++ b/numpy/_core/strings.pyi @@ -1,12 +1,16 @@ from typing import TypeAlias, overload import numpy as np -from numpy._typing import NDArray, _AnyShape, _SupportsArray -from numpy._typing import _ArrayLikeAnyString_co as UST_co -from numpy._typing import _ArrayLikeBytes_co as S_co -from numpy._typing import _ArrayLikeInt_co as i_co -from numpy._typing import _ArrayLikeStr_co as U_co -from numpy._typing import _ArrayLikeString_co as T_co +from numpy._typing import ( + NDArray, + _AnyShape, + _ArrayLikeAnyString_co as UST_co, + _ArrayLikeBytes_co as S_co, + _ArrayLikeInt_co as i_co, + _ArrayLikeStr_co as U_co, + _ArrayLikeString_co as T_co, + _SupportsArray, +) __all__ = [ "add", diff --git a/numpy/_core/tests/test_argparse.py b/numpy/_core/tests/test_argparse.py index 0c49ec00277e..ededced3b9fe 100644 --- a/numpy/_core/tests/test_argparse.py +++ b/numpy/_core/tests/test_argparse.py @@ -18,8 +18,6 @@ def func(arg1, /, arg2, *, arg3): import numpy as np from numpy._core._multiarray_tests import ( argparse_example_function as func, -) -from numpy._core._multiarray_tests import ( threaded_argparse_example_function as thread_func, ) from numpy.testing import IS_WASM diff --git a/numpy/_core/tests/test_custom_dtypes.py b/numpy/_core/tests/test_custom_dtypes.py index 3336286d8c98..4d2082a949b7 100644 --- a/numpy/_core/tests/test_custom_dtypes.py +++ b/numpy/_core/tests/test_custom_dtypes.py @@ -5,8 +5,8 @@ import numpy as np from numpy._core._multiarray_umath import ( _discover_array_parameters as discover_array_params, + _get_sfloat_dtype, ) -from numpy._core._multiarray_umath import _get_sfloat_dtype from numpy.testing import assert_array_equal SF = _get_sfloat_dtype() diff --git a/numpy/_core/tests/test_indexerrors.py b/numpy/_core/tests/test_indexerrors.py index 02110c28356a..70e97dd6428e 100644 --- a/numpy/_core/tests/test_indexerrors.py +++ b/numpy/_core/tests/test_indexerrors.py @@ -1,8 +1,5 @@ import numpy as np -from numpy.testing import ( - assert_raises, - assert_raises_regex, -) +from numpy.testing import assert_raises, assert_raises_regex class TestIndexErrors: diff --git 
a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 65da65ddc9f9..4ead2fc7ec6f 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -6,8 +6,7 @@ from decimal import Decimal import pytest -from hypothesis import given -from hypothesis import strategies as st +from hypothesis import given, strategies as st from hypothesis.extra import numpy as hynp import numpy as np diff --git a/numpy/_core/tests/test_scalar_ctors.py b/numpy/_core/tests/test_scalar_ctors.py index be3ef0459c82..ea78cd9d9f51 100644 --- a/numpy/_core/tests/test_scalar_ctors.py +++ b/numpy/_core/tests/test_scalar_ctors.py @@ -4,11 +4,7 @@ import pytest import numpy as np -from numpy.testing import ( - assert_almost_equal, - assert_equal, - assert_warns, -) +from numpy.testing import assert_almost_equal, assert_equal, assert_warns class TestFromString: diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 1c2b606dedf3..8197822cd0e3 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -8,8 +8,7 @@ import pytest import numpy as np -from numpy._core.tests._natype import get_stringdtype_dtype as get_dtype -from numpy._core.tests._natype import pd_NA +from numpy._core.tests._natype import get_stringdtype_dtype as get_dtype, pd_NA from numpy.dtypes import StringDType from numpy.testing import IS_PYPY, assert_array_equal diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 001a7bffbcc8..c54b2ac86bc2 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -12,8 +12,7 @@ import numpy as np import numpy._core.umath as ncu -from numpy._core import _umath_tests as ncu_tests -from numpy._core import sctypes +from numpy._core import _umath_tests as ncu_tests, sctypes from numpy.testing import ( HAS_REFCOUNT, IS_MUSL, diff --git a/numpy/_typing/__init__.py b/numpy/_typing/__init__.py index 16a7eee66ebd..0044749e3dce 100644 --- a/numpy/_typing/__init__.py +++ b/numpy/_typing/__init__.py @@ -1,148 +1,158 @@ """Private counterpart of ``numpy.typing``.""" -from ._array_like import ArrayLike as ArrayLike -from ._array_like import NDArray as NDArray -from ._array_like import _ArrayLike as _ArrayLike -from ._array_like import _ArrayLikeAnyString_co as _ArrayLikeAnyString_co -from ._array_like import _ArrayLikeBool_co as _ArrayLikeBool_co -from ._array_like import _ArrayLikeBytes_co as _ArrayLikeBytes_co -from ._array_like import _ArrayLikeComplex128_co as _ArrayLikeComplex128_co -from ._array_like import _ArrayLikeComplex_co as _ArrayLikeComplex_co -from ._array_like import _ArrayLikeDT64_co as _ArrayLikeDT64_co -from ._array_like import _ArrayLikeFloat64_co as _ArrayLikeFloat64_co -from ._array_like import _ArrayLikeFloat_co as _ArrayLikeFloat_co -from ._array_like import _ArrayLikeInt as _ArrayLikeInt -from ._array_like import _ArrayLikeInt_co as _ArrayLikeInt_co -from ._array_like import _ArrayLikeNumber_co as _ArrayLikeNumber_co -from ._array_like import _ArrayLikeObject_co as _ArrayLikeObject_co -from ._array_like import _ArrayLikeStr_co as _ArrayLikeStr_co -from ._array_like import _ArrayLikeString_co as _ArrayLikeString_co -from ._array_like import _ArrayLikeTD64_co as _ArrayLikeTD64_co -from ._array_like import _ArrayLikeUInt_co as _ArrayLikeUInt_co -from ._array_like import _ArrayLikeVoid_co as _ArrayLikeVoid_co -from ._array_like import _FiniteNestedSequence as _FiniteNestedSequence -from ._array_like import _SupportsArray as 
_SupportsArray -from ._array_like import _SupportsArrayFunc as _SupportsArrayFunc +from ._array_like import ( + ArrayLike as ArrayLike, + NDArray as NDArray, + _ArrayLike as _ArrayLike, + _ArrayLikeAnyString_co as _ArrayLikeAnyString_co, + _ArrayLikeBool_co as _ArrayLikeBool_co, + _ArrayLikeBytes_co as _ArrayLikeBytes_co, + _ArrayLikeComplex128_co as _ArrayLikeComplex128_co, + _ArrayLikeComplex_co as _ArrayLikeComplex_co, + _ArrayLikeDT64_co as _ArrayLikeDT64_co, + _ArrayLikeFloat64_co as _ArrayLikeFloat64_co, + _ArrayLikeFloat_co as _ArrayLikeFloat_co, + _ArrayLikeInt as _ArrayLikeInt, + _ArrayLikeInt_co as _ArrayLikeInt_co, + _ArrayLikeNumber_co as _ArrayLikeNumber_co, + _ArrayLikeObject_co as _ArrayLikeObject_co, + _ArrayLikeStr_co as _ArrayLikeStr_co, + _ArrayLikeString_co as _ArrayLikeString_co, + _ArrayLikeTD64_co as _ArrayLikeTD64_co, + _ArrayLikeUInt_co as _ArrayLikeUInt_co, + _ArrayLikeVoid_co as _ArrayLikeVoid_co, + _FiniteNestedSequence as _FiniteNestedSequence, + _SupportsArray as _SupportsArray, + _SupportsArrayFunc as _SupportsArrayFunc, +) # -from ._char_codes import _BoolCodes as _BoolCodes -from ._char_codes import _ByteCodes as _ByteCodes -from ._char_codes import _BytesCodes as _BytesCodes -from ._char_codes import _CDoubleCodes as _CDoubleCodes -from ._char_codes import _CharacterCodes as _CharacterCodes -from ._char_codes import _CLongDoubleCodes as _CLongDoubleCodes -from ._char_codes import _Complex64Codes as _Complex64Codes -from ._char_codes import _Complex128Codes as _Complex128Codes -from ._char_codes import _ComplexFloatingCodes as _ComplexFloatingCodes -from ._char_codes import _CSingleCodes as _CSingleCodes -from ._char_codes import _DoubleCodes as _DoubleCodes -from ._char_codes import _DT64Codes as _DT64Codes -from ._char_codes import _FlexibleCodes as _FlexibleCodes -from ._char_codes import _Float16Codes as _Float16Codes -from ._char_codes import _Float32Codes as _Float32Codes -from ._char_codes import _Float64Codes as _Float64Codes -from ._char_codes import _FloatingCodes as _FloatingCodes -from ._char_codes import _GenericCodes as _GenericCodes -from ._char_codes import _HalfCodes as _HalfCodes -from ._char_codes import _InexactCodes as _InexactCodes -from ._char_codes import _Int8Codes as _Int8Codes -from ._char_codes import _Int16Codes as _Int16Codes -from ._char_codes import _Int32Codes as _Int32Codes -from ._char_codes import _Int64Codes as _Int64Codes -from ._char_codes import _IntCCodes as _IntCCodes -from ._char_codes import _IntCodes as _IntCodes -from ._char_codes import _IntegerCodes as _IntegerCodes -from ._char_codes import _IntPCodes as _IntPCodes -from ._char_codes import _LongCodes as _LongCodes -from ._char_codes import _LongDoubleCodes as _LongDoubleCodes -from ._char_codes import _LongLongCodes as _LongLongCodes -from ._char_codes import _NumberCodes as _NumberCodes -from ._char_codes import _ObjectCodes as _ObjectCodes -from ._char_codes import _ShortCodes as _ShortCodes -from ._char_codes import _SignedIntegerCodes as _SignedIntegerCodes -from ._char_codes import _SingleCodes as _SingleCodes -from ._char_codes import _StrCodes as _StrCodes -from ._char_codes import _StringCodes as _StringCodes -from ._char_codes import _TD64Codes as _TD64Codes -from ._char_codes import _UByteCodes as _UByteCodes -from ._char_codes import _UInt8Codes as _UInt8Codes -from ._char_codes import _UInt16Codes as _UInt16Codes -from ._char_codes import _UInt32Codes as _UInt32Codes -from ._char_codes import _UInt64Codes as _UInt64Codes -from ._char_codes import 
_UIntCCodes as _UIntCCodes -from ._char_codes import _UIntCodes as _UIntCodes -from ._char_codes import _UIntPCodes as _UIntPCodes -from ._char_codes import _ULongCodes as _ULongCodes -from ._char_codes import _ULongLongCodes as _ULongLongCodes -from ._char_codes import _UnsignedIntegerCodes as _UnsignedIntegerCodes -from ._char_codes import _UShortCodes as _UShortCodes -from ._char_codes import _VoidCodes as _VoidCodes +from ._char_codes import ( + _BoolCodes as _BoolCodes, + _ByteCodes as _ByteCodes, + _BytesCodes as _BytesCodes, + _CDoubleCodes as _CDoubleCodes, + _CharacterCodes as _CharacterCodes, + _CLongDoubleCodes as _CLongDoubleCodes, + _Complex64Codes as _Complex64Codes, + _Complex128Codes as _Complex128Codes, + _ComplexFloatingCodes as _ComplexFloatingCodes, + _CSingleCodes as _CSingleCodes, + _DoubleCodes as _DoubleCodes, + _DT64Codes as _DT64Codes, + _FlexibleCodes as _FlexibleCodes, + _Float16Codes as _Float16Codes, + _Float32Codes as _Float32Codes, + _Float64Codes as _Float64Codes, + _FloatingCodes as _FloatingCodes, + _GenericCodes as _GenericCodes, + _HalfCodes as _HalfCodes, + _InexactCodes as _InexactCodes, + _Int8Codes as _Int8Codes, + _Int16Codes as _Int16Codes, + _Int32Codes as _Int32Codes, + _Int64Codes as _Int64Codes, + _IntCCodes as _IntCCodes, + _IntCodes as _IntCodes, + _IntegerCodes as _IntegerCodes, + _IntPCodes as _IntPCodes, + _LongCodes as _LongCodes, + _LongDoubleCodes as _LongDoubleCodes, + _LongLongCodes as _LongLongCodes, + _NumberCodes as _NumberCodes, + _ObjectCodes as _ObjectCodes, + _ShortCodes as _ShortCodes, + _SignedIntegerCodes as _SignedIntegerCodes, + _SingleCodes as _SingleCodes, + _StrCodes as _StrCodes, + _StringCodes as _StringCodes, + _TD64Codes as _TD64Codes, + _UByteCodes as _UByteCodes, + _UInt8Codes as _UInt8Codes, + _UInt16Codes as _UInt16Codes, + _UInt32Codes as _UInt32Codes, + _UInt64Codes as _UInt64Codes, + _UIntCCodes as _UIntCCodes, + _UIntCodes as _UIntCodes, + _UIntPCodes as _UIntPCodes, + _ULongCodes as _ULongCodes, + _ULongLongCodes as _ULongLongCodes, + _UnsignedIntegerCodes as _UnsignedIntegerCodes, + _UShortCodes as _UShortCodes, + _VoidCodes as _VoidCodes, +) # -from ._dtype_like import DTypeLike as DTypeLike -from ._dtype_like import _DTypeLike as _DTypeLike -from ._dtype_like import _DTypeLikeBool as _DTypeLikeBool -from ._dtype_like import _DTypeLikeBytes as _DTypeLikeBytes -from ._dtype_like import _DTypeLikeComplex as _DTypeLikeComplex -from ._dtype_like import _DTypeLikeComplex_co as _DTypeLikeComplex_co -from ._dtype_like import _DTypeLikeDT64 as _DTypeLikeDT64 -from ._dtype_like import _DTypeLikeFloat as _DTypeLikeFloat -from ._dtype_like import _DTypeLikeInt as _DTypeLikeInt -from ._dtype_like import _DTypeLikeObject as _DTypeLikeObject -from ._dtype_like import _DTypeLikeStr as _DTypeLikeStr -from ._dtype_like import _DTypeLikeTD64 as _DTypeLikeTD64 -from ._dtype_like import _DTypeLikeUInt as _DTypeLikeUInt -from ._dtype_like import _DTypeLikeVoid as _DTypeLikeVoid -from ._dtype_like import _SupportsDType as _SupportsDType -from ._dtype_like import _VoidDTypeLike as _VoidDTypeLike +from ._dtype_like import ( + DTypeLike as DTypeLike, + _DTypeLike as _DTypeLike, + _DTypeLikeBool as _DTypeLikeBool, + _DTypeLikeBytes as _DTypeLikeBytes, + _DTypeLikeComplex as _DTypeLikeComplex, + _DTypeLikeComplex_co as _DTypeLikeComplex_co, + _DTypeLikeDT64 as _DTypeLikeDT64, + _DTypeLikeFloat as _DTypeLikeFloat, + _DTypeLikeInt as _DTypeLikeInt, + _DTypeLikeObject as _DTypeLikeObject, + _DTypeLikeStr as _DTypeLikeStr, + 
_DTypeLikeTD64 as _DTypeLikeTD64, + _DTypeLikeUInt as _DTypeLikeUInt, + _DTypeLikeVoid as _DTypeLikeVoid, + _SupportsDType as _SupportsDType, + _VoidDTypeLike as _VoidDTypeLike, +) # -from ._nbit import _NBitByte as _NBitByte -from ._nbit import _NBitDouble as _NBitDouble -from ._nbit import _NBitHalf as _NBitHalf -from ._nbit import _NBitInt as _NBitInt -from ._nbit import _NBitIntC as _NBitIntC -from ._nbit import _NBitIntP as _NBitIntP -from ._nbit import _NBitLong as _NBitLong -from ._nbit import _NBitLongDouble as _NBitLongDouble -from ._nbit import _NBitLongLong as _NBitLongLong -from ._nbit import _NBitShort as _NBitShort -from ._nbit import _NBitSingle as _NBitSingle +from ._nbit import ( + _NBitByte as _NBitByte, + _NBitDouble as _NBitDouble, + _NBitHalf as _NBitHalf, + _NBitInt as _NBitInt, + _NBitIntC as _NBitIntC, + _NBitIntP as _NBitIntP, + _NBitLong as _NBitLong, + _NBitLongDouble as _NBitLongDouble, + _NBitLongLong as _NBitLongLong, + _NBitShort as _NBitShort, + _NBitSingle as _NBitSingle, +) # from ._nbit_base import ( NBitBase as NBitBase, # type: ignore[deprecated] # pyright: ignore[reportDeprecated] + _8Bit as _8Bit, + _16Bit as _16Bit, + _32Bit as _32Bit, + _64Bit as _64Bit, + _96Bit as _96Bit, + _128Bit as _128Bit, ) -from ._nbit_base import _8Bit as _8Bit -from ._nbit_base import _16Bit as _16Bit -from ._nbit_base import _32Bit as _32Bit -from ._nbit_base import _64Bit as _64Bit -from ._nbit_base import _96Bit as _96Bit -from ._nbit_base import _128Bit as _128Bit # from ._nested_sequence import _NestedSequence as _NestedSequence # -from ._scalars import _BoolLike_co as _BoolLike_co -from ._scalars import _CharLike_co as _CharLike_co -from ._scalars import _ComplexLike_co as _ComplexLike_co -from ._scalars import _FloatLike_co as _FloatLike_co -from ._scalars import _IntLike_co as _IntLike_co -from ._scalars import _NumberLike_co as _NumberLike_co -from ._scalars import _ScalarLike_co as _ScalarLike_co -from ._scalars import _TD64Like_co as _TD64Like_co -from ._scalars import _UIntLike_co as _UIntLike_co -from ._scalars import _VoidLike_co as _VoidLike_co +from ._scalars import ( + _BoolLike_co as _BoolLike_co, + _CharLike_co as _CharLike_co, + _ComplexLike_co as _ComplexLike_co, + _FloatLike_co as _FloatLike_co, + _IntLike_co as _IntLike_co, + _NumberLike_co as _NumberLike_co, + _ScalarLike_co as _ScalarLike_co, + _TD64Like_co as _TD64Like_co, + _UIntLike_co as _UIntLike_co, + _VoidLike_co as _VoidLike_co, +) # -from ._shape import _AnyShape as _AnyShape -from ._shape import _Shape as _Shape -from ._shape import _ShapeLike as _ShapeLike +from ._shape import _AnyShape as _AnyShape, _Shape as _Shape, _ShapeLike as _ShapeLike # -from ._ufunc import _GUFunc_Nin2_Nout1 as _GUFunc_Nin2_Nout1 -from ._ufunc import _UFunc_Nin1_Nout1 as _UFunc_Nin1_Nout1 -from ._ufunc import _UFunc_Nin1_Nout2 as _UFunc_Nin1_Nout2 -from ._ufunc import _UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1 -from ._ufunc import _UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2 +from ._ufunc import ( + _GUFunc_Nin2_Nout1 as _GUFunc_Nin2_Nout1, + _UFunc_Nin1_Nout1 as _UFunc_Nin1_Nout1, + _UFunc_Nin1_Nout2 as _UFunc_Nin1_Nout2, + _UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1, + _UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2, +) diff --git a/numpy/_typing/_callable.pyi b/numpy/_typing/_callable.pyi index 21df1d983fe6..1ce7de5bb423 100644 --- a/numpy/_typing/_callable.pyi +++ b/numpy/_typing/_callable.pyi @@ -38,11 +38,7 @@ from . 
import NBitBase from ._array_like import NDArray from ._nbit import _NBitInt from ._nested_sequence import _NestedSequence -from ._scalars import ( - _BoolLike_co, - _IntLike_co, - _NumberLike_co, -) +from ._scalars import _BoolLike_co, _IntLike_co, _NumberLike_co _T1 = TypeVar("_T1") _T2 = TypeVar("_T2") diff --git a/numpy/_typing/_dtype_like.py b/numpy/_typing/_dtype_like.py index c406b3098384..526fb86dd322 100644 --- a/numpy/_typing/_dtype_like.py +++ b/numpy/_typing/_dtype_like.py @@ -1,12 +1,5 @@ from collections.abc import Sequence # noqa: F811 -from typing import ( - Any, - Protocol, - TypeAlias, - TypedDict, - TypeVar, - runtime_checkable, -) +from typing import Any, Protocol, TypeAlias, TypedDict, TypeVar, runtime_checkable import numpy as np diff --git a/numpy/_utils/__init__.pyi b/numpy/_utils/__init__.pyi index 2ed4e88b3e32..b630777ced99 100644 --- a/numpy/_utils/__init__.pyi +++ b/numpy/_utils/__init__.pyi @@ -2,8 +2,7 @@ from _typeshed import IdentityFunction from collections.abc import Callable, Iterable from typing import Protocol, TypeVar, overload, type_check_only -from ._convertions import asbytes as asbytes -from ._convertions import asunicode as asunicode +from ._convertions import asbytes as asbytes, asunicode as asunicode ### diff --git a/numpy/_utils/_pep440.pyi b/numpy/_utils/_pep440.pyi index 2c338d4e5b14..11ae02e57a59 100644 --- a/numpy/_utils/_pep440.pyi +++ b/numpy/_utils/_pep440.pyi @@ -5,14 +5,12 @@ from typing import ( ClassVar, Final, Generic, + Literal as L, NamedTuple, TypeVar, final, type_check_only, ) -from typing import ( - Literal as L, -) from typing_extensions import TypeIs __all__ = ["VERSION_PATTERN", "InvalidVersion", "LegacyVersion", "Version", "parse"] diff --git a/numpy/ctypeslib/__init__.pyi b/numpy/ctypeslib/__init__.pyi index adc51da2696c..f088d0281d33 100644 --- a/numpy/ctypeslib/__init__.pyi +++ b/numpy/ctypeslib/__init__.pyi @@ -3,31 +3,13 @@ from ctypes import c_int64 as _c_intp from ._ctypeslib import ( __all__ as __all__, -) -from ._ctypeslib import ( __doc__ as __doc__, -) -from ._ctypeslib import ( _concrete_ndptr as _concrete_ndptr, -) -from ._ctypeslib import ( _ndptr as _ndptr, -) -from ._ctypeslib import ( as_array as as_array, -) -from ._ctypeslib import ( as_ctypes as as_ctypes, -) -from ._ctypeslib import ( as_ctypes_type as as_ctypes_type, -) -from ._ctypeslib import ( c_intp as c_intp, -) -from ._ctypeslib import ( load_library as load_library, -) -from ._ctypeslib import ( ndpointer as ndpointer, ) diff --git a/numpy/ctypeslib/_ctypeslib.pyi b/numpy/ctypeslib/_ctypeslib.pyi index aecb3899bdf5..996b2c0be388 100644 --- a/numpy/ctypeslib/_ctypeslib.pyi +++ b/numpy/ctypeslib/_ctypeslib.pyi @@ -4,15 +4,7 @@ import ctypes from _typeshed import StrOrBytesPath from collections.abc import Iterable, Sequence from ctypes import c_int64 as _c_intp -from typing import ( - Any, - ClassVar, - Generic, - TypeAlias, - TypeVar, - overload, -) -from typing import Literal as L +from typing import Any, ClassVar, Generic, Literal as L, TypeAlias, TypeVar, overload import numpy as np from numpy import ( diff --git a/numpy/dtypes.pyi b/numpy/dtypes.pyi index f76b08fc28dc..36844a90d31f 100644 --- a/numpy/dtypes.pyi +++ b/numpy/dtypes.pyi @@ -2,6 +2,7 @@ from typing import ( Any, Generic, + Literal as L, LiteralString, Never, NoReturn, @@ -11,7 +12,6 @@ from typing import ( overload, type_check_only, ) -from typing import Literal as L from typing_extensions import TypeVar import numpy as np diff --git a/numpy/f2py/__init__.pyi 
b/numpy/f2py/__init__.pyi index d12f47e80a7d..aa7d5918f7d2 100644 --- a/numpy/f2py/__init__.pyi +++ b/numpy/f2py/__init__.pyi @@ -1,5 +1,4 @@ -from .f2py2e import main as main -from .f2py2e import run_main +from .f2py2e import main as main, run_main __all__ = ["get_include", "run_main"] diff --git a/numpy/f2py/_backends/_meson.pyi b/numpy/f2py/_backends/_meson.pyi index 67baf9b76845..bcb2b0304401 100644 --- a/numpy/f2py/_backends/_meson.pyi +++ b/numpy/f2py/_backends/_meson.pyi @@ -1,7 +1,6 @@ from collections.abc import Callable from pathlib import Path -from typing import Final -from typing import Literal as L +from typing import Final, Literal as L from typing_extensions import override from ._backend import Backend diff --git a/numpy/f2py/auxfuncs.pyi b/numpy/f2py/auxfuncs.pyi index dfbae5c7d94d..32e381cf9a1c 100644 --- a/numpy/f2py/auxfuncs.pyi +++ b/numpy/f2py/auxfuncs.pyi @@ -1,8 +1,7 @@ from _typeshed import FileDescriptorOrPath from collections.abc import Callable, Mapping from pprint import pprint as show -from typing import Any, Final, Never, TypeAlias, TypeVar, overload -from typing import Literal as L +from typing import Any, Final, Literal as L, Never, TypeAlias, TypeVar, overload from .cfuncs import errmess diff --git a/numpy/f2py/crackfortran.pyi b/numpy/f2py/crackfortran.pyi index c5f4fd7585ba..742d358916a2 100644 --- a/numpy/f2py/crackfortran.pyi +++ b/numpy/f2py/crackfortran.pyi @@ -1,8 +1,17 @@ import re from _typeshed import StrOrBytesPath, StrPath from collections.abc import Callable, Iterable, Mapping -from typing import IO, Any, Concatenate, Final, Never, ParamSpec, TypeAlias, overload -from typing import Literal as L +from typing import ( + IO, + Any, + Concatenate, + Final, + Literal as L, + Never, + ParamSpec, + TypeAlias, + overload, +) from .__version__ import version from .auxfuncs import isintent_dict as isintent_dict diff --git a/numpy/f2py/f2py2e.pyi b/numpy/f2py/f2py2e.pyi index 03aeffc5dcdd..46794e552b41 100644 --- a/numpy/f2py/f2py2e.pyi +++ b/numpy/f2py/f2py2e.pyi @@ -6,8 +6,7 @@ from typing import Any, Final, NotRequired, TypedDict, type_check_only from typing_extensions import TypeVar, override from .__version__ import version -from .auxfuncs import _Bool -from .auxfuncs import outmess as outmess +from .auxfuncs import _Bool, outmess as outmess ### diff --git a/numpy/f2py/rules.pyi b/numpy/f2py/rules.pyi index 58614060ba87..30439f6b8351 100644 --- a/numpy/f2py/rules.pyi +++ b/numpy/f2py/rules.pyi @@ -1,6 +1,5 @@ from collections.abc import Callable, Iterable, Mapping -from typing import Any, Final, TypeAlias -from typing import Literal as L +from typing import Any, Final, Literal as L, TypeAlias from typing_extensions import TypeVar from .__version__ import version diff --git a/numpy/f2py/symbolic.pyi b/numpy/f2py/symbolic.pyi index e7b14f751dc3..06be2bb16044 100644 --- a/numpy/f2py/symbolic.pyi +++ b/numpy/f2py/symbolic.pyi @@ -1,7 +1,6 @@ from collections.abc import Callable, Mapping from enum import Enum -from typing import Any, Generic, ParamSpec, Self, TypeAlias, overload -from typing import Literal as L +from typing import Any, Generic, Literal as L, ParamSpec, Self, TypeAlias, overload from typing_extensions import TypeVar __all__ = ["Expr"] diff --git a/numpy/f2py/tests/test_kind.py b/numpy/f2py/tests/test_kind.py index ce223a555456..ecf884fb4999 100644 --- a/numpy/f2py/tests/test_kind.py +++ b/numpy/f2py/tests/test_kind.py @@ -5,8 +5,6 @@ from numpy.f2py.crackfortran import ( _selected_int_kind_func as selected_int_kind, -) -from 
numpy.f2py.crackfortran import ( _selected_real_kind_func as selected_real_kind, ) diff --git a/numpy/fft/__init__.pyi b/numpy/fft/__init__.pyi index 54d0ea8c79b6..893a697f1398 100644 --- a/numpy/fft/__init__.pyi +++ b/numpy/fft/__init__.pyi @@ -1,9 +1,4 @@ -from ._helper import ( - fftfreq, - fftshift, - ifftshift, - rfftfreq, -) +from ._helper import fftfreq, fftshift, ifftshift, rfftfreq from ._pocketfft import ( fft, fft2, diff --git a/numpy/fft/_helper.pyi b/numpy/fft/_helper.pyi index d06bda7ad9a9..1ea451ec2eb1 100644 --- a/numpy/fft/_helper.pyi +++ b/numpy/fft/_helper.pyi @@ -1,5 +1,4 @@ -from typing import Any, Final, TypeVar, overload -from typing import Literal as L +from typing import Any, Final, Literal as L, TypeVar, overload from numpy import complexfloating, floating, generic, integer from numpy._typing import ( diff --git a/numpy/fft/_pocketfft.pyi b/numpy/fft/_pocketfft.pyi index 215cf14d1395..4f5e5c944b4c 100644 --- a/numpy/fft/_pocketfft.pyi +++ b/numpy/fft/_pocketfft.pyi @@ -1,6 +1,5 @@ from collections.abc import Sequence -from typing import Literal as L -from typing import TypeAlias +from typing import Literal as L, TypeAlias from numpy import complex128, float64 from numpy._typing import ArrayLike, NDArray, _ArrayLikeNumber_co diff --git a/numpy/fft/helper.pyi b/numpy/fft/helper.pyi index 7cf391a12e1d..5147652172a6 100644 --- a/numpy/fft/helper.pyi +++ b/numpy/fft/helper.pyi @@ -1,5 +1,4 @@ -from typing import Any -from typing import Literal as L +from typing import Any, Literal as L from typing_extensions import deprecated import numpy as np diff --git a/numpy/lib/__init__.pyi b/numpy/lib/__init__.pyi index 6185a494d035..5a85743e4d0f 100644 --- a/numpy/lib/__init__.pyi +++ b/numpy/lib/__init__.pyi @@ -3,28 +3,36 @@ from numpy._core.multiarray import add_docstring, tracemalloc_domain # all submodules of `lib` are accessible at runtime through `__getattr__`, # so we implicitly re-export them here -from . import _array_utils_impl as _array_utils_impl -from . import _arraypad_impl as _arraypad_impl -from . import _arraysetops_impl as _arraysetops_impl -from . import _arrayterator_impl as _arrayterator_impl -from . import _datasource as _datasource -from . import _format_impl as _format_impl -from . import _function_base_impl as _function_base_impl -from . import _histograms_impl as _histograms_impl -from . import _index_tricks_impl as _index_tricks_impl -from . import _iotools as _iotools -from . import _nanfunctions_impl as _nanfunctions_impl -from . import _npyio_impl as _npyio_impl -from . import _polynomial_impl as _polynomial_impl -from . import _scimath_impl as _scimath_impl -from . import _shape_base_impl as _shape_base_impl -from . import _stride_tricks_impl as _stride_tricks_impl -from . import _twodim_base_impl as _twodim_base_impl -from . import _type_check_impl as _type_check_impl -from . import _ufunclike_impl as _ufunclike_impl -from . import _utils_impl as _utils_impl -from . import _version as _version -from . import array_utils, format, introspect, mixins, npyio, scimath, stride_tricks +from . 
import ( + _array_utils_impl as _array_utils_impl, + _arraypad_impl as _arraypad_impl, + _arraysetops_impl as _arraysetops_impl, + _arrayterator_impl as _arrayterator_impl, + _datasource as _datasource, + _format_impl as _format_impl, + _function_base_impl as _function_base_impl, + _histograms_impl as _histograms_impl, + _index_tricks_impl as _index_tricks_impl, + _iotools as _iotools, + _nanfunctions_impl as _nanfunctions_impl, + _npyio_impl as _npyio_impl, + _polynomial_impl as _polynomial_impl, + _scimath_impl as _scimath_impl, + _shape_base_impl as _shape_base_impl, + _stride_tricks_impl as _stride_tricks_impl, + _twodim_base_impl as _twodim_base_impl, + _type_check_impl as _type_check_impl, + _ufunclike_impl as _ufunclike_impl, + _utils_impl as _utils_impl, + _version as _version, + array_utils, + format, + introspect, + mixins, + npyio, + scimath, + stride_tricks, +) from ._arrayterator_impl import Arrayterator from ._version import NumpyVersion diff --git a/numpy/lib/_arraypad_impl.pyi b/numpy/lib/_arraypad_impl.pyi index 46b43762b87f..6158f299efe2 100644 --- a/numpy/lib/_arraypad_impl.pyi +++ b/numpy/lib/_arraypad_impl.pyi @@ -1,22 +1,15 @@ from typing import ( Any, + Literal as L, Protocol, TypeAlias, TypeVar, overload, type_check_only, ) -from typing import ( - Literal as L, -) from numpy import generic -from numpy._typing import ( - ArrayLike, - NDArray, - _ArrayLike, - _ArrayLikeInt, -) +from numpy._typing import ArrayLike, NDArray, _ArrayLike, _ArrayLikeInt __all__ = ["pad"] diff --git a/numpy/lib/_arraysetops_impl.pyi b/numpy/lib/_arraysetops_impl.pyi index 4279b809f78e..c0291680a8ec 100644 --- a/numpy/lib/_arraysetops_impl.pyi +++ b/numpy/lib/_arraysetops_impl.pyi @@ -1,5 +1,12 @@ -from typing import Any, Generic, NamedTuple, SupportsIndex, TypeAlias, overload -from typing import Literal as L +from typing import ( + Any, + Generic, + Literal as L, + NamedTuple, + SupportsIndex, + TypeAlias, + overload, +) from typing_extensions import TypeVar, deprecated import numpy as np diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 096043e6316f..f5690a829fc6 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -10,9 +10,14 @@ from numpy._core import overrides, transpose from numpy._core._multiarray_umath import _array_converter from numpy._core.fromnumeric import any, mean, nonzero, partition, ravel, sum -from numpy._core.multiarray import _monotonicity, _place, bincount, normalize_axis_index -from numpy._core.multiarray import interp as compiled_interp -from numpy._core.multiarray import interp_complex as compiled_interp_complex +from numpy._core.multiarray import ( + _monotonicity, + _place, + bincount, + interp as compiled_interp, + interp_complex as compiled_interp_complex, + normalize_axis_index, +) from numpy._core.numeric import ( absolute, arange, diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index cb6e18b53fa4..78947b1b3b46 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -4,6 +4,7 @@ from collections.abc import Callable, Iterable, Sequence from typing import ( Any, Concatenate, + Literal as L, ParamSpec, Protocol, SupportsIndex, @@ -13,7 +14,6 @@ from typing import ( overload, type_check_only, ) -from typing import Literal as L from typing_extensions import TypeIs, deprecated import numpy as np diff --git a/numpy/lib/_histograms_impl.pyi b/numpy/lib/_histograms_impl.pyi index 5e7afb5e397b..06987dd71e6b 100644 --- 
a/numpy/lib/_histograms_impl.pyi +++ b/numpy/lib/_histograms_impl.pyi @@ -1,17 +1,7 @@ from collections.abc import Sequence -from typing import ( - Any, - SupportsIndex, - TypeAlias, -) -from typing import ( - Literal as L, -) +from typing import Any, Literal as L, SupportsIndex, TypeAlias -from numpy._typing import ( - ArrayLike, - NDArray, -) +from numpy._typing import ArrayLike, NDArray __all__ = ["histogram", "histogramdd", "histogram_bin_edges"] diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index c4509d9aa3ad..208a8e868b48 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -1,7 +1,16 @@ from _typeshed import Incomplete from collections.abc import Sequence -from typing import Any, ClassVar, Final, Generic, Self, SupportsIndex, final, overload -from typing import Literal as L +from typing import ( + Any, + ClassVar, + Final, + Generic, + Literal as L, + Self, + SupportsIndex, + final, + overload, +) from typing_extensions import TypeVar, deprecated import numpy as np diff --git a/numpy/lib/_nanfunctions_impl.pyi b/numpy/lib/_nanfunctions_impl.pyi index f39800d58d07..fd5d277cbd7d 100644 --- a/numpy/lib/_nanfunctions_impl.pyi +++ b/numpy/lib/_nanfunctions_impl.pyi @@ -11,11 +11,7 @@ from numpy._core.fromnumeric import ( sum, var, ) -from numpy.lib._function_base_impl import ( - median, - percentile, - quantile, -) +from numpy.lib._function_base_impl import median, percentile, quantile __all__ = [ "nansum", diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index 94f014ccd52d..253fcb0fdf9e 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -14,13 +14,13 @@ from typing import ( Any, ClassVar, Generic, + Literal as L, Protocol, Self, TypeAlias, overload, type_check_only, ) -from typing import Literal as L from typing_extensions import TypeVar, deprecated, override import numpy as np diff --git a/numpy/lib/_polynomial_impl.pyi b/numpy/lib/_polynomial_impl.pyi index faf2f01e6a22..3da1a2af60c9 100644 --- a/numpy/lib/_polynomial_impl.pyi +++ b/numpy/lib/_polynomial_impl.pyi @@ -1,5 +1,6 @@ from typing import ( Any, + Literal as L, NoReturn, SupportsIndex, SupportsInt, @@ -7,9 +8,6 @@ from typing import ( TypeVar, overload, ) -from typing import ( - Literal as L, -) import numpy as np from numpy import ( diff --git a/numpy/lib/_twodim_base_impl.pyi b/numpy/lib/_twodim_base_impl.pyi index 43df38ed5b06..9e70d0a617f6 100644 --- a/numpy/lib/_twodim_base_impl.pyi +++ b/numpy/lib/_twodim_base_impl.pyi @@ -1,13 +1,5 @@ from collections.abc import Callable, Sequence -from typing import ( - Any, - TypeAlias, - TypeVar, - overload, -) -from typing import ( - Literal as L, -) +from typing import Any, Literal as L, TypeAlias, TypeVar, overload import numpy as np from numpy import ( diff --git a/numpy/lib/_type_check_impl.pyi b/numpy/lib/_type_check_impl.pyi index b9ab2a02f5f5..5e98ad22ca8b 100644 --- a/numpy/lib/_type_check_impl.pyi +++ b/numpy/lib/_type_check_impl.pyi @@ -1,7 +1,6 @@ from _typeshed import Incomplete from collections.abc import Container, Iterable -from typing import Any, Protocol, TypeAlias, overload, type_check_only -from typing import Literal as L +from typing import Any, Literal as L, Protocol, TypeAlias, overload, type_check_only from typing_extensions import TypeVar import numpy as np diff --git a/numpy/lib/array_utils.pyi b/numpy/lib/array_utils.pyi index 8adc3c5b22a6..4b9ebe334a1f 100644 --- a/numpy/lib/array_utils.pyi +++ b/numpy/lib/array_utils.pyi @@ -1,12 +1,6 @@ from 
._array_utils_impl import ( __all__ as __all__, -) -from ._array_utils_impl import ( byte_bounds as byte_bounds, -) -from ._array_utils_impl import ( normalize_axis_index as normalize_axis_index, -) -from ._array_utils_impl import ( normalize_axis_tuple as normalize_axis_tuple, ) diff --git a/numpy/lib/format.pyi b/numpy/lib/format.pyi index dd9470e1e6a3..c29e18fe0581 100644 --- a/numpy/lib/format.pyi +++ b/numpy/lib/format.pyi @@ -1,66 +1,24 @@ from ._format_impl import ( ARRAY_ALIGN as ARRAY_ALIGN, -) -from ._format_impl import ( BUFFER_SIZE as BUFFER_SIZE, -) -from ._format_impl import ( EXPECTED_KEYS as EXPECTED_KEYS, -) -from ._format_impl import ( GROWTH_AXIS_MAX_DIGITS as GROWTH_AXIS_MAX_DIGITS, -) -from ._format_impl import ( MAGIC_LEN as MAGIC_LEN, -) -from ._format_impl import ( MAGIC_PREFIX as MAGIC_PREFIX, -) -from ._format_impl import ( __all__ as __all__, -) -from ._format_impl import ( __doc__ as __doc__, -) -from ._format_impl import ( descr_to_dtype as descr_to_dtype, -) -from ._format_impl import ( drop_metadata as drop_metadata, -) -from ._format_impl import ( dtype_to_descr as dtype_to_descr, -) -from ._format_impl import ( header_data_from_array_1_0 as header_data_from_array_1_0, -) -from ._format_impl import ( isfileobj as isfileobj, -) -from ._format_impl import ( magic as magic, -) -from ._format_impl import ( open_memmap as open_memmap, -) -from ._format_impl import ( read_array as read_array, -) -from ._format_impl import ( read_array_header_1_0 as read_array_header_1_0, -) -from ._format_impl import ( read_array_header_2_0 as read_array_header_2_0, -) -from ._format_impl import ( read_magic as read_magic, -) -from ._format_impl import ( write_array as write_array, -) -from ._format_impl import ( write_array_header_1_0 as write_array_header_1_0, -) -from ._format_impl import ( write_array_header_2_0 as write_array_header_2_0, ) diff --git a/numpy/lib/introspect.py b/numpy/lib/introspect.py index a7e4c93932c6..5526a332fead 100644 --- a/numpy/lib/introspect.py +++ b/numpy/lib/introspect.py @@ -65,8 +65,7 @@ def opt_func_info(func_name=None, signature=None): """ import re - from numpy._core._multiarray_umath import __cpu_targets_info__ as targets - from numpy._core._multiarray_umath import dtype + from numpy._core._multiarray_umath import __cpu_targets_info__ as targets, dtype if func_name is not None: func_pattern = re.compile(func_name) diff --git a/numpy/lib/mixins.pyi b/numpy/lib/mixins.pyi index 4f4801feac8f..f23c58fa6586 100644 --- a/numpy/lib/mixins.pyi +++ b/numpy/lib/mixins.pyi @@ -1,6 +1,5 @@ from abc import ABC, abstractmethod -from typing import Any -from typing import Literal as L +from typing import Any, Literal as L from numpy import ufunc diff --git a/numpy/lib/npyio.pyi b/numpy/lib/npyio.pyi index 49fb4d1fc736..fd3ae8f5a287 100644 --- a/numpy/lib/npyio.pyi +++ b/numpy/lib/npyio.pyi @@ -1,9 +1,5 @@ from numpy.lib._npyio_impl import ( DataSource as DataSource, -) -from numpy.lib._npyio_impl import ( NpzFile as NpzFile, -) -from numpy.lib._npyio_impl import ( __doc__ as __doc__, ) diff --git a/numpy/lib/scimath.pyi b/numpy/lib/scimath.pyi index 253235dfc576..ef2772a33a47 100644 --- a/numpy/lib/scimath.pyi +++ b/numpy/lib/scimath.pyi @@ -1,30 +1,12 @@ from ._scimath_impl import ( __all__ as __all__, -) -from ._scimath_impl import ( arccos as arccos, -) -from ._scimath_impl import ( arcsin as arcsin, -) -from ._scimath_impl import ( arctanh as arctanh, -) -from ._scimath_impl import ( log as log, -) -from ._scimath_impl import ( log2 as log2, -) -from 
._scimath_impl import ( log10 as log10, -) -from ._scimath_impl import ( logn as logn, -) -from ._scimath_impl import ( power as power, -) -from ._scimath_impl import ( sqrt as sqrt, ) diff --git a/numpy/lib/stride_tricks.pyi b/numpy/lib/stride_tricks.pyi index 42d8fe9ef43b..eb46f28ae5f4 100644 --- a/numpy/lib/stride_tricks.pyi +++ b/numpy/lib/stride_tricks.pyi @@ -1,6 +1,4 @@ from numpy.lib._stride_tricks_impl import ( as_strided as as_strided, -) -from numpy.lib._stride_tricks_impl import ( sliding_window_view as sliding_window_view, ) diff --git a/numpy/lib/tests/test__iotools.py b/numpy/lib/tests/test__iotools.py index 1581ffbe95fd..548a3db2dc07 100644 --- a/numpy/lib/tests/test__iotools.py +++ b/numpy/lib/tests/test__iotools.py @@ -10,12 +10,7 @@ flatten_dtype, has_nested_fields, ) -from numpy.testing import ( - assert_, - assert_allclose, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_allclose, assert_equal, assert_raises class TestLineSplitter: diff --git a/numpy/linalg/__init__.pyi b/numpy/linalg/__init__.pyi index 16c8048c1a11..53c115f7bd65 100644 --- a/numpy/linalg/__init__.pyi +++ b/numpy/linalg/__init__.pyi @@ -1,6 +1,4 @@ -from . import _linalg as _linalg -from . import _umath_linalg as _umath_linalg -from . import linalg as linalg +from . import _linalg as _linalg, _umath_linalg as _umath_linalg, linalg as linalg from ._linalg import ( cholesky, cond, diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index d7850c4a0204..52a2ffb8f50b 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -35,7 +35,9 @@ cdouble, complexfloating, count_nonzero, + cross as _core_cross, csingle, + diagonal as _core_diagonal, divide, dot, double, @@ -49,10 +51,13 @@ intp, isfinite, isnan, + matmul as _core_matmul, + matrix_transpose as _core_matrix_transpose, moveaxis, multiply, newaxis, object_, + outer as _core_outer, overrides, prod, reciprocal, @@ -62,34 +67,11 @@ sqrt, sum, swapaxes, - zeros, -) -from numpy._core import ( - cross as _core_cross, -) -from numpy._core import ( - diagonal as _core_diagonal, -) -from numpy._core import ( - matmul as _core_matmul, -) -from numpy._core import ( - matrix_transpose as _core_matrix_transpose, -) -from numpy._core import ( - outer as _core_outer, -) -from numpy._core import ( tensordot as _core_tensordot, -) -from numpy._core import ( trace as _core_trace, -) -from numpy._core import ( transpose as _core_transpose, -) -from numpy._core import ( vecdot as _core_vecdot, + zeros, ) from numpy._globals import _NoValue from numpy._typing import NDArray diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 3f318a892da5..68bd6d933921 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -1,6 +1,7 @@ from collections.abc import Iterable from typing import ( Any, + Literal as L, NamedTuple, Never, SupportsIndex, @@ -9,7 +10,6 @@ from typing import ( TypeVar, overload, ) -from typing import Literal as L import numpy as np from numpy import ( diff --git a/numpy/linalg/_umath_linalg.pyi b/numpy/linalg/_umath_linalg.pyi index cd07acdb1f9e..f90706a7b159 100644 --- a/numpy/linalg/_umath_linalg.pyi +++ b/numpy/linalg/_umath_linalg.pyi @@ -1,5 +1,4 @@ -from typing import Final -from typing import Literal as L +from typing import Final, Literal as L import numpy as np from numpy._typing._ufunc import _GUFunc_Nin2_Nout1 diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 05ea373a6a12..5e231fbbadb7 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -35,6 +35,7 @@ amax, 
amin, angle, + array as narray, # noqa: F401 bool_, expand_dims, finfo, # noqa: F401 @@ -42,7 +43,6 @@ iscomplexobj, ndarray, ) -from numpy import array as narray # noqa: F401 from numpy._core import multiarray as mu from numpy._core.numeric import normalize_axis_tuple from numpy._utils import set_module diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 094c1e26b191..381cf75f00c3 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -23,8 +23,7 @@ import warnings import numpy as np -from numpy import array as nxarray -from numpy import ndarray +from numpy import array as nxarray, ndarray from numpy.lib._function_base_impl import _ureduce from numpy.lib._index_tricks_impl import AxisConcatenator from numpy.lib.array_utils import normalize_axis_index, normalize_axis_tuple diff --git a/numpy/ma/tests/test_mrecords.py b/numpy/ma/tests/test_mrecords.py index 0da915101511..e0b0db24904c 100644 --- a/numpy/ma/tests/test_mrecords.py +++ b/numpy/ma/tests/test_mrecords.py @@ -8,9 +8,11 @@ import numpy as np import numpy.ma as ma -from numpy._core.records import fromarrays as recfromarrays -from numpy._core.records import fromrecords as recfromrecords -from numpy._core.records import recarray +from numpy._core.records import ( + fromarrays as recfromarrays, + fromrecords as recfromrecords, + recarray, +) from numpy.ma import masked, nomask from numpy.ma.mrecords import ( MaskedRecords, @@ -20,11 +22,7 @@ fromtextfile, mrecarray, ) -from numpy.ma.testutils import ( - assert_, - assert_equal, - assert_equal_records, -) +from numpy.ma.testutils import assert_, assert_equal, assert_equal_records from numpy.testing import temppath diff --git a/numpy/ma/tests/test_old_ma.py b/numpy/ma/tests/test_old_ma.py index 30c3311798fc..f5d6d3ec27b9 100644 --- a/numpy/ma/tests/test_old_ma.py +++ b/numpy/ma/tests/test_old_ma.py @@ -83,11 +83,7 @@ where, zeros, ) -from numpy.testing import ( - assert_, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_equal, assert_raises pi = np.pi diff --git a/numpy/matrixlib/tests/test_matrix_linalg.py b/numpy/matrixlib/tests/test_matrix_linalg.py index 4e639653bda4..6ce03b36abde 100644 --- a/numpy/matrixlib/tests/test_matrix_linalg.py +++ b/numpy/matrixlib/tests/test_matrix_linalg.py @@ -12,13 +12,13 @@ PinvCases, SolveCases, SVDCases, + TestQR as _TestQR, _TestNorm2D, _TestNormDoubleBase, _TestNormInt64Base, _TestNormSingleBase, apply_tag, ) -from numpy.linalg.tests.test_linalg import TestQR as _TestQR CASES = [] diff --git a/numpy/polynomial/chebyshev.pyi b/numpy/polynomial/chebyshev.pyi index ec342df0f9d1..85c92816a261 100644 --- a/numpy/polynomial/chebyshev.pyi +++ b/numpy/polynomial/chebyshev.pyi @@ -1,6 +1,5 @@ from collections.abc import Callable, Iterable -from typing import Any, Concatenate, Final, Self, TypeVar, overload -from typing import Literal as L +from typing import Any, Concatenate, Final, Literal as L, Self, TypeVar, overload import numpy as np import numpy.typing as npt diff --git a/numpy/polynomial/hermite.pyi b/numpy/polynomial/hermite.pyi index f7d907c1b39d..07db43d0c000 100644 --- a/numpy/polynomial/hermite.pyi +++ b/numpy/polynomial/hermite.pyi @@ -1,5 +1,4 @@ -from typing import Any, Final, TypeVar -from typing import Literal as L +from typing import Any, Final, Literal as L, TypeVar import numpy as np diff --git a/numpy/polynomial/hermite_e.pyi b/numpy/polynomial/hermite_e.pyi index e8013e66b62f..94ad7248f268 100644 --- a/numpy/polynomial/hermite_e.pyi +++ b/numpy/polynomial/hermite_e.pyi @@ -1,5 +1,4 @@ -from 
typing import Any, Final, TypeVar -from typing import Literal as L +from typing import Any, Final, Literal as L, TypeVar import numpy as np diff --git a/numpy/polynomial/laguerre.pyi b/numpy/polynomial/laguerre.pyi index 6f67257a607c..a2b84f72bab7 100644 --- a/numpy/polynomial/laguerre.pyi +++ b/numpy/polynomial/laguerre.pyi @@ -1,5 +1,4 @@ -from typing import Final -from typing import Literal as L +from typing import Final, Literal as L import numpy as np diff --git a/numpy/polynomial/legendre.pyi b/numpy/polynomial/legendre.pyi index 35ea2ffd2bf2..d81f3e6f54a4 100644 --- a/numpy/polynomial/legendre.pyi +++ b/numpy/polynomial/legendre.pyi @@ -1,5 +1,4 @@ -from typing import Final -from typing import Literal as L +from typing import Final, Literal as L import numpy as np diff --git a/numpy/polynomial/polynomial.pyi b/numpy/polynomial/polynomial.pyi index b4c784492b50..2942adf2afa8 100644 --- a/numpy/polynomial/polynomial.pyi +++ b/numpy/polynomial/polynomial.pyi @@ -1,5 +1,4 @@ -from typing import Final -from typing import Literal as L +from typing import Final, Literal as L import numpy as np diff --git a/numpy/polynomial/polyutils.pyi b/numpy/polynomial/polyutils.pyi index c627e16dca1d..65ae4e5503b2 100644 --- a/numpy/polynomial/polyutils.pyi +++ b/numpy/polynomial/polyutils.pyi @@ -1,12 +1,5 @@ from collections.abc import Callable, Iterable, Sequence -from typing import ( - Final, - Literal, - SupportsIndex, - TypeAlias, - TypeVar, - overload, -) +from typing import Final, Literal, SupportsIndex, TypeAlias, TypeVar, overload import numpy as np import numpy.typing as npt diff --git a/numpy/polynomial/tests/test_chebyshev.py b/numpy/polynomial/tests/test_chebyshev.py index 2cead454631c..14777ac60375 100644 --- a/numpy/polynomial/tests/test_chebyshev.py +++ b/numpy/polynomial/tests/test_chebyshev.py @@ -6,12 +6,7 @@ import numpy as np import numpy.polynomial.chebyshev as cheb from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_, - assert_almost_equal, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises def trim(x): diff --git a/numpy/polynomial/tests/test_classes.py b/numpy/polynomial/tests/test_classes.py index d10aafbda866..156dccf6ea88 100644 --- a/numpy/polynomial/tests/test_classes.py +++ b/numpy/polynomial/tests/test_classes.py @@ -18,12 +18,7 @@ Legendre, Polynomial, ) -from numpy.testing import ( - assert_, - assert_almost_equal, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises # # fixtures diff --git a/numpy/polynomial/tests/test_hermite.py b/numpy/polynomial/tests/test_hermite.py index 8bd3951f4241..a289ba0b50cc 100644 --- a/numpy/polynomial/tests/test_hermite.py +++ b/numpy/polynomial/tests/test_hermite.py @@ -6,12 +6,7 @@ import numpy as np import numpy.polynomial.hermite as herm from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_, - assert_almost_equal, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises H0 = np.array([1]) H1 = np.array([0, 2]) diff --git a/numpy/polynomial/tests/test_hermite_e.py b/numpy/polynomial/tests/test_hermite_e.py index 29f34f66380e..233dfb28254a 100644 --- a/numpy/polynomial/tests/test_hermite_e.py +++ b/numpy/polynomial/tests/test_hermite_e.py @@ -6,12 +6,7 @@ import numpy as np import numpy.polynomial.hermite_e as herme from numpy.polynomial.polynomial import polyval -from 
numpy.testing import ( - assert_, - assert_almost_equal, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises He0 = np.array([1]) He1 = np.array([0, 1]) diff --git a/numpy/polynomial/tests/test_laguerre.py b/numpy/polynomial/tests/test_laguerre.py index 6793b780416d..884f15a9fe8f 100644 --- a/numpy/polynomial/tests/test_laguerre.py +++ b/numpy/polynomial/tests/test_laguerre.py @@ -6,12 +6,7 @@ import numpy as np import numpy.polynomial.laguerre as lag from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_, - assert_almost_equal, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises L0 = np.array([1]) / 1 L1 = np.array([1, -1]) / 1 diff --git a/numpy/polynomial/tests/test_legendre.py b/numpy/polynomial/tests/test_legendre.py index d0ed7060cbe7..6c87f44ee707 100644 --- a/numpy/polynomial/tests/test_legendre.py +++ b/numpy/polynomial/tests/test_legendre.py @@ -6,12 +6,7 @@ import numpy as np import numpy.polynomial.legendre as leg from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_, - assert_almost_equal, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises L0 = np.array([1]) L1 = np.array([0, 1]) diff --git a/numpy/polynomial/tests/test_polyutils.py b/numpy/polynomial/tests/test_polyutils.py index 96e88b9de1fa..a6f5e3990b6b 100644 --- a/numpy/polynomial/tests/test_polyutils.py +++ b/numpy/polynomial/tests/test_polyutils.py @@ -3,12 +3,7 @@ """ import numpy as np import numpy.polynomial.polyutils as pu -from numpy.testing import ( - assert_, - assert_almost_equal, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises class TestMisc: diff --git a/numpy/random/tests/test_randomstate_regression.py b/numpy/random/tests/test_randomstate_regression.py index 6ccc6180657c..e71be8acd981 100644 --- a/numpy/random/tests/test_randomstate_regression.py +++ b/numpy/random/tests/test_randomstate_regression.py @@ -4,11 +4,7 @@ import numpy as np from numpy import random -from numpy.testing import ( - assert_, - assert_array_equal, - assert_raises, -) +from numpy.testing import assert_, assert_array_equal, assert_raises class TestRegression: diff --git a/numpy/random/tests/test_regression.py b/numpy/random/tests/test_regression.py index 39b7d8c719ac..de52582c2b56 100644 --- a/numpy/random/tests/test_regression.py +++ b/numpy/random/tests/test_regression.py @@ -2,11 +2,7 @@ import numpy as np from numpy import random -from numpy.testing import ( - assert_, - assert_array_equal, - assert_raises, -) +from numpy.testing import assert_, assert_array_equal, assert_raises class TestRegression: diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index 59a7539b69f1..d5584e511796 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -14,6 +14,7 @@ from typing import ( ClassVar, Final, Generic, + Literal as L, NoReturn, ParamSpec, Self, @@ -23,7 +24,6 @@ from typing import ( overload, type_check_only, ) -from typing import Literal as L from typing_extensions import TypeVar from unittest.case import SkipTest diff --git a/numpy/tests/test_reloading.py b/numpy/tests/test_reloading.py index 3e6ded326941..9787bcbbc101 100644 --- a/numpy/tests/test_reloading.py +++ b/numpy/tests/test_reloading.py @@ -7,13 +7,7 @@ import pytest import 
numpy.exceptions as ex -from numpy.testing import ( - IS_WASM, - assert_, - assert_equal, - assert_raises, - assert_warns, -) +from numpy.testing import IS_WASM, assert_, assert_equal, assert_raises, assert_warns def test_numpy_reloading(): diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py index d8ce95887bce..01b743941cf2 100644 --- a/numpy/tests/test_scripts.py +++ b/numpy/tests/test_scripts.py @@ -5,8 +5,7 @@ import os import subprocess import sys -from os.path import dirname, isfile -from os.path import join as pathjoin +from os.path import dirname, isfile, join as pathjoin import pytest diff --git a/ruff.toml b/ruff.toml index 7454c6c05e5b..61e849af1d69 100644 --- a/ruff.toml +++ b/ruff.toml @@ -130,3 +130,5 @@ builtins-allowed-modules = ["random", "typing"] # these are treated as stdlib within .pyi stubs extra-standard-library = ["_typeshed", "typing_extensions"] known-first-party = ["numpy"] +combine-as-imports = true +split-on-trailing-comma = false From 7311f0a55bdf6f363024467247bef4464df08e25 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 26 Jun 2025 03:08:15 +0200 Subject: [PATCH 0154/1018] MAINT: Fix ``I001`` ruff error on main (#29272) --- numpy/_core/__init__.pyi | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/numpy/_core/__init__.pyi b/numpy/_core/__init__.pyi index a8884917f34a..ce5427bbfcd9 100644 --- a/numpy/_core/__init__.pyi +++ b/numpy/_core/__init__.pyi @@ -65,9 +65,9 @@ from .fromnumeric import ( take, trace, transpose, + transpose as permute_dims, var, ) -from .fromnumeric import transpose as permute_dims from .function_base import geomspace, linspace, logspace from .getlimits import finfo, iinfo from .memmap import memmap @@ -91,6 +91,7 @@ from .numeric import ( broadcast, can_cast, concatenate, + concatenate as concat, convolve, copyto, correlate, @@ -145,7 +146,6 @@ from .numeric import ( zeros, zeros_like, ) -from .numeric import concatenate as concat from .numerictypes import ( ScalarType, bool, @@ -228,14 +228,22 @@ from .shape_base import ( ) from .umath import ( absolute, + absolute as abs, add, arccos, + arccos as acos, arccosh, + arccosh as acosh, arcsin, + arcsin as asin, arcsinh, + arcsinh as asinh, arctan, + arctan as atan, arctan2, + arctan2 as atan2, arctanh, + arctanh as atanh, bitwise_and, bitwise_count, bitwise_or, @@ -272,6 +280,7 @@ from .umath import ( heaviside, hypot, invert, + invert as bitwise_invert, isfinite, isinf, isnan, @@ -279,6 +288,7 @@ from .umath import ( lcm, ldexp, left_shift, + left_shift as bitwise_left_shift, less, less_equal, log, @@ -303,11 +313,13 @@ from .umath import ( pi, positive, power, + power as pow, rad2deg, radians, reciprocal, remainder, right_shift, + right_shift as bitwise_right_shift, rint, sign, signbit, @@ -323,18 +335,6 @@ from .umath import ( trunc, vecmat, ) -from .umath import absolute as abs -from .umath import arccos as acos -from .umath import arccosh as acosh -from .umath import arcsin as asin -from .umath import arcsinh as asinh -from .umath import arctan as atan -from .umath import arctan2 as atan2 -from .umath import arctanh as atanh -from .umath import invert as bitwise_invert -from .umath import left_shift as bitwise_left_shift -from .umath import power as pow -from .umath import right_shift as bitwise_right_shift __all__ = [ "False_", From 004458d21696a2867ed8142b172ada4bb4607b03 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 26 Jun 2025 05:41:55 +0200 Subject: [PATCH 0155/1018] TYP: Work around a mypy 
issue with bool arrays (#29248) --- numpy/__init__.pyi | 6 +++--- numpy/typing/tests/data/reveal/array_constructors.pyi | 2 ++ numpy/typing/tests/data/reveal/ma.pyi | 3 +-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 9fae513bdf39..d85c506ae659 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3848,14 +3848,14 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @property def imag(self) -> np.bool[L[False]]: ... - @overload - def __init__(self: np.bool[L[False]], /) -> None: ... + @overload # mypy bug workaround: https://github.com/numpy/numpy/issues/29245 + def __init__(self: np.bool[builtins.bool], value: Never, /) -> None: ... @overload def __init__(self: np.bool[L[False]], value: _Falsy = ..., /) -> None: ... @overload def __init__(self: np.bool[L[True]], value: _Truthy, /) -> None: ... @overload - def __init__(self, value: object, /) -> None: ... + def __init__(self: np.bool[builtins.bool], value: object, /) -> None: ... def __bool__(self, /) -> _BoolItemT_co: ... @overload diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 7b27d57bfe23..45cc986d33a6 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -39,6 +39,8 @@ assert_type(np.array(B, subok=True), SubClass[np.float64]) assert_type(np.array(B, subok=True, ndmin=0), SubClass[np.float64]) assert_type(np.array(B, subok=True, ndmin=1), SubClass[np.float64]) assert_type(np.array(D), npt.NDArray[np.float64 | np.int64]) +# https://github.com/numpy/numpy/issues/29245 +assert_type(np.array([], dtype=np.bool), npt.NDArray[np.bool]) assert_type(np.zeros([1, 5, 6]), npt.NDArray[np.float64]) assert_type(np.zeros([1, 5, 6], dtype=np.int64), npt.NDArray[np.int64]) diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 60ee04218489..0fa657240f0b 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -357,8 +357,7 @@ assert_type(MAR_b.swapaxes(0, 1), MaskedArray[np.bool]) assert_type(MAR_2d_f4.swapaxes(1, 0), MaskedArray[np.float32]) assert_type(np.ma.nomask, np.bool[Literal[False]]) -# https://github.com/python/mypy/issues/18974 -assert_type(np.ma.MaskType, type[np.bool]) # type: ignore[assert-type] +assert_type(np.ma.MaskType, type[np.bool]) assert_type(MAR_1d.__setmask__([True, False]), None) assert_type(MAR_1d.__setmask__(np.False_), None) From 8f5ce9cbe49a8f4191455c70adca28f61cf774cf Mon Sep 17 00:00:00 2001 From: carlosgmartin Date: Thu, 26 Jun 2025 08:25:12 -0400 Subject: [PATCH 0156/1018] ENH: Extend numpy.pad to handle pad_width dictionary argument. --- .../upcoming_changes/29273.new_feature.rst | 1 + numpy/lib/_arraypad_impl.py | 38 ++++++++++++++++++- numpy/lib/_arraypad_impl.pyi | 14 +++++-- numpy/lib/tests/test_arraypad.py | 12 ++++++ numpy/typing/tests/data/reveal/arraypad.pyi | 5 +++ 5 files changed, 65 insertions(+), 5 deletions(-) create mode 100644 doc/release/upcoming_changes/29273.new_feature.rst diff --git a/doc/release/upcoming_changes/29273.new_feature.rst b/doc/release/upcoming_changes/29273.new_feature.rst new file mode 100644 index 000000000000..3e380ca0dbe6 --- /dev/null +++ b/doc/release/upcoming_changes/29273.new_feature.rst @@ -0,0 +1 @@ +Extend ``numpy.pad`` to accept a dictionary for the ``pad_width`` argument. 
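A quick equivalence sketch (illustrative, not part of the patch; it assumes the change below is applied): the dict form is shorthand for the per-axis sequence form that ``np.pad`` already accepts, with axes missing from the dict defaulting to ``(0, 0)``.

>>> import numpy as np
>>> a = np.arange(1, 7).reshape(2, 3)
>>> np.array_equal(np.pad(a, {1: (1, 2)}), np.pad(a, [(0, 0), (1, 2)]))
True
>>> np.array_equal(np.pad(a, {-1: 2}), np.pad(a, [(0, 0), (2, 2)]))
True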
diff --git a/numpy/lib/_arraypad_impl.py b/numpy/lib/_arraypad_impl.py index 507a0ab51b52..681b92fc8a72 100644 --- a/numpy/lib/_arraypad_impl.py +++ b/numpy/lib/_arraypad_impl.py @@ -3,6 +3,8 @@ of an n-dimensional array. """ +import typing + import numpy as np from numpy._core.overrides import array_function_dispatch from numpy.lib._index_tricks_impl import ndindex @@ -550,7 +552,7 @@ def pad(array, pad_width, mode='constant', **kwargs): ---------- array : array_like of rank N The array to pad. - pad_width : {sequence, array_like, int} + pad_width : {sequence, array_like, int, dict} Number of values padded to the edges of each axis. ``((before_1, after_1), ... (before_N, after_N))`` unique pad widths for each axis. @@ -558,6 +560,9 @@ def pad(array, pad_width, mode='constant', **kwargs): and after pad for each axis. ``(pad,)`` or ``int`` is a shortcut for before = after = pad width for all axes. + If a ``dict``, each key is an axis and its corresponding value is an ``int`` or + ``int`` pair describing the padding ``(before, after)`` or ``pad`` width for + that axis. mode : str or function, optional One of the following string values or a user supplied function. @@ -745,8 +750,39 @@ def pad(array, pad_width, mode='constant', **kwargs): [100, 100, 3, 4, 5, 100, 100], [100, 100, 100, 100, 100, 100, 100], [100, 100, 100, 100, 100, 100, 100]]) + + >>> a = np.arange(1, 7).reshape(2, 3) + >>> np.pad(a, {1: (1, 2)}) + array([[0, 1, 2, 3, 0, 0], + [0, 4, 5, 6, 0, 0]]) + >>> np.pad(a, {-1: 2}) + array([[0, 0, 1, 2, 3, 0, 0], + [0, 0, 4, 5, 6, 0, 0]]) + >>> np.pad(a, {0: (3, 0)}) + array([[0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [1, 2, 3], + [4, 5, 6]]) + >>> np.pad(a, {0: (3, 0), 1: 2}) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 2, 3, 0, 0], + [0, 0, 4, 5, 6, 0, 0]]) """ array = np.asarray(array) + if isinstance(pad_width, dict): + seq = [(0, 0)] * array.ndim + for axis, width in pad_width.items(): + match width: + case int(both): + seq[axis] = both, both + case tuple((int(before), int(after))): + seq[axis] = before, after + case _ as invalid: + typing.assert_never(invalid) + pad_width = seq pad_width = np.asarray(pad_width) if not pad_width.dtype.kind == 'i': diff --git a/numpy/lib/_arraypad_impl.pyi b/numpy/lib/_arraypad_impl.pyi index 6158f299efe2..cfabdecf669e 100644 --- a/numpy/lib/_arraypad_impl.pyi +++ b/numpy/lib/_arraypad_impl.pyi @@ -43,11 +43,17 @@ _ModeKind: TypeAlias = L[ # TODO: In practice each keyword argument is exclusive to one or more # specific modes. Consider adding more overloads to express this in the future. +_PadWidth: TypeAlias = ( + _ArrayLikeInt + | dict[int, int] + | dict[int, tuple[int, int]] + | dict[int, int | tuple[int, int]] +) # Expand `**kwargs` into explicit keyword-only arguments @overload def pad( array: _ArrayLike[_ScalarT], - pad_width: _ArrayLikeInt, + pad_width: _PadWidth, mode: _ModeKind = ..., *, stat_length: _ArrayLikeInt | None = ..., @@ -58,7 +64,7 @@ def pad( @overload def pad( array: ArrayLike, - pad_width: _ArrayLikeInt, + pad_width: _PadWidth, mode: _ModeKind = ..., *, stat_length: _ArrayLikeInt | None = ..., @@ -69,14 +75,14 @@ def pad( @overload def pad( array: _ArrayLike[_ScalarT], - pad_width: _ArrayLikeInt, + pad_width: _PadWidth, mode: _ModeFunc, **kwargs: Any, ) -> NDArray[_ScalarT]: ... @overload def pad( array: ArrayLike, - pad_width: _ArrayLikeInt, + pad_width: _PadWidth, mode: _ModeFunc, **kwargs: Any, ) -> NDArray[Any]: ... 
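A note on the `match`/`typing.assert_never` idiom used in the implementation above: the wildcard arm makes the dispatch exhaustive, so a static type checker flags unhandled cases and the runtime raises on stray input instead of silently mis-padding. A standalone sketch of the pattern follows (requires Python 3.11+ for `typing.assert_never`; the function name is illustrative, not part of the patch):

    import typing

    def normalize_width(width: int | tuple[int, int]) -> tuple[int, int]:
        match width:
            case int(both):                         # bare int: pad both sides
                return both, both
            case tuple((int(before), int(after))):  # explicit (before, after)
                return before, after
            case _ as invalid:
                # Statically unreachable for the annotated input types; at
                # runtime this raises if, say, a float or a 3-tuple sneaks in.
                typing.assert_never(invalid)
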
diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py index 6efbe348ca81..14383e743e47 100644 --- a/numpy/lib/tests/test_arraypad.py +++ b/numpy/lib/tests/test_arraypad.py @@ -1413,3 +1413,15 @@ def test_dtype_persistence(dtype, mode): arr = np.zeros((3, 2, 1), dtype=dtype) result = np.pad(arr, 1, mode=mode) assert result.dtype == dtype + + +@pytest.mark.parametrize("input_shape, pad_width, expected_shape", [ + ((3, 4, 5), {-2: (1, 3)}, (3, 4 + 1 + 3, 5)), + ((3, 4, 5), {0: (5, 2)}, (3 + 5 + 2, 4, 5)), + ((3, 4, 5), {0: (5, 2), -1: (3, 4)}, (3 + 5 + 2, 4, 5 + 3 + 4)), + ((3, 4, 5), {1: 5}, (3, 4 + 2 * 5, 5)), +]) +def test_pad_dict_pad_width(input_shape, pad_width, expected_shape): + a = np.zeros(input_shape) + result = np.pad(a, pad_width) + assert result.shape == expected_shape diff --git a/numpy/typing/tests/data/reveal/arraypad.pyi b/numpy/typing/tests/data/reveal/arraypad.pyi index c5a443d93fe3..3d53d913a770 100644 --- a/numpy/typing/tests/data/reveal/arraypad.pyi +++ b/numpy/typing/tests/data/reveal/arraypad.pyi @@ -20,3 +20,8 @@ assert_type(np.pad(AR_LIKE, (2, 3), "constant"), npt.NDArray[Any]) assert_type(np.pad(AR_f8, (2, 3), mode_func), npt.NDArray[np.float64]) assert_type(np.pad(AR_f8, (2, 3), mode_func, a=1, b=2), npt.NDArray[np.float64]) + +assert_type(np.pad(AR_i8, {-1: (2, 3)}), npt.NDArray[np.int64]) +assert_type(np.pad(AR_i8, {-2: 4}), npt.NDArray[np.int64]) +pad_width: dict[int, int | tuple[int, int]] = {-1: (2, 3), -2: 4} +assert_type(np.pad(AR_i8, pad_width), npt.NDArray[np.int64]) From bd683804a22657972c3b63987ed2fcace5fbf1b8 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 26 Jun 2025 16:58:22 +0100 Subject: [PATCH 0157/1018] TYP: Add overloads for `MaskedArray.__{div,rdiv,floordiv,rfloordiv}__` (#29271) --- numpy/__init__.pyi | 4 + numpy/ma/core.pyi | 134 +++++++++++++++++++-- numpy/typing/tests/data/reveal/ma.pyi | 165 ++++++++++++++++++++++++++ 3 files changed, 296 insertions(+), 7 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index d85c506ae659..e6a104d6b269 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3091,6 +3091,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__truediv__` @overload def __truediv__(self: _ArrayInt_co | NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3122,6 +3123,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__rtruediv__` @overload def __rtruediv__(self: _ArrayInt_co | NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3151,6 +3153,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__floordiv__` @overload def __floordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... @overload @@ -3180,6 +3183,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
+ # Keep in sync with `MaskedArray.__rfloordiv__` @overload def __rfloordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... @overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 6c0de39594dd..caea284bb566 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -30,6 +30,8 @@ from numpy import ( floating, generic, inexact, + int8, + int64, int_, integer, intp, @@ -260,16 +262,18 @@ _ArrayT = TypeVar("_ArrayT", bound=ndarray[Any, Any]) _ScalarT = TypeVar("_ScalarT", bound=generic) _ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) _NumberT = TypeVar("_NumberT", bound=number) +_RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) # A subset of `MaskedArray` that can be parametrized w.r.t. `np.generic` _MaskedArray: TypeAlias = MaskedArray[_AnyShape, dtype[_ScalarT]] _MaskedArrayUInt_co: TypeAlias = _MaskedArray[unsignedinteger | np.bool] _MaskedArrayInt_co: TypeAlias = _MaskedArray[integer | np.bool] -_MaskedArrayComplex_co: TypeAlias = _MaskedArray[inexact | integer | np.bool] -_MaskedArrayTD64_co: TypeAlias = _MaskedArray[timedelta64 | integer | np.bool] _MaskedArrayFloat64_co: TypeAlias = _MaskedArray[floating[_64Bit] | float32 | float16 | integer | np.bool] -_MaskedArrayComplex128_co: TypeAlias = _MaskedArray[number[_64Bit] | number[_32Bit] | float16 | integer | np.bool] _MaskedArrayFloat_co: TypeAlias = _MaskedArray[floating | integer | np.bool] +_MaskedArrayComplex128_co: TypeAlias = _MaskedArray[number[_64Bit] | number[_32Bit] | float16 | integer | np.bool] +_MaskedArrayComplex_co: TypeAlias = _MaskedArray[inexact | integer | np.bool] +_MaskedArrayNumber_co: TypeAlias = _MaskedArray[number | np.bool] +_MaskedArrayTD64_co: TypeAlias = _MaskedArray[timedelta64 | integer | np.bool] _Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] @@ -742,10 +746,126 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __rmul__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - def __truediv__(self, other): ... - def __rtruediv__(self, other): ... - def __floordiv__(self, other): ... - def __rfloordiv__(self, other): ... + # Keep in sync with `ndarray.__truediv__` + @overload + def __truediv__(self: _MaskedArrayInt_co | _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __truediv__(self: _MaskedArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __truediv__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __truediv__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __truediv__(self: _MaskedArray[floating], other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __truediv__(self: _MaskedArrayFloat_co, other: _ArrayLike[floating], /) -> _MaskedArray[floating]: ... + @overload + def __truediv__(self: _MaskedArray[complexfloating], other: _ArrayLikeNumber_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __truediv__(self: _MaskedArrayNumber_co, other: _ArrayLike[complexfloating], /) -> _MaskedArray[complexfloating]: ... + @overload + def __truediv__(self: _MaskedArray[inexact], other: _ArrayLikeNumber_co, /) -> _MaskedArray[inexact]: ... + @overload + def __truediv__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... 
+ @overload + def __truediv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[float64]: ... + @overload + def __truediv__(self: _MaskedArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __truediv__(self: _MaskedArray[timedelta64], other: _ArrayLikeFloat_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __truediv__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __truediv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rtruediv__` + @overload + def __rtruediv__(self: _MaskedArrayInt_co | _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rtruediv__(self: _MaskedArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rtruediv__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __rtruediv__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __rtruediv__(self: _MaskedArray[floating], other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __rtruediv__(self: _MaskedArrayFloat_co, other: _ArrayLike[floating], /) -> _MaskedArray[floating]: ... + @overload + def __rtruediv__(self: _MaskedArray[complexfloating], other: _ArrayLikeNumber_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __rtruediv__(self: _MaskedArrayNumber_co, other: _ArrayLike[complexfloating], /) -> _MaskedArray[complexfloating]: ... + @overload + def __rtruediv__(self: _MaskedArray[inexact], other: _ArrayLikeNumber_co, /) -> _MaskedArray[inexact]: ... + @overload + def __rtruediv__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __rtruediv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[float64]: ... + @overload + def __rtruediv__(self: _MaskedArray[integer | floating], other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... + @overload + def __rtruediv__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rtruediv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__floordiv__` + @overload + def __floordiv__(self: _MaskedArray[_RealNumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_RealNumberT]]: ... + @overload + def __floordiv__(self: _MaskedArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _MaskedArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __floordiv__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __floordiv__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... 
# type: ignore[overload-overlap] + @overload + def __floordiv__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __floordiv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[int64]: ... + @overload + def __floordiv__(self: _MaskedArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __floordiv__(self: _MaskedArray[timedelta64], other: _ArrayLikeFloat_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __floordiv__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __floordiv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rfloordiv__` + @overload + def __rfloordiv__(self: _MaskedArray[_RealNumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_RealNumberT]]: ... + @overload + def __rfloordiv__(self: _MaskedArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rfloordiv__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rfloordiv__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[int64]: ... + @overload + def __rfloordiv__(self: _MaskedArray[floating | integer], other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... + @overload + def __rfloordiv__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rfloordiv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __pow__(self, other, mod: None = None, /): ... def __rpow__(self, other, mod: None = None, /): ... 
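The reveal-test changes that follow exercise these overloads. For context, reveal tests are checked statically: `assert_type` tells mypy/pyright to verify the exact inferred type and does nothing at runtime. A minimal sketch of the pattern with a toy function (illustrative, not NumPy API; `typing.assert_type` needs Python 3.11+ or `typing_extensions`):

    from typing import assert_type

    def halve(x: int) -> float:
        return x / 2

    # A type checker errors if the inferred type is not exactly `float`;
    # at runtime assert_type simply returns its first argument.
    assert_type(halve(4), float)
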
diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 0fa657240f0b..a101ba0d48b0 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -50,6 +50,7 @@ MAR_2d_f4: np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]] b: np.bool f4: np.float32 f: float +i: int assert_type(MAR_1d.shape, tuple[int]) @@ -719,3 +720,167 @@ assert_type(AR_LIKE_c * MAR_o, Any) # type: ignore[assert-type] assert_type(AR_LIKE_td64 * MAR_o, Any) # type: ignore[assert-type] assert_type(AR_LIKE_dt64 * MAR_o, Any) # type: ignore[assert-type] assert_type(AR_LIKE_o * MAR_o, Any) # type: ignore[assert-type] + +# MaskedArray "true" division + +assert_type(MAR_f8 / b, MaskedArray[np.float64]) +assert_type(MAR_f8 / i, MaskedArray[np.float64]) +assert_type(MAR_f8 / f, MaskedArray[np.float64]) + +assert_type(b / MAR_f8, MaskedArray[np.float64]) +assert_type(i / MAR_f8, MaskedArray[np.float64]) +assert_type(f / MAR_f8, MaskedArray[np.float64]) + +assert_type(MAR_b / AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_b / AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_b / AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_b / AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_b / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_b, MaskedArray[np.float64]) +assert_type(AR_LIKE_u / MAR_b, MaskedArray[np.float64]) +assert_type(AR_LIKE_i / MAR_b, MaskedArray[np.float64]) +assert_type(AR_LIKE_f / MAR_b, MaskedArray[np.float64]) +assert_type(AR_LIKE_o / MAR_b, Any) + +assert_type(MAR_u4 / AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_u4 / AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_u4 / AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_u4 / AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_u4 / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_u4, MaskedArray[np.float64]) +assert_type(AR_LIKE_u / MAR_u4, MaskedArray[np.float64]) +assert_type(AR_LIKE_i / MAR_u4, MaskedArray[np.float64]) +assert_type(AR_LIKE_f / MAR_u4, MaskedArray[np.float64]) +assert_type(AR_LIKE_td64 / MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o / MAR_u4, Any) + +assert_type(MAR_i8 / AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_i8 / AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_i8 / AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_i8 / AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_i8 / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_i8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u / MAR_i8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i / MAR_i8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f / MAR_i8, MaskedArray[np.float64]) +assert_type(AR_LIKE_td64 / MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o / MAR_i8, Any) + +assert_type(MAR_f8 / AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 / AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 / AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 / AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u / MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i / MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f / MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_td64 / MAR_f8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o / MAR_f8, Any) + +assert_type(MAR_td64 / AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 / AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 / AR_LIKE_f, 
MaskedArray[np.timedelta64]) +assert_type(MAR_td64 / AR_LIKE_td64, MaskedArray[np.float64]) +assert_type(MAR_td64 / AR_LIKE_o, Any) + +assert_type(AR_LIKE_td64 / MAR_td64, MaskedArray[np.float64]) +assert_type(AR_LIKE_o / MAR_td64, Any) + +assert_type(MAR_o / AR_LIKE_b, Any) +assert_type(MAR_o / AR_LIKE_u, Any) +assert_type(MAR_o / AR_LIKE_i, Any) +assert_type(MAR_o / AR_LIKE_f, Any) +assert_type(MAR_o / AR_LIKE_td64, Any) +assert_type(MAR_o / AR_LIKE_dt64, Any) +assert_type(MAR_o / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_o, Any) +assert_type(AR_LIKE_u / MAR_o, Any) +assert_type(AR_LIKE_i / MAR_o, Any) +assert_type(AR_LIKE_f / MAR_o, Any) +assert_type(AR_LIKE_td64 / MAR_o, Any) +assert_type(AR_LIKE_dt64 / MAR_o, Any) +assert_type(AR_LIKE_o / MAR_o, Any) + +# MaskedArray floor division + +assert_type(MAR_b // AR_LIKE_b, MaskedArray[np.int8]) +assert_type(MAR_b // AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b // AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b // AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_b, MaskedArray[np.int8]) +assert_type(AR_LIKE_u // MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i // MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f // MAR_b, MaskedArray[np.floating]) +assert_type(AR_LIKE_o // MAR_b, Any) + +assert_type(MAR_u4 // AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 // AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 // AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 // AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u // MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i // MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f // MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_td64 // MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o // MAR_u4, Any) + +assert_type(MAR_i8 // AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 // AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 // AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 // AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_i8, MaskedArray[np.int64]) +assert_type(AR_LIKE_u // MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i // MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f // MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_td64 // MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o // MAR_i8, Any) + +assert_type(MAR_f8 // AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 // AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 // AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 // AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u // MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i // MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f // MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_td64 // MAR_f8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o // MAR_f8, Any) + +assert_type(MAR_td64 // AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 // AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 // AR_LIKE_f, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 // AR_LIKE_td64, MaskedArray[np.int64]) +assert_type(MAR_td64 // 
AR_LIKE_o, Any) + +assert_type(AR_LIKE_td64 // MAR_td64, MaskedArray[np.int64]) +assert_type(AR_LIKE_o // MAR_td64, Any) + +assert_type(MAR_o // AR_LIKE_b, Any) +assert_type(MAR_o // AR_LIKE_u, Any) +assert_type(MAR_o // AR_LIKE_i, Any) +assert_type(MAR_o // AR_LIKE_f, Any) +assert_type(MAR_o // AR_LIKE_td64, Any) +assert_type(MAR_o // AR_LIKE_dt64, Any) +assert_type(MAR_o // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_o, Any) +assert_type(AR_LIKE_u // MAR_o, Any) +assert_type(AR_LIKE_i // MAR_o, Any) +assert_type(AR_LIKE_f // MAR_o, Any) +assert_type(AR_LIKE_td64 // MAR_o, Any) +assert_type(AR_LIKE_dt64 // MAR_o, Any) +assert_type(AR_LIKE_o // MAR_o, Any) From 6eeaae25ddfacf586ee02ddc600618127fa79cfd Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 26 Jun 2025 19:42:09 +0100 Subject: [PATCH 0158/1018] TYP: fix overloads where `out: _ArrayT` was typed as being the default (#29278) --- numpy/__init__.pyi | 34 +++++++++- numpy/_core/multiarray.pyi | 62 +++++++++++++++++-- .../tests/data/reveal/array_constructors.pyi | 2 +- 3 files changed, 88 insertions(+), 10 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index e6a104d6b269..d12e63a8bed9 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2401,7 +2401,17 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., dtype: DTypeLike = ..., - out: _ArrayT = ..., + *, + out: _ArrayT, + ) -> _ArrayT: ... + @overload + def trace( + self, # >= 2D array + offset: SupportsIndex, + axis1: SupportsIndex, + axis2: SupportsIndex, + dtype: DTypeLike, + out: _ArrayT, ) -> _ArrayT: ... @overload @@ -2425,7 +2435,16 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): self, indices: _ArrayLikeInt_co, axis: SupportsIndex | None = ..., - out: _ArrayT = ..., + *, + out: _ArrayT, + mode: _ModeKind = ..., + ) -> _ArrayT: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None, + out: _ArrayT, mode: _ModeKind = ..., ) -> _ArrayT: ... @@ -3655,7 +3674,16 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): self, indices: _ArrayLikeInt_co, axis: SupportsIndex | None = ..., - out: _ArrayT = ..., + *, + out: _ArrayT, + mode: _ModeKind = ..., + ) -> _ArrayT: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None, + out: _ArrayT, mode: _ModeKind = ..., ) -> _ArrayT: ... diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index aa9e796da10b..560822d68466 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -528,7 +528,6 @@ def concatenate( # type: ignore[misc] casting: _CastingKind | None = ... ) -> NDArray[_ScalarT]: ... @overload -@overload def concatenate( # type: ignore[misc] arrays: SupportsLenAndGetItem[ArrayLike], /, @@ -553,7 +552,17 @@ def concatenate( arrays: SupportsLenAndGetItem[ArrayLike], /, axis: SupportsIndex | None = ..., - out: _ArrayT = ..., + *, + out: _ArrayT, + dtype: DTypeLike = ..., + casting: _CastingKind | None = ... +) -> _ArrayT: ... +@overload +def concatenate( + arrays: SupportsLenAndGetItem[ArrayLike], + /, + axis: SupportsIndex | None, + out: _ArrayT, *, dtype: DTypeLike = ..., casting: _CastingKind | None = ... 
@@ -1094,7 +1103,17 @@ def busday_count( weekmask: ArrayLike = ..., holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., busdaycal: busdaycalendar | None = ..., - out: _ArrayT = ..., + *, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def busday_count( + begindates: ArrayLike | dt.date | _NestedSequence[dt.date], + enddates: ArrayLike | dt.date | _NestedSequence[dt.date], + weekmask: ArrayLike, + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None, + busdaycal: busdaycalendar | None, + out: _ArrayT, ) -> _ArrayT: ... # `roll="raise"` is (more or less?) equivalent to `casting="safe"` @@ -1126,7 +1145,18 @@ def busday_offset( # type: ignore[misc] weekmask: ArrayLike = ..., holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., busdaycal: busdaycalendar | None = ..., - out: _ArrayT = ..., + *, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def busday_offset( # type: ignore[misc] + dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date], + offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta], + roll: L["raise"], + weekmask: ArrayLike, + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None, + busdaycal: busdaycalendar | None, + out: _ArrayT, ) -> _ArrayT: ... @overload def busday_offset( # type: ignore[misc] @@ -1156,7 +1186,18 @@ def busday_offset( weekmask: ArrayLike = ..., holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., busdaycal: busdaycalendar | None = ..., - out: _ArrayT = ..., + *, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def busday_offset( + dates: ArrayLike | dt.date | _NestedSequence[dt.date], + offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta], + roll: _RollKind, + weekmask: ArrayLike, + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None, + busdaycal: busdaycalendar | None, + out: _ArrayT, ) -> _ArrayT: ... @overload @@ -1181,7 +1222,16 @@ def is_busday( weekmask: ArrayLike = ..., holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., busdaycal: busdaycalendar | None = ..., - out: _ArrayT = ..., + *, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def is_busday( + dates: ArrayLike | _NestedSequence[dt.date], + weekmask: ArrayLike, + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None, + busdaycal: busdaycalendar | None, + out: _ArrayT, ) -> _ArrayT: ... 
 @overload
diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi
index 45cc986d33a6..766547e54b60 100644
--- a/numpy/typing/tests/data/reveal/array_constructors.pyi
+++ b/numpy/typing/tests/data/reveal/array_constructors.pyi
@@ -53,7 +53,7 @@ assert_type(np.empty([1, 5, 6], dtype='c16'), npt.NDArray[Any])
 assert_type(np.empty(mixed_shape), npt.NDArray[np.float64])
 
 assert_type(np.concatenate(A), npt.NDArray[np.float64])
-assert_type(np.concatenate([A, A]), Any)  # pyright correctly infers this as NDArray[float64]
+assert_type(np.concatenate([A, A]), npt.NDArray[Any])  # pyright correctly infers this as NDArray[float64]
 assert_type(np.concatenate([[1], A]), npt.NDArray[Any])
 assert_type(np.concatenate([[1], [1]]), npt.NDArray[Any])
 assert_type(np.concatenate((A, A)), npt.NDArray[np.float64])

From d4054614bdbd5593f249e1cff90dc132ea5b0986 Mon Sep 17 00:00:00 2001
From: Dan Raviv
Date: Fri, 25 Apr 2025 12:44:47 -0700
Subject: [PATCH 0159/1018] DOC: Add flat examples to argmax and argmin

---
 numpy/_core/fromnumeric.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py
index 34fe1798f45e..8103a8733265 100644
--- a/numpy/_core/fromnumeric.py
+++ b/numpy/_core/fromnumeric.py
@@ -1303,6 +1303,8 @@ def argmax(a, axis=None, out=None, *, keepdims=np._NoValue):
 
     Indexes of the maximal elements of a N-dimensional array:
 
+    >>> a.flat[np.argmax(a)]
+    15
     >>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape)
     >>> ind
     (1, 2)
@@ -1401,6 +1403,8 @@ def argmin(a, axis=None, out=None, *, keepdims=np._NoValue):
 
     Indices of the minimum elements of a N-dimensional array:
 
+    >>> a.flat[np.argmin(a)]
+    10
     >>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape)
     >>> ind
     (0, 0)

From 202df739922a994227771c31371669f50b5b1e27 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 27 Jun 2025 11:36:33 -0600
Subject: [PATCH 0160/1018] MAINT: Bump github/codeql-action from 3.29.0 to 3.29.1 (#29285)

Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.0 to 3.29.1.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](https://github.com/github/codeql-action/compare/ce28f5bb42b7a9f2c824e633a3f6ee835bab6858...39edc492dbe16b1465b0cafca41432d857bdb31a)

---
updated-dependencies:
- dependency-name: github/codeql-action
  dependency-version: 3.29.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 .github/workflows/codeql.yml     | 6 +++---
 .github/workflows/scorecards.yml | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index 0342fd92c924..1b8f958b5a21 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -47,7 +47,7 @@ jobs:
 
     # Initializes the CodeQL tools for scanning.
     - name: Initialize CodeQL
-      uses: github/codeql-action/init@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0
+      uses: github/codeql-action/init@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1
      with:
        languages: ${{ matrix.language }}
        # If you wish to specify custom queries, you can do so here or in a config file.
@@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 + uses: github/codeql-action/autobuild@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 + uses: github/codeql-action/analyze@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index e64789006e2c..c23baa2f2b42 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v2.1.27 + uses: github/codeql-action/upload-sarif@39edc492dbe16b1465b0cafca41432d857bdb31a # v2.1.27 with: sarif_file: results.sarif From 0f18e7c42e4136cdb1eaa1e1c1eff331f2019950 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 27 Jun 2025 14:38:45 -0600 Subject: [PATCH 0161/1018] BUG: handle case in mapiter where descriptors might get replaced (#29286) Need to use the actual dtype, not the one passed to the array creation (because it can get replaced). Fixes #29279. --- numpy/_core/src/multiarray/mapping.c | 2 ++ numpy/_core/tests/test_stringdtype.py | 12 ++++++++++++ 2 files changed, 14 insertions(+) diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index 7953e32fcbf0..2ce7dcdb234a 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -3013,6 +3013,8 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type, if (extra_op == NULL) { goto fail; } + // extra_op_dtype might have been replaced, so get a new reference + extra_op_dtype = PyArray_DESCR(extra_op); } /* diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 8197822cd0e3..52a225619ccf 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -517,6 +517,18 @@ def test_fancy_indexing(string_list): assert_array_equal(a, b) assert a[0] == 'd' * 25 + # see gh-29279 + data = [ + ["AAAAAAAAAAAAAAAAA"], + ["BBBBBBBBBBBBBBBBBBBBBBBBBBBBB"], + ["CCCCCCCCCCCCCCCCC"], + ["DDDDDDDDDDDDDDDDD"], + ] + sarr = np.array(data, dtype=np.dtypes.StringDType()) + uarr = np.array(data, dtype="U30") + for ind in [[0], [1], [2], [3], [[0, 0]], [[1, 1, 3]], [[1, 1]]]: + assert_array_equal(sarr[ind], uarr[ind]) + def test_creation_functions(): assert_array_equal(np.zeros(3, dtype="T"), ["", "", ""]) From fb7135f9a512d6e35544aa331a0e38f3d60863ac Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 28 Jun 2025 07:58:33 -0600 Subject: [PATCH 0162/1018] BUG: Fix macro redefinition (#29289) Also update .clang-format to indent preprocessor conditional blocks. 
--- .clang-format | 1 + numpy/_core/src/multiarray/alloc.c | 13 ++++++++----- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/.clang-format b/.clang-format index 034478ae2466..7e94a6fdb47c 100644 --- a/.clang-format +++ b/.clang-format @@ -27,6 +27,7 @@ IncludeCategories: Priority: 1 - Regex: '^<[[:alnum:]_.]+"' Priority: 2 +IndentPPDirectives: AfterHash Language: Cpp PointerAlignment: Right ReflowComments: true diff --git a/numpy/_core/src/multiarray/alloc.c b/numpy/_core/src/multiarray/alloc.c index cc9c5762a196..8061feed24e5 100644 --- a/numpy/_core/src/multiarray/alloc.c +++ b/numpy/_core/src/multiarray/alloc.c @@ -30,14 +30,17 @@ /* Do not enable the alloc cache if the GIL is disabled, or if ASAN or MSAN * instrumentation is enabled. The cache makes ASAN use-after-free or MSAN * use-of-uninitialized-memory warnings less useful. */ -#define USE_ALLOC_CACHE 1 #ifdef Py_GIL_DISABLED -# define USE_ALLOC_CACHE 0 +# define USE_ALLOC_CACHE 0 #elif defined(__has_feature) -# if __has_feature(address_sanitizer) || __has_feature(memory_sanitizer) -# define USE_ALLOC_CACHE 0 -# endif +# if __has_feature(address_sanitizer) || __has_feature(memory_sanitizer) +# define USE_ALLOC_CACHE 0 +# endif #endif +#ifndef USE_ALLOC_CACHE +# define USE_ALLOC_CACHE 1 +#endif + # define NBUCKETS 1024 /* number of buckets for data*/ # define NBUCKETS_DIM 16 /* number of buckets for dimensions/strides */ From 2bb4005d189667b71a6088db358f8848802f36e9 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Sat, 28 Jun 2025 17:49:51 +0300 Subject: [PATCH 0163/1018] DOC: avoid searching some directories for doxygen-commented source code (#29275) * DOC: avoid searching some directories for doxygen-commented source code * lint [skip azp][skip actions][skip cirrus] * use known paths [skip azp][skip actions][skip cirrus] --- doc/preprocess.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/doc/preprocess.py b/doc/preprocess.py index b2e64ab6393a..bc43e89764f8 100755 --- a/doc/preprocess.py +++ b/doc/preprocess.py @@ -4,7 +4,7 @@ def main(): - doxy_gen(os.path.abspath(os.path.join('..'))) + doxy_gen(os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) def doxy_gen(root_path): """ @@ -24,6 +24,7 @@ def doxy_gen(root_path): class DoxyTpl(Template): delimiter = '@' + def doxy_config(root_path): """ Fetch all Doxygen sub-config files and gather it with the main config file. 
@@ -35,13 +36,14 @@ def doxy_config(root_path):
         conf = DoxyTpl(fd.read())
     confs.append(conf.substitute(CUR_DIR=dsrc_path, **sub))
 
-    for dpath, _, files in os.walk(root_path):
-        if ".doxyfile" not in files:
-            continue
-        conf_path = os.path.join(dpath, ".doxyfile")
-        with open(conf_path) as fd:
-            conf = DoxyTpl(fd.read())
-        confs.append(conf.substitute(CUR_DIR=dpath, **sub))
+    for subdir in ["doc", "numpy"]:
+        for dpath, _, files in os.walk(os.path.join(root_path, subdir)):
+            if ".doxyfile" not in files:
+                continue
+            conf_path = os.path.join(dpath, ".doxyfile")
+            with open(conf_path) as fd:
+                conf = DoxyTpl(fd.read())
+            confs.append(conf.substitute(CUR_DIR=dpath, **sub))
 
     return confs

From 907b73ce9befb95deb5d0c2097bf3f307fd64cac Mon Sep 17 00:00:00 2001
From: Pieter Eendebak
Date: Sun, 29 Jun 2025 21:22:53 +0200
Subject: [PATCH 0164/1018] MAINT: Enforce ruff E501 rule

---
 numpy/_core/tests/test_multithreading.py | 13 ++++++++-----
 ruff.toml                                |  1 -
 2 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py
index 09f907561ae5..5f8a2f11ea1f 100644
--- a/numpy/_core/tests/test_multithreading.py
+++ b/numpy/_core/tests/test_multithreading.py
@@ -272,12 +272,15 @@ def closure(b):
 
 def test_nonzero(dtype):
     # See: gh-28361
     #
-    # np.nonzero uses np.count_nonzero to determine the size of the output array
-    # In a second pass the indices of the non-zero elements are determined, but they can have changed
+    # np.nonzero uses np.count_nonzero to determine the size of the output
+    # array. In a second pass the indices of the non-zero elements are
+    # determined, but they can have changed
     #
-    # This test triggers a data race which is suppressed in the TSAN CI. The test is to ensure
-    # np.nonzero does not generate a segmentation fault
+    # This test triggers a data race which is suppressed in the TSAN CI.
+ # The test is to ensure np.nonzero does not generate a segmentation fault x = np.random.randint(4, size=100).astype(dtype) + expected_warning = ('number of non-zero array elements changed' + ' during function execution') def func(index): for _ in range(10): @@ -287,6 +290,6 @@ def func(index): try: _ = np.nonzero(x) except RuntimeError as ex: - assert 'number of non-zero array elements changed during function execution' in str(ex) + assert expected_warning in str(ex) run_threaded(func, max_workers=10, pass_count=True, outer_iterations=5) diff --git a/ruff.toml b/ruff.toml index 8db6df67377a..323f8cc5b3b9 100644 --- a/ruff.toml +++ b/ruff.toml @@ -81,7 +81,6 @@ ignore = [ "numpy/_core/tests/test_defchararray.py" = ["E501"] "numpy/_core/tests/test_einsum.py" = ["E501"] "numpy/_core/tests/test_multiarray.py" = ["E501"] -"numpy/_core/tests/test_multithreading.py" = ["E501"] "numpy/_core/tests/test_nditer*py" = ["E501"] "numpy/_core/tests/test_ufunc*py" = ["E501"] "numpy/_core/tests/test_umath*py" = ["E501"] From 6aeb7c6e5d7a40bc49ed2cb5980d5a706583bc26 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Sun, 29 Jun 2025 22:50:06 +0200 Subject: [PATCH 0165/1018] More linting --- numpy/_core/tests/test_datetime.py | 6 ++-- numpy/_core/tests/test_simd_module.py | 6 ++-- numpy/_core/tests/test_strings.py | 42 ++++++++++++++++++--------- ruff.toml | 4 +-- 4 files changed, 37 insertions(+), 21 deletions(-) diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index 81ac9778971b..7ccb080a75c1 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -874,9 +874,11 @@ def test_pickle(self): def test_setstate(self): "Verify that datetime dtype __setstate__ can handle bad arguments" dt = np.dtype('>M8[us]') - assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1)) + assert_raises(ValueError, dt.__setstate__, + (4, '>', None, None, None, -1, -1, 0, 1)) assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) - assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx'))) + assert_raises(TypeError, dt.__setstate__, + (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx'))) assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) def test_dtype_promotion(self): diff --git a/numpy/_core/tests/test_simd_module.py b/numpy/_core/tests/test_simd_module.py index dca83fd427b6..3de1596aa10a 100644 --- a/numpy/_core/tests/test_simd_module.py +++ b/numpy/_core/tests/test_simd_module.py @@ -23,7 +23,8 @@ int_sfx = unsigned_sfx + signed_sfx all_sfx = unsigned_sfx + int_sfx -@pytest.mark.skipif(not npyv, reason="could not find any SIMD extension with NPYV support") +@pytest.mark.skipif(not npyv, + reason="could not find any SIMD extension with NPYV support") class Test_SIMD_MODULE: @pytest.mark.parametrize('sfx', all_sfx) @@ -47,7 +48,8 @@ def test_raises(self): pytest.raises(TypeError, vcb("setall"), [1]) pytest.raises(TypeError, vcb("load"), 1) pytest.raises(ValueError, vcb("load"), [1]) - pytest.raises(ValueError, vcb("store"), [1], getattr(npyv, f"reinterpret_{sfx}_u32")(a)) + value = getattr(npyv, f"reinterpret_{sfx}_u32")(a) + pytest.raises(ValueError, vcb("store"), [1], value) @pytest.mark.skipif(not npyv2, reason=( "could not find a second SIMD extension with NPYV support" diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index 56e928df4d7b..1b77a535eee6 100644 --- a/numpy/_core/tests/test_strings.py +++ 
b/numpy/_core/tests/test_strings.py @@ -989,22 +989,36 @@ def test_slice_unsupported(self, dt): with pytest.raises(TypeError, match="did not contain a loop"): np.strings.slice(np.array([1, 2, 3]), 4) - with pytest.raises(TypeError, match=r"Cannot cast ufunc '_slice' input .* from .* to dtype\('int(64|32)'\)"): - np.strings.slice(np.array(['foo', 'bar'], dtype=dt), np.array(['foo', 'bar'], dtype=dt)) - - @pytest.mark.parametrize("int_dt", [np.int8, np.int16, np.int32, np.int64, - np.uint8, np.uint16, np.uint32, np.uint64]) + regexp = (r"Cannot cast ufunc '_slice' input .* " + r"from .* to dtype\('int(64|32)'\)") + with pytest.raises(TypeError, match=regexp): + np.strings.slice(np.array(['foo', 'bar'], dtype=dt), + np.array(['foo', 'bar'], dtype=dt)) + + @pytest.mark.parametrize("int_dt", [np.int8, np.int16, np.int32, + np.int64, np.uint8, np.uint16, + np.uint32, np.uint64]) def test_slice_int_type_promotion(self, int_dt, dt): buf = np.array(["hello", "world"], dtype=dt) - - assert_array_equal(np.strings.slice(buf, int_dt(4)), np.array(["hell", "worl"], dtype=dt)) - assert_array_equal(np.strings.slice(buf, np.array([4, 4], dtype=int_dt)), np.array(["hell", "worl"], dtype=dt)) - - assert_array_equal(np.strings.slice(buf, int_dt(2), int_dt(4)), np.array(["ll", "rl"], dtype=dt)) - assert_array_equal(np.strings.slice(buf, np.array([2, 2], dtype=int_dt), np.array([4, 4], dtype=int_dt)), np.array(["ll", "rl"], dtype=dt)) - - assert_array_equal(np.strings.slice(buf, int_dt(0), int_dt(4), int_dt(2)), np.array(["hl", "wr"], dtype=dt)) - assert_array_equal(np.strings.slice(buf, np.array([0, 0], dtype=int_dt), np.array([4, 4], dtype=int_dt), np.array([2, 2], dtype=int_dt)), np.array(["hl", "wr"], dtype=dt)) + np_slice = np.strings.slice + assert_array_equal(np_slice(buf, int_dt(4)), + np.array(["hell", "worl"], dtype=dt)) + assert_array_equal(np_slice(buf, np.array([4, 4], dtype=int_dt)), + np.array(["hell", "worl"], dtype=dt)) + + assert_array_equal(np_slice(buf, int_dt(2), int_dt(4)), + np.array(["ll", "rl"], dtype=dt)) + assert_array_equal(np_slice(buf, np.array([2, 2], dtype=int_dt), + np.array([4, 4], dtype=int_dt)), + np.array(["ll", "rl"], dtype=dt)) + + assert_array_equal(np_slice(buf, int_dt(0), int_dt(4), int_dt(2)), + np.array(["hl", "wr"], dtype=dt)) + assert_array_equal(np_slice(buf, + np.array([0, 0], dtype=int_dt), + np.array([4, 4], dtype=int_dt), + np.array([2, 2], dtype=int_dt)), + np.array(["hl", "wr"], dtype=dt)) @pytest.mark.parametrize("dt", ["U", "T"]) class TestMethodsWithUnicode: diff --git a/ruff.toml b/ruff.toml index 323f8cc5b3b9..9916097420b6 100644 --- a/ruff.toml +++ b/ruff.toml @@ -76,7 +76,6 @@ ignore = [ "numpy/_core/tests/test_arrayprint.py" = ["E501"] "numpy/_core/tests/test_cpu_dispatcher.py" = ["E501"] "numpy/_core/tests/test_cpu_features.py" = ["E501"] -"numpy/_core/tests/test_datetime.py" = ["E501"] "numpy/_core/tests/test_dtype.py" = ["E501"] "numpy/_core/tests/test_defchararray.py" = ["E501"] "numpy/_core/tests/test_einsum.py" = ["E501"] @@ -86,8 +85,7 @@ ignore = [ "numpy/_core/tests/test_umath*py" = ["E501"] "numpy/_core/tests/test_numeric*.py" = ["E501"] "numpy/_core/tests/test_regression.py" = ["E501"] -"numpy/_core/tests/test_simd*.py" = ["E501"] -"numpy/_core/tests/test_strings.py" = ["E501"] +"numpy/_core/tests/test_simd.py" = ["E501"] "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] "numpy/_core/code_generators/generate_umath.py" = ["E501"] From 7bdf6b4b9ed4976c0e6df512d95f09ea87aaa7f0 Mon Sep 17 00:00:00 2001 
From: Charles Harris Date: Sun, 29 Jun 2025 14:52:03 -0600 Subject: [PATCH 0166/1018] BUG: Fix version check in blas_utils.c Co-Authored-By: Daniel Bertalan --- numpy/_core/src/common/blas_utils.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/common/blas_utils.c b/numpy/_core/src/common/blas_utils.c index aaf976ed70e4..155963891071 100644 --- a/numpy/_core/src/common/blas_utils.c +++ b/numpy/_core/src/common/blas_utils.c @@ -43,7 +43,7 @@ is_macOS_version_15_4_or_later(void){ goto cleanup; } - if(major >= 15 && minor >= 4){ + if (major > 15 || (major == 15 && minor >= 4)) { ret = true; } From c96fcc8f7a5030098f8e0c304ce20864c3509021 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Jun 2025 21:07:46 -0600 Subject: [PATCH 0167/1018] MAINT: Bump github/codeql-action from 3.29.1 to 3.29.2 (#29296) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.1 to 3.29.2. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/39edc492dbe16b1465b0cafca41432d857bdb31a...181d5eefc20863364f96762470ba6f862bdef56b) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 1b8f958b5a21..cbee65f0b713 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 + uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 + uses: github/codeql-action/autobuild@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 + uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index c23baa2f2b42..aa3bf1aec7cc 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
    - name: "Upload to code-scanning"
-      uses: github/codeql-action/upload-sarif@39edc492dbe16b1465b0cafca41432d857bdb31a # v2.1.27
+      uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # v2.1.27
       with:
         sarif_file: results.sarif

From c0c972ddf58b891d6b9851348413a29fd8e45624 Mon Sep 17 00:00:00 2001
From: Marco Gorelli <33491632+MarcoGorelli@users.noreply.github.com>
Date: Tue, 1 Jul 2025 11:40:06 +0100
Subject: [PATCH 0168/1018] DOCS: Remove incorrect "Returns" section from `MaskedArray.sort`

---
 numpy/ma/core.py | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 5e231fbbadb7..2860bec848aa 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -5812,11 +5812,6 @@ def sort(self, axis=-1, kind=None, order=None, endwith=True,
         stable : bool, optional
             Only for compatibility with ``np.sort``. Ignored.
 
-        Returns
-        -------
-        sorted_array : ndarray
-            Array of the same type and shape as `a`.
-
         See Also
         --------
         numpy.ndarray.sort : Method to sort an array in-place.

From 82ae5ab13162e7c1e0c49cad6106d0d8cfa90fd1 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Tue, 1 Jul 2025 12:24:22 +0200
Subject: [PATCH 0169/1018] DEP: Give a visible warning when `align=` to dtype is a non-bool

This seems generally confusing. I would like to make it keyword-only, but
this already gives a warning when loading _very_ old pickles, so I am not
quite sure we should move beyond a warning quickly.

We should fix things around pickling and start pickling in a way that makes
it easier to move to keyword-only arguments. (I suppose one could detect the
case of `np.dtype(obj, False, True)` and assume it comes from unpickling.)

I am assuming that it is OK to (eventually) break unpickling these 10+ year
old files, but I am not in a rush to actually do so and go through with the
deprecation.

Signed-off-by: Sebastian Berg
---
 .../upcoming_changes/29301.deprecation.rst    |  7 ++++
 numpy/_core/src/common/npy_import.h           |  1 +
 numpy/_core/src/multiarray/conversion_utils.c | 20 +++------
 numpy/_core/src/multiarray/descriptor.c       | 41 +++++++++++++------
 numpy/_core/tests/test_datetime.py            | 26 ++++++------
 numpy/_core/tests/test_deprecations.py        | 20 +++++++++
 numpy/_core/tests/test_multiarray.py          | 14 +++++++
 numpy/_core/tests/test_regression.py          | 12 +++++-
 numpy/lib/tests/test_format.py                | 17 ++++----
 9 files changed, 111 insertions(+), 47 deletions(-)
 create mode 100644 doc/release/upcoming_changes/29301.deprecation.rst
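To make the confusion this deprecation targets concrete, here is a small sketch of the cases involved (behaviour as described by the release note and tests in the diffs below; comments abbreviate the results):

    import numpy as np

    # Probably intended: a subarray dtype of three float64 values.
    np.dtype(("f8", 3))        # dtype(('<f8', (3,)))

    # Easy mistake: the second positional argument is `align`, where a
    # non-bool has no effect on a simple dtype. This now raises a
    # VisibleDeprecationWarning suggesting the tuple form instead.
    np.dtype("f8", 3)

    # Booleans, preferably passed by keyword, remain silent.
    np.dtype("f8", align=True)
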
diff --git a/numpy/_core/src/common/npy_import.h b/numpy/_core/src/common/npy_import.h index 970efa8f549e..87944989e95d 100644 --- a/numpy/_core/src/common/npy_import.h +++ b/numpy/_core/src/common/npy_import.h @@ -41,6 +41,7 @@ typedef struct npy_runtime_imports_struct { PyObject *_std; PyObject *_sum; PyObject *_ufunc_doc_signature_formatter; + PyObject *_usefields; PyObject *_var; PyObject *_view_is_safe; PyObject *_void_scalar_to_string; diff --git a/numpy/_core/src/multiarray/conversion_utils.c b/numpy/_core/src/multiarray/conversion_utils.c index 5ada3e6e4faf..1994dd0ee8f7 100644 --- a/numpy/_core/src/multiarray/conversion_utils.c +++ b/numpy/_core/src/multiarray/conversion_utils.c @@ -438,15 +438,11 @@ PyArray_ConvertMultiAxis(PyObject *axis_in, int ndim, npy_bool *out_axis_flags) NPY_NO_EXPORT int PyArray_BoolConverter(PyObject *object, npy_bool *val) { - if (PyObject_IsTrue(object)) { - *val = NPY_TRUE; - } - else { - *val = NPY_FALSE; - } - if (PyErr_Occurred()) { + int bool_val = PyObject_IsTrue(object); + if (bool_val == -1) { return NPY_FAIL; } + *val = (npy_bool)bool_val; return NPY_SUCCEED; } @@ -460,15 +456,11 @@ PyArray_OptionalBoolConverter(PyObject *object, int *val) if (object == Py_None) { return NPY_SUCCEED; } - if (PyObject_IsTrue(object)) { - *val = 1; - } - else { - *val = 0; - } - if (PyErr_Occurred()) { + int bool_val = PyObject_IsTrue(object); + if (bool_val == -1) { return NPY_FAIL; } + *val = (npy_bool)bool_val; return NPY_SUCCEED; } diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index f520e3c4bceb..c32b0e5e3f9f 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -1029,17 +1029,13 @@ _validate_object_field_overlap(_PyArray_LegacyDescr *dtype) static PyArray_Descr * _convert_from_field_dict(PyObject *obj, int align) { - PyObject *_numpy_internal; - PyArray_Descr *res; - - _numpy_internal = PyImport_ImportModule("numpy._core._internal"); - if (_numpy_internal == NULL) { + if (npy_cache_import_runtime( + "numpy._core._internal", "_usefields", &npy_runtime_imports._usefields) < 0) { return NULL; } - res = (PyArray_Descr *)PyObject_CallMethod(_numpy_internal, - "_usefields", "Oi", obj, align); - Py_DECREF(_numpy_internal); - return res; + + return (PyArray_Descr *)PyObject_CallFunctionObjArgs( + npy_runtime_imports._usefields, obj, align ? Py_True : Py_False, NULL); } /* @@ -2554,7 +2550,9 @@ arraydescr_new(PyTypeObject *subtype, return NULL; } - PyObject *odescr, *metadata=NULL; + PyObject *odescr; + PyObject *oalign = NULL; + PyObject *metadata = NULL; PyArray_Descr *conv; npy_bool align = NPY_FALSE; npy_bool copy = NPY_FALSE; @@ -2562,14 +2560,33 @@ arraydescr_new(PyTypeObject *subtype, static char *kwlist[] = {"dtype", "align", "copy", "metadata", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O&O!:dtype", kwlist, + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO&O!:dtype", kwlist, &odescr, - PyArray_BoolConverter, &align, + &oalign, PyArray_BoolConverter, ©, &PyDict_Type, &metadata)) { return NULL; } + if (oalign != NULL) { + /* + * In the future, reject non Python (or NumPy) boolean, including integers to avoid any + * possibility of thinking that an integer alignment makes sense here. 
+ */ + if (!PyBool_Check(oalign) && !PyArray_IsScalar(oalign, Bool)) { + /* Deprecated 2025-07-01: NumPy 2.4 */ + if (PyErr_WarnFormat(npy_static_pydata.VisibleDeprecationWarning, 1, + "dtype(): align should be passed as Python or NumPy boolean but got `align=%.100R`. " + "Did you mean to pass a tuple to create a subarray type? (Deprecated NumPy 2.4)", + oalign) < 0) { + return NULL; + } + } + if (!PyArray_BoolConverter(oalign, &align)) { + return NULL; + } + } + conv = _convert_from_any(odescr, align); if (conv == NULL) { return NULL; diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index 7ccb080a75c1..c5d2ca459e6a 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -858,18 +858,20 @@ def test_pickle(self): delta) # Check that loading pickles from 1.6 works - pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n"\ - b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'D'\np6\n"\ - b"I7\nI1\nI1\ntp7\ntp8\ntp9\nb." - assert_equal(pickle.loads(pkl), np.dtype(''\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n"\ - b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb." - assert_equal(pickle.loads(pkl), np.dtype('>M8[us]')) + with pytest.warns(np.exceptions.VisibleDeprecationWarning, + match=r".*align should be passed"): + pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n"\ + b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'D'\np6\n"\ + b"I7\nI1\nI1\ntp7\ntp8\ntp9\nb." + assert_equal(pickle.loads(pkl), np.dtype(''\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n"\ + b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb." + assert_equal(pickle.loads(pkl), np.dtype('>M8[us]')) def test_setstate(self): "Verify that datetime dtype __setstate__ can handle bad arguments" diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 7d4875d6d149..df27a1f9d076 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -453,3 +453,23 @@ def test_deprecated(self): struct_ufunc.add_triplet, "new docs" ) ) + + +class TestDTypeAlignBool(_VisibleDeprecationTestCase): + # Deprecated in Numpy 2.4, 2025-07 + # NOTE: As you can see, finalizing this deprecation breaks some (very) old + # pickle files. This may be fine, but sneeds to be done with some care since + # it breaks all of them and not just some. + # (Maybe it should be a 3.0 or only after warning more explicitly around pickles.) + message = r"dtype\(\): align should be passed as Python or NumPy boolean but got " + + def test_deprecated(self): + # in particular integers should be rejected because one may think they mean + # alignment, or pass them accidentally as a subarray shape (meaning to pass + # a tuple). + self.assert_deprecated(lambda: np.dtype("f8", align=3)) + + @pytest.mark.parametrize("align", [True, False, np.True_, np.False_]) + def test_not_deprecated(self, align): + # if the user passes a bool, it is accepted. 
+ self.assert_not_deprecated(lambda: np.dtype("f8", align=align)) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 0faf35c64b98..4ee021d18f6f 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -4505,18 +4505,24 @@ def _loads(self, obj): # version 0 pickles, using protocol=2 to pickle # version 0 doesn't have a version field + @pytest.mark.filterwarnings( + "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning") def test_version0_int8(self): s = b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb." a = np.array([1, 2, 3, 4], dtype=np.int8) p = self._loads(s) assert_equal(a, p) + @pytest.mark.filterwarnings( + "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning") def test_version0_float32(self): s = b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01 Date: Tue, 1 Jul 2025 17:34:13 +0200 Subject: [PATCH 0170/1018] MAINT: Enable linting with ruff E501 (#29300) * MAINT: Enforce ruff E501 * fix merge --- benchmarks/benchmarks/bench_linalg.py | 13 ++++--- numpy/_core/tests/test_simd.py | 12 ++++--- numpy/_core/tests/test_ufunc.py | 13 ++++--- numpy/_core/tests/test_umath_accuracy.py | 12 +++++-- numpy/_core/tests/test_umath_complex.py | 44 +++++++++++++----------- numpy/tests/test_configtool.py | 12 ++++--- ruff.toml | 9 ++--- 7 files changed, 68 insertions(+), 47 deletions(-) diff --git a/benchmarks/benchmarks/bench_linalg.py b/benchmarks/benchmarks/bench_linalg.py index 03e2fd77f4f2..49a7ae84fde6 100644 --- a/benchmarks/benchmarks/bench_linalg.py +++ b/benchmarks/benchmarks/bench_linalg.py @@ -148,7 +148,9 @@ def setup(self, dtype): self.non_contiguous_dim1_small = np.arange(1, 80, 2, dtype=dtype) self.non_contiguous_dim1 = np.arange(1, 4000, 2, dtype=dtype) self.non_contiguous_dim2 = np.arange(1, 2400, 2, dtype=dtype).reshape(30, 40) - self.non_contiguous_dim3 = np.arange(1, 48000, 2, dtype=dtype).reshape(20, 30, 40) + + non_contiguous_dim3 = np.arange(1, 48000, 2, dtype=dtype) + self.non_contiguous_dim3 = non_contiguous_dim3.reshape(20, 30, 40) # outer(a,b): trigger sum_of_products_contig_stride0_outcontig_two def time_einsum_outer(self, dtype): @@ -180,11 +182,13 @@ def time_einsum_contig_outstride0(self, dtype): # outer(a,b): non_contiguous arrays def time_einsum_noncon_outer(self, dtype): - np.einsum("i,j", self.non_contiguous_dim1, self.non_contiguous_dim1, optimize=True) + np.einsum("i,j", self.non_contiguous_dim1, + self.non_contiguous_dim1, optimize=True) # multiply(a, b):non_contiguous arrays def time_einsum_noncon_multiply(self, dtype): - np.einsum("..., ...", self.non_contiguous_dim2, self.non_contiguous_dim3, optimize=True) + np.einsum("..., ...", self.non_contiguous_dim2, + self.non_contiguous_dim3, optimize=True) # sum and multiply:non_contiguous arrays def time_einsum_noncon_sum_mul(self, dtype): @@ -200,7 +204,8 @@ def time_einsum_noncon_mul(self, dtype): # contig_contig_outstride0_two: non_contiguous arrays def time_einsum_noncon_contig_contig(self, dtype): - np.einsum("ji,i->", self.non_contiguous_dim2, self.non_contiguous_dim1_small, optimize=True) + np.einsum("ji,i->", self.non_contiguous_dim2, + self.non_contiguous_dim1_small, optimize=True) # 
sum_of_products_contig_outstride0_one: non_contiguous arrays def time_einsum_noncon_contig_outstride0(self, dtype): diff --git a/numpy/_core/tests/test_simd.py b/numpy/_core/tests/test_simd.py index acea4315e679..335abc98c84e 100644 --- a/numpy/_core/tests/test_simd.py +++ b/numpy/_core/tests/test_simd.py @@ -271,7 +271,8 @@ def test_operators_shift(self): shr = self.shr(vdata_a, count) assert shr == data_shr_a - # shift by zero or max or out-range immediate constant is not applicable and illogical + # shift by zero or max or out-range immediate constant is not + # applicable and illogical for count in range(1, self._scalar_size()): # load to cast data_shl_a = self.load([a << count for a in data_a]) @@ -419,7 +420,8 @@ def test_sqrt(self): sqrt = self.sqrt(self.setall(case)) assert sqrt == pytest.approx(data_sqrt, nan_ok=True) - data_sqrt = self.load([math.sqrt(x) for x in data]) # load to truncate precision + # load to truncate precision + data_sqrt = self.load([math.sqrt(x) for x in data]) sqrt = self.sqrt(vdata) assert sqrt == data_sqrt @@ -1334,8 +1336,10 @@ def test_mask_conditional(self): for sfx in sfxes: skip_m = skip_sfx.get(sfx, skip) inhr = (cls,) - attr = {"npyv": targets[target_name], "sfx": sfx, "target_name": target_name} - tcls = type(f"Test{cls.__name__}_{simd_width}_{target_name}_{sfx}", inhr, attr) + attr = {"npyv": targets[target_name], "sfx": sfx, + "target_name": target_name} + type_name = f"Test{cls.__name__}_{simd_width}_{target_name}_{sfx}" + tcls = type(type_name, inhr, attr) if skip_m: pytest.mark.skip(reason=skip_m)(tcls) globals()[tcls.__name__] = tcls diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index a1cd63aec523..836be1245399 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -1100,17 +1100,15 @@ def test_output_ellipsis_errors(self): match=r"out=\.\.\. is only allowed as a keyword argument."): np.add.reduce(1, (), None, ...) - with pytest.raises(TypeError, - match=r"must use `\.\.\.` as `out=\.\.\.` and not per-operand/in a tuple"): + type_error = r"must use `\.\.\.` as `out=\.\.\.` and not per-operand/in a tuple" + with pytest.raises(TypeError, match=type_error): np.negative(1, out=(...,)) - with pytest.raises(TypeError, - match=r"must use `\.\.\.` as `out=\.\.\.` and not per-operand/in a tuple"): + with pytest.raises(TypeError, match=type_error): # We only allow out=... 
not individual args for now np.divmod(1, 2, out=(np.empty(()), ...)) - with pytest.raises(TypeError, - match=r"must use `\.\.\.` as `out=\.\.\.` and not per-operand/in a tuple"): + with pytest.raises(TypeError, match=type_error): np.add.reduce(1, out=(...,)) def test_axes_argument(self): @@ -1556,7 +1554,8 @@ def __eq__(self, other): arr1d = np.array([HasComparisons()]) assert_equal(arr1d == arr1d, np.array([True])) - assert_equal(np.equal(arr1d, arr1d), np.array([True])) # normal behavior is a cast + # normal behavior is a cast + assert_equal(np.equal(arr1d, arr1d), np.array([True])) assert_equal(np.equal(arr1d, arr1d, dtype=object), np.array(['=='])) def test_object_array_reduction(self): diff --git a/numpy/_core/tests/test_umath_accuracy.py b/numpy/_core/tests/test_umath_accuracy.py index da9419d63a8a..3ca2f508672e 100644 --- a/numpy/_core/tests/test_umath_accuracy.py +++ b/numpy/_core/tests/test_umath_accuracy.py @@ -68,8 +68,16 @@ def test_validate_transcendentals(self): npfunc = getattr(np, npname) for datatype in np.unique(data['type']): data_subset = data[data['type'] == datatype] - inval = np.array(str_to_float(data_subset['input'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype)) - outval = np.array(str_to_float(data_subset['output'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype)) + data_input_str = data_subset['input'].astype(str) + data_output_str = data_subset['output'].astype(str) + data_type_str = data_subset['type'].astype(str) + + inval = np.array(str_to_float(data_input_str, + data_type_str), + dtype=eval(datatype)) + outval = np.array(str_to_float(data_output_str, + data_type_str), + dtype=eval(datatype)) perm = np.random.permutation(len(inval)) inval = inval[perm] outval = outval[perm] diff --git a/numpy/_core/tests/test_umath_complex.py b/numpy/_core/tests/test_umath_complex.py index 8f6f5c682a91..7012e7e357fe 100644 --- a/numpy/_core/tests/test_umath_complex.py +++ b/numpy/_core/tests/test_umath_complex.py @@ -562,31 +562,35 @@ class TestSpecialComplexAVX: @pytest.mark.parametrize("stride", [-4, -2, -1, 1, 2, 4]) @pytest.mark.parametrize("astype", [np.complex64, np.complex128]) def test_array(self, stride, astype): - arr = np.array([complex(np.nan, np.nan), - complex(np.nan, np.inf), - complex(np.inf, np.nan), - complex(np.inf, np.inf), - complex(0., np.inf), - complex(np.inf, 0.), - complex(0., 0.), - complex(0., np.nan), - complex(np.nan, 0.)], dtype=astype) - abs_true = np.array([np.nan, np.inf, np.inf, np.inf, np.inf, np.inf, 0., np.nan, np.nan], dtype=arr.real.dtype) - sq_true = np.array([complex(np.nan, np.nan), - complex(np.nan, np.nan), - complex(np.nan, np.nan), - complex(np.nan, np.inf), - complex(-np.inf, np.nan), - complex(np.inf, np.nan), - complex(0., 0.), - complex(np.nan, np.nan), - complex(np.nan, np.nan)], dtype=astype) + nan = np.nan + inf = np.inf + arr = np.array([complex(nan, nan), + complex(nan, inf), + complex(inf, nan), + complex(inf, inf), + complex(0., inf), + complex(inf, 0.), + complex(0., 0.), + complex(0., nan), + complex(nan, 0.)], dtype=astype) + abs_true = np.array([nan, inf, inf, inf, inf, inf, 0., nan, nan], + dtype=arr.real.dtype) + sq_true = np.array([complex(nan, nan), + complex(nan, nan), + complex(nan, nan), + complex(nan, inf), + complex(-inf, nan), + complex(inf, nan), + complex(0., 0.), + complex(nan, nan), + complex(nan, nan)], dtype=astype) with np.errstate(invalid='ignore'): assert_equal(np.abs(arr[::stride]), abs_true[::stride]) assert_equal(np.square(arr[::stride]), 
sq_true[::stride]) class TestComplexAbsoluteAVX: - @pytest.mark.parametrize("arraysize", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 18, 19]) + @pytest.mark.parametrize("arraysize", + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 18, 19]) @pytest.mark.parametrize("stride", [-4, -3, -2, -1, 1, 2, 3, 4]) @pytest.mark.parametrize("astype", [np.complex64, np.complex128]) # test to ensure masking and strides work as intended in the AVX implementation diff --git a/numpy/tests/test_configtool.py b/numpy/tests/test_configtool.py index e0b9bb1b7aff..a9c23b5cc007 100644 --- a/numpy/tests/test_configtool.py +++ b/numpy/tests/test_configtool.py @@ -15,8 +15,10 @@ PKG_CONFIG_DIR = NUMPY_ROOT / '_core' / 'lib' / 'pkgconfig' -@pytest.mark.skipif(not IS_INSTALLED, reason="`numpy-config` not expected to be installed") -@pytest.mark.skipif(IS_WASM, reason="wasm interpreter cannot start subprocess") +@pytest.mark.skipif(not IS_INSTALLED, + reason="`numpy-config` not expected to be installed") +@pytest.mark.skipif(IS_WASM, + reason="wasm interpreter cannot start subprocess") class TestNumpyConfig: def check_numpyconfig(self, arg): p = subprocess.run(['numpy-config', arg], capture_output=True, text=True) @@ -36,13 +38,15 @@ def test_configtool_pkgconfigdir(self): assert pathlib.Path(stdout) == PKG_CONFIG_DIR -@pytest.mark.skipif(not IS_INSTALLED, reason="numpy must be installed to check its entrypoints") +@pytest.mark.skipif(not IS_INSTALLED, + reason="numpy must be installed to check its entrypoints") def test_pkg_config_entrypoint(): (entrypoint,) = importlib.metadata.entry_points(group='pkg_config', name='numpy') assert entrypoint.value == numpy._core.lib.pkgconfig.__name__ -@pytest.mark.skipif(not IS_INSTALLED, reason="numpy.pc is only available when numpy is installed") +@pytest.mark.skipif(not IS_INSTALLED, + reason="numpy.pc is only available when numpy is installed") @pytest.mark.skipif(IS_EDITABLE, reason="editable installs don't have a numpy.pc") def test_pkg_config_config_exists(): assert PKG_CONFIG_DIR.joinpath('numpy.pc').is_file() diff --git a/ruff.toml b/ruff.toml index 9916097420b6..ed6026ed0665 100644 --- a/ruff.toml +++ b/ruff.toml @@ -72,7 +72,6 @@ ignore = [ "bench_*.py" = ["B015", "B018"] "test*.py" = ["B015", "B018", "E201", "E714"] -"benchmarks/benchmarks/bench_linalg.py" = ["E501"] "numpy/_core/tests/test_arrayprint.py" = ["E501"] "numpy/_core/tests/test_cpu_dispatcher.py" = ["E501"] "numpy/_core/tests/test_cpu_features.py" = ["E501"] @@ -81,17 +80,15 @@ ignore = [ "numpy/_core/tests/test_einsum.py" = ["E501"] "numpy/_core/tests/test_multiarray.py" = ["E501"] "numpy/_core/tests/test_nditer*py" = ["E501"] -"numpy/_core/tests/test_ufunc*py" = ["E501"] -"numpy/_core/tests/test_umath*py" = ["E501"] -"numpy/_core/tests/test_numeric*.py" = ["E501"] +"numpy/_core/tests/test_umath.py" = ["E501"] +"numpy/_core/tests/test_numeric.py" = ["E501"] +"numpy/_core/tests/test_numerictypes.py" = ["E501"] "numpy/_core/tests/test_regression.py" = ["E501"] -"numpy/_core/tests/test_simd.py" = ["E501"] "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] "numpy/_core/code_generators/generate_umath.py" = ["E501"] "numpy/lib/tests/test_format.py" = ["E501"] "numpy/linalg/tests/test_linalg.py" = ["E501"] -"numpy/tests/test_configtool.py" = ["E501"] "numpy/f2py/*py" = ["E501"] # for typing related files we follow https://typing.python.org/en/latest/guides/writing_stubs.html#maximum-line-length "numpy/_typing/_array_like.py" = ["E501"] From 
3727d5b23e33c3c495b8025ad8c7413d72c9db0d Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Tue, 1 Jul 2025 18:39:39 +0100 Subject: [PATCH 0171/1018] TYP: Add shape typing to return values of `np.nonzero` and `ndarray.nonzero`, simplify `MaskedArray.nonzero` return type (#29303) --- numpy/__init__.pyi | 2 +- numpy/_core/fromnumeric.pyi | 2 +- numpy/ma/core.pyi | 2 +- numpy/typing/tests/data/reveal/fromnumeric.pyi | 8 ++++---- numpy/typing/tests/data/reveal/ma.pyi | 2 +- numpy/typing/tests/data/reveal/ndarray_misc.pyi | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index d12e63a8bed9..d071f3e6b92d 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2355,7 +2355,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def dot(self, b: ArrayLike, out: _ArrayT) -> _ArrayT: ... # `nonzero()` is deprecated for 0d arrays/generics - def nonzero(self) -> tuple[NDArray[intp], ...]: ... + def nonzero(self) -> tuple[ndarray[tuple[int], np.dtype[intp]], ...]: ... # `put` is technically available to `generic`, # but is pointless as `generic`s are immutable diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 2aedc727e6dc..ba6669936571 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -576,7 +576,7 @@ def ravel( @overload def ravel(a: ArrayLike, order: _OrderKACF = "C") -> np.ndarray[tuple[int], np.dtype]: ... -def nonzero(a: _ArrayLike[Any]) -> tuple[NDArray[intp], ...]: ... +def nonzero(a: _ArrayLike[Any]) -> tuple[np.ndarray[tuple[int], np.dtype[intp]], ...]: ... # this prevents `Any` from being returned with Pyright @overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index caea284bb566..3ca5d9be0b97 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1102,7 +1102,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... - def nonzero(self) -> tuple[_Array1D[intp], *tuple[_Array1D[intp], ...]]: ... + def nonzero(self) -> tuple[_Array1D[intp], ...]: ... def trace(self, offset=..., axis1=..., axis2=..., dtype=..., out=...): ... def dot(self, b, out=..., strict=...): ... def sum(self, axis=..., dtype=..., out=..., keepdims=...): ... 
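
In practice, the sharper return type lets shape-sensitive code type-check
without casts. A minimal sketch, assuming Python 3.11+ (for
``typing.assert_type``) and a type checker run against these stubs:

    from typing import assert_type

    import numpy as np

    idx = np.nonzero(np.eye(3))
    # Each index array is now statically known to be 1-D:
    assert_type(idx, tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...])
    assert_type(idx[0].shape, tuple[int])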
diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index 5438e001a13f..62bc926c765b 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -131,10 +131,10 @@ assert_type(np.ravel(f), np.ndarray[tuple[int], np.dtype[np.float64 | np.int_ | assert_type(np.ravel(AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) assert_type(np.ravel(AR_f4), np.ndarray[tuple[int], np.dtype[np.float32]]) -assert_type(np.nonzero(AR_b), tuple[npt.NDArray[np.intp], ...]) -assert_type(np.nonzero(AR_f4), tuple[npt.NDArray[np.intp], ...]) -assert_type(np.nonzero(AR_1d), tuple[npt.NDArray[np.intp], ...]) -assert_type(np.nonzero(AR_nd), tuple[npt.NDArray[np.intp], ...]) +assert_type(np.nonzero(AR_b), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) +assert_type(np.nonzero(AR_f4), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) +assert_type(np.nonzero(AR_1d), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) +assert_type(np.nonzero(AR_nd), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) assert_type(np.shape(b), tuple[()]) assert_type(np.shape(f), tuple[()]) diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index a101ba0d48b0..48311a7e87fa 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -380,7 +380,7 @@ assert_type(MAR_2d_f4.transpose((1, 0)), np.ma.MaskedArray[tuple[int, int], np.d assert_type(MAR_b.T, MaskedArray[np.bool]) assert_type(MAR_2d_f4.T, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) -assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], *tuple[_Array1D[np.intp], ...]]) +assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], ...]) assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) # Masked Array addition diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index 4cbb90621ca9..7f0a214f8f52 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -170,7 +170,7 @@ assert_type(AR_f8.dot(1), npt.NDArray[Any]) assert_type(AR_f8.dot([1]), Any) assert_type(AR_f8.dot(1, out=B), SubClass) -assert_type(AR_f8.nonzero(), tuple[npt.NDArray[np.intp], ...]) +assert_type(AR_f8.nonzero(), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) assert_type(AR_f8.searchsorted(1), np.intp) assert_type(AR_f8.searchsorted([1]), npt.NDArray[np.intp]) From ec34c6d3f6ff7969bdd82f9b2347070d11d8b7de Mon Sep 17 00:00:00 2001 From: ianlv <168640168+ianlv@users.noreply.github.com> Date: Wed, 2 Jul 2025 16:38:32 +0800 Subject: [PATCH 0172/1018] chore: remove redundant words in comment (#29306) --- doc/source/reference/c-api/array.rst | 2 +- numpy/_core/src/common/dlpack/dlpack.h | 2 +- numpy/_core/src/multiarray/alloc.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 02db78ebb2b1..dc043b77f187 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -3814,7 +3814,7 @@ In this case, the helper C files typically do not have a canonical place where ``PyArray_ImportNumPyAPI`` should be called (although it is OK and fast to call it often). 
-To solve this, NumPy provides the following pattern that the the main +To solve this, NumPy provides the following pattern that the main file is modified to define ``PY_ARRAY_UNIQUE_SYMBOL`` before the include: .. code-block:: c diff --git a/numpy/_core/src/common/dlpack/dlpack.h b/numpy/_core/src/common/dlpack/dlpack.h index 19ecc27761f8..4dc164fe9c1b 100644 --- a/numpy/_core/src/common/dlpack/dlpack.h +++ b/numpy/_core/src/common/dlpack/dlpack.h @@ -270,7 +270,7 @@ typedef struct DLManagedTensor { void (*deleter)(struct DLManagedTensor * self); } DLManagedTensor; -// bit masks used in in the DLManagedTensorVersioned +// bit masks used in the DLManagedTensorVersioned /*! \brief bit mask to indicate that the tensor is read only. */ #define DLPACK_FLAG_BITMASK_READ_ONLY (1UL << 0UL) diff --git a/numpy/_core/src/multiarray/alloc.h b/numpy/_core/src/multiarray/alloc.h index f5600c99aaa5..8cd763f971ed 100644 --- a/numpy/_core/src/multiarray/alloc.h +++ b/numpy/_core/src/multiarray/alloc.h @@ -75,7 +75,7 @@ _npy_init_workspace( /* * Helper definition macro for a small work/scratchspace. - * The `NAME` is the C array to to be defined of with the type `TYPE`. + * The `NAME` is the C array to be defined of with the type `TYPE`. * * The usage pattern for this is: * From 888c4ff4be6adbada62bbb4a35523c05f9cff663 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 2 Jul 2025 10:00:14 +0100 Subject: [PATCH 0173/1018] TYP: Add type annotations for `MaskedArray.__{pow,rpow}__` (#29277) --- numpy/__init__.pyi | 2 + numpy/ma/core.pyi | 69 +++++++++++++++++++++- numpy/typing/tests/data/reveal/ma.pyi | 84 +++++++++++++++++++++++++++ 3 files changed, 153 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index d071f3e6b92d..6f52d1e37ce6 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3230,6 +3230,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__pow__` @overload def __pow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload @@ -3263,6 +3264,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... + # Keep in sync with `MaskedArray.__rpow__` @overload def __rpow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 3ca5d9be0b97..5d29577b308b 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -866,8 +866,73 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __rfloordiv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - def __pow__(self, other, mod: None = None, /): ... - def __rpow__(self, other, mod: None = None, /): ... + # Keep in sync with `ndarray.__pow__` + @overload + def __pow__(self: _MaskedArray[_NumberT], other: int | np.bool, mod: None = None, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __pow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> _MaskedArray[int8]: ... 
# type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> _MaskedArray[float64]: ... + @overload + def __pow__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], mod: None = None, /) -> _MaskedArray[float64]: ... + @overload + def __pow__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, mod: None = None, /) -> _MaskedArray[complex128]: ... + @overload + def __pow__( + self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / + ) -> _MaskedArray[complex128]: ... + @overload + def __pow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> _MaskedArray[complexfloating]: ... + @overload + def __pow__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, mod: None = None, /) -> _MaskedArray[number]: ... + @overload + def __pow__(self: _MaskedArray[object_], other: Any, mod: None = None, /) -> Any: ... + @overload + def __pow__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... + + # Keep in sync with `ndarray.__rpow__` + @overload + def __rpow__(self: _MaskedArray[_NumberT], other: int | np.bool, mod: None = None, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rpow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> _MaskedArray[float64]: ... + @overload + def __rpow__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], mod: None = None, /) -> _MaskedArray[float64]: ... + @overload + def __rpow__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, mod: None = None, /) -> _MaskedArray[complex128]: ... + @overload + def __rpow__( + self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / + ) -> _MaskedArray[complex128]: ... + @overload + def __rpow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> _MaskedArray[floating]: ... 
# type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> _MaskedArray[complexfloating]: ... + @overload + def __rpow__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, mod: None = None, /) -> _MaskedArray[number]: ... + @overload + def __rpow__(self: _MaskedArray[object_], other: Any, mod: None = None, /) -> Any: ... + @overload + def __rpow__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... # Keep in sync with `ndarray.__iadd__` @overload diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 48311a7e87fa..f2e9b136259a 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -884,3 +884,87 @@ assert_type(AR_LIKE_f // MAR_o, Any) assert_type(AR_LIKE_td64 // MAR_o, Any) assert_type(AR_LIKE_dt64 // MAR_o, Any) assert_type(AR_LIKE_o // MAR_o, Any) + +# Masked Array power + +assert_type(MAR_b ** AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b ** AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b ** AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b ** AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_u ** MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i ** MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f ** MAR_b, MaskedArray[np.floating]) +assert_type(AR_LIKE_c ** MAR_b, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_b, Any) + +assert_type(MAR_u4 ** AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 ** AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 ** AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 ** AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 ** AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_u4 , MaskedArray[np.uint32]) +assert_type(AR_LIKE_u ** MAR_u4 , MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i ** MAR_u4 , MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f ** MAR_u4 , MaskedArray[np.floating]) +assert_type(AR_LIKE_c ** MAR_u4 , MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_u4 , Any) + +assert_type(MAR_i8 ** AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 ** AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 ** AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 ** AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 ** AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 ** AR_LIKE_o, Any) +assert_type(MAR_i8 ** AR_LIKE_b, MaskedArray[np.int64]) + +assert_type(AR_LIKE_u ** MAR_i8 , MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i ** MAR_i8 , MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f ** MAR_i8 , MaskedArray[np.floating]) +assert_type(AR_LIKE_c ** MAR_i8 , MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_i8 , Any) + +assert_type(MAR_f8 ** AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 ** AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 ** AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 ** AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 ** AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u ** MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i ** MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f ** 
MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_c ** MAR_f8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_f8, Any) + +assert_type(MAR_c16 ** AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_u ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_i ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_f ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_c ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_o ** MAR_c16, Any) + +assert_type(MAR_o ** AR_LIKE_b, Any) +assert_type(MAR_o ** AR_LIKE_u, Any) +assert_type(MAR_o ** AR_LIKE_i, Any) +assert_type(MAR_o ** AR_LIKE_f, Any) +assert_type(MAR_o ** AR_LIKE_c, Any) +assert_type(MAR_o ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_o, Any) +assert_type(AR_LIKE_u ** MAR_o, Any) +assert_type(AR_LIKE_i ** MAR_o, Any) +assert_type(AR_LIKE_f ** MAR_o, Any) +assert_type(AR_LIKE_c ** MAR_o, Any) +assert_type(AR_LIKE_o ** MAR_o, Any) From 34d12a963351a0ec1a58e42f37592e49d4f7eed7 Mon Sep 17 00:00:00 2001 From: Shyok Mutsuddi Date: Wed, 2 Jul 2025 11:05:30 +0000 Subject: [PATCH 0174/1018] test: add regression test for grammar in ufunc TypeError message --- numpy/_core/tests/test_umath.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index c54b2ac86bc2..b5145e9d642b 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -4917,3 +4917,12 @@ def test_ufunc_arg(self): @pytest.mark.filterwarnings("ignore:_add_newdoc_ufunc:DeprecationWarning") def test_string_arg(self): assert_raises(TypeError, ncu._add_newdoc_ufunc, np.add, 3) + +class TestHypotErrorMessages: + def test_hypot_error_message_single_arg(self): + with pytest.raises(TypeError, match="hypot\\(\\) takes .* but 1 was given"): + np.hypot(5) + + def test_hypot_error_message_multiple_args(self): + with pytest.raises(TypeError, match="hypot\\(\\) takes .* but 4 were given"): + np.hypot(1, 2, 3, 4) From 99f894a76ebf91638d05c8611d4e0bc6f1ec9b78 Mon Sep 17 00:00:00 2001 From: Shyok Mutsuddi Date: Wed, 2 Jul 2025 11:06:34 +0000 Subject: [PATCH 0175/1018] fix: correct singular/plural grammar in ufunc TypeError message --- numpy/_core/src/common/npy_argparse.c | 10 ++++++---- numpy/_core/src/umath/ufunc_object.c | 7 ++++--- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/numpy/_core/src/common/npy_argparse.c b/numpy/_core/src/common/npy_argparse.c index 6766b17043ac..aa011be9c585 100644 --- a/numpy/_core/src/common/npy_argparse.c +++ b/numpy/_core/src/common/npy_argparse.c @@ -243,16 +243,18 @@ static int raise_incorrect_number_of_positional_args(const char *funcname, const _NpyArgParserCache *cache, Py_ssize_t len_args) { + const char *verb = (len_args == 1) ? 
"was" : "were"; if (cache->npositional == cache->nrequired) { PyErr_Format(PyExc_TypeError, - "%s() takes %d positional arguments but %zd were given", - funcname, cache->npositional, len_args); + "%s() takes %d positional arguments but %zd %s given", + funcname, cache->npositional, len_args, verb); } else { PyErr_Format(PyExc_TypeError, "%s() takes from %d to %d positional arguments but " - "%zd were given", - funcname, cache->nrequired, cache->npositional, len_args); + "%zd %s given", + funcname, cache->nrequired, cache->npositional, + len_args, verb); } return -1; } diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 4cdde8d3d77d..485364af1ff2 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -4319,10 +4319,11 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, /* Check number of arguments */ if (NPY_UNLIKELY((len_args < nin) || (len_args > nop))) { + const char *verb = (len_args == 1) ? "was" : "were"; PyErr_Format(PyExc_TypeError, - "%s() takes from %d to %d positional arguments but " - "%zd were given", - ufunc_get_name_cstr(ufunc) , nin, nop, len_args); + "%s() takes from %d to %d positional arguments but " + "%zd %s given", + ufunc_get_name_cstr(ufunc), nin, nop, len_args, verb); goto fail; } From a04953939d91490103d308491479217ef4a9802b Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 2 Jul 2025 13:10:01 +0200 Subject: [PATCH 0176/1018] Update numpy/_core/tests/test_deprecations.py Co-authored-by: Matti Picus --- numpy/_core/tests/test_deprecations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index df27a1f9d076..b83a2ac610ee 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -458,7 +458,7 @@ def test_deprecated(self): class TestDTypeAlignBool(_VisibleDeprecationTestCase): # Deprecated in Numpy 2.4, 2025-07 # NOTE: As you can see, finalizing this deprecation breaks some (very) old - # pickle files. This may be fine, but sneeds to be done with some care since + # pickle files. This may be fine, but needs to be done with some care since # it breaks all of them and not just some. # (Maybe it should be a 3.0 or only after warning more explicitly around pickles.) message = r"dtype\(\): align should be passed as Python or NumPy boolean but got " From 0578bdfe74c914b602ba17fef833dd5c5b2ac546 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 2 Jul 2025 15:19:28 +0100 Subject: [PATCH 0177/1018] TYP: Type ``MaskedArray.{trace,round,cumsum,cumprod}`` (#29307) --- numpy/__init__.pyi | 4 ++ numpy/ma/core.pyi | 63 +++++++++++++++++++++++++-- numpy/typing/tests/data/reveal/ma.pyi | 12 +++++ 3 files changed, 75 insertions(+), 4 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 6f52d1e37ce6..6bcbe5b68662 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1741,6 +1741,7 @@ class _ArrayOrScalarCommon: @overload def argmin(self, /, axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + # Keep in sync with `MaskedArray.round` @overload # out=None (default) def round(self, /, decimals: SupportsIndex = 0, out: None = None) -> Self: ... 
@overload # out=ndarray @@ -1776,6 +1777,7 @@ class _ArrayOrScalarCommon: @overload def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, out: _ArrayT) -> _ArrayT: ... + # Keep in sync with `MaskedArray.cumprod` @overload # out: None (default) def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ... @overload # out: ndarray @@ -1783,6 +1785,7 @@ class _ArrayOrScalarCommon: @overload def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + # Keep in sync with `MaskedArray.cumsum` @overload # out: None (default) def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ... @overload # out: ndarray @@ -2385,6 +2388,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): stable: bool | None = ..., ) -> None: ... + # Keep in sync with `MaskedArray.trace` @overload def trace( self, # >= 2D array diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 5d29577b308b..3fad41a1434e 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -46,6 +46,7 @@ from numpy import ( from numpy._globals import _NoValueType from numpy._typing import ( ArrayLike, + DTypeLike, NDArray, _32Bit, _64Bit, @@ -1168,18 +1169,72 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): ) -> _ArrayT: ... def nonzero(self) -> tuple[_Array1D[intp], ...]: ... - def trace(self, offset=..., axis1=..., axis2=..., dtype=..., out=...): ... + + # Keep in sync with `ndarray.trace` + @overload + def trace( + self, # >= 2D MaskedArray + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None = ..., + ) -> Any: ... + @overload + def trace( + self, # >= 2D MaskedArray + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayT, + ) -> _ArrayT: ... + @overload + def trace( + self, # >= 2D MaskedArray + offset: SupportsIndex, + axis1: SupportsIndex, + axis2: SupportsIndex, + dtype: DTypeLike, + out: _ArrayT, + ) -> _ArrayT: ... + def dot(self, b, out=..., strict=...): ... def sum(self, axis=..., dtype=..., out=..., keepdims=...): ... - def cumsum(self, axis=..., dtype=..., out=...): ... + + # Keep in sync with `ndarray.cumsum` + @overload # out: None (default) + def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> _MaskedArray[Any]: ... + @overload # out: ndarray + def cumsum(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + def prod(self, axis=..., dtype=..., out=..., keepdims=...): ... product: Any - def cumprod(self, axis=..., dtype=..., out=...): ... + + # Keep in sync with `ndarray.cumprod` + @overload # out: None (default) + def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> _MaskedArray[Any]: ... + @overload # out: ndarray + def cumprod(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + def mean(self, axis=..., dtype=..., out=..., keepdims=...): ... 
def anom(self, axis=..., dtype=...): ... def var(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... def std(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... - def round(self, decimals=..., out=...): ... + + # Keep in sync with `ndarray.round` + @overload # out=None (default) + def round(self, /, decimals: SupportsIndex = 0, out: None = None) -> Self: ... + @overload # out=ndarray + def round(self, /, decimals: SupportsIndex, out: _ArrayT) -> _ArrayT: ... + @overload + def round(self, /, decimals: SupportsIndex = 0, *, out: _ArrayT) -> _ArrayT: ... + def argsort(self, axis=..., kind=..., order=..., endwith=..., fill_value=..., *, stable=...): ... # Keep in-sync with np.ma.argmin diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index f2e9b136259a..9e8ab98ec4ce 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -383,6 +383,18 @@ assert_type(MAR_2d_f4.T, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32] assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], ...]) assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) +assert_type(MAR_f8.trace(), Any) +assert_type(MAR_f8.trace(out=MAR_subclass), MaskedArraySubclass) + +assert_type(MAR_f8.round(), MaskedArray[np.float64]) +assert_type(MAR_f8.round(out=MAR_subclass), MaskedArraySubclass) + +assert_type(MAR_f8.cumprod(), MaskedArray[Any]) +assert_type(MAR_f8.cumprod(out=MAR_subclass), MaskedArraySubclass) + +assert_type(MAR_f8.cumsum(), MaskedArray[Any]) +assert_type(MAR_f8.cumsum(out=MAR_subclass), MaskedArraySubclass) + # Masked Array addition assert_type(MAR_b + AR_LIKE_u, MaskedArray[np.uint32]) From 1cc6113ca186b24d08cb7be94e2834bec4768c07 Mon Sep 17 00:00:00 2001 From: Sachin Shah <39803835+inventshah@users.noreply.github.com> Date: Wed, 2 Jul 2025 11:25:35 -0400 Subject: [PATCH 0178/1018] TYP: add explicit types for np.quantile (#29305) --- numpy/lib/_function_base_impl.pyi | 161 +++++++++++++++++++++++++++++- 1 file changed, 158 insertions(+), 3 deletions(-) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 78947b1b3b46..14e48f1cc3fd 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -615,6 +615,7 @@ _MethodKind = L[ "nearest", ] +# NOTE: keep in sync with `quantile` @overload def percentile( a: _ArrayLikeFloat_co, @@ -772,9 +773,163 @@ def percentile( weights: _ArrayLikeFloat_co | None = ..., ) -> _ArrayT: ... -# NOTE: Not an alias, but they do have identical signatures -# (that we can reuse) -quantile = percentile +# NOTE: keep in sync with `percentile` +@overload +def quantile( + a: _ArrayLikeFloat_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> floating: ... +@overload +def quantile( + a: _ArrayLikeComplex_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> complexfloating: ... +@overload +def quantile( + a: _ArrayLikeTD64_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> timedelta64: ... 
+@overload +def quantile( + a: _ArrayLikeDT64_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> datetime64: ... +@overload +def quantile( + a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> Any: ... +@overload +def quantile( + a: _ArrayLikeFloat_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> NDArray[floating]: ... +@overload +def quantile( + a: _ArrayLikeComplex_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> NDArray[complexfloating]: ... +@overload +def quantile( + a: _ArrayLikeTD64_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> NDArray[timedelta64]: ... +@overload +def quantile( + a: _ArrayLikeDT64_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> NDArray[datetime64]: ... +@overload +def quantile( + a: _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> NDArray[object_]: ... +@overload +def quantile( + a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: bool = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> Any: ... +@overload +def quantile( + a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None, + out: _ArrayT, + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: bool = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> _ArrayT: ... +@overload +def quantile( + a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None = ..., + *, + out: _ArrayT, + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: bool = ..., + weights: _ArrayLikeFloat_co | None = ..., +) -> _ArrayT: ... 
_ScalarT_fm = TypeVar( "_ScalarT_fm", From 75e86ba27ae9f99ee67660204d7ca1e16cb323da Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 2 Jul 2025 18:38:59 +0100 Subject: [PATCH 0179/1018] DOCS: Fix rendering of ``MaskedArray.anom`` ``dtype`` (#29311) --- numpy/ma/core.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 2860bec848aa..e7d8a20f6c6c 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -5454,8 +5454,8 @@ def anom(self, axis=None, dtype=None): The default is to use the mean of the flattened array as reference. dtype : dtype, optional Type to use in computing the variance. For arrays of integer type - the default is float32; for arrays of float types it is the same as - the array type. + the default is float32; for arrays of float types it is the same as + the array type. See Also -------- From 84234016c4676ffe0215f300e5b3f24df62f8efd Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Wed, 2 Jul 2025 22:50:59 +0300 Subject: [PATCH 0180/1018] BLD: remove unused github workflow (#29312) --- .github/workflows/windows_arm64.yml | 208 ---------------------------- 1 file changed, 208 deletions(-) delete mode 100644 .github/workflows/windows_arm64.yml diff --git a/.github/workflows/windows_arm64.yml b/.github/workflows/windows_arm64.yml deleted file mode 100644 index 3eaf02eb062c..000000000000 --- a/.github/workflows/windows_arm64.yml +++ /dev/null @@ -1,208 +0,0 @@ -name: Windows Arm64 - -on: - workflow_dispatch: - -env: - python_version: 3.12 - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -permissions: - contents: read # to fetch code (actions/checkout) - -jobs: - windows_arm: - runs-on: windows-2022 - - # To enable this job on a fork, comment out: - if: github.repository == 'numpy/numpy' - steps: - - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - with: - submodules: recursive - fetch-tags: true - persist-credentials: false - - - name: Setup Python - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 - with: - python-version: ${{env.python_version}} - architecture: x64 - - - name: Install build dependencies from PyPI - run: | - python -m pip install -r requirements/build_requirements.txt - - - name: Prepare python - shell: powershell - run: | - $ErrorActionPreference = "Stop" - - #Detecting python location and version - $PythonDir = (Split-Path -Parent (get-command python).Path) - $PythonVersionParts = ( -split (python -V)) - $PythonVersion = $PythonVersionParts[1] - - #Downloading the package for appropriate python version from nuget - $PythonARM64NugetLink = "https://www.nuget.org/api/v2/package/pythonarm64/$PythonVersion" - $PythonARM64NugetZip = "nuget_python.zip" - $PythonARM64NugetDir = "temp_nuget" - Invoke-WebRequest $PythonARM64NugetLink -OutFile $PythonARM64NugetZip - - #Changing the libs folder to enable python libraries to be linked for arm64 - Expand-Archive $PythonARM64NugetZip $PythonARM64NugetDir - Copy-Item $PythonARM64NugetDir\tools\libs\* $PythonDir\libs - Remove-Item -Force -Recurse $PythonARM64NugetDir - Remove-Item -Force $PythonARM64NugetZip - - if ((Test-Path -LiteralPath variable:\LASTEXITCODE)) { exit $LASTEXITCODE } - - - name: Prepare Licence - shell: powershell - run: | - $ErrorActionPreference = "Stop" - - $CurrentDir = (get-location).Path - $LicenseFile = "$CurrentDir\LICENSE.txt" - 
Set-Content $LicenseFile ([Environment]::NewLine) - Add-Content $LicenseFile "----" - Add-Content $LicenseFile ([Environment]::NewLine) - Add-Content $LicenseFile (Get-Content "$CurrentDir\LICENSES_bundled.txt") - Add-Content $LicenseFile (Get-Content "$CurrentDir\tools\wheels\LICENSE_win32.txt") - - if ((Test-Path -LiteralPath variable:\LASTEXITCODE)) { exit $LASTEXITCODE } - - - name: Wheel build - shell: powershell - run: | - $ErrorActionPreference = "Stop" - - #Creating cross compile script for messon subsystem - $CurrentDir = (get-location) - $CrossScript = "$CurrentDir\arm64_w64.txt" - $CrossScriptContent = - { - [host_machine] - system = 'windows' - subsystem = 'windows' - kernel = 'nt' - cpu_family = 'aarch64' - cpu = 'aarch64' - endian = 'little' - - [binaries] - c='cl.exe' - cpp = 'cl.exe' - - [properties] - sizeof_short = 2 - sizeof_int = 4 - sizeof_long = 4 - sizeof_long_long = 8 - sizeof_float = 4 - sizeof_double = 8 - sizeof_long_double = 8 - sizeof_size_t = 8 - sizeof_wchar_t = 2 - sizeof_off_t = 4 - sizeof_Py_intptr_t = 8 - sizeof_PY_LONG_LONG = 8 - longdouble_format = 'IEEE_DOUBLE_LE' - } - Set-Content $CrossScript $CrossScriptContent.ToString() - - #Setting up cross compilers from MSVC - $Products = 'Community', 'Professional', 'Enterprise', 'BuildTools' | % { "Microsoft.VisualStudio.Product.$_" } - $VsInstallPath = (vswhere -products $Products -latest -format json | ConvertFrom-Json).installationPath - $VSVars = (Get-ChildItem -Path $VsInstallPath -Recurse -Filter "vcvarsamd64_arm64.bat").FullName - $ScriptingObj = New-Object -ComObject Scripting.FileSystemObject - $VSVarsShort = $ScriptingObj.GetFile($VSVars).ShortPath - cmd /c "$VSVarsShort && set" | - ForEach-Object { - if ($_ -match "=") { - $Var = $_.split("=") - set-item -force -path "ENV:\$($Var[0])" -value "$($Var[1])" - } - } - - #Building the wheel - pip wheel . 
--config-settings=setup-args="--cross-file=$CrossScript" - - if ((Test-Path -LiteralPath variable:\LASTEXITCODE)) { exit $LASTEXITCODE } - - - name: Fix wheel - shell: powershell - run: | - $ErrorActionPreference = "Stop" - - #Finding whl file - $CurrentDir = (get-location) - $WhlName = ((Get-ChildItem -Filter "*.whl").FullName) - $ZipWhlName = "$CurrentDir\ZipWhlName.zip" - $UnzippedWhl = "$CurrentDir\unzipedWhl" - - #Expanding whl file - Rename-Item -Path $WhlName $ZipWhlName - if (Test-Path $UnzippedWhl) { - Remove-Item -Force -Recurse $UnzippedWhl - } - Expand-Archive -Force -Path $ZipWhlName $UnzippedWhl - - #Renaming all files to show that their arch is arm64 - Get-ChildItem -Recurse -Path $UnzippedWhl *win_amd64* | Rename-Item -NewName { $_.Name -replace 'win_amd64', 'win_arm64' } - $DIST_DIR = (Get-ChildItem -Recurse -Path $UnzippedWhl *dist-info).FullName - - #Changing amd64 references from metafiles - (GET-Content $DIST_DIR/RECORD) -replace 'win_amd64', 'win_arm64' | Set-Content $DIST_DIR/RECORD - (GET-Content $DIST_DIR/WHEEL) -replace 'win_amd64', 'win_arm64' | Set-Content $DIST_DIR/WHEEL - - #Packing whl file - Compress-Archive -Path $UnzippedWhl\* -DestinationPath $ZipWhlName -Force - $WhlName = $WhlName.Replace("win_amd64", "win_arm64") - Rename-Item -Path $ZipWhlName $WhlName - - if ((Test-Path -LiteralPath variable:\LASTEXITCODE)) { exit $LASTEXITCODE } - - - name: Upload Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - with: - name: ${{ env.python_version }}-win_arm64 - path: ./*.whl - - - name: Setup Mamba - uses: mamba-org/setup-micromamba@b09ef9b599704322748535812ca03efb2625677b - with: - # for installation of anaconda-client, required for upload to - # anaconda.org - # Note that this step is *after* specific pythons have been used to - # build and test the wheel - # for installation of anaconda-client, for upload to anaconda.org - # environment will be activated after creation, and in future bash steps - init-shell: bash - environment-name: upload-env - create-args: >- - anaconda-client - - # - name: Upload wheels - # if: success() - # shell: bash -el {0} - # # see https://github.com/marketplace/actions/setup-miniconda for why - # # `-el {0}` is required. - # env: - # NUMPY_STAGING_UPLOAD_TOKEN: ${{ secrets.NUMPY_STAGING_UPLOAD_TOKEN }} - # NUMPY_NIGHTLY_UPLOAD_TOKEN: ${{ secrets.NUMPY_NIGHTLY_UPLOAD_TOKEN }} - # run: | - # source tools/wheels/upload_wheels.sh - # set_upload_vars - # # trigger an upload to - # # https://anaconda.org/scientific-python-nightly-wheels/numpy - # # for cron jobs or "Run workflow" (restricted to main branch). 
- # # Tags will upload to - # # https://anaconda.org/multibuild-wheels-staging/numpy - # # The tokens were originally generated at anaconda.org - # upload_wheels - From ec8edceb1a8b94883a1e6e474d615d9be0b6b9e7 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 3 Jul 2025 11:47:22 +0100 Subject: [PATCH 0181/1018] TYP: rename `_T` to `_ScalarT` in `matlib.pyi` for consistency (#29310) --- numpy/matlib.pyi | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/numpy/matlib.pyi b/numpy/matlib.pyi index baeadc078028..93bee1975df3 100644 --- a/numpy/matlib.pyi +++ b/numpy/matlib.pyi @@ -507,8 +507,8 @@ __all__ += np.__all__ ### -_T = TypeVar("_T", bound=np.generic) -_Matrix: TypeAlias = np.matrix[tuple[int, int], np.dtype[_T]] +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_Matrix: TypeAlias = np.matrix[tuple[int, int], np.dtype[_ScalarT]] _Order: TypeAlias = Literal["C", "F"] ### @@ -517,7 +517,7 @@ _Order: TypeAlias = Literal["C", "F"] @overload def empty(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... @overload -def empty(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +def empty(shape: int | tuple[int, int], dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... @overload def empty(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... @@ -525,7 +525,7 @@ def empty(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C @overload def ones(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... @overload -def ones(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +def ones(shape: int | tuple[int, int], dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... @overload def ones(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... @@ -533,7 +533,7 @@ def ones(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C" @overload def zeros(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... @overload -def zeros(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +def zeros(shape: int | tuple[int, int], dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... @overload def zeros(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... @@ -541,7 +541,7 @@ def zeros(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C @overload def identity(n: int, dtype: None = None) -> _Matrix[np.float64]: ... @overload -def identity(n: int, dtype: _DTypeLike[_T]) -> _Matrix[_T]: ... +def identity(n: int, dtype: _DTypeLike[_ScalarT]) -> _Matrix[_ScalarT]: ... @overload def identity(n: int, dtype: npt.DTypeLike | None = None) -> _Matrix[Any]: ... @@ -555,9 +555,9 @@ def eye( order: _Order = "C", ) -> _Matrix[np.float64]: ... @overload -def eye(n: int, M: int | None, k: int, dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +def eye(n: int, M: int | None, k: int, dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... @overload -def eye(n: int, M: int | None = None, k: int = 0, *, dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... 
+def eye(n: int, M: int | None = None, k: int = 0, *, dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... @overload def eye(n: int, M: int | None = None, k: int = 0, dtype: npt.DTypeLike = ..., order: _Order = "C") -> _Matrix[Any]: ... @@ -575,8 +575,8 @@ def randn(arg: int, /, *args: int) -> _Matrix[np.float64]: ... # @overload -def repmat(a: _Matrix[_T], m: int, n: int) -> _Matrix[_T]: ... +def repmat(a: _Matrix[_ScalarT], m: int, n: int) -> _Matrix[_ScalarT]: ... @overload -def repmat(a: _ArrayLike[_T], m: int, n: int) -> npt.NDArray[_T]: ... +def repmat(a: _ArrayLike[_ScalarT], m: int, n: int) -> npt.NDArray[_ScalarT]: ... @overload def repmat(a: npt.ArrayLike, m: int, n: int) -> npt.NDArray[Any]: ... From 85708b3c0bd3d2fdbcb00cb08d0a7b323ce3af2d Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 3 Jul 2025 12:05:06 +0100 Subject: [PATCH 0182/1018] TYP: Type ``MaskedArray.dot`` and ``MaskedArray.anom`` (#29309) --- numpy/ma/core.pyi | 16 ++++++++++++++-- numpy/typing/tests/data/reveal/ma.pyi | 9 +++++++++ 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 3fad41a1434e..ca9826c7d083 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1200,7 +1200,12 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): out: _ArrayT, ) -> _ArrayT: ... - def dot(self, b, out=..., strict=...): ... + # This differs from `ndarray.dot`, in that 1D dot 1D returns a 0D array. + @overload + def dot(self, b: ArrayLike, out: None = ..., strict: bool = ...) -> _MaskedArray[Any]: ... + @overload + def dot(self, b: ArrayLike, out: _ArrayT, strict: bool = ...) -> _ArrayT: ... + def sum(self, axis=..., dtype=..., out=..., keepdims=...): ... # Keep in sync with `ndarray.cumsum` @@ -1223,7 +1228,14 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... def mean(self, axis=..., dtype=..., out=..., keepdims=...): ... - def anom(self, axis=..., dtype=...): ... + + @overload + def anom(self, axis: SupportsIndex | None = None, dtype: None = None) -> Self: ... + @overload + def anom(self, axis: SupportsIndex | None = None, *, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, dtype]: ... + @overload + def anom(self, axis: SupportsIndex | None, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, dtype]: ... + def var(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... def std(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... 
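As a quick illustration of the runtime behavior these annotations describe, a minimal sketch (the array values are illustrative):

    import numpy as np

    a = np.ma.masked_array([1.0, 2.0, 3.0], mask=[False, True, False])

    # Per the comment added above, 1D dot 1D yields a 0D MaskedArray
    # rather than a bare scalar, hence the `_MaskedArray[Any]` return.
    r = a.dot([1.0, 1.0, 1.0])
    print(r.ndim)   # 0

    # anom() subtracts the mean along the given axis; with dtype=None the
    # input dtype is preserved, matching the `-> Self` overload.
    m = np.ma.masked_array([[1.0, 2.0], [3.0, 4.0]])
    print(m.anom(axis=0))   # [[-1. -1.] [ 1.  1.]]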
diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 9e8ab98ec4ce..c26dca0ed9ec 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -373,6 +373,11 @@ assert_type(MAR_b.shrink_mask(), MaskedArray[np.bool_]) assert_type(MAR_i8.hardmask, bool) assert_type(MAR_i8.sharedmask, bool) +assert_type(MAR_2d_f4.anom(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.anom(axis=0, dtype=np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype]) +assert_type(MAR_2d_f4.anom(0, np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype]) +assert_type(MAR_2d_f4.anom(0, 'float16'), np.ma.MaskedArray[tuple[int, int], np.dtype]) + assert_type(MAR_b.transpose(), MaskedArray[np.bool]) assert_type(MAR_2d_f4.transpose(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) assert_type(MAR_2d_f4.transpose(1, 0), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) @@ -380,6 +385,10 @@ assert_type(MAR_2d_f4.transpose((1, 0)), np.ma.MaskedArray[tuple[int, int], np.d assert_type(MAR_b.T, MaskedArray[np.bool]) assert_type(MAR_2d_f4.T, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.dot(1), MaskedArray[Any]) +assert_type(MAR_2d_f4.dot([1]), MaskedArray[Any]) +assert_type(MAR_2d_f4.dot(1, out=MAR_subclass), MaskedArraySubclass) + assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], ...]) assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) From 1dac511d00980b2a30e7fff23312bf84922dc0c2 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 3 Jul 2025 15:37:03 -0600 Subject: [PATCH 0183/1018] MAINT: remove out-of-date comment --- numpy/_core/src/multiarray/mapping.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index 2ce7dcdb234a..7483448e632b 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -2110,8 +2110,6 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) /* May need a generic copy function (only for refs and odd sizes) */ NPY_ARRAYMETHOD_FLAGS transfer_flags; npy_intp itemsize = PyArray_ITEMSIZE(self); - // TODO: the heuristic used here to determine the src_dtype might be subtly wrong - // for non-REFCHK user DTypes. See gh-27057 for the prior discussion about this. if (PyArray_GetDTypeTransferFunction( 1, itemsize, itemsize, descr, PyArray_DESCR(self), From 2273cd8d1e8be9a9ec237f6115cf59d4c071ebd2 Mon Sep 17 00:00:00 2001 From: Dan Raviv Date: Fri, 4 Jul 2025 13:14:00 -0700 Subject: [PATCH 0184/1018] DOC: Fix spelling (#29320) --- doc/source/reference/arrays.classes.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst index 8a2e804eb36b..80821c2c08fa 100644 --- a/doc/source/reference/arrays.classes.rst +++ b/doc/source/reference/arrays.classes.rst @@ -32,7 +32,7 @@ Note that :func:`asarray` always returns the base-class ndarray. If you are confident that your use of the array object can handle any subclass of an ndarray, then :func:`asanyarray` can be used to allow subclasses to propagate more cleanly through your subroutine. In -principal a subclass could redefine any aspect of the array and +principle, a subclass could redefine any aspect of the array and therefore, under strict guidelines, :func:`asanyarray` would rarely be useful. 
However, most subclasses of the array object will not redefine certain aspects of the array object such as the buffer From c21df31bb298384430c0f99765a55926ff69e39f Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 5 Jul 2025 16:14:09 -0600 Subject: [PATCH 0185/1018] MAINT: Rename nep-0049.rst. Rename to nep-0049-data-allocation-strategies.rst. Closes #29323. --- .../{nep-0049.rst => nep-0049-data-allocation-strategies.rst} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename doc/neps/{nep-0049.rst => nep-0049-data-allocation-strategies.rst} (100%) diff --git a/doc/neps/nep-0049.rst b/doc/neps/nep-0049-data-allocation-strategies.rst similarity index 100% rename from doc/neps/nep-0049.rst rename to doc/neps/nep-0049-data-allocation-strategies.rst From 2a445f43a7574ab1d80ac6a7981b62829a863064 Mon Sep 17 00:00:00 2001 From: Charlie Lin Date: Sun, 6 Jul 2025 20:32:45 -0400 Subject: [PATCH 0186/1018] BLD: update `highway` submodule to latest master Fixes #29130 --- numpy/_core/src/highway | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/highway b/numpy/_core/src/highway index 12b325bc1793..37c08e5528f6 160000 --- a/numpy/_core/src/highway +++ b/numpy/_core/src/highway @@ -1 +1 @@ -Subproject commit 12b325bc1793dee68ab2157995a690db859fe9e0 +Subproject commit 37c08e5528f63ead9c7e4fd99ba454c1b1a3e3f7 From 48bc85af1e8aae77f8325de0babb8bbb24cb2fab Mon Sep 17 00:00:00 2001 From: Dan Raviv Date: Mon, 7 Jul 2025 00:26:33 -0700 Subject: [PATCH 0187/1018] DOC: Clarify assert_allclose differences vs. allclose (#29325) Resolves #9988 --- numpy/testing/_private/utils.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 5f8dfc209283..2d45fa30b8a6 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -1625,9 +1625,10 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, contrast to the standard usage in numpy, NaNs are compared like numbers, no assertion is raised if both objects have NaNs in the same positions. - The test is equivalent to ``allclose(actual, desired, rtol, atol)`` (note - that ``allclose`` has different default values). It compares the difference - between `actual` and `desired` to ``atol + rtol * abs(desired)``. + The test is equivalent to ``allclose(actual, desired, rtol, atol)``, + except that it is stricter: it doesn't broadcast its operands, and has + tighter default tolerance values. It compares the difference between + `actual` and `desired` to ``atol + rtol * abs(desired)``. Parameters ---------- From 6a6b171ef8123c236f7afc59ff9ce3e011895ed9 Mon Sep 17 00:00:00 2001 From: DWesl <22566757+DWesl@users.noreply.github.com> Date: Mon, 7 Jul 2025 03:29:31 -0400 Subject: [PATCH 0188/1018] TST: Add test for non-npy files in npz and different names (#29313) * TST: Add test for non-npy files in npz and different names Retrieving `a` from an npz file will first check for a file `a`, then for `a.npy`; this checks both. * BUG: Argument to read should be int not str Old behavior is to read everything, which will happen if I pass no argument. 
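A minimal sketch of the lookup rules the new test exercises (the archive name here is illustrative):

    import zipfile
    import numpy as np

    with zipfile.ZipFile("archive.npz", "w") as zf:
        with zf.open("test1.npy", "w") as f:   # regular .npy member
            np.save(f, np.arange(10))
        with zf.open("test2", "w") as f:       # .npy payload, no suffix
            np.save(f, np.arange(10))
        with zf.open("metadata", "w") as f:    # not an .npy file at all
            f.write(b"Name: Test")

    with np.load("archive.npz") as npz:
        npz["test1"]      # found via the test1 -> test1.npy mapping
        npz["test1.npy"]  # the stored name works as well
        npz["test2"]      # suffix-less members load too
        npz["metadata"]   # no .npy payload: returned as b"Name: Test"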
--------- Co-authored-by: Sebastian Berg --- numpy/lib/_npyio_impl.py | 2 +- numpy/lib/tests/test_io.py | 17 +++++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index 36ead97a1aae..e9a8509fc685 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -261,7 +261,7 @@ def __getitem__(self, key): max_header_size=self.max_header_size ) else: - return bytes.read(key) + return bytes.read() def __contains__(self, key): return (key in self._files) diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 48f579d7ddc7..05ef0be92211 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -7,6 +7,7 @@ import threading import time import warnings +import zipfile from ctypes import c_bool from datetime import datetime from io import BytesIO, StringIO @@ -217,6 +218,22 @@ def roundtrip(self, *args, **kwargs): self.arr_reloaded.fid.close() os.remove(self.arr_reloaded.fid.name) + def test_load_non_npy(self): + """Test loading non-.npy files and name mapping in .npz.""" + with temppath(prefix="numpy_test_npz_load_non_npy_", suffix=".npz") as tmp: + with zipfile.ZipFile(tmp, "w") as npz: + with npz.open("test1.npy", "w") as out_file: + np.save(out_file, np.arange(10)) + with npz.open("test2", "w") as out_file: + np.save(out_file, np.arange(10)) + with npz.open("metadata", "w") as out_file: + out_file.write(b"Name: Test") + with np.load(tmp) as npz: + assert len(npz["test1"]) == 10 + assert len(npz["test1.npy"]) == 10 + assert len(npz["test2"]) == 10 + assert npz["metadata"] == b"Name: Test" + @pytest.mark.skipif(IS_PYPY, reason="Hangs on PyPy") @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") @pytest.mark.slow From d844b79201d966ae3668f12386d0a585ea766630 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Mon, 7 Jul 2025 12:42:13 +0100 Subject: [PATCH 0189/1018] TYP: `svd` overload incorrectly noted `Literal[False]` to be the default for `compute_uv` (#29331) --- numpy/linalg/_linalg.pyi | 20 ++++++++++++++++++-- numpy/typing/tests/data/reveal/linalg.pyi | 2 ++ 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 68bd6d933921..e9b4cc0eb0e4 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -277,14 +277,30 @@ def svd( def svd( a: _ArrayLikeInt_co, full_matrices: bool = ..., - compute_uv: L[False] = ..., + *, + compute_uv: L[False], + hermitian: bool = ..., +) -> NDArray[float64]: ... +@overload +def svd( + a: _ArrayLikeInt_co, + full_matrices: bool, + compute_uv: L[False], hermitian: bool = ..., ) -> NDArray[float64]: ... @overload def svd( a: _ArrayLikeComplex_co, full_matrices: bool = ..., - compute_uv: L[False] = ..., + *, + compute_uv: L[False], + hermitian: bool = ..., +) -> NDArray[floating]: ... +@overload +def svd( + a: _ArrayLikeComplex_co, + full_matrices: bool, + compute_uv: L[False], hermitian: bool = ..., ) -> NDArray[floating]: ... 
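A minimal sketch of the call patterns the new overloads distinguish (the input matrix is illustrative):

    import numpy as np

    a = np.eye(3)

    u, s, vh = np.linalg.svd(a)               # default: full SVDResult
    s1 = np.linalg.svd(a, compute_uv=False)   # keyword-only form
    s2 = np.linalg.svd(a, True, False)        # positional compute_uv
    assert np.allclose(s, s1) and np.allclose(s1, s2)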
diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 417fb0d8c558..663fb888f012 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -74,8 +74,10 @@ assert_type(np.linalg.svd(AR_i8), SVDResult) assert_type(np.linalg.svd(AR_f8), SVDResult) assert_type(np.linalg.svd(AR_c16), SVDResult) assert_type(np.linalg.svd(AR_i8, compute_uv=False), npt.NDArray[np.float64]) +assert_type(np.linalg.svd(AR_i8, True, False), npt.NDArray[np.float64]) assert_type(np.linalg.svd(AR_f8, compute_uv=False), npt.NDArray[np.floating]) assert_type(np.linalg.svd(AR_c16, compute_uv=False), npt.NDArray[np.floating]) +assert_type(np.linalg.svd(AR_c16, True, False), npt.NDArray[np.floating]) assert_type(np.linalg.cond(AR_i8), Any) assert_type(np.linalg.cond(AR_f8), Any) From 85cb82c79c4923f1e3289f80498064f1b0c70f25 Mon Sep 17 00:00:00 2001 From: polaris-3 <214435818+polaris-3@users.noreply.github.com> Date: Mon, 7 Jul 2025 16:41:49 +0200 Subject: [PATCH 0190/1018] ENH: Improve error message in numpy.testing.assert_array_compare (#29112) Improve the error message in numpy.testing.assert_array_compare to show the position of differing elements. Currently, if there is an assertion error in functions like numpy.testing.assert_allclose, both arrays are printed. Typically the arrays are so large that they are truncated before being printed, making it hard to spot the differences. I suggest to print the indices of the differing elements as well as the corresponding values. To avoid excessive output, the number of printed out differences is limited to five. * MAINT: Different error messages for different numbers of mismatches --- .../upcoming_changes/29112.improvement.rst | 5 ++ numpy/testing/_private/utils.py | 33 ++++++++ numpy/testing/tests/test_utils.py | 83 +++++++++++++++++++ 3 files changed, 121 insertions(+) create mode 100644 doc/release/upcoming_changes/29112.improvement.rst diff --git a/doc/release/upcoming_changes/29112.improvement.rst b/doc/release/upcoming_changes/29112.improvement.rst new file mode 100644 index 000000000000..01baa668b9fe --- /dev/null +++ b/doc/release/upcoming_changes/29112.improvement.rst @@ -0,0 +1,5 @@ +Improved error message for `assert_array_compare` +------------------------------------------------- +The error message generated by `assert_array_compare` which is used by functions +like `assert_allclose`, `assert_array_less` etc. now also includes information +about the indices at which the assertion fails. \ No newline at end of file diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 2d45fa30b8a6..78f9e9a004b1 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -574,6 +574,8 @@ def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True): Arrays are not almost equal to 9 decimals Mismatched elements: 1 / 2 (50%) + Mismatch at index: + [1]: 2.3333333333333 (ACTUAL), 2.33333334 (DESIRED) Max absolute difference among violations: 6.66669964e-09 Max relative difference among violations: 2.85715698e-09 ACTUAL: array([1. 
, 2.333333333]) @@ -875,6 +877,31 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): percent_mismatch = 100 * n_mismatch / n_elements remarks = [f'Mismatched elements: {n_mismatch} / {n_elements} ' f'({percent_mismatch:.3g}%)'] + if invalids.ndim != 0: + if flagged.ndim > 0: + positions = np.argwhere(np.asarray(~flagged))[invalids] + else: + positions = np.argwhere(np.asarray(invalids)) + s = "\n".join( + [ + f" {p.tolist()}: {ox if ox.ndim == 0 else ox[tuple(p)]} " + f"({names[0]}), {oy if oy.ndim == 0 else oy[tuple(p)]} " + f"({names[1]})" + for p in positions[:5] + ] + ) + if len(positions) == 1: + remarks.append( + f"Mismatch at index:\n{s}" + ) + elif len(positions) <= 5: + remarks.append( + f"Mismatch at indices:\n{s}" + ) + else: + remarks.append( + f"First 5 mismatches are at indices:\n{s}" + ) with errstate(all='ignore'): # ignore errors for non-numeric types @@ -1013,6 +1040,8 @@ def assert_array_equal(actual, desired, err_msg='', verbose=True, *, Arrays are not equal Mismatched elements: 1 / 3 (33.3%) + Mismatch at index: + [1]: 3.141592653589793 (ACTUAL), 3.1415926535897927 (DESIRED) Max absolute difference among violations: 4.4408921e-16 Max relative difference among violations: 1.41357986e-16 ACTUAL: array([1. , 3.141593, nan]) @@ -1126,6 +1155,8 @@ def assert_array_almost_equal(actual, desired, decimal=6, err_msg='', Arrays are not almost equal to 5 decimals Mismatched elements: 1 / 3 (33.3%) + Mismatch at index: + [1]: 2.33333 (ACTUAL), 2.33339 (DESIRED) Max absolute difference among violations: 6.e-05 Max relative difference among violations: 2.57136612e-05 ACTUAL: array([1. , 2.33333, nan]) @@ -1247,6 +1278,8 @@ def assert_array_less(x, y, err_msg='', verbose=True, *, strict=False): Arrays are not strictly ordered `x < y` Mismatched elements: 1 / 3 (33.3%) + Mismatch at index: + [0]: 1.0 (x), 1.0 (y) Max absolute difference among violations: 0. Max relative difference among violations: 0. 
x: array([ 1., 1., nan]) diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index fcf20091ca8e..b09df821680b 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -265,6 +265,8 @@ def test_array_vs_array_not_equal(self): b = np.array([34986, 545676, 439655, 0]) expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Mismatch at index:\n' + ' [3]: 563766 (ACTUAL), 0 (DESIRED)\n' 'Max absolute difference among violations: 563766\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -272,6 +274,9 @@ def test_array_vs_array_not_equal(self): a = np.array([34986, 545676, 439655.2, 563766]) expected_msg = ('Mismatched elements: 2 / 4 (50%)\n' + 'Mismatch at indices:\n' + ' [2]: 439655.2 (ACTUAL), 439655 (DESIRED)\n' + ' [3]: 563766.0 (ACTUAL), 0 (DESIRED)\n' 'Max absolute difference among violations: ' '563766.\n' 'Max relative difference among violations: ' @@ -466,6 +471,8 @@ def test_closeness(self): self._assert_func([1.499999], [0.0], decimal=0) expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Mismatch at index:\n' + ' [0]: 1.5 (ACTUAL), 0.0 (DESIRED)\n' 'Max absolute difference among violations: 1.5\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -474,12 +481,16 @@ def test_closeness(self): a = [1.4999999, 0.00003] b = [1.49999991, 0] expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 3e-05 (ACTUAL), 0.0 (DESIRED)\n' 'Max absolute difference among violations: 3.e-05\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): self._assert_func(a, b, decimal=7) expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 0.0 (ACTUAL), 3e-05 (DESIRED)\n' 'Max absolute difference among violations: 3.e-05\n' 'Max relative difference among violations: 1.') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -493,6 +504,8 @@ def test_simple(self): self._assert_func(x, y, decimal=4) expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Mismatch at index:\n' + ' [0]: 1234.2222 (ACTUAL), 1234.2223 (DESIRED)\n' 'Max absolute difference among violations: ' '1.e-04\n' 'Max relative difference among violations: ' @@ -504,6 +517,9 @@ def test_array_vs_scalar(self): a = [5498.42354, 849.54345, 0.00] b = 5498.42354 expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n' + 'Mismatch at indices:\n' + ' [1]: 849.54345 (ACTUAL), 5498.42354 (DESIRED)\n' + ' [2]: 0.0 (ACTUAL), 5498.42354 (DESIRED)\n' 'Max absolute difference among violations: ' '5498.42354\n' 'Max relative difference among violations: 1.') @@ -511,6 +527,9 @@ def test_array_vs_scalar(self): self._assert_func(a, b, decimal=9) expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n' + 'Mismatch at indices:\n' + ' [1]: 5498.42354 (ACTUAL), 849.54345 (DESIRED)\n' + ' [2]: 5498.42354 (ACTUAL), 0.0 (DESIRED)\n' 'Max absolute difference among violations: ' '5498.42354\n' 'Max relative difference among violations: 5.4722099') @@ -519,6 +538,8 @@ def test_array_vs_scalar(self): a = [5498.42354, 0.00] expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 5498.42354 (ACTUAL), 0.0 (DESIRED)\n' 'Max absolute difference among violations: ' '5498.42354\n' 'Max relative difference among violations: inf') @@ -527,6 +548,8 @@ def test_array_vs_scalar(self): b = 0 expected_msg = 
('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [0]: 5498.42354 (ACTUAL), 0 (DESIRED)\n' 'Max absolute difference among violations: ' '5498.42354\n' 'Max relative difference among violations: inf') @@ -603,6 +626,8 @@ def all(self, *args, **kwargs): all(z) b = np.array([1., 202]).view(MyArray) expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 2.0 (ACTUAL), 202.0 (DESIRED)\n' 'Max absolute difference among violations: 200.\n' 'Max relative difference among violations: 0.99009') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -693,6 +718,10 @@ def test_error_message(self): # Test with a different amount of decimal digits expected_msg = ('Mismatched elements: 3 / 3 (100%)\n' + 'Mismatch at indices:\n' + ' [0]: 1.00000000001 (ACTUAL), 1.00000000002 (DESIRED)\n' + ' [1]: 2.00000000002 (ACTUAL), 2.00000000003 (DESIRED)\n' + ' [2]: 3.00003 (ACTUAL), 3.00004 (DESIRED)\n' 'Max absolute difference among violations: 1.e-05\n' 'Max relative difference among violations: ' '3.33328889e-06\n' @@ -708,6 +737,8 @@ def test_error_message(self): # differs. Note that we only check for the formatting of the arrays # themselves. expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n' + 'Mismatch at index:\n' + ' [2]: 3.00003 (ACTUAL), 3.00004 (DESIRED)\n' 'Max absolute difference among violations: 1.e-05\n' 'Max relative difference among violations: ' '3.33328889e-06\n' @@ -720,6 +751,8 @@ def test_error_message(self): x = np.array([np.inf, 0]) y = np.array([np.inf, 1]) expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 0.0 (ACTUAL), 1.0 (DESIRED)\n' 'Max absolute difference among violations: 1.\n' 'Max relative difference among violations: 1.\n' ' ACTUAL: array([inf, 0.])\n' @@ -731,6 +764,9 @@ def test_error_message(self): x = np.array([1, 2]) y = np.array([0, 0]) expected_msg = ('Mismatched elements: 2 / 2 (100%)\n' + 'Mismatch at indices:\n' + ' [0]: 1 (ACTUAL), 0 (DESIRED)\n' + ' [1]: 2 (ACTUAL), 0 (DESIRED)\n' 'Max absolute difference among violations: 2\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -742,6 +778,12 @@ def test_error_message_2(self): x = 2 y = np.ones(20) expected_msg = ('Mismatched elements: 20 / 20 (100%)\n' + 'First 5 mismatches are at indices:\n' + ' [0]: 2 (ACTUAL), 1.0 (DESIRED)\n' + ' [1]: 2 (ACTUAL), 1.0 (DESIRED)\n' + ' [2]: 2 (ACTUAL), 1.0 (DESIRED)\n' + ' [3]: 2 (ACTUAL), 1.0 (DESIRED)\n' + ' [4]: 2 (ACTUAL), 1.0 (DESIRED)\n' 'Max absolute difference among violations: 1.\n' 'Max relative difference among violations: 1.') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -750,6 +792,12 @@ def test_error_message_2(self): y = 2 x = np.ones(20) expected_msg = ('Mismatched elements: 20 / 20 (100%)\n' + 'First 5 mismatches are at indices:\n' + ' [0]: 1.0 (ACTUAL), 2 (DESIRED)\n' + ' [1]: 1.0 (ACTUAL), 2 (DESIRED)\n' + ' [2]: 1.0 (ACTUAL), 2 (DESIRED)\n' + ' [3]: 1.0 (ACTUAL), 2 (DESIRED)\n' + ' [4]: 1.0 (ACTUAL), 2 (DESIRED)\n' 'Max absolute difference among violations: 1.\n' 'Max relative difference among violations: 0.5') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -838,6 +886,9 @@ def test_simple_arrays(self): b = np.array([2, 4, 6, 8]) expected_msg = ('Mismatched elements: 2 / 4 (50%)\n' + 'Mismatch at indices:\n' + ' [2]: 6 (x), 6 (y)\n' + ' [3]: 20 (x), 8 (y)\n' 'Max absolute difference among violations: 12\n' 'Max relative difference among 
violations: 1.5') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -849,6 +900,11 @@ def test_rank2(self): self._assert_func(x, y) expected_msg = ('Mismatched elements: 4 / 4 (100%)\n' + 'Mismatch at indices:\n' + ' [0, 0]: 1.2 (x), 1.1 (y)\n' + ' [0, 1]: 2.3 (x), 2.2 (y)\n' + ' [1, 0]: 3.4 (x), 3.3 (y)\n' + ' [1, 1]: 4.5 (x), 4.4 (y)\n' 'Max absolute difference among violations: 0.1\n' 'Max relative difference among violations: 0.09090909') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -867,6 +923,8 @@ def test_rank3(self): y[0, 0, 0] = 0 expected_msg = ('Mismatched elements: 1 / 8 (12.5%)\n' + 'Mismatch at index:\n' + ' [0, 0, 0]: 1.0 (x), 0.0 (y)\n' 'Max absolute difference among violations: 1.\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -910,12 +968,20 @@ def test_simple_items_and_array(self): y = 999090.54 expected_msg = ('Mismatched elements: 1 / 12 (8.33%)\n' + 'Mismatch at index:\n' + ' [1, 1]: 999090.54 (x), 999090.54 (y)\n' 'Max absolute difference among violations: 0.\n' 'Max relative difference among violations: 0.') with pytest.raises(AssertionError, match=re.escape(expected_msg)): self._assert_func(x, y) expected_msg = ('Mismatched elements: 12 / 12 (100%)\n' + 'First 5 mismatches are at indices:\n' + ' [0, 0]: 999090.54 (x), 3.4536 (y)\n' + ' [0, 1]: 999090.54 (x), 2390.5436 (y)\n' + ' [0, 2]: 999090.54 (x), 435.54657 (y)\n' + ' [0, 3]: 999090.54 (x), 324525.4535 (y)\n' + ' [1, 0]: 999090.54 (x), 5449.54 (y)\n' 'Max absolute difference among violations: ' '999087.0864\n' 'Max relative difference among violations: ' @@ -928,12 +994,17 @@ def test_zeroes(self): y = np.array(87654.) expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n' + 'Mismatch at index:\n' + ' [0]: 546456.0 (x), 87654.0 (y)\n' 'Max absolute difference among violations: 458802.\n' 'Max relative difference among violations: 5.23423917') with pytest.raises(AssertionError, match=re.escape(expected_msg)): self._assert_func(x, y) expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n' + 'Mismatch at indices:\n' + ' [1]: 87654.0 (x), 0.0 (y)\n' + ' [2]: 87654.0 (x), 15.455 (y)\n' 'Max absolute difference among violations: 87654.\n' 'Max relative difference among violations: ' '5670.5626011') @@ -943,12 +1014,18 @@ def test_zeroes(self): y = 0 expected_msg = ('Mismatched elements: 3 / 3 (100%)\n' + 'Mismatch at indices:\n' + ' [0]: 546456.0 (x), 0 (y)\n' + ' [1]: 0.0 (x), 0 (y)\n' + ' [2]: 15.455 (x), 0 (y)\n' 'Max absolute difference among violations: 546456.\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): self._assert_func(x, y) expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n' + 'Mismatch at index:\n' + ' [1]: 0 (x), 0.0 (y)\n' 'Max absolute difference among violations: 0.\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -1134,12 +1211,16 @@ def test_simple(self): b = np.array([x, y, x, x]) c = np.array([x, y, x, z]) expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Mismatch at index:\n' + ' [3]: 0.001 (ACTUAL), 0.0 (DESIRED)\n' 'Max absolute difference among violations: 0.001\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): assert_allclose(b, c) expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Mismatch at index:\n' + ' [3]: 0.0 (ACTUAL), 0.001 (DESIRED)\n' 'Max 
absolute difference among violations: 0.001\n' 'Max relative difference among violations: 1.') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -1155,6 +1236,8 @@ def test_report_fail_percentage(self): b = np.array([1, 1, 1, 2]) expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Mismatch at index:\n' + ' [3]: 1 (ACTUAL), 2 (DESIRED)\n' 'Max absolute difference among violations: 1\n' 'Max relative difference among violations: 0.5') with pytest.raises(AssertionError, match=re.escape(expected_msg)): From a63bed470e4e0e2785b1299155e8d4a34e302282 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Mon, 7 Jul 2025 15:48:03 +0100 Subject: [PATCH 0191/1018] TYP: Allow passing `dtype=None` to `trace` (#29332) --- numpy/__init__.pyi | 6 +++--- numpy/_core/fromnumeric.pyi | 6 +++--- numpy/linalg/_linalg.pyi | 2 +- numpy/ma/core.pyi | 6 +++--- numpy/typing/tests/data/reveal/fromnumeric.pyi | 1 + numpy/typing/tests/data/reveal/ma.pyi | 1 + 6 files changed, 12 insertions(+), 10 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 6bcbe5b68662..d8c57c87cbbe 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2395,7 +2395,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): offset: SupportsIndex = ..., axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., out: None = ..., ) -> Any: ... @overload @@ -2404,7 +2404,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): offset: SupportsIndex = ..., axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., *, out: _ArrayT, ) -> _ArrayT: ... @@ -2414,7 +2414,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): offset: SupportsIndex, axis1: SupportsIndex, axis2: SupportsIndex, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, ) -> _ArrayT: ... diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index ba6669936571..34849c2cc800 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -531,7 +531,7 @@ def trace( offset: SupportsIndex = ..., axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., out: None = ..., ) -> Any: ... @overload @@ -540,7 +540,7 @@ def trace( offset: SupportsIndex, axis1: SupportsIndex, axis2: SupportsIndex, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, ) -> _ArrayT: ... @overload @@ -549,7 +549,7 @@ def trace( offset: SupportsIndex = ..., axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., *, out: _ArrayT, ) -> _ArrayT: ... diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index e9b4cc0eb0e4..51844817e2dc 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -440,7 +440,7 @@ def trace( /, *, offset: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., ) -> Any: ... @overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index ca9826c7d083..63dea396de66 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1177,7 +1177,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): offset: SupportsIndex = ..., axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., out: None = ..., ) -> Any: ... 
@overload @@ -1186,7 +1186,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): offset: SupportsIndex = ..., axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., *, out: _ArrayT, ) -> _ArrayT: ... @@ -1196,7 +1196,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): offset: SupportsIndex, axis1: SupportsIndex, axis2: SupportsIndex, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, ) -> _ArrayT: ... diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index 62bc926c765b..a2ba83e6c4c8 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -124,6 +124,7 @@ assert_type(np.diagonal(AR_f4), npt.NDArray[np.float32]) assert_type(np.trace(AR_b), Any) assert_type(np.trace(AR_f4), Any) assert_type(np.trace(AR_f4, out=AR_subclass), NDArraySubclass) +assert_type(np.trace(AR_f4, out=AR_subclass, dtype=None), NDArraySubclass) assert_type(np.ravel(b), np.ndarray[tuple[int], np.dtype[np.bool]]) assert_type(np.ravel(f4), np.ndarray[tuple[int], np.dtype[np.float32]]) diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index c26dca0ed9ec..7c2cb9d5f05e 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -394,6 +394,7 @@ assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) assert_type(MAR_f8.trace(), Any) assert_type(MAR_f8.trace(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.trace(out=MAR_subclass, dtype=None), MaskedArraySubclass) assert_type(MAR_f8.round(), MaskedArray[np.float64]) assert_type(MAR_f8.round(out=MAR_subclass), MaskedArraySubclass) From fdf5f1b5f2d5268e4908cbe068891bcbbc3b47bc Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 7 Jul 2025 11:25:41 -0600 Subject: [PATCH 0192/1018] MAINT: remove internal uses of assert_warns and suppress_warnings (#29322) * MAINT: remove internal uses of assert_warns and suppress_warnings * MAINT: adjust check for ignore filters in warnings tests * MAINT: simplify filtering per review comments * MAINT: fix warnings test * MAINT: appease linter --- numpy/_core/tests/test_api.py | 3 +- numpy/_core/tests/test_arrayprint.py | 3 +- numpy/_core/tests/test_datetime.py | 21 +-- numpy/_core/tests/test_einsum.py | 6 +- numpy/_core/tests/test_indexing.py | 5 +- numpy/_core/tests/test_mem_policy.py | 4 +- numpy/_core/tests/test_memmap.py | 7 +- numpy/_core/tests/test_multiarray.py | 51 +++---- numpy/_core/tests/test_nditer.py | 13 +- numpy/_core/tests/test_regression.py | 9 +- numpy/_core/tests/test_scalar_ctors.py | 6 +- numpy/_core/tests/test_scalarmath.py | 28 ++-- numpy/_core/tests/test_ufunc.py | 9 +- numpy/_core/tests/test_umath.py | 29 ++-- numpy/distutils/tests/test_exec_command.py | 12 +- numpy/lib/_nanfunctions_impl.py | 1 - numpy/lib/_scimath_impl.py | 6 +- numpy/lib/tests/test_format.py | 5 +- numpy/lib/tests/test_function_base.py | 16 +-- numpy/lib/tests/test_histograms.py | 16 +-- numpy/lib/tests/test_io.py | 23 ++-- numpy/lib/tests/test_nanfunctions.py | 59 ++++---- numpy/lib/tests/test_stride_tricks.py | 5 +- numpy/linalg/tests/test_deprecations.py | 11 +- numpy/linalg/tests/test_linalg.py | 12 +- numpy/ma/tests/test_core.py | 137 +++++++++---------- numpy/ma/tests/test_deprecations.py | 7 +- numpy/ma/tests/test_extras.py | 68 ++++----- numpy/ma/tests/test_regression.py | 14 +- numpy/polynomial/tests/test_polynomial.py | 5 +- 
numpy/random/tests/test_generator_mt19937.py | 12 +- numpy/random/tests/test_random.py | 17 +-- numpy/random/tests/test_randomstate.py | 45 +++--- numpy/tests/test_reloading.py | 6 +- numpy/tests/test_warnings.py | 9 +- 35 files changed, 318 insertions(+), 362 deletions(-) diff --git a/numpy/_core/tests/test_api.py b/numpy/_core/tests/test_api.py index da4a8f423bc5..c2e5bf8909f9 100644 --- a/numpy/_core/tests/test_api.py +++ b/numpy/_core/tests/test_api.py @@ -12,7 +12,6 @@ assert_array_equal, assert_equal, assert_raises, - assert_warns, ) @@ -315,7 +314,7 @@ def test_object_array_astype_to_void(): def test_array_astype_warning(t): # test ComplexWarning when casting from complex to float or int a = np.array(10, dtype=np.complex128) - assert_warns(np.exceptions.ComplexWarning, a.astype, t) + pytest.warns(np.exceptions.ComplexWarning, a.astype, t) @pytest.mark.parametrize(["dtype", "out_dtype"], [(np.bytes_, np.bool), diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py index 1fd4ac2fddb7..a054f408e954 100644 --- a/numpy/_core/tests/test_arrayprint.py +++ b/numpy/_core/tests/test_arrayprint.py @@ -15,7 +15,6 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, ) from numpy.testing._private.utils import run_threaded @@ -736,7 +735,7 @@ def test_0d_arrays(self): assert_equal(str(x), "1") # check `style` arg raises - assert_warns(DeprecationWarning, np.array2string, + pytest.warns(DeprecationWarning, np.array2string, np.array(1.), style=repr) # but not in legacy mode np.array2string(np.array(1.), style=repr, legacy='1.13') diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index c5d2ca459e6a..a10ca15bc373 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -1,5 +1,6 @@ import datetime import pickle +import warnings from zoneinfo import ZoneInfo, ZoneInfoNotFoundError import pytest @@ -13,8 +14,6 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, - suppress_warnings, ) try: @@ -1282,8 +1281,9 @@ def test_datetime_multiply(self): assert_raises(TypeError, np.multiply, 1.5, dta) # NaTs - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in multiply") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "invalid value encountered in multiply", RuntimeWarning) nat = np.timedelta64('NaT') def check(a, b, res): @@ -1344,7 +1344,7 @@ def test_timedelta_floor_divide(self, op1, op2, exp): np.timedelta64(-1)), ]) def test_timedelta_floor_div_warnings(self, op1, op2): - with assert_warns(RuntimeWarning): + with pytest.warns(RuntimeWarning): actual = op1 // op2 assert_equal(actual, 0) assert_equal(actual.dtype, np.int64) @@ -1428,9 +1428,9 @@ def test_timedelta_divmod_typeerror(self, op1, op2): np.timedelta64(-1)), ]) def test_timedelta_divmod_warnings(self, op1, op2): - with assert_warns(RuntimeWarning): + with pytest.warns(RuntimeWarning): expected = (op1 // op2, op1 % op2) - with assert_warns(RuntimeWarning): + with pytest.warns(RuntimeWarning): actual = divmod(op1, op2) assert_equal(actual, expected) @@ -1482,8 +1482,9 @@ def test_datetime_divide(self): assert_raises(TypeError, np.divide, 1.5, dta) # NaTs - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, r".*encountered in divide") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', r".*encountered in divide", RuntimeWarning) nat = np.timedelta64('NaT') for tp in (int, float): assert_equal(np.timedelta64(1) / tp(0), nat) 
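The same replacement pattern recurs throughout the patch; a minimal standalone sketch (the 0/0 operation is illustrative):

    import warnings

    import numpy as np
    import pytest

    with warnings.catch_warnings():
        # Ignore one specific RuntimeWarning by message, replacing
        # suppress_warnings().filter(...).
        warnings.filterwarnings(
            "ignore", "invalid value encountered", RuntimeWarning)
        np.float64(0.0) / np.float64(0.0)   # would warn; filtered here

    # Where a test expects the warning, pytest.warns replaces assert_warns:
    with pytest.warns(RuntimeWarning, match="invalid value"):
        np.float64(0.0) / np.float64(0.0)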
@@ -2045,7 +2046,7 @@ def test_timedelta_modulus_error(self, val1, val2): @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") def test_timedelta_modulus_div_by_zero(self): - with assert_warns(RuntimeWarning): + with pytest.warns(RuntimeWarning): actual = np.timedelta64(10, 's') % np.timedelta64(0, 's') assert_equal(actual, np.timedelta64('NaT')) diff --git a/numpy/_core/tests/test_einsum.py b/numpy/_core/tests/test_einsum.py index 0bd180b5e41f..84d4af1707b6 100644 --- a/numpy/_core/tests/test_einsum.py +++ b/numpy/_core/tests/test_einsum.py @@ -1,4 +1,5 @@ import itertools +import warnings import pytest @@ -11,7 +12,6 @@ assert_equal, assert_raises, assert_raises_regex, - suppress_warnings, ) # Setup for optimize einsum @@ -455,8 +455,8 @@ def check_einsum_sums(self, dtype, do_opt=False): np.outer(a, b)) # Suppress the complex warnings for the 'as f8' tests - with suppress_warnings() as sup: - sup.filter(np.exceptions.ComplexWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', np.exceptions.ComplexWarning) # matvec(a,b) / a.dot(b) where a is matrix, b is vector for n in range(1, 17): diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index 81ba85ea4648..2a8a669d0787 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -16,7 +16,6 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, ) @@ -784,12 +783,12 @@ def test_boolean_index_cast_assign(self): assert_equal(zero_array[0, 1], 1) # Fancy indexing works, although we get a cast warning. - assert_warns(ComplexWarning, + pytest.warns(ComplexWarning, zero_array.__setitem__, ([0], [1]), np.array([2 + 1j])) assert_equal(zero_array[0, 1], 2) # No complex part # Cast complex to float, throwing away the imaginary portion. - assert_warns(ComplexWarning, + pytest.warns(ComplexWarning, zero_array.__setitem__, bool_index, np.array([1j])) assert_equal(zero_array[0, 1], 0) diff --git a/numpy/_core/tests/test_mem_policy.py b/numpy/_core/tests/test_mem_policy.py index b9f971e73249..70befb8bd324 100644 --- a/numpy/_core/tests/test_mem_policy.py +++ b/numpy/_core/tests/test_mem_policy.py @@ -9,7 +9,7 @@ import numpy as np from numpy._core.multiarray import get_handler_name -from numpy.testing import IS_EDITABLE, IS_WASM, assert_warns, extbuild +from numpy.testing import IS_EDITABLE, IS_WASM, extbuild @pytest.fixture @@ -432,7 +432,7 @@ def test_switch_owner(get_module, policy): # The policy should be NULL, so we have to assume we can call # "free". 
A warning is given if the policy == "1" if policy: - with assert_warns(RuntimeWarning) as w: + with pytest.warns(RuntimeWarning) as w: del a gc.collect() else: diff --git a/numpy/_core/tests/test_memmap.py b/numpy/_core/tests/test_memmap.py index cbd825205844..49931e1680e8 100644 --- a/numpy/_core/tests/test_memmap.py +++ b/numpy/_core/tests/test_memmap.py @@ -1,6 +1,7 @@ import mmap import os import sys +import warnings from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryFile @@ -26,7 +27,6 @@ assert_array_equal, assert_equal, break_cycles, - suppress_warnings, ) @@ -167,8 +167,9 @@ def test_ufunc_return_ndarray(self): fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) fp[:] = self.data - with suppress_warnings() as sup: - sup.filter(FutureWarning, "np.average currently does not preserve") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "np.average currently does not preserve", FutureWarning) for unary_op in [sum, average, prod]: result = unary_op(fp) assert_(isscalar(result)) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 4ee021d18f6f..930c736c6076 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -48,11 +48,9 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, break_cycles, check_support_sve, runstring, - suppress_warnings, temppath, ) from numpy.testing._private.utils import _no_tracing, requires_memory @@ -3805,11 +3803,11 @@ def test__complex__(self): ap = complex(a) assert_equal(ap, a, msg) - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): bp = complex(b) assert_equal(bp, b, msg) - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): cp = complex(c) assert_equal(cp, c, msg) @@ -3833,7 +3831,7 @@ def test__complex__should_not_work(self): assert_raises(TypeError, complex, d) e = np.array(['1+1j'], 'U') - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): assert_raises(TypeError, complex, e) class TestCequenceMethods: @@ -4932,9 +4930,11 @@ class TestArgmax: @pytest.mark.parametrize('data', nan_arr) def test_combinations(self, data): arr, pos = data - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, - "invalid value encountered in reduce") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', + "invalid value encountered in reduce", + RuntimeWarning) val = np.max(arr) assert_equal(np.argmax(arr), pos, err_msg=f"{arr!r}") @@ -5074,9 +5074,11 @@ class TestArgmin: @pytest.mark.parametrize('data', nan_arr) def test_combinations(self, data): arr, pos = data - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, - "invalid value encountered in reduce") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', + "invalid value encountered in reduce", + RuntimeWarning) min_val = np.min(arr) assert_equal(np.argmin(arr), pos, err_msg=f"{arr!r}") @@ -7268,8 +7270,8 @@ def test_out_arg(self): out = np.zeros((5, 2), dtype=np.complex128) c = self.matmul(a, b, out=out) assert_(c is out) - with suppress_warnings() as sup: - sup.filter(ComplexWarning, '') + with warnings.catch_warnings(): + warnings.simplefilter('ignore', ComplexWarning) c = c.astype(tgt.dtype) assert_array_equal(c, tgt) @@ -8816,8 +8818,9 @@ def test_multiarray_writable_attributes_deletion(self): # ticket #2046, should not seqfault, raise AttributeError a = np.ones(2) attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 
'flat'] - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "Assigning the 'data' attribute") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "Assigning the 'data' attribute", DeprecationWarning) for s in attr: assert_raises(AttributeError, delattr, a, s) @@ -9071,9 +9074,9 @@ def test_to_int_scalar(self): int_funcs = (int, lambda x: x.__int__()) for int_func in int_funcs: assert_equal(int_func(np.array(0)), 0) - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): assert_equal(int_func(np.array([1])), 1) - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): assert_equal(int_func(np.array([[42]])), 42) assert_raises(TypeError, int_func, np.array([1, 2])) @@ -9087,7 +9090,7 @@ def __int__(self): raise NotImplementedError assert_raises(NotImplementedError, int_func, np.array(NotConvertible())) - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): assert_raises(NotImplementedError, int_func, np.array([NotConvertible()])) @@ -9677,12 +9680,10 @@ def test_view_assign(self): @pytest.mark.leaks_references( reason="increments self in dealloc; ignore since deprecated path.") def test_dealloc_warning(self): - with suppress_warnings() as sup: - sup.record(RuntimeWarning) - arr = np.arange(9).reshape(3, 3) - v = arr.T + arr = np.arange(9).reshape(3, 3) + v = arr.T + with pytest.warns(RuntimeWarning): _multiarray_tests.npy_abuse_writebackifcopy(v) - assert len(sup.log) == 1 def test_view_discard_refcount(self): from numpy._core._multiarray_tests import ( @@ -10202,8 +10203,8 @@ def test_strided_loop_alignments(self): xf128 = _aligned_zeros(3, np.longdouble, align=align) # test casting, both to and from misaligned - with suppress_warnings() as sup: - sup.filter(ComplexWarning, "Casting complex values") + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', "Casting complex values", ComplexWarning) xc64.astype('f8') xf64.astype(np.complex64) test = xc64 + xf64 diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index f71130f16331..f0e10333808c 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -1,6 +1,7 @@ import subprocess import sys import textwrap +import warnings import pytest @@ -15,7 +16,6 @@ assert_array_equal, assert_equal, assert_raises, - suppress_warnings, ) from numpy.testing._private.utils import requires_memory @@ -1899,8 +1899,8 @@ def test_iter_buffered_cast_byteswapped(): assert_equal(a, 2 * np.arange(10, dtype='f4')) - with suppress_warnings() as sup: - sup.filter(np.exceptions.ComplexWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', np.exceptions.ComplexWarning) a = np.arange(10, dtype='f8') a = a.view(a.dtype.newbyteorder()).byteswap() @@ -3310,13 +3310,10 @@ def test_warn_noclose(): a = np.arange(6, dtype='f4') au = a.byteswap() au = au.view(au.dtype.newbyteorder()) - with suppress_warnings() as sup: - sup.record(RuntimeWarning) + with pytest.warns(RuntimeWarning): it = np.nditer(au, [], [['readwrite', 'updateifcopy']], - casting='equiv', op_dtypes=[np.dtype('f4')]) + casting='equiv', op_dtypes=[np.dtype('f4')]) del it - assert len(sup.log) == 1 - @pytest.mark.parametrize(["in_dtype", "buf_dtype"], [("i", "O"), ("O", "i"), # most simple cases diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index d9a3a529c9ea..2aeb29a1320d 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ 
-3,6 +3,7 @@ import pickle import sys import tempfile +import warnings from io import BytesIO from itertools import chain from os import path @@ -27,8 +28,6 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, - suppress_warnings, ) from numpy.testing._private.utils import _no_tracing, requires_memory @@ -1619,9 +1618,9 @@ def test_fromfile_tofile_seeks(self): def test_complex_scalar_warning(self): for tp in [np.csingle, np.cdouble, np.clongdouble]: x = tp(1 + 2j) - assert_warns(ComplexWarning, float, x) - with suppress_warnings() as sup: - sup.filter(ComplexWarning) + pytest.warns(ComplexWarning, float, x) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', ComplexWarning) assert_equal(float(x), float(x.real)) def test_complex_scalar_complex_cast(self): diff --git a/numpy/_core/tests/test_scalar_ctors.py b/numpy/_core/tests/test_scalar_ctors.py index ea78cd9d9f51..05ede01b5973 100644 --- a/numpy/_core/tests/test_scalar_ctors.py +++ b/numpy/_core/tests/test_scalar_ctors.py @@ -4,7 +4,7 @@ import pytest import numpy as np -from numpy.testing import assert_almost_equal, assert_equal, assert_warns +from numpy.testing import assert_almost_equal, assert_equal class TestFromString: @@ -25,7 +25,7 @@ def test_floating_overflow(self): assert_equal(fsingle, np.inf) fdouble = np.double('1e10000') assert_equal(fdouble, np.inf) - flongdouble = assert_warns(RuntimeWarning, np.longdouble, '1e10000') + flongdouble = pytest.warns(RuntimeWarning, np.longdouble, '1e10000') assert_equal(flongdouble, np.inf) fhalf = np.half('-1e10000') @@ -34,7 +34,7 @@ def test_floating_overflow(self): assert_equal(fsingle, -np.inf) fdouble = np.double('-1e10000') assert_equal(fdouble, -np.inf) - flongdouble = assert_warns(RuntimeWarning, np.longdouble, '-1e10000') + flongdouble = pytest.warns(RuntimeWarning, np.longdouble, '-1e10000') assert_equal(flongdouble, -np.inf) diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index 746b410f79d2..8c24b4cfdc88 100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -23,7 +23,6 @@ assert_equal, assert_raises, check_support_sve, - suppress_warnings, ) types = [np.bool, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc, @@ -369,12 +368,7 @@ def test_float_modulus_corner_cases(self): assert_(rem >= -b, f'dt: {dt}') # Check nans, inf - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in remainder") - sup.filter(RuntimeWarning, "divide by zero encountered in remainder") - sup.filter(RuntimeWarning, "divide by zero encountered in floor_divide") - sup.filter(RuntimeWarning, "divide by zero encountered in divmod") - sup.filter(RuntimeWarning, "invalid value encountered in divmod") + with warnings.catch_warnings(), np.errstate(all='ignore'): for dt in np.typecodes['Float']: fone = np.array(1.0, dtype=dt) fzer = np.array(0.0, dtype=dt) @@ -522,21 +516,17 @@ def test_int_from_infinite_longdouble(self): # gh-627 x = np.longdouble(np.inf) assert_raises(OverflowError, int, x) - with suppress_warnings() as sup: - sup.record(ComplexWarning) + with pytest.warns(ComplexWarning): x = np.clongdouble(np.inf) assert_raises(OverflowError, int, x) - assert_equal(len(sup.log), 1) @pytest.mark.skipif(not IS_PYPY, reason="Test is PyPy only (gh-9972)") def test_int_from_infinite_longdouble___int__(self): x = np.longdouble(np.inf) assert_raises(OverflowError, x.__int__) - with suppress_warnings() as sup: - sup.record(ComplexWarning) + with 
pytest.warns(ComplexWarning): x = np.clongdouble(np.inf) assert_raises(OverflowError, x.__int__) - assert_equal(len(sup.log), 1) @pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), reason="long double is same as double") @@ -731,8 +721,8 @@ def test_exceptions(self): def test_result(self): types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) for dt in types: a = np.ones((), dtype=dt)[()] if dt in np.typecodes['UnsignedInteger']: @@ -749,8 +739,8 @@ def test_exceptions(self): def test_result(self): types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) for dt in types: a = np.ones((), dtype=dt)[()] assert_equal(operator.sub(a, a), 0) @@ -771,8 +761,8 @@ def _test_abs_func(self, absfunc, test_dtype): x = test_dtype(np.finfo(test_dtype).max) assert_equal(absfunc(x), x.real) - with suppress_warnings() as sup: - sup.filter(UserWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) x = test_dtype(np.finfo(test_dtype).tiny) assert_equal(absfunc(x), x.real) diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 836be1245399..b86e22917734 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -26,7 +26,6 @@ assert_equal, assert_no_warnings, assert_raises, - suppress_warnings, ) from numpy.testing._private.utils import requires_memory @@ -686,8 +685,8 @@ def test_true_divide(self): tgt = float(x) / float(y) rtol = max(np.finfo(dtout).resolution, 1e-15) # The value of tiny for double double is NaN - with suppress_warnings() as sup: - sup.filter(UserWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) if not np.isnan(np.finfo(dtout).tiny): atol = max(np.finfo(dtout).tiny, 3e-308) else: @@ -706,8 +705,8 @@ def test_true_divide(self): tgt = complex(x) / complex(y) rtol = max(np.finfo(dtout).resolution, 1e-15) # The value of tiny for double double is NaN - with suppress_warnings() as sup: - sup.filter(UserWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) if not np.isnan(np.finfo(dtout).tiny): atol = max(np.finfo(dtout).tiny, 3e-308) else: diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index b5145e9d642b..d8ed56c31b93 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -30,7 +30,6 @@ assert_no_warnings, assert_raises, assert_raises_regex, - suppress_warnings, ) from numpy.testing._private.utils import _glibc_older_than @@ -703,8 +702,8 @@ def test_floor_division_corner_cases(self, dtype): fone = np.array(1.0, dtype=dtype) fzer = np.array(0.0, dtype=dtype) finf = np.array(np.inf, dtype=dtype) - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in floor_divide") + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', "invalid value encountered in floor_divide", RuntimeWarning) div = np.floor_divide(fnan, fone) assert np.isnan(div), f"div: {div}" div = np.floor_divide(fone, fnan) @@ -859,9 +858,9 @@ def test_float_divmod_corner_cases(self): fone = np.array(1.0, dtype=dt) fzer = np.array(0.0, dtype=dt) finf = np.array(np.inf, dtype=dt) - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, 
"invalid value encountered in divmod") - sup.filter(RuntimeWarning, "divide by zero encountered in divmod") + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', "invalid value encountered in divmod", RuntimeWarning) + warnings.filterwarnings('ignore', "divide by zero encountered in divmod", RuntimeWarning) div, rem = np.divmod(fone, fzer) assert np.isinf(div), f'dt: {dt}, div: {rem}' assert np.isnan(rem), f'dt: {dt}, rem: {rem}' @@ -898,9 +897,9 @@ def test_float_remainder_corner_cases(self): assert_(rem >= -b, f'dt: {dt}') # Check nans, inf - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in remainder") - sup.filter(RuntimeWarning, "invalid value encountered in fmod") + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', "invalid value encountered in remainder", RuntimeWarning) + warnings.filterwarnings('ignore', "invalid value encountered in fmod", RuntimeWarning) for dt in np.typecodes['Float']: fone = np.array(1.0, dtype=dt) fzer = np.array(0.0, dtype=dt) @@ -2957,9 +2956,11 @@ def test_minmax_blocked(self): inp[:] = np.arange(inp.size, dtype=dt) inp[i] = np.nan emsg = lambda: f'{inp!r}\n{msg}' - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, - "invalid value encountered in reduce") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', + "invalid value encountered in reduce", + RuntimeWarning) assert_(np.isnan(inp.max()), msg=emsg) assert_(np.isnan(inp.min()), msg=emsg) @@ -4603,8 +4604,8 @@ def test_nextafter_0(): for t, direction in itertools.product(np._core.sctypes['float'], (1, -1)): # The value of tiny for double double is NaN, so we need to pass the # assert - with suppress_warnings() as sup: - sup.filter(UserWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) if not np.isnan(np.finfo(t).tiny): tiny = np.finfo(t).tiny assert_( diff --git a/numpy/distutils/tests/test_exec_command.py b/numpy/distutils/tests/test_exec_command.py index d1a20056a5a2..749523528e63 100644 --- a/numpy/distutils/tests/test_exec_command.py +++ b/numpy/distutils/tests/test_exec_command.py @@ -5,7 +5,7 @@ from numpy.distutils import exec_command from numpy.distutils.exec_command import get_pythonexe -from numpy.testing import tempdir, assert_, assert_warns, IS_WASM +from numpy.testing import tempdir, assert_, IS_WASM # In python 3 stdout, stderr are text (unicode compliant) devices, so to @@ -68,7 +68,7 @@ def test_exec_command_stdout(): # Test posix version: with redirect_stdout(StringIO()): with redirect_stderr(TemporaryFile()): - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): exec_command.exec_command("cd '.'") if os.name == 'posix': @@ -76,14 +76,14 @@ def test_exec_command_stdout(): with emulate_nonposix(): with redirect_stdout(StringIO()): with redirect_stderr(TemporaryFile()): - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): exec_command.exec_command("cd '.'") def test_exec_command_stderr(): # Test posix version: with redirect_stdout(TemporaryFile(mode='w+')): with redirect_stderr(StringIO()): - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): exec_command.exec_command("cd '.'") if os.name == 'posix': @@ -91,7 +91,7 @@ def test_exec_command_stderr(): with emulate_nonposix(): with redirect_stdout(TemporaryFile()): with redirect_stderr(StringIO()): - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): exec_command.exec_command("cd 
'.'") @@ -206,7 +206,7 @@ def check_execute_in(self, **kws): def test_basic(self): with redirect_stdout(StringIO()): with redirect_stderr(StringIO()): - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): if os.name == "posix": self.check_posix(use_tee=0) self.check_posix(use_tee=1) diff --git a/numpy/lib/_nanfunctions_impl.py b/numpy/lib/_nanfunctions_impl.py index 4a01490301c8..aec60d484ba4 100644 --- a/numpy/lib/_nanfunctions_impl.py +++ b/numpy/lib/_nanfunctions_impl.py @@ -717,7 +717,6 @@ def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, inf >>> np.nansum([1, np.nan, -np.inf]) -inf - >>> from numpy.testing import suppress_warnings >>> with np.errstate(invalid="ignore"): ... np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present np.float64(nan) diff --git a/numpy/lib/_scimath_impl.py b/numpy/lib/_scimath_impl.py index 8136a7d54515..b33f42b3d10d 100644 --- a/numpy/lib/_scimath_impl.py +++ b/numpy/lib/_scimath_impl.py @@ -628,9 +628,9 @@ def arctanh(x): >>> np.emath.arctanh(0.5) 0.5493061443340549 - >>> from numpy.testing import suppress_warnings - >>> with suppress_warnings() as sup: - ... sup.filter(RuntimeWarning) + >>> import warnings + >>> with warnings.catch_warnings(): + ... warnings.simplefilter('ignore', RuntimeWarning) ... np.emath.arctanh(np.eye(2)) array([[inf, 0.], [ 0., inf]]) diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index cf11dd25dbda..a6de6238d269 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -291,7 +291,6 @@ assert_array_equal, assert_raises, assert_raises_regex, - assert_warns, ) from numpy.testing._private.utils import requires_memory @@ -1008,7 +1007,7 @@ def test_unicode_field_names(tmpdir): # notifies the user that 3.0 is selected with open(fname, 'wb') as f: - with assert_warns(UserWarning): + with pytest.warns(UserWarning): format.write_array(f, arr, version=None) def test_header_growth_axis(): @@ -1041,7 +1040,7 @@ def test_metadata_dtype(dt): # gh-14142 arr = np.ones(10, dtype=dt) buf = BytesIO() - with assert_warns(UserWarning): + with pytest.warns(UserWarning): np.save(buf, arr) buf.seek(0) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index eccf4bcfb019..37f8bbadc31a 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -61,8 +61,6 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, - suppress_warnings, ) np_floats = [np.half, np.single, np.double, np.longdouble] @@ -2473,10 +2471,10 @@ def test_simple(self): def test_ddof(self): # ddof raises DeprecationWarning - with suppress_warnings() as sup: + with warnings.catch_warnings(): warnings.simplefilter("always") - assert_warns(DeprecationWarning, corrcoef, self.A, ddof=-1) - sup.filter(DeprecationWarning) + pytest.warns(DeprecationWarning, corrcoef, self.A, ddof=-1) + warnings.simplefilter('ignore', DeprecationWarning) # ddof has no or negligible effect on the function assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1) assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2) @@ -2485,11 +2483,11 @@ def test_ddof(self): def test_bias(self): # bias raises DeprecationWarning - with suppress_warnings() as sup: + with warnings.catch_warnings(): warnings.simplefilter("always") - assert_warns(DeprecationWarning, corrcoef, self.A, self.B, 1, 0) - assert_warns(DeprecationWarning, corrcoef, self.A, bias=0) - sup.filter(DeprecationWarning) + 
pytest.warns(DeprecationWarning, corrcoef, self.A, self.B, 1, 0) + pytest.warns(DeprecationWarning, corrcoef, self.A, bias=0) + warnings.simplefilter('ignore', DeprecationWarning) # bias has no or negligible effect on the function assert_almost_equal(corrcoef(self.A, bias=1), self.res1) diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py index 4ba953f462fc..fec8828d242e 100644 --- a/numpy/lib/tests/test_histograms.py +++ b/numpy/lib/tests/test_histograms.py @@ -1,3 +1,5 @@ +import warnings + import pytest import numpy as np @@ -12,7 +14,6 @@ assert_equal, assert_raises, assert_raises_regex, - suppress_warnings, ) @@ -138,11 +139,9 @@ def test_bool_conversion(self): # Should raise an warning on booleans # Ensure that the histograms are equivalent, need to suppress # the warnings to get the actual outputs - with suppress_warnings() as sup: - rec = sup.record(RuntimeWarning, 'Converting input from .*') + with pytest.warns(RuntimeWarning, match='Converting input from .*'): hist, edges = np.histogram([True, True, False]) # A warning should be issued - assert_equal(len(rec), 1) assert_array_equal(hist, int_hist) assert_array_equal(edges, int_edges) @@ -284,9 +283,8 @@ def test_some_nan_values(self): all_nan = np.array([np.nan, np.nan]) # the internal comparisons with NaN give warnings - sup = suppress_warnings() - sup.filter(RuntimeWarning) - with sup: + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) # can't infer range with nan assert_raises(ValueError, histogram, one_nan, bins='auto') assert_raises(ValueError, histogram, all_nan, bins='auto') @@ -618,9 +616,9 @@ def test_integer(self, bins): """ Test that bin width for integer data is at least 1. """ - with suppress_warnings() as sup: + with warnings.catch_warnings(): if bins == 'stone': - sup.filter(RuntimeWarning) + warnings.simplefilter('ignore', RuntimeWarning) assert_equal( np.histogram_bin_edges(np.tile(np.arange(9), 1000), bins), np.arange(9)) diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 05ef0be92211..a1698f240bff 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -36,9 +36,7 @@ assert_no_warnings, assert_raises, assert_raises_regex, - assert_warns, break_cycles, - suppress_warnings, tempdir, temppath, ) @@ -336,8 +334,9 @@ def test_closing_fid(self): # goes to zero. Python running in debug mode raises a # ResourceWarning when file closing is left to the garbage # collector, so we catch the warnings. - with suppress_warnings() as sup: - sup.filter(ResourceWarning) # TODO: specify exact message + with warnings.catch_warnings(): + # TODO: specify exact message + warnings.simplefilter('ignore', ResourceWarning) for i in range(1, 1025): try: np.load(tmp)["data"] @@ -1446,8 +1445,8 @@ def test_skip_footer(self): assert_equal(test, ctrl) def test_skip_footer_with_invalid(self): - with suppress_warnings() as sup: - sup.filter(ConversionWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', ConversionWarning) basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n' # Footer too small to get rid of all invalid values assert_raises(ValueError, np.genfromtxt, @@ -1845,8 +1844,8 @@ def test_usecols_with_named_columns(self): def test_empty_file(self): # Test that an empty file raises the proper warning. 
- with suppress_warnings() as sup: - sup.filter(message="genfromtxt: Empty input file:") + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', message="genfromtxt: Empty input file:") data = TextIO() test = np.genfromtxt(data) assert_equal(test, np.array([])) @@ -1991,7 +1990,7 @@ def test_invalid_raise(self): def f(): return np.genfromtxt(mdata, invalid_raise=False, **kwargs) - mtest = assert_warns(ConversionWarning, f) + mtest = pytest.warns(ConversionWarning, f) assert_equal(len(mtest), 45) assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde'])) # @@ -2012,7 +2011,7 @@ def test_invalid_raise_with_usecols(self): def f(): return np.genfromtxt(mdata, usecols=(0, 4), **kwargs) - mtest = assert_warns(ConversionWarning, f) + mtest = pytest.warns(ConversionWarning, f) assert_equal(len(mtest), 45) assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae'])) # @@ -2424,8 +2423,8 @@ def test_max_rows(self): assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=4) # Test with invalid not raise - with suppress_warnings() as sup: - sup.filter(ConversionWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', ConversionWarning) test = np.genfromtxt(TextIO(data), max_rows=4, invalid_raise=False) control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]) diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py index 89a6d1f95fed..447b84db3edc 100644 --- a/numpy/lib/tests/test_nanfunctions.py +++ b/numpy/lib/tests/test_nanfunctions.py @@ -15,7 +15,6 @@ assert_equal, assert_raises, assert_raises_regex, - suppress_warnings, ) # Test data @@ -281,8 +280,9 @@ def test_mutation(self): def test_result_values(self): for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]): for row in _ndat: - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "invalid value encountered in", RuntimeWarning) ind = f(row) val = row[ind] # comparing with NaN is tricky as the result @@ -491,10 +491,10 @@ def test_dtype_from_dtype(self): codes = 'efdgFDG' for nf, rf in zip(self.nanfuncs, self.stdfuncs): for c in codes: - with suppress_warnings() as sup: + with warnings.catch_warnings(): if nf in {np.nanstd, np.nanvar} and c in 'FDG': # Giving the warning is a small bug, see gh-8000 - sup.filter(ComplexWarning) + warnings.simplefilter('ignore', ComplexWarning) tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type assert_(res is tgt) @@ -508,10 +508,10 @@ def test_dtype_from_char(self): codes = 'efdgFDG' for nf, rf in zip(self.nanfuncs, self.stdfuncs): for c in codes: - with suppress_warnings() as sup: + with warnings.catch_warnings(): if nf in {np.nanstd, np.nanvar} and c in 'FDG': # Giving the warning is a small bug, see gh-8000 - sup.filter(ComplexWarning) + warnings.simplefilter('ignore', ComplexWarning) tgt = rf(mat, dtype=c, axis=1).dtype.type res = nf(mat, dtype=c, axis=1).dtype.type assert_(res is tgt) @@ -723,22 +723,22 @@ def test_ddof(self): res = nf(_ndat, axis=1, ddof=ddof) assert_almost_equal(res, tgt) - def test_ddof_too_big(self): + def test_ddof_too_big(self, recwarn): nanfuncs = [np.nanvar, np.nanstd] stdfuncs = [np.var, np.std] dsize = [len(d) for d in _rdat] for nf, rf in zip(nanfuncs, stdfuncs): for ddof in range(5): - with suppress_warnings() as sup: - sup.record(RuntimeWarning) - sup.filter(ComplexWarning) + with warnings.catch_warnings(): + 
warnings.simplefilter('ignore', ComplexWarning) tgt = [ddof >= d for d in dsize] res = nf(_ndat, axis=1, ddof=ddof) assert_equal(np.isnan(res), tgt) - if any(tgt): - assert_(len(sup.log) == 1) - else: - assert_(len(sup.log) == 0) + if any(tgt): + assert_(len(recwarn) == 1) + recwarn.pop(RuntimeWarning) + else: + assert_(len(recwarn) == 0) @pytest.mark.parametrize("axis", [None, 0, 1]) @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) @@ -860,8 +860,8 @@ def test_keepdims(self): w = np.random.random((4, 200)) * np.array(d.shape)[:, None] w = w.astype(np.intp) d[tuple(w)] = np.nan - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) res = np.nanmedian(d, axis=None, keepdims=True) assert_equal(res.shape, (1, 1, 1, 1)) res = np.nanmedian(d, axis=(0, 1), keepdims=True) @@ -946,17 +946,15 @@ def test_result_values(self): @pytest.mark.parametrize("dtype", _TYPE_CODES) def test_allnans(self, dtype, axis): mat = np.full((3, 3), np.nan).astype(dtype) - with suppress_warnings() as sup: - sup.record(RuntimeWarning) - + with pytest.warns(RuntimeWarning) as r: output = np.nanmedian(mat, axis=axis) assert output.dtype == mat.dtype assert np.isnan(output).all() if axis is None: - assert_(len(sup.log) == 1) + assert_(len(r) == 1) else: - assert_(len(sup.log) == 3) + assert_(len(r) == 3) # Check scalar scalar = np.array(np.nan).astype(dtype)[()] @@ -965,9 +963,9 @@ def test_allnans(self, dtype, axis): assert np.isnan(output_scalar) if axis is None: - assert_(len(sup.log) == 2) + assert_(len(r) == 2) else: - assert_(len(sup.log) == 4) + assert_(len(r) == 4) def test_empty(self): mat = np.zeros((0, 3)) @@ -995,8 +993,8 @@ def test_extended_axis_invalid(self): assert_raises(ValueError, np.nanmedian, d, axis=(1, 1)) def test_float_special(self): - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) for inf in [np.inf, -np.inf]: a = np.array([[inf, np.nan], [np.nan, np.nan]]) assert_equal(np.nanmedian(a, axis=0), [inf, np.nan]) @@ -1063,8 +1061,8 @@ def test_keepdims(self): w = np.random.random((4, 200)) * np.array(d.shape)[:, None] w = w.astype(np.intp) d[tuple(w)] = np.nan - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) res = np.nanpercentile(d, 90, axis=None, keepdims=True) assert_equal(res.shape, (1, 1, 1, 1)) res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True) @@ -1233,8 +1231,9 @@ def test_multiple_percentiles(self): large_mat[:, :, 3:] *= 2 for axis in [None, 0, 1]: for keepdim in [False, True]: - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "All-NaN slice encountered") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "All-NaN slice encountered", RuntimeWarning) val = np.percentile(mat, perc, axis=axis, keepdims=keepdim) nan_val = np.nanpercentile(nan_mat, perc, axis=axis, keepdims=keepdim) diff --git a/numpy/lib/tests/test_stride_tricks.py b/numpy/lib/tests/test_stride_tricks.py index fe40c953a147..fb654b4cfb85 100644 --- a/numpy/lib/tests/test_stride_tricks.py +++ b/numpy/lib/tests/test_stride_tricks.py @@ -16,7 +16,6 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, ) @@ -593,9 +592,9 @@ def test_writeable(): for array_is_broadcast, result in zip(is_broadcast, results): # This will change to False in a future version if array_is_broadcast: - 
with assert_warns(FutureWarning): + with pytest.warns(FutureWarning): assert_equal(result.flags.writeable, True) - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): result[:] = 0 # Warning not emitted, writing to the array resets it assert_equal(result.flags.writeable, True) diff --git a/numpy/linalg/tests/test_deprecations.py b/numpy/linalg/tests/test_deprecations.py index cd4c10832e7e..7fb5008f1ff8 100644 --- a/numpy/linalg/tests/test_deprecations.py +++ b/numpy/linalg/tests/test_deprecations.py @@ -1,8 +1,9 @@ """Test deprecation and future warnings. """ +import pytest + import numpy as np -from numpy.testing import assert_warns def test_qr_mode_full_future_warning(): @@ -14,7 +15,7 @@ def test_qr_mode_full_future_warning(): """ a = np.eye(2) - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='full') - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='f') - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='economic') - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='e') + pytest.warns(DeprecationWarning, np.linalg.qr, a, mode='full') + pytest.warns(DeprecationWarning, np.linalg.qr, a, mode='f') + pytest.warns(DeprecationWarning, np.linalg.qr, a, mode='economic') + pytest.warns(DeprecationWarning, np.linalg.qr, a, mode='e') diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index cbf7dd63be5e..8ad1c3ed6d16 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -8,6 +8,7 @@ import textwrap import threading import traceback +import warnings import pytest @@ -42,7 +43,6 @@ assert_equal, assert_raises, assert_raises_regex, - suppress_warnings, ) try: @@ -1318,8 +1318,9 @@ def test_vector_return_type(self): self.check_dtype(at, an) assert_almost_equal(an, 0.0) - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "divide by zero encountered") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "divide by zero encountered", RuntimeWarning) an = norm(at, -1) self.check_dtype(at, an) assert_almost_equal(an, 0.0) @@ -1481,8 +1482,9 @@ def test_matrix_return_type(self): self.check_dtype(at, an) assert_almost_equal(an, 2.0) - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "divide by zero encountered") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "divide by zero encountered", RuntimeWarning) an = norm(at, -1) self.check_dtype(at, an) assert_almost_equal(an, 1.0) diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 091ba6c99fff..2704857308a3 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -139,32 +139,24 @@ assert_not_equal, fail_if_equal, ) -from numpy.testing import ( - IS_WASM, - assert_raises, - assert_warns, - suppress_warnings, - temppath, -) +from numpy.testing import IS_WASM, assert_raises, temppath from numpy.testing._private.utils import requires_memory pi = np.pi -suppress_copy_mask_on_assignment = suppress_warnings() -suppress_copy_mask_on_assignment.filter( - numpy.ma.core.MaskedArrayFutureWarning, - "setting an item on a masked array which has a shared mask will not copy") - - # For parametrized numeric testing num_dts = [np.dtype(dt_) for dt_ in '?bhilqBHILQefdgFD'] num_ids = [dt_.char for dt_ in num_dts] +WARNING_MESSAGE = ("setting an item on a masked array which has a shared " + "mask will not copy") +WARNING_MARK_SPEC = f"ignore:.*{WARNING_MESSAGE}:numpy.ma.core.MaskedArrayFutureWarning" class TestMaskedArray: # Base test class for 
MaskedArrays. + # message for warning filters def setup_method(self): # Base data definition. x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) @@ -492,7 +484,7 @@ def test_setitem_no_warning(self): x[...] = value x[[0, 1, 2]] = value - @suppress_copy_mask_on_assignment + @pytest.mark.filterwarnings(WARNING_MARK_SPEC) def test_copy(self): # Tests of some subtle points of copying and sizing. n = [0, 0, 1, 0, 0] @@ -511,9 +503,9 @@ def test_copy(self): y1a = array(y1) # Default for masked array is not to copy; see gh-10318. assert_(y1a._data.__array_interface__ == - y1._data.__array_interface__) + y1._data.__array_interface__) assert_(y1a._mask.__array_interface__ == - y1._mask.__array_interface__) + y1._mask.__array_interface__) y2 = array(x1, mask=m3) assert_(y2._data.__array_interface__ == x1.__array_interface__) @@ -612,7 +604,7 @@ def test_format(self): # assert_equal(format(masked, " <5"), "-- ") # Expect a FutureWarning for using format_spec with MaskedElement - with assert_warns(FutureWarning): + with pytest.warns(FutureWarning): with_format_string = format(masked, " >5") assert_equal(with_format_string, "--") @@ -793,8 +785,9 @@ def test_topython(self): assert_equal(1.0, float(array([[1]]))) assert_raises(TypeError, float, array([1, 1])) - with suppress_warnings() as sup: - sup.filter(UserWarning, 'Warning: converting a masked element') + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', 'Warning: converting a masked element', UserWarning) assert_(np.isnan(float(array([1], mask=[1])))) a = array([1, 2, 3], mask=[1, 0, 0]) @@ -847,14 +840,17 @@ def test_oddfeatures_2(self): assert_(z[1] is not masked) assert_(z[2] is masked) - @suppress_copy_mask_on_assignment def test_oddfeatures_3(self): - # Tests some generic features - atest = array([10], mask=True) - btest = array([20]) - idx = atest.mask - atest[idx] = btest[idx] - assert_equal(atest, [20]) + msg = "setting an item on a masked array which has a shared mask will not copy" + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', msg, numpy.ma.core.MaskedArrayFutureWarning) + # Tests some generic features + atest = array([10], mask=True) + btest = array([20]) + idx = atest.mask + atest[idx] = btest[idx] + assert_equal(atest, [20]) def test_filled_with_object_dtype(self): a = np.ma.masked_all(1, dtype='O') @@ -2004,6 +2000,7 @@ def test_comparisons_strings(self, op, fill): ma2 = masked_array(["cde", "b", "a"], mask=[0, 1, 0], fill_value=fill) assert_equal(op(ma1, ma2)._data, op(ma1._data, ma2._data)) + @pytest.mark.filterwarnings("ignore:.*Comparison to `None`.*:FutureWarning") def test_eq_with_None(self): # Really, comparisons with None should not be done, but check them # anyway. Note that pep8 will flag these tests. @@ -2011,23 +2008,21 @@ def test_eq_with_None(self): # test will fail (and have to be changed accordingly). 
# With partial mask - with suppress_warnings() as sup: - sup.filter(FutureWarning, "Comparison to `None`") - a = array([None, 1], mask=[0, 1]) - assert_equal(a == None, array([True, False], mask=[0, 1])) # noqa: E711 - assert_equal(a.data == None, [True, False]) # noqa: E711 - assert_equal(a != None, array([False, True], mask=[0, 1])) # noqa: E711 - # With nomask - a = array([None, 1], mask=False) - assert_equal(a == None, [True, False]) # noqa: E711 - assert_equal(a != None, [False, True]) # noqa: E711 - # With complete mask - a = array([None, 2], mask=True) - assert_equal(a == None, array([False, True], mask=True)) # noqa: E711 - assert_equal(a != None, array([True, False], mask=True)) # noqa: E711 - # Fully masked, even comparison to None should return "masked" - a = masked - assert_equal(a == None, masked) # noqa: E711 + a = array([None, 1], mask=[0, 1]) + assert_equal(a == None, array([True, False], mask=[0, 1])) # noqa: E711 + assert_equal(a.data == None, [True, False]) # noqa: E711 + assert_equal(a != None, array([False, True], mask=[0, 1])) # noqa: E711 + # With nomask + a = array([None, 1], mask=False) + assert_equal(a == None, [True, False]) # noqa: E711 + assert_equal(a != None, [False, True]) # noqa: E711 + # With complete mask + a = array([None, 2], mask=True) + assert_equal(a == None, array([False, True], mask=True)) # noqa: E711 + assert_equal(a != None, array([True, False], mask=True)) # noqa: E711 + # Fully masked, even comparison to None should return "masked" + a = masked + assert_equal(a == None, masked) # noqa: E711 def test_eq_with_scalar(self): a = array(1) @@ -2383,16 +2378,15 @@ def test_fillvalue(self): assert_equal(x.fill_value, 999.) assert_equal(x._fill_value, np.array(999.)) + @pytest.mark.filterwarnings("ignore:.*Numpy has detected.*:FutureWarning") def test_subarray_fillvalue(self): # gh-10483 test multi-field index fill value fields = array([(1, 1, 1)], dtype=[('i', int), ('s', '|S8'), ('f', float)]) - with suppress_warnings() as sup: - sup.filter(FutureWarning, "Numpy has detected") - subfields = fields[['i', 'f']] - assert_equal(tuple(subfields.fill_value), (999999, 1.e+20)) - # test comparison does not raise: - subfields[1:] == subfields[:-1] + subfields = fields[['i', 'f']] + assert_equal(tuple(subfields.fill_value), (999999, 1.e+20)) + # test comparison does not raise: + subfields[1:] == subfields[:-1] def test_fillvalue_exotic_dtype(self): # Tests yet more exotic flexible dtypes @@ -3138,14 +3132,14 @@ def test_inplace_floor_division_array_type(self): def test_inplace_division_scalar_type(self): # Test of inplace division - for t in self.othertypes: - with suppress_warnings() as sup: - sup.record(UserWarning) - + with warnings.catch_warnings(): + warnings.simplefilter('error', DeprecationWarning) + for t in self.othertypes: (x, y, xm) = (_.astype(t) for _ in self.uint8data) x = arange(10, dtype=t) * t(2) xm = arange(10, dtype=t) * t(2) xm[2] = masked + nwarns = 0 # May get a DeprecationWarning or a TypeError. 
# @@ -3159,28 +3153,29 @@ def test_inplace_division_scalar_type(self): try: x /= t(2) assert_equal(x, y) - except (DeprecationWarning, TypeError) as e: - warnings.warn(str(e), stacklevel=1) + except (DeprecationWarning, TypeError): + nwarns += 1 try: xm /= t(2) assert_equal(xm, y) - except (DeprecationWarning, TypeError) as e: - warnings.warn(str(e), stacklevel=1) + except (DeprecationWarning, TypeError): + nwarns += 1 if issubclass(t, np.integer): - assert_equal(len(sup.log), 2, f'Failed on type={t}.') + assert_equal(nwarns, 2, f'Failed on type={t}.') else: - assert_equal(len(sup.log), 0, f'Failed on type={t}.') + assert_equal(nwarns, 0, f'Failed on type={t}.') def test_inplace_division_array_type(self): # Test of inplace division - for t in self.othertypes: - with suppress_warnings() as sup: - sup.record(UserWarning) + with warnings.catch_warnings(): + warnings.simplefilter('error', DeprecationWarning) + for t in self.othertypes: (x, y, xm) = (_.astype(t) for _ in self.uint8data) m = xm.mask a = arange(10, dtype=t) a[-1] = masked + nwarns = 0 # May get a DeprecationWarning or a TypeError. # @@ -3194,8 +3189,8 @@ def test_inplace_division_array_type(self): try: x /= a assert_equal(x, y / a) - except (DeprecationWarning, TypeError) as e: - warnings.warn(str(e), stacklevel=1) + except (DeprecationWarning, TypeError): + nwarns += 1 try: xm /= a assert_equal(xm, y / a) @@ -3203,13 +3198,13 @@ def test_inplace_division_array_type(self): xm.mask, mask_or(mask_or(m, a.mask), (a == t(0))) ) - except (DeprecationWarning, TypeError) as e: - warnings.warn(str(e), stacklevel=1) + except (DeprecationWarning, TypeError): + nwarns += 1 if issubclass(t, np.integer): - assert_equal(len(sup.log), 2, f'Failed on type={t}.') + assert_equal(nwarns, 2, f'Failed on type={t}.') else: - assert_equal(len(sup.log), 0, f'Failed on type={t}.') + assert_equal(nwarns, 0, f'Failed on type={t}.') def test_inplace_pow_type(self): # Test keeping data w/ (inplace) power @@ -3508,7 +3503,7 @@ def test_ones(self): b = a.view() assert_(np.may_share_memory(a.mask, b.mask)) - @suppress_copy_mask_on_assignment + @pytest.mark.filterwarnings(WARNING_MARK_SPEC) def test_put(self): # Tests put. 
d = arange(5) @@ -4226,7 +4221,7 @@ def test_varstd(self): assert_almost_equal(np.sqrt(mXvar0[k]), mX[:, k].compressed().std()) - @suppress_copy_mask_on_assignment + @pytest.mark.filterwarnings(WARNING_MARK_SPEC) def test_varstd_specialcases(self): # Test a special case for var nout = np.array(-1, dtype=float) @@ -5582,7 +5577,7 @@ def test_coercion_int(self): def test_coercion_float(self): a_f = np.zeros((), float) - assert_warns(UserWarning, operator.setitem, a_f, (), np.ma.masked) + pytest.warns(UserWarning, operator.setitem, a_f, (), np.ma.masked) assert_(np.isnan(a_f[()])) @pytest.mark.xfail(reason="See gh-9750") diff --git a/numpy/ma/tests/test_deprecations.py b/numpy/ma/tests/test_deprecations.py index 8cc8b9c72bb9..a2c98d0c229d 100644 --- a/numpy/ma/tests/test_deprecations.py +++ b/numpy/ma/tests/test_deprecations.py @@ -9,7 +9,6 @@ import numpy as np from numpy.ma.core import MaskedArrayFutureWarning from numpy.ma.testutils import assert_equal -from numpy.testing import assert_warns class TestArgsort: @@ -23,7 +22,7 @@ def _test_base(self, argsort, cls): # argsort has a bad default for >1d arrays arr_2d = np.array([[1, 2], [3, 4]]).view(cls) - result = assert_warns( + result = pytest.warns( np.ma.core.MaskedArrayFutureWarning, argsort, arr_2d) assert_equal(result, argsort(arr_2d, axis=None)) @@ -53,10 +52,10 @@ def test_axis_default(self): ma_max = np.ma.maximum.reduce # check that the default axis is still None, but warns on 2d arrays - result = assert_warns(MaskedArrayFutureWarning, ma_max, data2d) + result = pytest.warns(MaskedArrayFutureWarning, ma_max, data2d) assert_equal(result, ma_max(data2d, axis=None)) - result = assert_warns(MaskedArrayFutureWarning, ma_min, data2d) + result = pytest.warns(MaskedArrayFutureWarning, ma_min, data2d) assert_equal(result, ma_min(data2d, axis=None)) # no warnings on 1d, as both new and old defaults are equivalent diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index 3d10e839cbc9..34a032087536 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -68,7 +68,6 @@ assert_array_equal, assert_equal, ) -from numpy.testing import assert_warns, suppress_warnings class TestGeneric: @@ -746,7 +745,7 @@ def test_mask_row_cols_axis_deprecation(self, axis, func, rowcols_axis): x = array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): res = func(x, axis=axis) assert_equal(res, mask_rowcols(x, rowcols_axis)) @@ -1287,19 +1286,14 @@ def test_special(self): def test_empty(self): # empty arrays a = np.ma.masked_array(np.array([], dtype=float)) - with suppress_warnings() as w: - w.record(RuntimeWarning) + with pytest.warns(RuntimeWarning): assert_array_equal(np.ma.median(a), np.nan) - assert_(w.log[0].category is RuntimeWarning) # multiple dimensions a = np.ma.masked_array(np.array([], dtype=float, ndmin=3)) # no axis - with suppress_warnings() as w: - w.record(RuntimeWarning) - warnings.filterwarnings('always', '', RuntimeWarning) + with pytest.warns(RuntimeWarning): assert_array_equal(np.ma.median(a), np.nan) - assert_(w.log[0].category is RuntimeWarning) # axis 0 and 1 b = np.ma.masked_array(np.array([], dtype=float, ndmin=2)) @@ -1308,10 +1302,8 @@ def test_empty(self): # axis 2 b = np.ma.masked_array(np.array(np.nan, dtype=float, ndmin=2)) - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) + with pytest.warns(RuntimeWarning): 
assert_equal(np.ma.median(a, axis=2), b) - assert_(w[0].category is RuntimeWarning) def test_object(self): o = np.ma.masked_array(np.arange(7.)) @@ -1418,10 +1410,13 @@ def test_ddof(self): x, y = self.data, self.data2 expected = np.corrcoef(x) expected2 = np.corrcoef(x, y) - with suppress_warnings() as sup: - warnings.simplefilter("always") - assert_warns(DeprecationWarning, corrcoef, x, ddof=-1) - sup.filter(DeprecationWarning, "bias and ddof have no effect") + with pytest.warns(DeprecationWarning): + corrcoef(x, ddof=-1) + + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "bias and ddof have no effect", DeprecationWarning) + # ddof has no or negligible effect on the function assert_almost_equal(np.corrcoef(x, ddof=0), corrcoef(x, ddof=0)) assert_almost_equal(corrcoef(x, ddof=-1), expected) @@ -1433,12 +1428,16 @@ def test_bias(self): x, y = self.data, self.data2 expected = np.corrcoef(x) # bias raises DeprecationWarning - with suppress_warnings() as sup: - warnings.simplefilter("always") - assert_warns(DeprecationWarning, corrcoef, x, y, True, False) - assert_warns(DeprecationWarning, corrcoef, x, y, True, True) - assert_warns(DeprecationWarning, corrcoef, x, bias=False) - sup.filter(DeprecationWarning, "bias and ddof have no effect") + with pytest.warns(DeprecationWarning): + corrcoef(x, y, True, False) + with pytest.warns(DeprecationWarning): + corrcoef(x, y, True, True) + with pytest.warns(DeprecationWarning): + corrcoef(x, y, bias=False) + + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "bias and ddof have no effect", DeprecationWarning) # bias has no or negligible effect on the function assert_almost_equal(corrcoef(x, bias=1), expected) @@ -1448,8 +1447,9 @@ def test_1d_without_missing(self): assert_almost_equal(np.corrcoef(x), corrcoef(x)) assert_almost_equal(np.corrcoef(x, rowvar=False), corrcoef(x, rowvar=False)) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "bias and ddof have no effect", DeprecationWarning) assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), corrcoef(x, rowvar=False, bias=True)) @@ -1459,8 +1459,9 @@ def test_2d_without_missing(self): assert_almost_equal(np.corrcoef(x), corrcoef(x)) assert_almost_equal(np.corrcoef(x, rowvar=False), corrcoef(x, rowvar=False)) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "bias and ddof have no effect", DeprecationWarning) assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), corrcoef(x, rowvar=False, bias=True)) @@ -1473,8 +1474,9 @@ def test_1d_with_missing(self): assert_almost_equal(np.corrcoef(nx), corrcoef(x)) assert_almost_equal(np.corrcoef(nx, rowvar=False), corrcoef(x, rowvar=False)) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "bias and ddof have no effect", DeprecationWarning) assert_almost_equal(np.corrcoef(nx, rowvar=False, bias=True), corrcoef(x, rowvar=False, bias=True)) try: @@ -1486,8 +1488,9 @@ def test_1d_with_missing(self): assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1])) assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False), corrcoef(x, x[::-1], rowvar=False)) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have 
no effect") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "bias and ddof have no effect", DeprecationWarning) # ddof and bias have no or negligible effect on the function assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1], bias=1)) @@ -1503,8 +1506,9 @@ def test_2d_with_missing(self): test = corrcoef(x) control = np.corrcoef(x) assert_almost_equal(test[:-1, :-1], control[:-1, :-1]) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "bias and ddof have no effect", DeprecationWarning) # ddof and bias have no or negligible effect on the function assert_almost_equal(corrcoef(x, ddof=-2)[:-1, :-1], control[:-1, :-1]) diff --git a/numpy/ma/tests/test_regression.py b/numpy/ma/tests/test_regression.py index 025387ba454c..2a08234cba61 100644 --- a/numpy/ma/tests/test_regression.py +++ b/numpy/ma/tests/test_regression.py @@ -1,10 +1,7 @@ +import warnings + import numpy as np -from numpy.testing import ( - assert_, - assert_allclose, - assert_array_equal, - suppress_warnings, -) +from numpy.testing import assert_, assert_allclose, assert_array_equal class TestRegression: @@ -67,8 +64,9 @@ def test_ddof_corrcoef(self): x = np.ma.masked_equal([1, 2, 3, 4, 5], 4) y = np.array([2, 2.5, 3.1, 3, 5]) # this test can be removed after deprecation. - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "bias and ddof have no effect", DeprecationWarning) r0 = np.ma.corrcoef(x, y, ddof=0) r1 = np.ma.corrcoef(x, y, ddof=1) # ddof should not have an effect (it gets cancelled out) diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index 8bfa3c184cf7..5c8e85c7f860 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -6,6 +6,8 @@ from fractions import Fraction from functools import reduce +import pytest + import numpy as np import numpy.polynomial.polynomial as poly import numpy.polynomial.polyutils as pu @@ -16,7 +18,6 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, ) @@ -656,7 +657,7 @@ def test_fit_degenerate_domain(self): assert_equal(p.coef, [2.]) p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=0) assert_almost_equal(p.coef, [2.05]) - with assert_warns(pu.RankWarning): + with pytest.warns(pu.RankWarning): p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=1) def test_result_type(self): diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index d09cbba4ec39..50c232d4a8e7 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -1,6 +1,7 @@ import hashlib import os.path import sys +import warnings import pytest @@ -17,8 +18,6 @@ assert_equal, assert_no_warnings, assert_raises, - assert_warns, - suppress_warnings, ) random = Generator(MT19937()) @@ -1463,8 +1462,8 @@ def test_multivariate_normal(self, method): # Check that non positive-semidefinite covariance warns with # RuntimeWarning cov = [[1, 2], [2, 1]] - assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov) - assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov, + pytest.warns(RuntimeWarning, random.multivariate_normal, mean, cov) + pytest.warns(RuntimeWarning, random.multivariate_normal, mean, cov, method='eigh') 
assert_raises(LinAlgError, random.multivariate_normal, mean, cov, method='cholesky') @@ -1491,10 +1490,9 @@ def test_multivariate_normal(self, method): method='cholesky') cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) - with suppress_warnings() as sup: + with warnings.catch_warnings(): + warnings.simplefilter("error") random.multivariate_normal(mean, cov, method=method) - w = sup.record(RuntimeWarning) - assert len(w) == 0 mu = np.zeros(2) cov = np.eye(2) diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index d5981906f6ef..4eb455eb77be 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -13,8 +13,6 @@ assert_equal, assert_no_warnings, assert_raises, - assert_warns, - suppress_warnings, ) @@ -331,10 +329,8 @@ def test_randint(self): def test_random_integers(self): np.random.seed(self.seed) - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) + with pytest.warns(DeprecationWarning): actual = np.random.random_integers(-99, 99, size=(3, 2)) - assert_(len(w) == 1) desired = np.array([[31, 3], [-52, 41], [-48, -66]]) @@ -346,11 +342,9 @@ def test_random_integers_max_int(self): # into a C long. Previous implementations of this # method have thrown an OverflowError when attempting # to generate this integer. - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) + with pytest.warns(DeprecationWarning): actual = np.random.random_integers(np.iinfo('l').max, np.iinfo('l').max) - assert_(len(w) == 1) desired = np.iinfo('l').max assert_equal(actual, desired) @@ -795,7 +789,7 @@ def test_multivariate_normal(self): # RuntimeWarning mean = [0, 0] cov = [[1, 2], [2, 1]] - assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov) + pytest.warns(RuntimeWarning, np.random.multivariate_normal, mean, cov) # and that it doesn't warn with RuntimeWarning check_valid='ignore' assert_no_warnings(np.random.multivariate_normal, mean, cov, @@ -806,10 +800,9 @@ def test_multivariate_normal(self): check_valid='raise') cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) - with suppress_warnings() as sup: + with warnings.catch_warnings(): + warnings.simplefilter('error') np.random.multivariate_normal(mean, cov) - w = sup.record(RuntimeWarning) - assert len(w) == 0 def test_negative_binomial(self): np.random.seed(self.seed) diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py index cf4488543c12..c56d3c0f186c 100644 --- a/numpy/random/tests/test_randomstate.py +++ b/numpy/random/tests/test_randomstate.py @@ -16,8 +16,6 @@ assert_equal, assert_no_warnings, assert_raises, - assert_warns, - suppress_warnings, ) INT_FUNCS = {'binomial': (100.0, 0.6), @@ -242,12 +240,10 @@ def test_negative_binomial(self): def test_get_state_warning(self): rs = random.RandomState(PCG64()) - with suppress_warnings() as sup: - w = sup.record(RuntimeWarning) + with pytest.warns(RuntimeWarning): state = rs.get_state() - assert_(len(w) == 1) - assert isinstance(state, dict) - assert state['bit_generator'] == 'PCG64' + assert isinstance(state, dict) + assert state['bit_generator'] == 'PCG64' def test_invalid_legacy_state_setting(self): state = self.random_state.get_state() @@ -487,20 +483,16 @@ def test_randint(self): def test_random_integers(self): random.seed(self.seed) - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) + with pytest.warns(DeprecationWarning): actual = random.random_integers(-99, 99, size=(3, 2)) - assert_(len(w) == 1) desired = np.array([[31, 
3], [-52, 41], [-48, -66]]) assert_array_equal(actual, desired) random.seed(self.seed) - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) + with pytest.warns(DeprecationWarning): actual = random.random_integers(198, size=(3, 2)) - assert_(len(w) == 1) assert_array_equal(actual, desired + 100) def test_tomaxint(self): @@ -529,20 +521,16 @@ def test_random_integers_max_int(self): # into a C long. Previous implementations of this # method have thrown an OverflowError when attempting # to generate this integer. - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) + with pytest.warns(DeprecationWarning): actual = random.random_integers(np.iinfo('l').max, np.iinfo('l').max) - assert_(len(w) == 1) desired = np.iinfo('l').max assert_equal(actual, desired) - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) + with pytest.warns(DeprecationWarning): typer = np.dtype('l').type actual = random.random_integers(typer(np.iinfo('l').max), typer(np.iinfo('l').max)) - assert_(len(w) == 1) assert_equal(actual, desired) def test_random_integers_deprecated(self): @@ -879,8 +867,8 @@ def test_geometric_exceptions(self): assert_raises(ValueError, random.geometric, [1.1] * 10) assert_raises(ValueError, random.geometric, -0.1) assert_raises(ValueError, random.geometric, [-0.1] * 10) - with suppress_warnings() as sup: - sup.record(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) assert_raises(ValueError, random.geometric, np.nan) assert_raises(ValueError, random.geometric, [np.nan] * 10) @@ -1012,7 +1000,7 @@ def test_multivariate_normal(self): # RuntimeWarning mean = [0, 0] cov = [[1, 2], [2, 1]] - assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov) + pytest.warns(RuntimeWarning, random.multivariate_normal, mean, cov) # and that it doesn't warn with RuntimeWarning check_valid='ignore' assert_no_warnings(random.multivariate_normal, mean, cov, @@ -1023,10 +1011,9 @@ def test_multivariate_normal(self): check_valid='raise') cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) - with suppress_warnings() as sup: + with warnings.catch_warnings(): + warnings.simplefilter('error', RuntimeWarning) random.multivariate_normal(mean, cov) - w = sup.record(RuntimeWarning) - assert len(w) == 0 mu = np.zeros(2) cov = np.eye(2) @@ -1048,8 +1035,8 @@ def test_negative_binomial(self): assert_array_equal(actual, desired) def test_negative_binomial_exceptions(self): - with suppress_warnings() as sup: - sup.record(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) assert_raises(ValueError, random.negative_binomial, 100, np.nan) assert_raises(ValueError, random.negative_binomial, 100, [np.nan] * 10) @@ -1131,8 +1118,8 @@ def test_poisson_exceptions(self): assert_raises(ValueError, random.poisson, [lamneg] * 10) assert_raises(ValueError, random.poisson, lambig) assert_raises(ValueError, random.poisson, [lambig] * 10) - with suppress_warnings() as sup: - sup.record(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) assert_raises(ValueError, random.poisson, np.nan) assert_raises(ValueError, random.poisson, [np.nan] * 10) diff --git a/numpy/tests/test_reloading.py b/numpy/tests/test_reloading.py index 9787bcbbc101..b70a715237a5 100644 --- a/numpy/tests/test_reloading.py +++ b/numpy/tests/test_reloading.py @@ -7,7 +7,7 @@ import pytest import numpy.exceptions as ex -from numpy.testing import IS_WASM, assert_, 
assert_equal, assert_raises, assert_warns +from numpy.testing import IS_WASM, assert_, assert_equal, assert_raises def test_numpy_reloading(): @@ -19,14 +19,14 @@ def test_numpy_reloading(): VisibleDeprecationWarning = ex.VisibleDeprecationWarning ModuleDeprecationWarning = ex.ModuleDeprecationWarning - with assert_warns(UserWarning): + with pytest.warns(UserWarning): reload(np) assert_(_NoValue is np._NoValue) assert_(ModuleDeprecationWarning is ex.ModuleDeprecationWarning) assert_(VisibleDeprecationWarning is ex.VisibleDeprecationWarning) assert_raises(RuntimeError, reload, numpy._globals) - with assert_warns(UserWarning): + with pytest.warns(UserWarning): reload(np) assert_(_NoValue is np._NoValue) assert_(ModuleDeprecationWarning is ex.ModuleDeprecationWarning) diff --git a/numpy/tests/test_warnings.py b/numpy/tests/test_warnings.py index 560ee6143265..7efa2a1d1896 100644 --- a/numpy/tests/test_warnings.py +++ b/numpy/tests/test_warnings.py @@ -34,10 +34,11 @@ def visit_Call(self, node): ast.NodeVisitor.generic_visit(self, node) if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings': - if node.args[0].value == "ignore": - raise AssertionError( - "warnings should have an appropriate stacklevel; " - f"found in {self.__filename} on line {node.lineno}") + if getattr(node.args[0], "value", None) == "ignore": + if not self.__filename.name.startswith("test_"): + raise AssertionError( + "ignore filters should only be used in tests; " + f"found in {self.__filename} on line {node.lineno}") if p.ls[-1] == 'warn' and ( len(p.ls) == 1 or p.ls[-2] == 'warnings'): From 67e5848521b6a857205639be66bb1b29823aa7e6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Jul 2025 17:35:39 -0600 Subject: [PATCH 0193/1018] MAINT: Bump pypa/cibuildwheel from 3.0.0 to 3.0.1 (#29334) Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 3.0.0 to 3.0.1. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/5f22145df44122af0f5a201f93cf0207171beca7...95d2f3a92fbf80abe066b09418bbf128a8923df2) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-version: 3.0.1 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 453a67088adf..ce0c2e803143 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -49,7 +49,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@5f22145df44122af0f5a201f93cf0207171beca7 # v3.0.0 + - uses: pypa/cibuildwheel@95d2f3a92fbf80abe066b09418bbf128a8923df2 # v3.0.1 env: CIBW_PLATFORM: pyodide diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index ab6bbb618899..223ec38898cf 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -175,7 +175,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@5f22145df44122af0f5a201f93cf0207171beca7 # v3.0.0 + uses: pypa/cibuildwheel@95d2f3a92fbf80abe066b09418bbf128a8923df2 # v3.0.1 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From 2028c7b0c53868dab9d6efaf07b796640f311970 Mon Sep 17 00:00:00 2001 From: Dan Raviv Date: Mon, 7 Jul 2025 16:36:27 -0700 Subject: [PATCH 0194/1018] DOC: vectorize with signature doesn't pre-call function --- numpy/lib/_function_base_impl.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index f5690a829fc6..0379c3c60a1d 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -2332,7 +2332,7 @@ class vectorize: cache : bool, optional If `True`, then cache the first function call that determines the number - of outputs if `otypes` is not provided. + of outputs if `otypes` and `signature` are not provided. signature : string, optional Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for @@ -2356,7 +2356,7 @@ class vectorize: The `vectorize` function is provided primarily for convenience, not for performance. The implementation is essentially a for loop. - If `otypes` is not specified, then a call to the function with the + If `otypes` and `signature` are not specified, then a call to the function with the first argument will be used to determine the number of outputs. The results of this call will be cached if `cache` is `True` to prevent calling the function twice. However, to implement the cache, the From d4c09dfde305cefe328cbc8c952dd4c41836a41c Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 8 Jul 2025 16:23:42 +0200 Subject: [PATCH 0195/1018] TST: Avoid uninitialized values in test (#29341) I think I missed this one before once... can cause occasional warnings if uninitialized. 
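A minimal sketch of the failure mode (illustrative only; ``SubClass`` and the
variable names below are not part of this patch): constructing an ndarray
subclass directly, as ``ArrayPriorityMinus0(2)`` did, allocates uninitialized
memory, so arithmetic on it can trigger value-dependent warnings. Viewing an
initialized array avoids that:

    import numpy as np

    class SubClass(np.ndarray):
        pass

    uninit = SubClass(2)               # contents are whatever was in memory
    safe = np.zeros(2).view(SubClass)  # initialized, so ufuncs stay warning-free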
Signed-off-by: Sebastian Berg --- numpy/_core/tests/test_ufunc.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index b86e22917734..9c5489a614e8 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -2129,7 +2129,8 @@ class ArrayPriorityMinus2000(ArrayPriorityBase): assert np.add(y, x) is ArrayPriorityMinus1000 assert np.add(x, xb) is ArrayPriorityMinus1000 assert np.add(xb, x) is ArrayPriorityMinus1000b - assert np.add(np.zeros(2), ArrayPriorityMinus0(2)) is ArrayPriorityMinus0 + y_minus0 = np.zeros(2).view(ArrayPriorityMinus0) + assert np.add(np.zeros(2), y_minus0) is ArrayPriorityMinus0 assert type(np.add(xb, x, np.zeros(2))) is np.ndarray @pytest.mark.parametrize("a", ( From 46c0bffa8b137762dee19cbe1541790bd1108fb0 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Tue, 8 Jul 2025 19:19:24 +0100 Subject: [PATCH 0196/1018] TYP: correct default value of `unicode` in `chararray.__new__` (#29340) --- numpy/_core/defchararray.pyi | 14 +++++++++++++- numpy/lib/_polynomial_impl.pyi | 26 ++++++++++++++++++++++++-- 2 files changed, 37 insertions(+), 3 deletions(-) diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 25754965c46a..680875240096 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -114,11 +114,23 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): order: _OrderKACF = ..., ) -> _CharArray[bytes_]: ... @overload + def __new__( + subtype, + shape: _ShapeLike, + itemsize: SupportsIndex | SupportsInt, + unicode: L[True], + buffer: _SupportsBuffer = ..., + offset: SupportsIndex = ..., + strides: _ShapeLike = ..., + order: _OrderKACF = ..., + ) -> _CharArray[str_]: ... + @overload def __new__( subtype, shape: _ShapeLike, itemsize: SupportsIndex | SupportsInt = ..., - unicode: L[True] = ..., + *, + unicode: L[True], buffer: _SupportsBuffer = ..., offset: SupportsIndex = ..., strides: _ShapeLike = ..., diff --git a/numpy/lib/_polynomial_impl.pyi b/numpy/lib/_polynomial_impl.pyi index 3da1a2af60c9..3b0eade1399e 100644 --- a/numpy/lib/_polynomial_impl.pyi +++ b/numpy/lib/_polynomial_impl.pyi @@ -151,22 +151,44 @@ def polyfit( cov: L[True, "unscaled"] = ..., ) -> _2Tup[NDArray[complex128]]: ... @overload +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None, + full: L[True], + w: _ArrayLikeFloat_co | None = ..., + cov: bool | L["unscaled"] = ..., +) -> _5Tup[NDArray[float64]]: ... +@overload def polyfit( x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, deg: SupportsIndex | SupportsInt, rcond: float | None = ..., - full: L[True] = ..., + *, + full: L[True], w: _ArrayLikeFloat_co | None = ..., cov: bool | L["unscaled"] = ..., ) -> _5Tup[NDArray[float64]]: ... @overload +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None, + full: L[True], + w: _ArrayLikeFloat_co | None = ..., + cov: bool | L["unscaled"] = ..., +) -> _5Tup[NDArray[complex128]]: ... +@overload def polyfit( x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, deg: SupportsIndex | SupportsInt, rcond: float | None = ..., - full: L[True] = ..., + *, + full: L[True], w: _ArrayLikeFloat_co | None = ..., cov: bool | L["unscaled"] = ..., ) -> _5Tup[NDArray[complex128]]: ... 
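A quick runtime check of the behavior the corrected stubs describe
(illustrative only; the variable names are assumptions, not part of the
patch): at runtime ``unicode`` defaults to ``False``, so only an explicit
``unicode=True``, positional or keyword, yields a ``str_`` chararray:

    import numpy as np

    a = np.char.chararray((2,), itemsize=3, unicode=True)
    assert a.dtype.kind == "U"   # unicode chararray

    b = np.char.chararray((2,), itemsize=3)  # unicode defaults to False
    assert b.dtype.kind == "S"   # bytes chararray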
From 2e93c4639f46403b995c910f01e2db33000e854a Mon Sep 17 00:00:00 2001 From: Dan Raviv Date: Wed, 9 Jul 2025 01:08:36 -0700 Subject: [PATCH 0197/1018] DOC: Add missing `self` in `__array_ufunc__` signature (#29343) --- doc/source/user/basics.subclassing.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/user/basics.subclassing.rst b/doc/source/user/basics.subclassing.rst index 7b1e8fd34512..a937521a7abb 100644 --- a/doc/source/user/basics.subclassing.rst +++ b/doc/source/user/basics.subclassing.rst @@ -469,7 +469,7 @@ implemented. The signature of ``__array_ufunc__`` is:: - def __array_ufunc__(ufunc, method, *inputs, **kwargs): + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - *ufunc* is the ufunc object that was called. - *method* is a string indicating how the Ufunc was called, either From 34776f4c86c3399670144b6b035cb5eaf8a4ec33 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 9 Jul 2025 10:33:27 +0200 Subject: [PATCH 0198/1018] API,BUG: Fix scalar handling in array-interface allowing NULL pointers (#29338) This fixes the scalar handling in the array-interface and also a bug in the shape handling error path. In theory it breaks code that used NULL to trigger the scalar path, but this path was: 1. Completely undocumented, and 2. correctly triggered by *ommitting* the data field instead. I didn't remove/deprecate the scalar path in this PR, maybe we should. But, I do think we can ignore that theoretical use-case since it is nonsensical. Signed-off-by: Sebastian Berg --- doc/release/upcoming_changes/29338.change.rst | 9 ++++ doc/source/reference/arrays.interface.rst | 18 +++++--- numpy/_core/src/multiarray/ctors.c | 28 ++++++++++--- numpy/_core/tests/test_multiarray.py | 41 ++++++++++++++++++- 4 files changed, 84 insertions(+), 12 deletions(-) create mode 100644 doc/release/upcoming_changes/29338.change.rst diff --git a/doc/release/upcoming_changes/29338.change.rst b/doc/release/upcoming_changes/29338.change.rst new file mode 100644 index 000000000000..64bf188009c8 --- /dev/null +++ b/doc/release/upcoming_changes/29338.change.rst @@ -0,0 +1,9 @@ +``__array_interface__`` with NULL pointer changed +------------------------------------------------- +The array interface now accepts NULL pointers (NumPy will do +its own dummy allocation, though). +Previously, these incorrectly triggered an undocumented +scalar path. +In the unlikely event that the scalar path was actually desired, +you can (for now) achieve the previous behavior via the correct +scalar path by not providing a ``data`` field at all. diff --git a/doc/source/reference/arrays.interface.rst b/doc/source/reference/arrays.interface.rst index ebe3f6b68918..75c17060c8fc 100644 --- a/doc/source/reference/arrays.interface.rst +++ b/doc/source/reference/arrays.interface.rst @@ -120,7 +120,7 @@ This approach to the interface consists of the object having an **Default**: ``[('', typestr)]`` - **data** (optional) + **data** A 2-tuple whose first argument is a :doc:`Python integer ` that points to the data-area storing the array contents. @@ -136,15 +136,23 @@ This approach to the interface consists of the object having an This attribute can also be an object exposing the :ref:`buffer interface ` which - will be used to share the data. If this key is not present (or - returns None), then memory sharing will be done - through the buffer interface of the object itself. In this + will be used to share the data. 
If this key is ``None``, then memory sharing + will be done through the buffer interface of the object itself. In this case, the offset key can be used to indicate the start of the buffer. A reference to the object exposing the array interface must be stored by the new object if the memory area is to be secured. - **Default**: ``None`` + .. note:: + Not specifying this field uses a "scalar" path that we may remove in the future + as we are not aware of any users. In this case, NumPy assigns the original object + as a scalar into the array. + + .. versionchanged:: 2.4 + Prior to NumPy 2.4 a ``NULL`` pointer used the undocumented "scalar" path + and was thus usually not accepted (and triggered crashes on some paths). + After NumPy 2.4, ``NULL`` is accepted, although NumPy will create a 1-byte sized + new allocation for the array. **strides** (optional) Either ``None`` to indicate a C-style contiguous array or diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index f7efe5041ab3..ff30e581cd91 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -2141,6 +2141,7 @@ PyArray_FromInterface(PyObject *origin) Py_ssize_t i, n; npy_intp dims[NPY_MAXDIMS], strides[NPY_MAXDIMS]; int dataflags = NPY_ARRAY_BEHAVED; + int use_scalar_assign = 0; if (PyArray_LookupSpecial_OnInstance( origin, npy_interned_str.array_interface, &iface) < 0) { @@ -2229,12 +2230,10 @@ PyArray_FromInterface(PyObject *origin) /* Shape must be specified when 'data' is specified */ int result = PyDict_ContainsString(iface, "data"); if (result < 0) { - Py_DECREF(attr); return NULL; } else if (result == 1) { Py_DECREF(iface); - Py_DECREF(attr); PyErr_SetString(PyExc_ValueError, "Missing __array_interface__ shape"); return NULL; @@ -2277,7 +2276,10 @@ PyArray_FromInterface(PyObject *origin) } /* Case for data access through pointer */ - if (attr && PyTuple_Check(attr)) { + if (attr == NULL) { + use_scalar_assign = 1; + } + else if (PyTuple_Check(attr)) { PyObject *dataptr; if (PyTuple_GET_SIZE(attr) != 2) { PyErr_SetString(PyExc_TypeError, @@ -2309,7 +2311,7 @@ PyArray_FromInterface(PyObject *origin) } /* Case for data access through buffer */ - else if (attr) { + else { if (attr != Py_None) { base = attr; } @@ -2366,18 +2368,32 @@ PyArray_FromInterface(PyObject *origin) if (ret == NULL) { goto fail; } - if (data == NULL) { + if (use_scalar_assign) { + /* + * NOTE(seberg): I honestly doubt anyone is using this scalar path and we + * could probably just deprecate (or just remove it in a 3.0 version). + */ if (PyArray_SIZE(ret) > 1) { PyErr_SetString(PyExc_ValueError, "cannot coerce scalar to array with size > 1"); Py_DECREF(ret); goto fail; } - if (PyArray_SETITEM(ret, PyArray_DATA(ret), origin) < 0) { + if (PyArray_Pack(PyArray_DESCR(ret), PyArray_DATA(ret), origin) < 0) { Py_DECREF(ret); goto fail; } } + else if (data == NULL && PyArray_NBYTES(ret) != 0) { + /* Caller should ensure this, but <2.4 used the above scalar coerction path */ + PyErr_SetString(PyExc_ValueError, + "data is NULL but array contains data, in older versions of NumPy " + "this may have used the scalar path. 
To get the scalar path " + "you must leave the data field undefined."); + Py_DECREF(ret); + goto fail; + } + result = PyDict_GetItemStringRef(iface, "strides", &attr); if (result == -1){ return NULL; diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 930c736c6076..04222025883e 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -8873,6 +8873,8 @@ def __array_interface__(self): (f, {'strides': ()}, 0.5), (f, {'strides': (2,)}, ValueError), (f, {'strides': 16}, TypeError), + # This fails due to going into the buffer protocol path + (f, {'data': None, 'shape': ()}, TypeError), ]) def test_scalar_interface(self, val, iface, expected): # Test scalar coercion within the array interface @@ -8891,13 +8893,50 @@ def test_scalar_interface(self, val, iface, expected): post_cnt = sys.getrefcount(np.dtype('f8')) assert_equal(pre_cnt, post_cnt) -def test_interface_no_shape(): + +def test_interface_empty_shape(): class ArrayLike: array = np.array(1) __array_interface__ = array.__array_interface__ assert_equal(np.array(ArrayLike()), 1) +def test_interface_no_shape_error(): + class ArrayLike: + __array_interface__ = {"data": None, "typestr": "f8"} + + with pytest.raises(ValueError, match="Missing __array_interface__ shape"): + np.array(ArrayLike()) + + +@pytest.mark.parametrize("iface", [ + {"typestr": "f8", "shape": (0, 1)}, + {"typestr": "(0,)f8,", "shape": (1, 3)}, +]) +def test_interface_nullptr(iface): + iface.update({"data": (0, True)}) + + class ArrayLike: + __array_interface__ = iface + + arr = np.asarray(ArrayLike()) + # Note, we currently set the base anyway, but we do an allocation + # (because NumPy doesn't like NULL data pointers everywhere). + assert arr.shape == iface["shape"] + assert arr.dtype == np.dtype(iface["typestr"]) + assert arr.base is not None + assert arr.flags.owndata + + +def test_interface_nullptr_size_check(): + # Note that prior to NumPy 2.4 the below took the scalar path (if shape had size 1) + class ArrayLike: + __array_interface__ = {"data": (0, True), "typestr": "f8", "shape": ()} + + with pytest.raises(ValueError, match="data is NULL but array contains data"): + np.array(ArrayLike()) + + def test_array_interface_itemsize(): # See gh-6361 my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'], From 4ff1cc12479355ade530ae3add7a19e5db69f2ec Mon Sep 17 00:00:00 2001 From: Toshaksha <147024929+Toshaksha@users.noreply.github.com> Date: Wed, 9 Jul 2025 17:17:04 +0530 Subject: [PATCH 0199/1018] DOC: Fix Resolution Links in NEPs 38 and 49 (#29347) * Update nep-0049-data-allocation-strategies.rst * Update nep-0038-SIMD-optimizations.rst --- doc/neps/nep-0038-SIMD-optimizations.rst | 2 +- doc/neps/nep-0049-data-allocation-strategies.rst | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/doc/neps/nep-0038-SIMD-optimizations.rst b/doc/neps/nep-0038-SIMD-optimizations.rst index eb1157342948..445c008a76c3 100644 --- a/doc/neps/nep-0038-SIMD-optimizations.rst +++ b/doc/neps/nep-0038-SIMD-optimizations.rst @@ -8,7 +8,7 @@ NEP 38 — Using SIMD optimization instructions for performance :Status: Final :Type: Standards :Created: 2019-11-25 -:Resolution: https://mail.python.org/archives/list/numpy-discussion@python.org/thread/PVWJ74UVBRZ5ZWF6MDU7EUSJXVNILAQB/#PVWJ74UVBRZ5ZWF6MDU7EUSJXVNILAQB +:Resolution: `NumPy Discussion `_ Abstract diff --git a/doc/neps/nep-0049-data-allocation-strategies.rst b/doc/neps/nep-0049-data-allocation-strategies.rst index 
180cfea17156..ec18f7a315d9 100644 --- a/doc/neps/nep-0049-data-allocation-strategies.rst +++ b/doc/neps/nep-0049-data-allocation-strategies.rst @@ -8,8 +8,7 @@ NEP 49 — Data allocation strategies :Status: Final :Type: Standards Track :Created: 2021-04-18 -:Resolution: https://mail.python.org/archives/list/numpy-discussion@python.org/thread/YZ3PNTXZUT27B6ITFAD3WRSM3T3SRVK4/#PKYXCTG4R5Q6LIRZC4SEWLNBM6GLRF26 - +:Resolution: `NumPy Discussion `_ Abstract -------- From 7f8e3fb168a833e81b8ae5315424106e8f1667cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20G=C3=B3rny?= Date: Wed, 9 Jul 2025 16:28:47 +0200 Subject: [PATCH 0200/1018] BLD: print long double format used MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Print the long double format that is used for the build. This can be helpful when you're about to cross-compile, but can also run Meson first on the native host to check the right value. Signed-off-by: Michał Górny --- numpy/_core/meson.build | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 6098986618e4..1b79aa39781c 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -522,6 +522,7 @@ endif if longdouble_format == 'UNKNOWN' or longdouble_format == 'UNDEFINED' error('Unknown long double format of size: ' + cc.sizeof('long double').to_string()) endif +message(f'Long double format: @longdouble_format@') cdata.set10('HAVE_LDOUBLE_' + longdouble_format, true) if cc.has_header('endian.h') From 8cf1f7dbf583dd7f55906e799a1108d485f54709 Mon Sep 17 00:00:00 2001 From: Carlos Martin Date: Thu, 10 Jul 2025 03:18:44 -0400 Subject: [PATCH 0201/1018] ENH: Let numpy.size accept multiple axes. (#29240) * Let numpy.size accept multiple axes. * Apply suggestions from code review --------- Co-authored-by: Sebastian Berg --- .../upcoming_changes/29240.new_feature.rst | 1 + numpy/_core/fromnumeric.py | 22 ++++++++++++------- numpy/_core/fromnumeric.pyi | 2 +- numpy/_core/tests/test_numeric.py | 4 ++++ 4 files changed, 20 insertions(+), 9 deletions(-) create mode 100644 doc/release/upcoming_changes/29240.new_feature.rst diff --git a/doc/release/upcoming_changes/29240.new_feature.rst b/doc/release/upcoming_changes/29240.new_feature.rst new file mode 100644 index 000000000000..02d43364b200 --- /dev/null +++ b/doc/release/upcoming_changes/29240.new_feature.rst @@ -0,0 +1 @@ +* Let ``np.size`` accept multiple axes. diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 34fe1798f45e..9d01fca8aa32 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -2,6 +2,7 @@ """ import functools +import math import types import warnings @@ -3569,10 +3570,13 @@ def size(a, axis=None): ---------- a : array_like Input data. - axis : int, optional - Axis along which the elements are counted. By default, give + axis : None or int or tuple of ints, optional + Axis or axes along which the elements are counted. By default, give the total number of elements. + .. versionchanged:: 2.4 + Extended to accept multiple axes. 
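
As context for the tuple-``axis`` extension documented above: the result is just the product of the selected shape entries after axis normalization. A minimal, hypothetical re-implementation sketch (the patch itself imports ``normalize_axis_tuple`` from ``.numeric``; the public ``numpy.lib.array_utils`` alias is used here so the snippet runs standalone):

    import math
    import numpy as np
    from numpy.lib.array_utils import normalize_axis_tuple

    a = np.zeros((2, 3, 4))
    # np.size(a, axis=(0, 2)) multiplies the shape entries at axes 0 and 2.
    axes = normalize_axis_tuple((0, 2), a.ndim, allow_duplicate=False)
    assert math.prod(a.shape[ax] for ax in axes) == 2 * 4
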
+ Returns ------- element_count : int @@ -3590,10 +3594,12 @@ def size(a, axis=None): >>> a = np.array([[1,2,3],[4,5,6]]) >>> np.size(a) 6 - >>> np.size(a,1) + >>> np.size(a,axis=1) 3 - >>> np.size(a,0) + >>> np.size(a,axis=0) 2 + >>> np.size(a,axis=(0,1)) + 6 """ if axis is None: @@ -3602,10 +3608,10 @@ def size(a, axis=None): except AttributeError: return asarray(a).size else: - try: - return a.shape[axis] - except AttributeError: - return asarray(a).shape[axis] + _shape = shape(a) + from .numeric import normalize_axis_tuple + axis = normalize_axis_tuple(axis, len(_shape), allow_duplicate=False) + return math.prod(_shape[ax] for ax in axis) def _round_dispatcher(a, decimals=None, out=None): diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 34849c2cc800..95fe7f7d8484 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -1397,7 +1397,7 @@ def cumulative_prod( def ndim(a: ArrayLike) -> int: ... -def size(a: ArrayLike, axis: int | None = ...) -> int: ... +def size(a: ArrayLike, axis: int | tuple[int, ...] | None = ...) -> int: ... @overload def around( diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 4ead2fc7ec6f..560d7cf71543 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -291,6 +291,10 @@ def test_size(self): assert_(np.size(A) == 6) assert_(np.size(A, 0) == 2) assert_(np.size(A, 1) == 3) + assert_(np.size(A, ()) == 1) + assert_(np.size(A, (0,)) == 2) + assert_(np.size(A, (1,)) == 3) + assert_(np.size(A, (0, 1)) == 6) def test_squeeze(self): A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]] From f68522879dd5cd3f6eb45fd3ec971561e76915c6 Mon Sep 17 00:00:00 2001 From: Mark Ryan Date: Thu, 10 Jul 2025 09:52:26 +0000 Subject: [PATCH 0202/1018] BUG: fix test_npy_uintp_type_enum test_npy_uintp_type_enum will fail if it is run before any of the other tests in test_cython.py. The reason is that although it imports the checks module, it does not contain the install_temp parameter that ensures that the checks module is compiled. Running test_npy_uintp_type_enum before any of the other tests in test_cython.py will result in a No module named 'checks' error. If the test is run after one of the other tests in test_cython.py that do contain the install_temp parameter, test_npy_uintp_type_enum will run fine. Here we fix the issue by adding the install_temp parameter to test_npy_uintp_type_enum. Closes #29354 --- numpy/_core/tests/test_cython.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py index 2c7b40c5614c..c405a59e535e 100644 --- a/numpy/_core/tests/test_cython.py +++ b/numpy/_core/tests/test_cython.py @@ -347,6 +347,6 @@ def test_npystring_allocators_other_dtype(install_temp): @pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', reason='no checks module on win-arm64') -def test_npy_uintp_type_enum(): +def test_npy_uintp_type_enum(install_temp): import checks assert checks.check_npy_uintp_type_enum() From 82bd6ef31ce57859280be7591c3a20f562e69d34 Mon Sep 17 00:00:00 2001 From: Maanas Arora Date: Fri, 11 Jul 2025 10:16:59 -0400 Subject: [PATCH 0203/1018] BUG: Fix reference leakage for output arrays in reduction functions (#29358) Add back missing DECREF for `out=` in ufunc.reduce (and etc.) when `out` is passed. 
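
To make the leaked reference concrete, a hedged reproduction sketch along the lines of the regression tests added below (exact refcounts are interpreter-specific, so treat the bookkeeping as illustrative):

    import sys
    import numpy as np

    arr = np.arange(3)
    out = np.empty((), dtype=arr.dtype)

    before = sys.getrefcount(out)
    np.add.reduce(arr, out=out)
    # Before this fix, each call left one extra reference on `out`;
    # with the restored DECREF the count is unchanged.
    assert sys.getrefcount(out) == before
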
* BUG: add Py_XDECREF to `out` for failure case in generic reduction * TST: add reference count checks for `out` in reduction leak tests --- numpy/_core/src/umath/ufunc_object.c | 4 +++ numpy/_core/tests/test_ufunc.py | 39 ++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 485364af1ff2..fd9ae9f1e41d 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -3727,6 +3727,8 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, if (ret == NULL) { goto fail; } + + Py_XDECREF(out); Py_DECREF(signature[0]); Py_DECREF(signature[1]); @@ -3753,6 +3755,8 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, return wrapped_result; fail: + Py_XDECREF(out); + Py_XDECREF(signature[0]); Py_XDECREF(signature[1]); Py_XDECREF(signature[2]); diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 9c5489a614e8..84e31bd1bc3c 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -3003,6 +3003,45 @@ def test_reduce_casterrors(offset): assert out[()] < value * offset +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_reduction_no_reference_leak(): + # Test that the generic reduction does not leak references. + # gh-29358 + arr = np.array([1, 2, 3], dtype=np.int32) + count = sys.getrefcount(arr) + + np.add.reduce(arr, dtype=np.int32, initial=0) + assert count == sys.getrefcount(arr) + + np.add.accumulate(arr, dtype=np.int32) + assert count == sys.getrefcount(arr) + + np.add.reduceat(arr, [0, 1], dtype=np.int32) + assert count == sys.getrefcount(arr) + + # with `out=` the reference count is not changed + out = np.empty((), dtype=np.int32) + out_count = sys.getrefcount(out) + + np.add.reduce(arr, dtype=np.int32, out=out, initial=0) + assert count == sys.getrefcount(arr) + assert out_count == sys.getrefcount(out) + + out = np.empty(arr.shape, dtype=np.int32) + out_count = sys.getrefcount(out) + + np.add.accumulate(arr, dtype=np.int32, out=out) + assert count == sys.getrefcount(arr) + assert out_count == sys.getrefcount(out) + + out = np.empty((2,), dtype=np.int32) + out_count = sys.getrefcount(out) + + np.add.reduceat(arr, [0, 1], dtype=np.int32, out=out) + assert count == sys.getrefcount(arr) + assert out_count == sys.getrefcount(out) + + def test_object_reduce_cleanup_on_failure(): # Test cleanup, including of the initial value (manually provided or not) with pytest.raises(TypeError): From 6b595af8e1d53a7da5de5c0cfd2462afebbe62a0 Mon Sep 17 00:00:00 2001 From: Dillon Niederhut Date: Sat, 12 Jul 2025 13:29:25 -0700 Subject: [PATCH 0204/1018] DOC: add typing test to dev env page Adds a small entry to the development environment page on testing to show the spin command for invoking the typing tests. [skip azp] [skip cirrus] [skip actions] Closes https://github.com/numpy/numpy/issues/29365 --- doc/source/dev/development_environment.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst index 7a6dc36b680d..da8135def475 100644 --- a/doc/source/dev/development_environment.rst +++ b/doc/source/dev/development_environment.rst @@ -185,6 +185,16 @@ For more extensive information, see :ref:`testing-guidelines`. Note: do not run the tests from the root directory of your numpy git repo without ``spin``, that will result in strange test errors. 
+Running type checks +------------------- +Type checks for changes that involve static type declarations are also run using ``spin``. +The invocation will look like the following:: + + $ spin mypy + +This will look in the ``typing/tests`` directory for sets of operations to +test for type incompatibility. + Running linting --------------- Lint checks can be performed on newly added lines of Python code. From 031377b7bd547c3e07702cf273cf9fecd8cb46fa Mon Sep 17 00:00:00 2001 From: Dillon Niederhut Date: Sun, 13 Jul 2025 06:56:44 -0500 Subject: [PATCH 0205/1018] TST: refactor typing check for @ (#29364) Co-authored-by: Joren Hammudoglu --- numpy/typing/tests/data/pass/simple.py | 4 +++- numpy/typing/tests/data/pass/simple_py3.py | 6 ------ 2 files changed, 3 insertions(+), 7 deletions(-) delete mode 100644 numpy/typing/tests/data/pass/simple_py3.py diff --git a/numpy/typing/tests/data/pass/simple.py b/numpy/typing/tests/data/pass/simple.py index 8f44e6e76f83..408d2482b0ef 100644 --- a/numpy/typing/tests/data/pass/simple.py +++ b/numpy/typing/tests/data/pass/simple.py @@ -165,4 +165,6 @@ def iterable_func(x: Iterable[object]) -> Iterable[object]: ~array # Other methods -np.array([1, 2]).transpose() +array.transpose() + +array @ array diff --git a/numpy/typing/tests/data/pass/simple_py3.py b/numpy/typing/tests/data/pass/simple_py3.py deleted file mode 100644 index c05a1ce612ac..000000000000 --- a/numpy/typing/tests/data/pass/simple_py3.py +++ /dev/null @@ -1,6 +0,0 @@ -import numpy as np - -array = np.array([1, 2]) - -# The @ operator is not in python 2 -array @ array From 3ccbc6c29bb56795d32ee685bccdc1e9ca8ae47f Mon Sep 17 00:00:00 2001 From: Iason Krommydas Date: Sun, 13 Jul 2025 08:28:40 -0700 Subject: [PATCH 0206/1018] DOC: specify that `numpy.nan_to_num` supports array like arguments (#29362) * DOC: specify that numpy.nan_to_num supports array_like arguments * DOC: split examples into multiple lines * DOC: more consistent type specification --- numpy/lib/_type_check_impl.py | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/numpy/lib/_type_check_impl.py b/numpy/lib/_type_check_impl.py index 977609caa299..584088cdc21d 100644 --- a/numpy/lib/_type_check_impl.py +++ b/numpy/lib/_type_check_impl.py @@ -398,15 +398,15 @@ def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): in-place (False). The in-place operation only occurs if casting to an array does not require a copy. Default is True. - nan : int, float, optional - Value to be used to fill NaN values. If no value is passed + nan : int, float, or bool or array_like of int, float, or bool, optional + Values to be used to fill NaN values. If no values are passed then NaN values will be replaced with 0.0. - posinf : int, float, optional - Value to be used to fill positive infinity values. If no value is + posinf : int, float, or bool or array_like of int, float, or bool, optional + Values to be used to fill positive infinity values. If no values are passed then positive infinity values will be replaced with a very large number. - neginf : int, float, optional - Value to be used to fill negative infinity values. If no value is + neginf : int, float, or bool or array_like of int, float, or bool, optional + Values to be used to fill negative infinity values. If no values are passed then negative infinity values will be replaced with a very small (or negative) number. 
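
To illustrate the array-like semantics described above: the per-element fill values broadcast against ``x``, so each NaN/inf position picks up its matching replacement. A small sketch, assuming standard broadcasting rules:

    import numpy as np

    x = np.array([np.nan, np.inf, -np.inf, 2.0])
    # Per-element fill values; anything broadcastable against `x` works.
    out = np.nan_to_num(x, nan=[9.0, 0.0, 0.0, 0.0],
                        posinf=[0.0, 1e9, 0.0, 0.0],
                        neginf=[0.0, 0.0, -1e9, 0.0])
    assert out.tolist() == [9.0, 1e9, -1e9, 2.0]
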
@@ -445,6 +445,12 @@ def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333) array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03, -1.2800000e+02, 1.2800000e+02]) + >>> nan = np.array([11, 12, -9999, 13, 14]) + >>> posinf = np.array([33333333, 11, 12, 13, 14]) + >>> neginf = np.array([11, 33333333, 12, 13, 14]) + >>> np.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf) + array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03, -1.2800000e+02, + 1.2800000e+02]) >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)]) array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary -1.28000000e+002, 1.28000000e+002]) @@ -454,6 +460,11 @@ def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): 0.00000000e+000 +1.79769313e+308j]) >>> np.nan_to_num(y, nan=111111, posinf=222222) array([222222.+111111.j, 111111. +0.j, 111111.+222222.j]) + >>> nan = np.array([11, 12, 13]) + >>> posinf = np.array([21, 22, 23]) + >>> neginf = np.array([31, 32, 33]) + >>> np.nan_to_num(y, nan=nan, posinf=posinf, neginf=neginf) + array([21.+11.j, 12. +0.j, 13.+23.j]) """ x = _nx.array(x, subok=True, copy=copy) xtype = x.dtype.type From 08916dc1f34de32ccce6b1018613de98c3cc68ff Mon Sep 17 00:00:00 2001 From: Dan Raviv Date: Sun, 13 Jul 2025 11:35:49 -0700 Subject: [PATCH 0207/1018] BUG: Fix np.testing utils failing for masked scalar vs. scalar (#29317) (#29318) * TST: Add failing test TestArrayEqual.test_masked_scalar The two added test cases fail with: E AssertionError: E Arrays are not equal E E nan location mismatch: E ACTUAL: MaskedArray(3.) E DESIRED: array(3.) and E AssertionError: E Arrays are not equal E E nan location mismatch: E ACTUAL: MaskedArray(3.) E DESIRED: array(nan) * BUG: Fix np.testing utils failing for masked scalar vs. scalar (#29317) TestArrayEqual.test_masked_scalar now passes. This case regressed since 73151451437 (merged in #12119) due to: - ` == ` returning np.ma.masked (not a 0-dim masked bool array), followed by - `np.bool(np.ma.masked)` unintentionally converting it to np.False_ Note on the modified comment: Confusingly, "isinstance(..., bool) checks" in the previous wording actually incorrectly referred to the ones towards the end of the function, which are not actually related to __eq__'s behavior but to the possibility of `func` returning a bool. * MNT: Improve comments on assert_array_compare nan/inf handling logic - Use same language as elsewhere below to explain `!= True` used to handle np.ma.masked - Clarify committed to support standard MaskedArrays - Restore note lost in 73151451437 comment changes about how the np.bool casts towards the end of the function handle np.ma.masked, and expand further.
* TST: Expand TestArrayEqual.test_masked_scalar --- numpy/testing/_private/utils.py | 19 +++++++++++------ numpy/testing/tests/test_utils.py | 34 +++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+), 6 deletions(-) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 78f9e9a004b1..e868239e62ab 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -771,16 +771,20 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): y_id = func(y) # We include work-arounds here to handle three types of slightly # pathological ndarray subclasses: - # (1) all() on `masked` array scalars can return masked arrays, so we - # use != True + # (1) all() on fully masked arrays returns np.ma.masked, so we use != True + # (np.ma.masked != True evaluates as np.ma.masked, which is falsy). # (2) __eq__ on some ndarray subclasses returns Python booleans - # instead of element-wise comparisons, so we cast to np.bool() and - # use isinstance(..., bool) checks + # instead of element-wise comparisons, so we cast to np.bool() in + # that case (or in case __eq__ returns some other value with no + # all() method). # (3) subclasses with bare-bones __array_function__ implementations may # not implement np.all(), so favor using the .all() method - # We are not committed to supporting such subclasses, but it's nice to + # We are not committed to supporting cases (2) and (3), but it's nice to # support them if possible. - if np.bool(x_id == y_id).all() != True: + result = x_id == y_id + if not hasattr(result, "all") or not callable(result.all): + result = np.bool(result) + if result.all() != True: msg = build_err_msg( [x, y], err_msg + '\n%s location mismatch:' @@ -790,6 +794,9 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): raise AssertionError(msg) # If there is a scalar, then here we know the array has the same # flag as it everywhere, so we should return the scalar flag. + # np.ma.masked is also handled and converted to np.False_ (even if the other + # array has nans/infs etc.; that's OK given the handling later of fully-masked + # results). if isinstance(x_id, bool) or x_id.ndim == 0: return np.bool(x_id) elif isinstance(y_id, bool) or y_id.ndim == 0: diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index b09df821680b..243b8d420936 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -197,6 +197,40 @@ def test_masked_nan_inf(self): self._test_equal(a, b) self._test_equal(b, a) + # Also provides test cases for gh-11121 + def test_masked_scalar(self): + # Test masked scalar vs. plain/masked scalar + for a_val, b_val, b_masked in itertools.product( + [3., np.nan, np.inf], + [3., 4., np.nan, np.inf, -np.inf], + [False, True], + ): + a = np.ma.MaskedArray(a_val, mask=True) + b = np.ma.MaskedArray(b_val, mask=True) if b_masked else np.array(b_val) + self._test_equal(a, b) + self._test_equal(b, a) + + # Test masked scalar vs. plain array + for a_val, b_val in itertools.product( + [3., np.nan, -np.inf], + itertools.product([3., 4., np.nan, np.inf, -np.inf], repeat=2), + ): + a = np.ma.MaskedArray(a_val, mask=True) + b = np.array(b_val) + self._test_equal(a, b) + self._test_equal(b, a) + + # Test masked scalar vs. 
masked array + for a_val, b_val, b_mask in itertools.product( + [3., np.nan, np.inf], + itertools.product([3., 4., np.nan, np.inf, -np.inf], repeat=2), + itertools.product([False, True], repeat=2), + ): + a = np.ma.MaskedArray(a_val, mask=True) + b = np.ma.MaskedArray(b_val, mask=b_mask) + self._test_equal(a, b) + self._test_equal(b, a) + def test_subclass_that_overrides_eq(self): # While we cannot guarantee testing functions will always work for # subclasses, the tests should ideally rely only on subclasses having From 81be692aacdae64962b0c847ba25c6db6ca7111b Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Mon, 14 Jul 2025 11:14:24 +0300 Subject: [PATCH 0208/1018] BLD: use github to build macos-arm64 wheels with OpenBLAS and update to 0.3.30 --- .cirrus.star | 18 ----- .github/workflows/wheels.yml | 5 +- requirements/ci32_requirements.txt | 3 +- requirements/ci_requirements.txt | 6 +- tools/ci/cirrus_wheels.yml | 118 ----------------------------- 5 files changed, 5 insertions(+), 145 deletions(-) delete mode 100644 tools/ci/cirrus_wheels.yml diff --git a/.cirrus.star b/.cirrus.star index c503f25720a7..3de5ce97b0e8 100644 --- a/.cirrus.star +++ b/.cirrus.star @@ -9,17 +9,12 @@ load("cirrus", "env", "fs", "http") def main(ctx): ###################################################################### - # Should wheels be built? # Only test on the numpy/numpy repository ###################################################################### if env.get("CIRRUS_REPO_FULL_NAME") != "numpy/numpy": return [] - # only run the wheels entry on a cron job - if env.get("CIRRUS_CRON", "") == "nightly": - return fs.read("tools/ci/cirrus_wheels.yml") - # Obtain commit message for the event. Unfortunately CIRRUS_CHANGE_MESSAGE # only contains the actual commit message on a non-PR trigger event. # For a PR event it contains the PR title and description. @@ -31,23 +26,10 @@ def main(ctx): if "[skip cirrus]" in commit_msg or "[skip ci]" in commit_msg: return [] - wheel = False labels = env.get("CIRRUS_PR_LABELS", "") pr_number = env.get("CIRRUS_PR", "-1") tag = env.get("CIRRUS_TAG", "") - if "[wheel build]" in commit_msg: - wheel = True - - # if int(pr_number) > 0 and ("14 - Release" in labels or "36 - Build" in labels): - # wheel = True - - if tag.startswith("v") and "dev0" not in tag: - wheel = True - - if wheel: - return fs.read("tools/ci/cirrus_wheels.yml") - if int(pr_number) < 0: return [] diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 223ec38898cf..fd2047283a1f 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -85,10 +85,9 @@ jobs: - [ubuntu-22.04-arm, manylinux_aarch64, ""] - [ubuntu-22.04-arm, musllinux_aarch64, ""] - [macos-13, macosx_x86_64, openblas] - - # targeting macos >= 14. 
Could probably build on macos-14, but it would be a cross-compile - [macos-13, macosx_x86_64, accelerate] - - [macos-14, macosx_arm64, accelerate] # always use accelerate + - [macos-14, macosx_arm64, openblas] + - [macos-14, macosx_arm64, accelerate] - [windows-2022, win_amd64, ""] - [windows-2022, win32, ""] - [windows-11-arm, win_arm64, ""] diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index 74c9a51ec111..87831586e01e 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,4 +1,3 @@ spin==0.13 # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.29.0.0 ; sys_platform != 'win32' or platform_machine != 'ARM64' -scipy-openblas32==0.3.29.265.0 ; sys_platform == 'win32' and platform_machine == 'ARM64' +scipy-openblas32==0.3.30.0.1 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index b6ea06c812c8..dd16787923e3 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,6 +1,4 @@ spin==0.13 # Keep this in sync with ci32_requirements.txt -scipy-openblas32==0.3.29.0.0 ; sys_platform != 'win32' or platform_machine != 'ARM64' -scipy-openblas32==0.3.29.265.0 ; sys_platform == 'win32' and platform_machine == 'ARM64' -# Note there is not yet a win-arm64 wheel, so we currently only exclude win-arm64 -scipy-openblas64==0.3.29.0.0 ; sys_platform != 'win32' or platform_machine != 'ARM64' +scipy-openblas32==0.3.30.0.1 +scipy-openblas64==0.3.30.0.1 diff --git a/tools/ci/cirrus_wheels.yml b/tools/ci/cirrus_wheels.yml deleted file mode 100644 index 6d02411df2e9..000000000000 --- a/tools/ci/cirrus_wheels.yml +++ /dev/null @@ -1,118 +0,0 @@ -###################################################################### -# Build macosx_arm64 natively -# -# macosx_arm64 for macos >= 14 used to be built here, but are now -# built on GHA. -###################################################################### - -macosx_arm64_task: - use_compute_credits: $CIRRUS_USER_COLLABORATOR == 'true' - env: - CIRRUS_CLONE_SUBMODULES: true - macos_instance: - matrix: - image: ghcr.io/cirruslabs/macos-runner:sonoma - - matrix: - - env: - CIBW_BUILD: cp311-* cp312* cp313* - env: - PATH: /usr/local/lib:/usr/local/include:$PATH - CIBW_ARCHS: arm64 - - build_script: | - brew install micromamba gfortran - micromamba shell init -s bash --root-prefix ~/micromamba - source ~/.bash_profile - - micromamba create -n numpydev - micromamba activate numpydev - micromamba install -y -c conda-forge python=3.11 2>/dev/null - - # Use scipy-openblas wheels - export INSTALL_OPENBLAS=true - export CIBW_ENVIRONMENT_MACOS="MACOSX_DEPLOYMENT_TARGET='11.0' INSTALL_OPENBLAS=true RUNNER_OS=macOS PKG_CONFIG_PATH=$PWD/.openblas" - - # needed for submodules - git submodule update --init - # need to obtain all the tags so setup.py can determine FULLVERSION - git fetch origin - uname -m - python -c "import platform;print(platform.python_version());print(platform.system());print(platform.machine())" - clang --version - - python -m pip install cibuildwheel - cibuildwheel - - wheels_artifacts: - path: "wheelhouse/*" - -###################################################################### -# Upload all wheels -###################################################################### - -wheels_upload_task: - use_compute_credits: $CIRRUS_USER_COLLABORATOR == 'true' - # Artifacts don't seem to be persistent from task to task. - # Rather than upload wheels at the end of each cibuildwheel run we do a - # final upload here. 
This is because a run may be on different OS for - # which bash, etc, may not be present. - depends_on: - - macosx_arm64 - compute_engine_instance: - image_project: cirrus-images - image: family/docker-builder - platform: linux - cpu: 1 - - env: - NUMPY_STAGING_UPLOAD_TOKEN: ENCRYPTED[!5a69522ae0c2af9edb2bc1cdfeaca6292fb3666d9ecd82dca0615921834a6ce3b702352835d8bde4ea2a9ed5ef8424ac!] - NUMPY_NIGHTLY_UPLOAD_TOKEN: ENCRYPTED[4376691390321cd5e76613ec21de8456cc0af0164971dd9542f985a017dc30ccb4d40e60f59184618e2d55afd63e93b7] - - upload_script: | - apt-get update - apt-get install -y curl wget - export IS_SCHEDULE_DISPATCH="false" - export IS_PUSH="false" - - # cron job - if [[ "$CIRRUS_CRON" == "nightly" ]]; then - export IS_SCHEDULE_DISPATCH="true" - fi - - # a manual build was started - if [[ "$CIRRUS_BUILD_SOURCE" == "api" && "$CIRRUS_COMMIT_MESSAGE" == "API build for null" ]]; then - export IS_SCHEDULE_DISPATCH="true" - fi - - # only upload wheels to staging if it's a tag beginning with 'v' and you're - # on a maintenance branch - if [[ "$CIRRUS_TAG" == v* ]] && [[ $CIRRUS_TAG != *"dev0"* ]]; then - export IS_PUSH="true" - fi - - if [[ $IS_PUSH == "true" ]] || [[ $IS_SCHEDULE_DISPATCH == "true" ]]; then - # install miniconda in the home directory. For some reason HOME isn't set by Cirrus - export HOME=$PWD - - # install miniconda for uploading to anaconda - wget -q https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh - bash miniconda.sh -b -p $HOME/miniconda3 - $HOME/miniconda3/bin/conda init bash - source $HOME/miniconda3/bin/activate - conda install -y anaconda-client - - # The name of the zip file is derived from the `wheels_artifact` line. - # If you change the artifact line to `myfile_artifact` then it would be - # called myfile.zip - - curl https://api.cirrus-ci.com/v1/artifact/build/$CIRRUS_BUILD_ID/wheels.zip --output wheels.zip - unzip wheels.zip - - source ./tools/wheels/upload_wheels.sh - # IS_PUSH takes precedence over IS_SCHEDULE_DISPATCH - set_upload_vars - - # Will be skipped if not a push/tag/scheduled build - upload_wheels - fi From 8b43d86afcecf74b51d2a96425a64363b207e4a1 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 14 Jul 2025 01:17:55 -0700 Subject: [PATCH 0209/1018] BUG: fix casting issue in center, ljust, rjust, and zfill (#29369) --- numpy/_core/src/umath/stringdtype_ufuncs.cpp | 6 +++--- numpy/_core/tests/test_strings.py | 4 ++++ 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index b0181d4186c9..ca574f605c1a 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -1736,7 +1736,7 @@ center_ljust_rjust_strided_loop(PyArrayMethod_Context *context, size_t num_codepoints = inbuf.num_codepoints(); npy_intp width = (npy_intp)*(npy_int64*)in2; - if (num_codepoints > (size_t)width) { + if ((npy_intp)num_codepoints > width) { width = num_codepoints; } @@ -1866,8 +1866,8 @@ zfill_strided_loop(PyArrayMethod_Context *context, { Buffer inbuf((char *)is.buf, is.size); size_t in_codepoints = inbuf.num_codepoints(); - size_t width = (size_t)*(npy_int64 *)in2; - if (in_codepoints > width) { + npy_intp width = (npy_intp)*(npy_int64*)in2; + if ((npy_intp)in_codepoints > width) { width = in_codepoints; } // number of leading one-byte characters plus the size of the diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index 1b77a535eee6..756c6e1bb549 
100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -846,6 +846,7 @@ def test_rjust_raises_multiple_character_fill(self, dt): ('abc', 6, ' ', ' abc '), ('abc', 3, ' ', 'abc'), ('abc', 2, ' ', 'abc'), + ('abc', -2, ' ', 'abc'), ('abc', 10, '*', '***abc****'), ]) def test_center(self, buf, width, fillchar, res, dt): @@ -859,6 +860,7 @@ def test_center(self, buf, width, fillchar, res, dt): ('abc', 6, ' ', 'abc '), ('abc', 3, ' ', 'abc'), ('abc', 2, ' ', 'abc'), + ('abc', -2, ' ', 'abc'), ('abc', 10, '*', 'abc*******'), ]) def test_ljust(self, buf, width, fillchar, res, dt): @@ -872,6 +874,7 @@ def test_ljust(self, buf, width, fillchar, res, dt): ('abc', 6, ' ', ' abc'), ('abc', 3, ' ', 'abc'), ('abc', 2, ' ', 'abc'), + ('abc', -2, ' ', 'abc'), ('abc', 10, '*', '*******abc'), ]) def test_rjust(self, buf, width, fillchar, res, dt): @@ -893,6 +896,7 @@ def test_rjust(self, buf, width, fillchar, res, dt): ('-0123', 5, '-0123'), ('000', 3, '000'), ('34', 1, '34'), + ('34', -1, '34'), ('0034', 4, '0034'), ]) def test_zfill(self, buf, width, res, dt): From 9173e548de3541077a9b50b0f4dff7eceb9354fd Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Mon, 14 Jul 2025 12:46:38 +0200 Subject: [PATCH 0210/1018] MNT: Options to catch more issues reported by pytest (#28983) Apply Repo-Review suggestions PP306, PP307, PP308. Skip PP305 for now. --- pytest.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytest.ini b/pytest.ini index 132af0bb78ab..b8a1da2b4ec6 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,5 +1,5 @@ [pytest] -addopts = -l +addopts = -l -ra --strict-markers --strict-config norecursedirs = doc tools numpy/linalg/lapack_lite numpy/_core/code_generators numpy/_core/src/common/pythoncapi-compat doctest_optionflags = NORMALIZE_WHITESPACE ELLIPSIS ALLOW_UNICODE ALLOW_BYTES junit_family=xunit2 From 0c203ed4d6ccfb44efef41e87fe5819dbeb7d822 Mon Sep 17 00:00:00 2001 From: Lucas Colley Date: Mon, 14 Jul 2025 18:14:31 +0100 Subject: [PATCH 0211/1018] DEV: remove "packages" from `.gitignore` not necessary now (and can interfere with niche build circumstances https://github.com/prefix-dev/pixi-build-backends/issues/243) --- .gitignore | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/.gitignore b/.gitignore index c4de68c1a9a7..b54de4091bf3 100644 --- a/.gitignore +++ b/.gitignore @@ -43,23 +43,6 @@ GTAGS *.so *.mod -# Packages # -############ -# it's better to unpack these files and commit the raw source -# git has its own built in compression methods -*.7z -*.bz2 -*.bzip2 -*.dmg -*.gz -*.iso -*.jar -*.rar -*.tar -*.tbz2 -*.tgz -*.zip - # Python files # ################ # meson build/installation directories From 7bb8b9f3df8661d9fbea05b55f00717fde10b8c4 Mon Sep 17 00:00:00 2001 From: Sarang Joshi Date: Mon, 14 Jul 2025 12:18:12 -0500 Subject: [PATCH 0212/1018] Fix typo in npy_cpu_dispatch.c --- numpy/_core/src/common/npy_cpu_dispatch.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/common/npy_cpu_dispatch.c b/numpy/_core/src/common/npy_cpu_dispatch.c index ff22f234a7c6..47b8d2b6f4c2 100644 --- a/numpy/_core/src/common/npy_cpu_dispatch.c +++ b/numpy/_core/src/common/npy_cpu_dispatch.c @@ -9,7 +9,7 @@ NPY_VISIBILITY_HIDDEN int npy_cpu_dispatch_tracer_init(PyObject *mod) { if (npy_static_pydata.cpu_dispatch_registry != NULL) { - PyErr_Format(PyExc_RuntimeError, "CPU dispatcher tracer already initlized"); + 
PyErr_Format(PyExc_RuntimeError, "CPU dispatcher tracer already initialized"); return -1; } PyObject *mod_dict = PyModule_GetDict(mod); From 9a0086b8d22822e62db08d04add2d2c568346137 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Mon, 14 Jul 2025 23:52:44 +0100 Subject: [PATCH 0213/1018] TYP: Fix overloads in ``np.char.array`` and ``np.char.asarray`` for ``unicode`` argument (#29377) --- numpy/_core/defchararray.pyi | 70 +++++++++++++++++++++---- numpy/typing/tests/data/reveal/char.pyi | 6 +++ 2 files changed, 66 insertions(+), 10 deletions(-) diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 680875240096..d3e01c2c820a 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -1044,14 +1044,15 @@ def startswith( def str_len(A: UST_co) -> NDArray[int_]: ... # Overload 1 and 2: str- or bytes-based array-likes -# overload 3: arbitrary object with unicode=False (-> bytes_) -# overload 4: arbitrary object with unicode=True (-> str_) +# overload 3 and 4: arbitrary object with unicode=False (-> bytes_) +# overload 5 and 6: arbitrary object with unicode=True (-> str_) +# overload 7: arbitrary object with unicode=None (default) (-> str_ | bytes_) @overload def array( obj: U_co, itemsize: int | None = ..., copy: bool = ..., - unicode: L[False] = ..., + unicode: L[True] | None = ..., order: _OrderKACF = ..., ) -> _CharArray[str_]: ... @overload @@ -1059,7 +1060,15 @@ def array( obj: S_co, itemsize: int | None = ..., copy: bool = ..., - unicode: L[False] = ..., + unicode: L[False] | None = ..., + order: _OrderKACF = ..., +) -> _CharArray[bytes_]: ... +@overload +def array( + obj: object, + itemsize: int | None, + copy: bool, + unicode: L[False], order: _OrderKACF = ..., ) -> _CharArray[bytes_]: ... @overload @@ -1067,43 +1076,84 @@ def array( obj: object, itemsize: int | None = ..., copy: bool = ..., - unicode: L[False] = ..., + *, + unicode: L[False], order: _OrderKACF = ..., ) -> _CharArray[bytes_]: ... @overload +def array( + obj: object, + itemsize: int | None, + copy: bool, + unicode: L[True], + order: _OrderKACF = ..., +) -> _CharArray[str_]: ... +@overload def array( obj: object, itemsize: int | None = ..., copy: bool = ..., - unicode: L[True] = ..., + *, + unicode: L[True], order: _OrderKACF = ..., ) -> _CharArray[str_]: ... +@overload +def array( + obj: object, + itemsize: int | None = ..., + copy: bool = ..., + unicode: bool | None = ..., + order: _OrderKACF = ..., +) -> _CharArray[str_] | _CharArray[bytes_]: ... @overload def asarray( obj: U_co, itemsize: int | None = ..., - unicode: L[False] = ..., + unicode: L[True] | None = ..., order: _OrderKACF = ..., ) -> _CharArray[str_]: ... @overload def asarray( obj: S_co, itemsize: int | None = ..., - unicode: L[False] = ..., + unicode: L[False] | None = ..., + order: _OrderKACF = ..., +) -> _CharArray[bytes_]: ... +@overload +def asarray( + obj: object, + itemsize: int | None, + unicode: L[False], order: _OrderKACF = ..., ) -> _CharArray[bytes_]: ... @overload def asarray( obj: object, itemsize: int | None = ..., - unicode: L[False] = ..., + *, + unicode: L[False], order: _OrderKACF = ..., ) -> _CharArray[bytes_]: ... @overload +def asarray( + obj: object, + itemsize: int | None, + unicode: L[True], + order: _OrderKACF = ..., +) -> _CharArray[str_]: ... +@overload def asarray( obj: object, itemsize: int | None = ..., - unicode: L[True] = ..., + *, + unicode: L[True], order: _OrderKACF = ..., ) -> _CharArray[str_]: ... 
+@overload +def asarray( + obj: object, + itemsize: int | None = ..., + unicode: bool | None = ..., + order: _OrderKACF = ..., +) -> _CharArray[str_] | _CharArray[bytes_]: ... diff --git a/numpy/typing/tests/data/reveal/char.pyi b/numpy/typing/tests/data/reveal/char.pyi index 9fdc9f61e893..5c6af73888d0 100644 --- a/numpy/typing/tests/data/reveal/char.pyi +++ b/numpy/typing/tests/data/reveal/char.pyi @@ -209,6 +209,9 @@ assert_type(np.char.array("bob", copy=True), np.char.chararray[np_t._AnyShape, n assert_type(np.char.array(b"bob", itemsize=5), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) assert_type(np.char.array(1, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) assert_type(np.char.array(1, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.array(1), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]] | np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(AR_U, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(AR_S, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) assert_type(np.char.asarray(AR_U), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) assert_type(np.char.asarray(AR_S, order="K"), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) @@ -216,3 +219,6 @@ assert_type(np.char.asarray("bob"), np.char.chararray[np_t._AnyShape, np.dtype[n assert_type(np.char.asarray(b"bob", itemsize=5), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) assert_type(np.char.asarray(1, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) assert_type(np.char.asarray(1, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.asarray(1), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]] | np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(AR_U, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(AR_S, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) From d32370539ea5aa93100842a98cf21d55667b6041 Mon Sep 17 00:00:00 2001 From: Ben Woodruff Date: Tue, 15 Jul 2025 11:42:04 -0700 Subject: [PATCH 0214/1018] BUG: Fix repeatability issues in test suite (#29380) Resolves test failures caused by persistent global state when running numpy.test() twice in the same Python session. - core/tests: Skip TestAddNewdocUfunc on repeated runs. This avoids C-level mutation errors. - f2py/tests: Reset f77modulename at end of callcrackfortran. This change alters the code base. - lib/tests: Wrap StringIO parameters in lambdas. - f2py/tests/test_docs: Reset ftype.data.a at end of test_ftype. These changes ensure that the full test suite can be run multiple times in the same interpreter without failure. Closes #26718. 
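
The StringIO change deserves a brief illustration: a stream object shared across test runs is exhausted by the first read, so the fix (visible in the diff below) turns the parameters into factories that build a fresh stream per run. A minimal sketch of the failure mode:

    from io import StringIO

    data = StringIO("1,2\n3,4")
    assert data.read() == "1,2\n3,4"  # first run consumes the stream
    assert data.read() == ""          # a second run would see nothing

    # Re-creating the object per run restores independence:
    def make_data():
        return StringIO("1,2\n3,4")

    assert make_data().read() == make_data().read()
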
--- numpy/_core/tests/test_deprecations.py | 7 +++++++ numpy/f2py/f2py2e.py | 2 ++ numpy/f2py/tests/test_docs.py | 2 ++ numpy/lib/tests/test_io.py | 8 ++++++-- 4 files changed, 17 insertions(+), 2 deletions(-) diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index b83a2ac610ee..d4f2e9984266 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -448,6 +448,13 @@ def test_deprecated(self): class TestAddNewdocUFunc(_DeprecationTestCase): # Deprecated in Numpy 2.2, 2024-11 def test_deprecated(self): + doc = struct_ufunc.add_triplet.__doc__ + # gh-26718 + # This test mutates the C-level docstring pointer for add_triplet, + # which is permanent once set. Skip when re-running tests. + if doc is not None and "new docs" in doc: + pytest.skip("Cannot retest deprecation, otherwise ValueError: " + "Cannot change docstring of ufunc with non-NULL docstring") self.assert_deprecated( lambda: np._core.umath._add_newdoc_ufunc( struct_ufunc.add_triplet, "new docs" diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index 459299f8e127..84a5aa3c20a6 100644 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -378,6 +378,8 @@ def callcrackfortran(files, options): mod['gil_used'] = 'Py_MOD_GIL_USED' else: mod['gil_used'] = 'Py_MOD_GIL_NOT_USED' + # gh-26718 Reset global + crackfortran.f77modulename = '' return postlist diff --git a/numpy/f2py/tests/test_docs.py b/numpy/f2py/tests/test_docs.py index 5d9aaac9f15b..7015af3b2627 100644 --- a/numpy/f2py/tests/test_docs.py +++ b/numpy/f2py/tests/test_docs.py @@ -60,5 +60,7 @@ def test_ftype(self): ftype.data.x[1] = 45 assert_array_equal(ftype.data.x, np.array([1, 45, 3], dtype=np.float32)) + # gh-26718 Cleanup for repeated test runs + ftype.data.a = 0 # TODO: implement test methods for other example Fortran codes diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index a1698f240bff..b8c8e6cfcac1 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -1276,12 +1276,16 @@ def test_max_rows_larger(self): (1, ["ignored\n", "1,2\n", "\n", "3,4\n"]), # "Bad" lines that do not end in newlines: (1, ["ignored", "1,2", "", "3,4"]), - (1, StringIO("ignored\n1,2\n\n3,4")), + (1, lambda: StringIO("ignored\n1,2\n\n3,4")), # Same as above, but do not skip any lines: (0, ["-1,0\n", "1,2\n", "\n", "3,4\n"]), (0, ["-1,0", "1,2", "", "3,4"]), - (0, StringIO("-1,0\n1,2\n\n3,4"))]) + (0, lambda: StringIO("-1,0\n1,2\n\n3,4"))]) def test_max_rows_empty_lines(self, skip, data): + # gh-26718 re-instantiate StringIO objects each time + if callable(data): + data = data() + with pytest.warns(UserWarning, match=f"Input line 3.*max_rows={3 - skip}"): res = np.loadtxt(data, dtype=int, skiprows=skip, delimiter=",", From f37ffca5c734e6c4ae29109a1889429f1f59805b Mon Sep 17 00:00:00 2001 From: Verney7 Date: Wed, 16 Jul 2025 14:33:20 +0800 Subject: [PATCH 0215/1018] BLD: Add sw_64 support --- numpy/_core/include/numpy/npy_cpu.h | 3 +++ numpy/_core/include/numpy/npy_endian.h | 1 + 2 files changed, 4 insertions(+) diff --git a/numpy/_core/include/numpy/npy_cpu.h b/numpy/_core/include/numpy/npy_cpu.h index 52e9d5996bd1..d3a29da57f36 100644 --- a/numpy/_core/include/numpy/npy_cpu.h +++ b/numpy/_core/include/numpy/npy_cpu.h @@ -20,6 +20,7 @@ * NPY_CPU_RISCV64 * NPY_CPU_RISCV32 * NPY_CPU_LOONGARCH + * NPY_CPU_SW_64 * NPY_CPU_WASM */ #ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ @@ -111,6 +112,8 @@ #endif #elif defined(__loongarch_lp64) #define NPY_CPU_LOONGARCH64 +#elif 
defined(__sw_64__) + #define NPY_CPU_SW_64 #elif defined(__EMSCRIPTEN__) || defined(__wasm__) /* __EMSCRIPTEN__ is defined by emscripten: an LLVM-to-Web compiler */ /* __wasm__ is defined by clang when targeting wasm */ diff --git a/numpy/_core/include/numpy/npy_endian.h b/numpy/_core/include/numpy/npy_endian.h index 09262120bf82..ecb4b000763d 100644 --- a/numpy/_core/include/numpy/npy_endian.h +++ b/numpy/_core/include/numpy/npy_endian.h @@ -51,6 +51,7 @@ || defined(NPY_CPU_RISCV64) \ || defined(NPY_CPU_RISCV32) \ || defined(NPY_CPU_LOONGARCH) \ + || defined(NPY_CPU_SW_64) \ || defined(NPY_CPU_WASM) #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN From 685918f57bb2591f1ad14dedc7f2920cc533e4f8 Mon Sep 17 00:00:00 2001 From: Yuki Kobayashi Date: Wed, 16 Jul 2025 21:52:57 +0900 Subject: [PATCH 0216/1018] DOC: Fix `PyArrayMapIterObject` document (#29386) The structure of `PyArrayMapIterObject` defined in `multiarray/mapping.h`, not in `arrayobject.h`. --- doc/source/reference/c-api/types-and-structures.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst index 3f16b5f4dbc4..fb8efb9c8766 100644 --- a/doc/source/reference/c-api/types-and-structures.rst +++ b/doc/source/reference/c-api/types-and-structures.rst @@ -1608,7 +1608,7 @@ for completeness and assistance in understanding the code. The C-structure associated with :c:var:`PyArrayMapIter_Type`. This structure is useful if you are trying to understand the advanced-index mapping code. It is defined in the - ``arrayobject.h`` header. This type is not exposed to Python and + ``multiarray/mapping.h`` header. This type is not exposed to Python and could be replaced with a C-structure. As a Python type it takes advantage of reference- counted memory management. From 0fee7d82b176fde51be69fe06fec638768f834f5 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 16 Jul 2025 16:33:12 +0100 Subject: [PATCH 0217/1018] TYP: Type `MaskedArray.{sum,std,var,mean,prod}` (#29381) --- numpy/ma/core.pyi | 152 ++++++++++++++++++- numpy/typing/tests/data/pass/ndarray_misc.py | 4 +- numpy/typing/tests/data/reveal/ma.pyi | 25 +++ 3 files changed, 174 insertions(+), 7 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 63dea396de66..ee3b90f2891d 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1206,7 +1206,34 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def dot(self, b: ArrayLike, out: _ArrayT, strict: bool = ...) -> _ArrayT: ... - def sum(self, axis=..., dtype=..., out=..., keepdims=...): ... + @overload + def sum( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... + @overload + def sum( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def sum( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... # Keep in sync with `ndarray.cumsum` @overload # out: None (default) @@ -1216,7 +1243,35 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... - def prod(self, axis=..., dtype=..., out=..., keepdims=...): ... 
+ @overload + def prod( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... + @overload + def prod( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def prod( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + product: Any # Keep in sync with `ndarray.cumprod` @@ -1227,7 +1282,33 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... - def mean(self, axis=..., dtype=..., out=..., keepdims=...): ... + @overload + def mean( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... + @overload + def mean( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def mean( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... @overload def anom(self, axis: SupportsIndex | None = None, dtype: None = None) -> Self: ... @@ -1236,8 +1317,69 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def anom(self, axis: SupportsIndex | None, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, dtype]: ... - def var(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... - def std(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... + @overload + def var( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> Any: ... + @overload + def var( + self, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def var( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> _ArrayT: ... + + @overload + def std( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> Any: ... + @overload + def std( + self, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def std( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> _ArrayT: ... 
# Keep in sync with `ndarray.round` @overload # out=None (default) diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py index bb290cdf12f7..b3428f6b48c1 100644 --- a/numpy/typing/tests/data/pass/ndarray_misc.py +++ b/numpy/typing/tests/data/pass/ndarray_misc.py @@ -119,7 +119,7 @@ class IntSubClass(npt.NDArray[np.intp]): ... i4.std() A.std() A.std(axis=0) -A.std(keepdims=True) +A.std(keepdims=True, mean=0.) A.std(out=B0.astype(np.float64)) i4.sum() @@ -137,7 +137,7 @@ class IntSubClass(npt.NDArray[np.intp]): ... i4.var() A.var() A.var(axis=0) -A.var(keepdims=True) +A.var(keepdims=True, mean=0.) A.var(out=B0) A.argpartition([0]) diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 7c2cb9d5f05e..b5a292330208 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -743,6 +743,31 @@ assert_type(AR_LIKE_td64 * MAR_o, Any) # type: ignore[assert-type] assert_type(AR_LIKE_dt64 * MAR_o, Any) # type: ignore[assert-type] assert_type(AR_LIKE_o * MAR_o, Any) # type: ignore[assert-type] +assert_type(MAR_f8.sum(), Any) +assert_type(MAR_f8.sum(axis=0), Any) +assert_type(MAR_f8.sum(keepdims=True), Any) +assert_type(MAR_f8.sum(out=MAR_subclass), MaskedArraySubclass) + +assert_type(MAR_f8.std(), Any) +assert_type(MAR_f8.std(axis=0), Any) +assert_type(MAR_f8.std(keepdims=True, mean=0.), Any) +assert_type(MAR_f8.std(out=MAR_subclass), MaskedArraySubclass) + +assert_type(MAR_f8.var(), Any) +assert_type(MAR_f8.var(axis=0), Any) +assert_type(MAR_f8.var(keepdims=True, mean=0.), Any) +assert_type(MAR_f8.var(out=MAR_subclass), MaskedArraySubclass) + +assert_type(MAR_f8.mean(), Any) +assert_type(MAR_f8.mean(axis=0), Any) +assert_type(MAR_f8.mean(keepdims=True), Any) +assert_type(MAR_f8.mean(out=MAR_subclass), MaskedArraySubclass) + +assert_type(MAR_f8.prod(), Any) +assert_type(MAR_f8.prod(axis=0), Any) +assert_type(MAR_f8.prod(keepdims=True), Any) +assert_type(MAR_f8.prod(out=MAR_subclass), MaskedArraySubclass) + # MaskedArray "true" division assert_type(MAR_f8 / b, MaskedArray[np.float64]) From 6e48cff51b4ea3c002ca86ca4835a723338dbf54 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 16 Jul 2025 17:50:07 +0100 Subject: [PATCH 0218/1018] DOC: document `mean` parameter in ``ndarray.std`` and ``ndarray.var`` (#29387) --- numpy/_core/_add_newdocs.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 597d5c6deaf3..90d33d4b810a 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -4240,7 +4240,7 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('std', """ - a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True) + a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True, mean=np._NoValue) Returns the standard deviation of the array elements along given axis. @@ -4518,7 +4518,7 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('var', """ - a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True) + a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True, mean=np._NoValue) Returns the variance of the array elements, along given axis. 
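
For reference, the ``mean`` keyword documented in these signatures lets callers pass a precomputed mean (shaped as if computed with ``keepdims=True``) so that ``std``/``var`` skip recomputing it. A small usage sketch:

    import numpy as np

    a = np.arange(6.0).reshape(2, 3)
    m = a.mean(axis=1, keepdims=True)
    # Supplying the precomputed mean gives the same result as letting
    # std/var compute it internally.
    assert np.allclose(a.std(axis=1, mean=m), a.std(axis=1))
    assert np.allclose(a.var(axis=1, mean=m), a.var(axis=1))
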
From b5d6eb4300f782f67dde893dc72f2dca94945b6c Mon Sep 17 00:00:00 2001 From: lvllvl <24905907+lvllvl@users.noreply.github.com> Date: Wed, 16 Jul 2025 16:37:09 -0400 Subject: [PATCH 0219/1018] MNT: add linter for thread-unsafe C API uses (#29371) * MNT: add extra lint - thread-unsafe C API lint * MNT: integrate ruff-lint with C-API-lint --- numpy/_core/include/numpy/npy_3kcompat.h | 2 +- numpy/_core/src/common/npy_cpu_dispatch.c | 2 +- numpy/_core/src/common/ufunc_override.c | 2 +- .../src/multiarray/_multiarray_tests.c.src | 8 +- numpy/_core/src/multiarray/array_coercion.c | 2 +- .../src/multiarray/arrayfunction_override.c | 4 +- numpy/_core/src/multiarray/arraytypes.c.src | 12 +-- numpy/_core/src/multiarray/buffer.c | 2 +- numpy/_core/src/multiarray/compiled_base.c | 2 +- numpy/_core/src/multiarray/conversion_utils.c | 4 +- numpy/_core/src/multiarray/convert_datatype.c | 10 +- numpy/_core/src/multiarray/ctors.c | 2 +- numpy/_core/src/multiarray/descriptor.c | 44 ++++----- numpy/_core/src/multiarray/dtype_transfer.c | 10 +- numpy/_core/src/multiarray/dtype_traversal.c | 2 +- numpy/_core/src/multiarray/dtypemeta.c | 2 +- numpy/_core/src/multiarray/hashdescr.c | 2 +- numpy/_core/src/multiarray/iterators.c | 2 +- .../multiarray/legacy_dtype_implementation.c | 6 +- numpy/_core/src/multiarray/mapping.c | 2 +- numpy/_core/src/multiarray/methods.c | 10 +- numpy/_core/src/multiarray/multiarraymodule.c | 2 +- numpy/_core/src/multiarray/nditer_pywrap.c | 2 +- numpy/_core/src/multiarray/refcount.c | 6 +- numpy/_core/src/multiarray/textreading/rows.c | 2 +- numpy/_core/src/multiarray/usertypes.c | 2 +- numpy/_core/src/umath/ufunc_object.c | 8 +- numpy/_core/src/umath/ufunc_type_resolution.c | 6 +- numpy/_core/src/umath/umathmodule.c | 6 +- numpy/f2py/src/fortranobject.c | 8 +- tools/ci/check_c_api_usage.sh | 91 +++++++++++++++++++ tools/linter.py | 28 +++++- 32 files changed, 204 insertions(+), 89 deletions(-) create mode 100644 tools/ci/check_c_api_usage.sh diff --git a/numpy/_core/include/numpy/npy_3kcompat.h b/numpy/_core/include/numpy/npy_3kcompat.h index c2bf74faf09d..cd91f66268c7 100644 --- a/numpy/_core/include/numpy/npy_3kcompat.h +++ b/numpy/_core/include/numpy/npy_3kcompat.h @@ -242,7 +242,7 @@ static inline PyObject* npy_PyFile_OpenFile(PyObject *filename, const char *mode) { PyObject *open; - open = PyDict_GetItemString(PyEval_GetBuiltins(), "open"); + open = PyDict_GetItemString(PyEval_GetBuiltins(), "open"); // noqa: borrowed-ref OK if (open == NULL) { return NULL; } diff --git a/numpy/_core/src/common/npy_cpu_dispatch.c b/numpy/_core/src/common/npy_cpu_dispatch.c index 47b8d2b6f4c2..2cb3cd817d2a 100644 --- a/numpy/_core/src/common/npy_cpu_dispatch.c +++ b/numpy/_core/src/common/npy_cpu_dispatch.c @@ -33,7 +33,7 @@ NPY_VISIBILITY_HIDDEN void npy_cpu_dispatch_trace(const char *fname, const char *signature, const char **dispatch_info) { - PyObject *func_dict = PyDict_GetItemString(npy_static_pydata.cpu_dispatch_registry, fname); + PyObject *func_dict = PyDict_GetItemString(npy_static_pydata.cpu_dispatch_registry, fname); // noqa: borrowed-ref OK if (func_dict == NULL) { func_dict = PyDict_New(); if (func_dict == NULL) { diff --git a/numpy/_core/src/common/ufunc_override.c b/numpy/_core/src/common/ufunc_override.c index e98315f14a94..0bcbea5baa30 100644 --- a/numpy/_core/src/common/ufunc_override.c +++ b/numpy/_core/src/common/ufunc_override.c @@ -108,7 +108,7 @@ PyUFuncOverride_GetOutObjects(PyObject *kwds, PyObject **out_kwd_obj, PyObject * * PySequence_Fast* functions. 
This is required for PyPy */ PyObject *seq; - seq = PySequence_Fast(*out_kwd_obj, + seq = PySequence_Fast(*out_kwd_obj, // noqa: borrowed-ref OK "Could not convert object to sequence"); if (seq == NULL) { Py_CLEAR(*out_kwd_obj); diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src index 8012a32b070e..068fabc7fee8 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -644,7 +644,7 @@ incref_elide_l(PyObject *dummy, PyObject *args) } /* get item without increasing refcount, item may still be on the python * stack but above the inaccessible top */ - r = PyList_GetItem(arg, 4); + r = PyList_GetItem(arg, 4); // noqa: borrowed-ref OK res = PyNumber_Add(r, r); return res; @@ -863,7 +863,7 @@ get_all_cast_information(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) if (classes == NULL) { goto fail; } - Py_SETREF(classes, PySequence_Fast(classes, NULL)); + Py_SETREF(classes, PySequence_Fast(classes, NULL)); // noqa: borrowed-ref OK if (classes == NULL) { goto fail; } @@ -883,7 +883,7 @@ get_all_cast_information(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) PyObject *to_dtype, *cast_obj; Py_ssize_t pos = 0; - while (PyDict_Next(NPY_DT_SLOTS(from_dtype)->castingimpls, + while (PyDict_Next(NPY_DT_SLOTS(from_dtype)->castingimpls, // noqa: borrowed-ref OK &pos, &to_dtype, &cast_obj)) { if (cast_obj == Py_None) { continue; @@ -965,7 +965,7 @@ identityhash_tester(PyObject *NPY_UNUSED(mod), } /* Replace the sequence with a guaranteed fast-sequence */ - sequence = PySequence_Fast(sequence, "converting sequence."); + sequence = PySequence_Fast(sequence, "converting sequence."); // noqa: borrowed-ref OK if (sequence == NULL) { goto finish; } diff --git a/numpy/_core/src/multiarray/array_coercion.c b/numpy/_core/src/multiarray/array_coercion.c index ff7d98bd9c64..8271fb6812d1 100644 --- a/numpy/_core/src/multiarray/array_coercion.c +++ b/numpy/_core/src/multiarray/array_coercion.c @@ -1148,7 +1148,7 @@ PyArray_DiscoverDTypeAndShape_Recursive( force_sequence_due_to_char_dtype: /* Ensure we have a sequence (required for PyPy) */ - seq = PySequence_Fast(obj, "Could not convert object to sequence"); + seq = PySequence_Fast(obj, "Could not convert object to sequence"); // noqa: borrowed-ref - manual fix needed if (seq == NULL) { /* * Specifically do not fail on things that look like a dictionary, diff --git a/numpy/_core/src/multiarray/arrayfunction_override.c b/numpy/_core/src/multiarray/arrayfunction_override.c index 9834ab138cf6..72211e2a6d62 100644 --- a/numpy/_core/src/multiarray/arrayfunction_override.c +++ b/numpy/_core/src/multiarray/arrayfunction_override.c @@ -371,7 +371,7 @@ array__get_implementing_args( return NULL; } - relevant_args = PySequence_Fast( + relevant_args = PySequence_Fast( // noqa: borrowed-ref OK relevant_args, "dispatcher for __array_function__ did not return an iterable"); if (relevant_args == NULL) { @@ -518,7 +518,7 @@ dispatcher_vectorcall(PyArray_ArrayFunctionDispatcherObject *self, fix_name_if_typeerror(self); return NULL; } - Py_SETREF(relevant_args, PySequence_Fast(relevant_args, + Py_SETREF(relevant_args, PySequence_Fast(relevant_args, // noqa: borrowed-ref OK "dispatcher for __array_function__ did not return an iterable")); if (relevant_args == NULL) { return NULL; diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src index 9e5588f98a83..52c9bdfb6bcc 100644 --- 
a/numpy/_core/src/multiarray/arraytypes.c.src +++ b/numpy/_core/src/multiarray/arraytypes.c.src @@ -881,7 +881,7 @@ VOID_getitem(void *input, void *vap) npy_intp offset; PyArray_Descr *new; key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(descr->fields, key); + tup = PyDict_GetItem(descr->fields, key); // noqa: borrowed-ref OK if (_unpack_field(tup, &new, &offset) < 0) { Py_DECREF(ret); return NULL; @@ -973,7 +973,7 @@ _setup_field(int i, _PyArray_LegacyDescr *descr, PyArrayObject *arr, npy_intp offset; key = PyTuple_GET_ITEM(descr->names, i); - tup = PyDict_GetItem(descr->fields, key); + tup = PyDict_GetItem(descr->fields, key); // noqa: borrowed-ref OK if (_unpack_field(tup, &new, &offset) < 0) { return -1; } @@ -2274,7 +2274,7 @@ VOID_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride, PyArrayObject_fields dummy_fields = get_dummy_stack_array(arr); PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; - while (PyDict_Next(descr->fields, &pos, &key, &value)) { + while (PyDict_Next(descr->fields, &pos, &key, &value)) { // noqa: borrowed-ref OK npy_intp offset; PyArray_Descr *new; if (NPY_TITLE_KEY(key, value)) { @@ -2359,7 +2359,7 @@ VOID_copyswap (char *dst, char *src, int swap, PyArrayObject *arr) PyArrayObject_fields dummy_fields = get_dummy_stack_array(arr); PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; - while (PyDict_Next(descr->fields, &pos, &key, &value)) { + while (PyDict_Next(descr->fields, &pos, &key, &value)) { // noqa: borrowed-ref OK npy_intp offset; PyArray_Descr * new; @@ -2679,7 +2679,7 @@ VOID_nonzero (char *ip, PyArrayObject *ap) PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; _PyArray_LegacyDescr *descr = (_PyArray_LegacyDescr *)PyArray_DESCR(ap); - while (PyDict_Next(descr->fields, &pos, &key, &value)) { + while (PyDict_Next(descr->fields, &pos, &key, &value)) { // noqa: borrowed-ref OK PyArray_Descr * new; npy_intp offset; if (NPY_TITLE_KEY(key, value)) { @@ -3041,7 +3041,7 @@ VOID_compare(char *ip1, char *ip2, PyArrayObject *ap) PyArray_Descr *new; npy_intp offset; key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(PyDataType_FIELDS(descr), key); + tup = PyDict_GetItem(PyDataType_FIELDS(descr), key); // noqa: borrowed-ref OK if (_unpack_field(tup, &new, &offset) < 0) { goto finish; } diff --git a/numpy/_core/src/multiarray/buffer.c b/numpy/_core/src/multiarray/buffer.c index fcff3ad6ca74..a6c683f26a8b 100644 --- a/numpy/_core/src/multiarray/buffer.c +++ b/numpy/_core/src/multiarray/buffer.c @@ -268,7 +268,7 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str, int ret; name = PyTuple_GET_ITEM(ldescr->names, k); - item = PyDict_GetItem(ldescr->fields, name); + item = PyDict_GetItem(ldescr->fields, name); // noqa: borrowed-ref OK child = (PyArray_Descr*)PyTuple_GetItem(item, 0); offset_obj = PyTuple_GetItem(item, 1); diff --git a/numpy/_core/src/multiarray/compiled_base.c b/numpy/_core/src/multiarray/compiled_base.c index fee0d4a61a78..179c8e404322 100644 --- a/numpy/_core/src/multiarray/compiled_base.c +++ b/numpy/_core/src/multiarray/compiled_base.c @@ -1522,7 +1522,7 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t PyTypeObject *new = (PyTypeObject *)obj; _ADDDOC(new->tp_doc, new->tp_name); if (new->tp_dict != NULL && PyDict_CheckExact(new->tp_dict) && - PyDict_GetItemString(new->tp_dict, "__doc__") == Py_None) { + PyDict_GetItemString(new->tp_dict, "__doc__") == Py_None) { // noqa: borrowed-ref - manual fix needed /* Warning: Modifying `tp_dict` is not 
generally safe! */ if (PyDict_SetItemString(new->tp_dict, "__doc__", str) < 0) { return NULL; diff --git a/numpy/_core/src/multiarray/conversion_utils.c b/numpy/_core/src/multiarray/conversion_utils.c index 1994dd0ee8f7..d487aa16727d 100644 --- a/numpy/_core/src/multiarray/conversion_utils.c +++ b/numpy/_core/src/multiarray/conversion_utils.c @@ -130,7 +130,7 @@ PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq) * dimension_from_scalar as soon as possible. */ if (!PyLong_CheckExact(obj) && PySequence_Check(obj)) { - seq_obj = PySequence_Fast(obj, + seq_obj = PySequence_Fast(obj, // noqa: borrowed-ref - manual fix needed "expected a sequence of integers or a single integer."); if (seq_obj == NULL) { /* continue attempting to parse as a single integer. */ @@ -1135,7 +1135,7 @@ PyArray_IntpFromSequence(PyObject *seq, npy_intp *vals, int maxvals) { PyObject *seq_obj = NULL; if (!PyLong_CheckExact(seq) && PySequence_Check(seq)) { - seq_obj = PySequence_Fast(seq, + seq_obj = PySequence_Fast(seq, // noqa: borrowed-ref - manual fix needed "expected a sequence of integers or a single integer"); if (seq_obj == NULL) { /* continue attempting to parse as a single integer. */ diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 59b6298b5815..d34d852a706b 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -344,7 +344,7 @@ PyArray_GetCastFunc(PyArray_Descr *descr, int type_num) PyObject *cobj; key = PyLong_FromLong(type_num); - cobj = PyDict_GetItem(obj, key); + cobj = PyDict_GetItem(obj, key); // noqa: borrowed-ref OK Py_DECREF(key); if (cobj && PyCapsule_CheckExact(cobj)) { castfunc = PyCapsule_GetPointer(cobj, NULL); @@ -2749,7 +2749,7 @@ nonstructured_to_structured_resolve_descriptors( Py_ssize_t pos = 0; PyObject *key, *tuple; - while (PyDict_Next(to_descr->fields, &pos, &key, &tuple)) { + while (PyDict_Next(to_descr->fields, &pos, &key, &tuple)) { // noqa: borrowed-ref OK PyArray_Descr *field_descr = (PyArray_Descr *)PyTuple_GET_ITEM(tuple, 0); npy_intp field_view_off = NPY_MIN_INTP; NPY_CASTING field_casting = PyArray_GetCastInfo( @@ -2898,7 +2898,7 @@ structured_to_nonstructured_resolve_descriptors( return -1; } PyObject *key = PyTuple_GetItem(PyDataType_NAMES(given_descrs[0]), 0); - PyObject *base_tup = PyDict_GetItem(PyDataType_FIELDS(given_descrs[0]), key); + PyObject *base_tup = PyDict_GetItem(PyDataType_FIELDS(given_descrs[0]), key); // noqa: borrowed-ref OK base_descr = (PyArray_Descr *)PyTuple_GET_ITEM(base_tup, 0); struct_view_offset = PyLong_AsSsize_t(PyTuple_GET_ITEM(base_tup, 1)); if (error_converting(struct_view_offset)) { @@ -3033,7 +3033,7 @@ can_cast_fields_safety( for (Py_ssize_t i = 0; i < field_count; i++) { npy_intp field_view_off = NPY_MIN_INTP; PyObject *from_key = PyTuple_GET_ITEM(PyDataType_NAMES(from), i); - PyObject *from_tup = PyDict_GetItemWithError(PyDataType_FIELDS(from), from_key); + PyObject *from_tup = PyDict_GetItemWithError(PyDataType_FIELDS(from), from_key); // noqa: borrowed-ref OK if (from_tup == NULL) { return give_bad_field_error(from_key); } @@ -3041,7 +3041,7 @@ can_cast_fields_safety( /* Check whether the field names match */ PyObject *to_key = PyTuple_GET_ITEM(PyDataType_NAMES(to), i); - PyObject *to_tup = PyDict_GetItem(PyDataType_FIELDS(to), to_key); + PyObject *to_tup = PyDict_GetItem(PyDataType_FIELDS(to), to_key); // noqa: borrowed-ref OK if (to_tup == NULL) { return give_bad_field_error(from_key); } diff --git 
a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index ff30e581cd91..4f466677c57c 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -2114,7 +2114,7 @@ _is_default_descr(PyObject *descr, PyObject *typestr) { if (!PyList_Check(descr) || PyList_GET_SIZE(descr) != 1) { return 0; } - PyObject *tuple = PyList_GET_ITEM(descr, 0); + PyObject *tuple = PyList_GET_ITEM(descr, 0); // noqa: borrowed-ref - manual fix needed if (!(PyTuple_Check(tuple) && PyTuple_GET_SIZE(tuple) == 2)) { return 0; } diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index c32b0e5e3f9f..135deee83c97 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -424,7 +424,7 @@ _convert_from_array_descr(PyObject *obj, int align) return NULL; } for (int i = 0; i < n; i++) { - PyObject *item = PyList_GET_ITEM(obj, i); + PyObject *item = PyList_GET_ITEM(obj, i); // noqa: borrowed-ref - manual fix needed if (!PyTuple_Check(item) || (PyTuple_GET_SIZE(item) < 2)) { PyErr_Format(PyExc_TypeError, "Field elements must be 2- or 3-tuples, got '%R'", @@ -507,10 +507,10 @@ _convert_from_array_descr(PyObject *obj, int align) "StringDType is not currently supported for structured dtype fields."); goto fail; } - if ((PyDict_GetItemWithError(fields, name) != NULL) + if ((PyDict_GetItemWithError(fields, name) != NULL) // noqa: borrowed-ref OK || (title && PyUnicode_Check(title) - && (PyDict_GetItemWithError(fields, title) != NULL))) { + && (PyDict_GetItemWithError(fields, title) != NULL))) { // noqa: borrowed-ref OK PyErr_Format(PyExc_ValueError, "field %R occurs more than once", name); Py_DECREF(conv); @@ -548,7 +548,7 @@ _convert_from_array_descr(PyObject *obj, int align) goto fail; } if (PyUnicode_Check(title)) { - PyObject *existing = PyDict_GetItemWithError(fields, title); + PyObject *existing = PyDict_GetItemWithError(fields, title); // noqa: borrowed-ref OK if (existing == NULL && PyErr_Occurred()) { goto fail; } @@ -613,7 +613,7 @@ _convert_from_list(PyObject *obj, int align) * Ignore any empty string at end which _internal._commastring * can produce */ - PyObject *last_item = PyList_GET_ITEM(obj, n-1); + PyObject *last_item = PyList_GET_ITEM(obj, n-1); // noqa: borrowed-ref OK if (PyUnicode_Check(last_item)) { Py_ssize_t s = PySequence_Size(last_item); if (s < 0) { @@ -643,7 +643,7 @@ _convert_from_list(PyObject *obj, int align) int totalsize = 0; for (int i = 0; i < n; i++) { PyArray_Descr *conv = _convert_from_any( - PyList_GET_ITEM(obj, i), align); + PyList_GET_ITEM(obj, i), align); // noqa: borrowed-ref OK if (conv == NULL) { goto fail; } @@ -794,7 +794,7 @@ _validate_union_object_dtype(_PyArray_LegacyDescr *new, _PyArray_LegacyDescr *co if (name == NULL) { return -1; } - tup = PyDict_GetItemWithError(conv->fields, name); + tup = PyDict_GetItemWithError(conv->fields, name); // noqa: borrowed-ref OK if (tup == NULL) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -940,7 +940,7 @@ _validate_object_field_overlap(_PyArray_LegacyDescr *dtype) if (key == NULL) { return -1; } - tup = PyDict_GetItemWithError(fields, key); + tup = PyDict_GetItemWithError(fields, key); // noqa: borrowed-ref OK if (tup == NULL) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -960,7 +960,7 @@ _validate_object_field_overlap(_PyArray_LegacyDescr *dtype) if (key == NULL) { return -1; } - tup = PyDict_GetItemWithError(fields, key); + tup 
= PyDict_GetItemWithError(fields, key); // noqa: borrowed-ref OK if (tup == NULL) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -1213,7 +1213,7 @@ _convert_from_dict(PyObject *obj, int align) } /* Insert into dictionary */ - if (PyDict_GetItemWithError(fields, name) != NULL) { + if (PyDict_GetItemWithError(fields, name) != NULL) { // noqa: borrowed-ref OK PyErr_SetString(PyExc_ValueError, "name already used as a name or title"); Py_DECREF(tup); @@ -1232,7 +1232,7 @@ _convert_from_dict(PyObject *obj, int align) } if (len == 3) { if (PyUnicode_Check(title)) { - if (PyDict_GetItemWithError(fields, title) != NULL) { + if (PyDict_GetItemWithError(fields, title) != NULL) { // noqa: borrowed-ref OK PyErr_SetString(PyExc_ValueError, "title already used as a name or title."); Py_DECREF(tup); @@ -1895,7 +1895,7 @@ _convert_from_str(PyObject *obj, int align) if (typeDict == NULL) { goto fail; } - PyObject *item = PyDict_GetItemWithError(typeDict, obj); + PyObject *item = PyDict_GetItemWithError(typeDict, obj); // noqa: borrowed-ref - manual fix needed if (item == NULL) { if (PyErr_Occurred()) { return NULL; @@ -2276,7 +2276,7 @@ _arraydescr_isnative(PyArray_Descr *self) PyArray_Descr *new; int offset; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(self), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(self), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -2422,7 +2422,7 @@ arraydescr_names_set( int ret; key = PyTuple_GET_ITEM(self->names, i); /* Borrowed references to item and new_key */ - item = PyDict_GetItemWithError(self->fields, key); + item = PyDict_GetItemWithError(self->fields, key); // noqa: borrowed-ref OK if (item == NULL) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -2848,7 +2848,7 @@ _descr_find_object(PyArray_Descr *self) int offset; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(self), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(self), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -2964,7 +2964,7 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) if (fields != Py_None) { PyObject *key, *list; key = PyLong_FromLong(-1); - list = PyDict_GetItemWithError(fields, key); + list = PyDict_GetItemWithError(fields, key); // noqa: borrowed-ref OK if (!list) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -3140,7 +3140,7 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) for (i = 0; i < PyTuple_GET_SIZE(names); ++i) { name = PyTuple_GET_ITEM(names, i); - field = PyDict_GetItemWithError(fields, name); + field = PyDict_GetItemWithError(fields, name); // noqa: borrowed-ref OK if (!field) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -3344,7 +3344,7 @@ PyArray_DescrNewByteorder(PyArray_Descr *oself, char newendian) return NULL; } /* make new dictionary with replaced PyArray_Descr Objects */ - while (PyDict_Next(self->fields, &pos, &key, &value)) { + while (PyDict_Next(self->fields, &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -3470,7 +3470,7 @@ is_dtype_struct_simple_unaligned_layout(PyArray_Descr *dtype) if (key == NULL) { return 0; } - tup = PyDict_GetItem(fields, key); + tup = PyDict_GetItem(fields, key); // noqa: borrowed-ref OK if (tup == NULL) { return 0; } @@ -3635,7 +3635,7 @@ 
_check_has_fields(PyArray_Descr *self) static PyObject * _subscript_by_name(_PyArray_LegacyDescr *self, PyObject *op) { - PyObject *obj = PyDict_GetItemWithError(self->fields, op); + PyObject *obj = PyDict_GetItemWithError(self->fields, op); // noqa: borrowed-ref OK if (obj == NULL) { if (!PyErr_Occurred()) { PyErr_Format(PyExc_KeyError, @@ -3672,7 +3672,7 @@ _is_list_of_strings(PyObject *obj) } seqlen = PyList_GET_SIZE(obj); for (i = 0; i < seqlen; i++) { - PyObject *item = PyList_GET_ITEM(obj, i); + PyObject *item = PyList_GET_ITEM(obj, i); // noqa: borrowed-ref - manual fix needed if (!PyUnicode_Check(item)) { return NPY_FALSE; } @@ -3716,7 +3716,7 @@ arraydescr_field_subset_view(_PyArray_LegacyDescr *self, PyObject *ind) */ PyTuple_SET_ITEM(names, i, name); - tup = PyDict_GetItemWithError(self->fields, name); + tup = PyDict_GetItemWithError(self->fields, name); // noqa: borrowed-ref OK if (tup == NULL) { if (!PyErr_Occurred()) { PyErr_SetObject(PyExc_KeyError, name); diff --git a/numpy/_core/src/multiarray/dtype_transfer.c b/numpy/_core/src/multiarray/dtype_transfer.c index 188a55a4b5f5..64d5bfa89e8e 100644 --- a/numpy/_core/src/multiarray/dtype_transfer.c +++ b/numpy/_core/src/multiarray/dtype_transfer.c @@ -2319,7 +2319,7 @@ get_fields_transfer_function(int NPY_UNUSED(aligned), *out_flags = PyArrayMethod_MINIMAL_FLAGS; for (i = 0; i < field_count; ++i) { key = PyTuple_GET_ITEM(PyDataType_NAMES(dst_dtype), i); - tup = PyDict_GetItem(PyDataType_FIELDS(dst_dtype), key); + tup = PyDict_GetItem(PyDataType_FIELDS(dst_dtype), key); // noqa: borrowed-ref OK if (!PyArg_ParseTuple(tup, "Oi|O", &dst_fld_dtype, &dst_offset, &title)) { PyMem_Free(data); @@ -2383,7 +2383,7 @@ get_fields_transfer_function(int NPY_UNUSED(aligned), NPY_traverse_info_init(&data->decref_src); key = PyTuple_GET_ITEM(PyDataType_NAMES(src_dtype), 0); - tup = PyDict_GetItem(PyDataType_FIELDS(src_dtype), key); + tup = PyDict_GetItem(PyDataType_FIELDS(src_dtype), key); // noqa: borrowed-ref OK if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype, &src_offset, &title)) { PyMem_Free(data); @@ -2435,14 +2435,14 @@ get_fields_transfer_function(int NPY_UNUSED(aligned), /* set up the transfer function for each field */ for (i = 0; i < field_count; ++i) { key = PyTuple_GET_ITEM(PyDataType_NAMES(dst_dtype), i); - tup = PyDict_GetItem(PyDataType_FIELDS(dst_dtype), key); + tup = PyDict_GetItem(PyDataType_FIELDS(dst_dtype), key); // noqa: borrowed-ref OK if (!PyArg_ParseTuple(tup, "Oi|O", &dst_fld_dtype, &dst_offset, &title)) { NPY_AUXDATA_FREE((NpyAuxData *)data); return NPY_FAIL; } key = PyTuple_GET_ITEM(PyDataType_NAMES(src_dtype), i); - tup = PyDict_GetItem(PyDataType_FIELDS(src_dtype), key); + tup = PyDict_GetItem(PyDataType_FIELDS(src_dtype), key); // noqa: borrowed-ref OK if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype, &src_offset, &title)) { NPY_AUXDATA_FREE((NpyAuxData *)data); @@ -3831,4 +3831,4 @@ PyArray_PrepareThreeRawArrayIter(int ndim, npy_intp const *shape, *out_dataC = dataC; *out_ndim = ndim; return 0; -} +} \ No newline at end of file diff --git a/numpy/_core/src/multiarray/dtype_traversal.c b/numpy/_core/src/multiarray/dtype_traversal.c index 91b1889b7d1f..e86aab7411d4 100644 --- a/numpy/_core/src/multiarray/dtype_traversal.c +++ b/numpy/_core/src/multiarray/dtype_traversal.c @@ -346,7 +346,7 @@ get_fields_traverse_function( int offset; key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(dtype->fields, key); + tup = PyDict_GetItem(dtype->fields, key); // noqa: borrowed-ref OK if (!PyArg_ParseTuple(tup, "Oi|O", 
&fld_dtype, &offset, &title)) { NPY_AUXDATA_FREE((NpyAuxData *)data); return -1; diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 0b1b0fb39192..5ac7ef3b2320 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -693,7 +693,7 @@ void_ensure_canonical(_PyArray_LegacyDescr *self) int maxalign = 1; for (Py_ssize_t i = 0; i < field_num; i++) { PyObject *name = PyTuple_GET_ITEM(self->names, i); - PyObject *tuple = PyDict_GetItem(self->fields, name); + PyObject *tuple = PyDict_GetItem(self->fields, name); // noqa: borrowed-ref OK PyObject *new_tuple = PyTuple_New(PyTuple_GET_SIZE(tuple)); PyArray_Descr *field_descr = NPY_DT_CALL_ensure_canonical( (PyArray_Descr *)PyTuple_GET_ITEM(tuple, 0)); diff --git a/numpy/_core/src/multiarray/hashdescr.c b/numpy/_core/src/multiarray/hashdescr.c index f570caf1588f..d5359d86390f 100644 --- a/numpy/_core/src/multiarray/hashdescr.c +++ b/numpy/_core/src/multiarray/hashdescr.c @@ -127,7 +127,7 @@ static int _array_descr_walk_fields(PyObject *names, PyObject* fields, PyObject* * For each field, add the key + descr + offset to l */ key = PyTuple_GET_ITEM(names, pos); - value = PyDict_GetItem(fields, key); + value = PyDict_GetItem(fields, key); // noqa: borrowed-ref OK /* XXX: are those checks necessary ? */ if (value == NULL) { PyErr_SetString(PyExc_SystemError, diff --git a/numpy/_core/src/multiarray/iterators.c b/numpy/_core/src/multiarray/iterators.c index 422c690882ab..d83f0d7a3ac3 100644 --- a/numpy/_core/src/multiarray/iterators.c +++ b/numpy/_core/src/multiarray/iterators.c @@ -1489,7 +1489,7 @@ arraymultiter_new(PyTypeObject *NPY_UNUSED(subtype), PyObject *args, return NULL; } - fast_seq = PySequence_Fast(args, ""); // needed for pypy + fast_seq = PySequence_Fast(args, ""); // needed for pypy // noqa: borrowed-ref - manual fix needed if (fast_seq == NULL) { return NULL; } diff --git a/numpy/_core/src/multiarray/legacy_dtype_implementation.c b/numpy/_core/src/multiarray/legacy_dtype_implementation.c index 70b4fa1e49db..abfc1bd0e3cd 100644 --- a/numpy/_core/src/multiarray/legacy_dtype_implementation.c +++ b/numpy/_core/src/multiarray/legacy_dtype_implementation.c @@ -320,8 +320,8 @@ can_cast_fields(PyObject *field1, PyObject *field2, NPY_CASTING casting) /* Iterate over all the fields and compare for castability */ ppos = 0; - while (PyDict_Next(field1, &ppos, &key, &tuple1)) { - if ((tuple2 = PyDict_GetItem(field2, key)) == NULL) { + while (PyDict_Next(field1, &ppos, &key, &tuple1)) { // noqa: borrowed-ref OK + if ((tuple2 = PyDict_GetItem(field2, key)) == NULL) { // noqa: borrowed-ref OK return 0; } /* Compare the dtype of the field for castability */ @@ -372,7 +372,7 @@ PyArray_LegacyCanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to, Py_ssize_t ppos = 0; PyObject *tuple; PyArray_Descr *field; - PyDict_Next(lfrom->fields, &ppos, NULL, &tuple); + PyDict_Next(lfrom->fields, &ppos, NULL, &tuple); // noqa: borrowed-ref OK field = (PyArray_Descr *)PyTuple_GET_ITEM(tuple, 0); /* * For a subarray, we need to get the underlying type; diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index 7483448e632b..12f25534f1d0 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -1351,7 +1351,7 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view) npy_intp offset; /* get the field offset and dtype */ - tup = PyDict_GetItemWithError(PyDataType_FIELDS(PyArray_DESCR(arr)), ind); + 
tup = PyDict_GetItemWithError(PyDataType_FIELDS(PyArray_DESCR(arr)), ind); // noqa: borrowed-ref OK if (tup == NULL && PyErr_Occurred()) { return 0; } diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 58a554dc40be..50f7f5f3c73b 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -1005,7 +1005,7 @@ any_array_ufunc_overrides(PyObject *args, PyObject *kwds) if (nin < 0) { return -1; } - fast = PySequence_Fast(args, "Could not convert object to sequence"); + fast = PySequence_Fast(args, "Could not convert object to sequence"); // noqa: borrowed-ref - manual fix needed if (fast == NULL) { return -1; } @@ -1033,7 +1033,7 @@ any_array_ufunc_overrides(PyObject *args, PyObject *kwds) } Py_DECREF(out_kwd_obj); /* check where if it exists */ - where_obj = PyDict_GetItemWithError(kwds, npy_interned_str.where); + where_obj = PyDict_GetItemWithError(kwds, npy_interned_str.where); // noqa: borrowed-ref OK if (where_obj == NULL) { if (PyErr_Occurred()) { return -1; @@ -1115,7 +1115,7 @@ array_function(PyArrayObject *NPY_UNUSED(self), PyObject *c_args, PyObject *c_kw PyErr_SetString(PyExc_TypeError, "kwargs must be a dict."); return NULL; } - types = PySequence_Fast( + types = PySequence_Fast( // noqa: borrowed-ref - manual fix needed types, "types argument to ndarray.__array_function__ must be iterable"); if (types == NULL) { @@ -1566,7 +1566,7 @@ _deepcopy_call(char *iptr, char *optr, PyArray_Descr *dtype, PyArray_Descr *new; int offset, res; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(dtype), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(dtype), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -1733,7 +1733,7 @@ _setlist_pkl(PyArrayObject *self, PyObject *list) return -1; } while(iter->index < iter->size) { - theobject = PyList_GET_ITEM(list, iter->index); + theobject = PyList_GET_ITEM(list, iter->index); // noqa: borrowed-ref OK setitem(theobject, iter->dataptr, self); PyArray_ITER_NEXT(iter); } diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index e80c6c0cd45c..b8ade23b6d76 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -2764,7 +2764,7 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) npy_intp i, size; PyObject *item; - obj = PySequence_Fast(obj, "the subscripts for each operand must " + obj = PySequence_Fast(obj, "the subscripts for each operand must " // noqa: borrowed-ref - manual fix needed "be a list or a tuple"); if (obj == NULL) { return -1; diff --git a/numpy/_core/src/multiarray/nditer_pywrap.c b/numpy/_core/src/multiarray/nditer_pywrap.c index 27c392db8720..0af2ee408b4b 100644 --- a/numpy/_core/src/multiarray/nditer_pywrap.c +++ b/numpy/_core/src/multiarray/nditer_pywrap.c @@ -588,7 +588,7 @@ npyiter_prepare_ops(PyObject *op_in, PyObject **out_owner, PyObject ***out_objs) { /* Take ownership of op_in (either a tuple/list or single element): */ if (PyTuple_Check(op_in) || PyList_Check(op_in)) { - PyObject *seq = PySequence_Fast(op_in, "failed accessing item list"); + PyObject *seq = PySequence_Fast(op_in, "failed accessing item list"); // noqa: borrowed-ref - manual fix needed if (op_in == NULL) { Py_DECREF(op_in); return -1; diff --git a/numpy/_core/src/multiarray/refcount.c b/numpy/_core/src/multiarray/refcount.c index 571b50372684..ac70f38f39a5 100644 --- 
a/numpy/_core/src/multiarray/refcount.c +++ b/numpy/_core/src/multiarray/refcount.c @@ -184,7 +184,7 @@ PyArray_Item_INCREF(char *data, PyArray_Descr *descr) int offset; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(descr), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(descr), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -246,7 +246,7 @@ PyArray_Item_XDECREF(char *data, PyArray_Descr *descr) int offset; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(descr), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(descr), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -480,7 +480,7 @@ _fill_with_none(char *optr, PyArray_Descr *dtype) int offset; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(dtype), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(dtype), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } diff --git a/numpy/_core/src/multiarray/textreading/rows.c b/numpy/_core/src/multiarray/textreading/rows.c index c459fa826e53..e5a294ecb01d 100644 --- a/numpy/_core/src/multiarray/textreading/rows.c +++ b/numpy/_core/src/multiarray/textreading/rows.c @@ -61,7 +61,7 @@ create_conv_funcs( Py_ssize_t pos = 0; int error = 0; Py_BEGIN_CRITICAL_SECTION(converters); - while (PyDict_Next(converters, &pos, &key, &value)) { + while (PyDict_Next(converters, &pos, &key, &value)) { // noqa: borrowed-ref OK Py_ssize_t column = PyNumber_AsSsize_t(key, PyExc_IndexError); if (column == -1 && PyErr_Occurred()) { PyErr_Format(PyExc_TypeError, diff --git a/numpy/_core/src/multiarray/usertypes.c b/numpy/_core/src/multiarray/usertypes.c index 445f7ad7fe67..9d026f32044f 100644 --- a/numpy/_core/src/multiarray/usertypes.c +++ b/numpy/_core/src/multiarray/usertypes.c @@ -344,7 +344,7 @@ static int _warn_if_cast_exists_already( if (to_DType == NULL) { return -1; } - PyObject *cast_impl = PyDict_GetItemWithError( + PyObject *cast_impl = PyDict_GetItemWithError( // noqa: borrowed-ref OK NPY_DT_SLOTS(NPY_DTYPE(descr))->castingimpls, (PyObject *)to_DType); Py_DECREF(to_DType); if (cast_impl == NULL) { diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index fd9ae9f1e41d..1d2c3edbd3b9 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -1368,7 +1368,7 @@ _parse_axes_arg(PyUFuncObject *ufunc, int op_core_num_dims[], PyObject *axes, * Get axes tuple for operand. If not a tuple already, make it one if * there is only one axis (its content is checked later). */ - op_axes_tuple = PyList_GET_ITEM(axes, iop); + op_axes_tuple = PyList_GET_ITEM(axes, iop); // noqa: borrowed-ref - manual fix needed if (PyTuple_Check(op_axes_tuple)) { if (PyTuple_Size(op_axes_tuple) != op_ncore) { /* must have been a tuple with too many entries. 
*/ @@ -4944,7 +4944,7 @@ PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc, function, arg_typenums, data); if (result == 0) { - cobj = PyDict_GetItemWithError(ufunc->userloops, key); + cobj = PyDict_GetItemWithError(ufunc->userloops, key); // noqa: borrowed-ref OK if (cobj == NULL && PyErr_Occurred()) { result = -1; } @@ -5075,7 +5075,7 @@ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, */ int add_new_loop = 1; for (Py_ssize_t j = 0; j < PyList_GET_SIZE(ufunc->_loops); j++) { - PyObject *item = PyList_GET_ITEM(ufunc->_loops, j); + PyObject *item = PyList_GET_ITEM(ufunc->_loops, j); // noqa: borrowed-ref OK PyObject *existing_tuple = PyTuple_GET_ITEM(item, 0); int cmp = PyObject_RichCompareBool(existing_tuple, signature_tuple, Py_EQ); @@ -5117,7 +5117,7 @@ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, funcdata->nargs = 0; /* Get entry for this user-defined type*/ - cobj = PyDict_GetItemWithError(ufunc->userloops, key); + cobj = PyDict_GetItemWithError(ufunc->userloops, key); // noqa: borrowed-ref OK if (cobj == NULL && PyErr_Occurred()) { goto fail; } diff --git a/numpy/_core/src/umath/ufunc_type_resolution.c b/numpy/_core/src/umath/ufunc_type_resolution.c index 95670efb936f..8d617f3ddc6c 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.c +++ b/numpy/_core/src/umath/ufunc_type_resolution.c @@ -1455,7 +1455,7 @@ find_userloop(PyUFuncObject *ufunc, if (key == NULL) { return -1; } - obj = PyDict_GetItemWithError(ufunc->userloops, key); + obj = PyDict_GetItemWithError(ufunc->userloops, key); // noqa: borrowed-ref - manual fix needed Py_DECREF(key); if (obj == NULL && PyErr_Occurred()){ return -1; @@ -1742,7 +1742,7 @@ linear_search_userloop_type_resolver(PyUFuncObject *self, if (key == NULL) { return -1; } - obj = PyDict_GetItemWithError(self->userloops, key); + obj = PyDict_GetItemWithError(self->userloops, key); // noqa: borrowed-ref - manual fix needed Py_DECREF(key); if (obj == NULL && PyErr_Occurred()){ return -1; @@ -1813,7 +1813,7 @@ type_tuple_userloop_type_resolver(PyUFuncObject *self, if (key == NULL) { return -1; } - obj = PyDict_GetItemWithError(self->userloops, key); + obj = PyDict_GetItemWithError(self->userloops, key); // noqa: borrowed-ref - manual fix needed Py_DECREF(key); if (obj == NULL && PyErr_Occurred()){ return -1; diff --git a/numpy/_core/src/umath/umathmodule.c b/numpy/_core/src/umath/umathmodule.c index e5cf2cf8acb3..3efb02bd4a49 100644 --- a/numpy/_core/src/umath/umathmodule.c +++ b/numpy/_core/src/umath/umathmodule.c @@ -267,11 +267,11 @@ int initumath(PyObject *m) PyModule_AddObject(m, "NZERO", PyFloat_FromDouble(NPY_NZERO)); PyModule_AddObject(m, "NAN", PyFloat_FromDouble(NPY_NAN)); - s = PyDict_GetItemString(d, "divide"); + s = PyDict_GetItemString(d, "divide"); // noqa: borrowed-ref OK PyDict_SetItemString(d, "true_divide", s); - s = PyDict_GetItemString(d, "conjugate"); - s2 = PyDict_GetItemString(d, "remainder"); + s = PyDict_GetItemString(d, "conjugate"); // noqa: borrowed-ref OK + s2 = PyDict_GetItemString(d, "remainder"); // noqa: borrowed-ref OK /* Setup the array object's numerical structures with appropriate ufuncs in d*/ diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c index 5c2b4bdf0931..d6664d6bdfb7 100644 --- a/numpy/f2py/src/fortranobject.c +++ b/numpy/f2py/src/fortranobject.c @@ -47,7 +47,7 @@ F2PySwapThreadLocalCallbackPtr(char *key, void *ptr) "failed"); } - value = PyDict_GetItemString(local_dict, key); + value = PyDict_GetItemString(local_dict, key); // noqa: borrowed-ref OK if (value != NULL) { prev = 
PyLong_AsVoidPtr(value); if (PyErr_Occurred()) { @@ -87,7 +87,7 @@ F2PyGetThreadLocalCallbackPtr(char *key) "F2PyGetThreadLocalCallbackPtr: PyThreadState_GetDict failed"); } - value = PyDict_GetItemString(local_dict, key); + value = PyDict_GetItemString(local_dict, key); // noqa: borrowed-ref OK if (value != NULL) { prev = PyLong_AsVoidPtr(value); if (PyErr_Occurred()) { @@ -365,7 +365,7 @@ fortran_getattr(PyFortranObject *fp, char *name) if (fp->dict != NULL) { // python 3.13 added PyDict_GetItemRef #if PY_VERSION_HEX < 0x030D0000 - PyObject *v = _PyDict_GetItemStringWithError(fp->dict, name); + PyObject *v = _PyDict_GetItemStringWithError(fp->dict, name); // noqa: borrowed-ref OK if (v == NULL && PyErr_Occurred()) { return NULL; } @@ -822,7 +822,7 @@ get_elsize(PyObject *obj) { } else if (PyUnicode_Check(obj)) { return PyUnicode_GET_LENGTH(obj); } else if (PySequence_Check(obj)) { - PyObject* fast = PySequence_Fast(obj, "f2py:fortranobject.c:get_elsize"); + PyObject* fast = PySequence_Fast(obj, "f2py:fortranobject.c:get_elsize"); // noqa: borrowed-ref OK if (fast != NULL) { Py_ssize_t i, n = PySequence_Fast_GET_SIZE(fast); int sz, elsize = 0; diff --git a/tools/ci/check_c_api_usage.sh b/tools/ci/check_c_api_usage.sh new file mode 100644 index 000000000000..9f4203284823 --- /dev/null +++ b/tools/ci/check_c_api_usage.sh @@ -0,0 +1,91 @@ +#!/usr/bin/env bash +set -e + +# List of suspicious function calls: +SUSPICIOUS_FUNCS=( + "PyList_GetItem" + "PyDict_GetItem" + "PyDict_GetItemWithError" + "PyDict_GetItemString" + "PyDict_SetDefault" + "PyDict_Next" + "PyWeakref_GetObject" + "PyWeakref_GET_OBJECT" + "PyList_GET_ITEM" + "_PyDict_GetItemStringWithError" + "PySequence_Fast" +) + +# Find all C/C++ source files in the repo +ALL_FILES=$(find numpy -type f \( -name "*.c" -o -name "*.h" -o -name "*.c.src" -o -name "*.cpp" \) ! -path "*/pythoncapi-compat/*") + +# For debugging: print out file count +echo "Scanning $(echo "$ALL_FILES" | wc -l) C/C++ source files..." + +# Prepare a result file +mkdir -p .tmp +OUTPUT=$(mktemp .tmp/c_api_usage_report.XXXXXX.txt) +echo -e "Running Suspicious C API usage report workflow...\n" > $OUTPUT + +FAIL=0 + +# Scan each changed file +for file in $ALL_FILES; do + + for func in "${SUSPICIOUS_FUNCS[@]}"; do + # -n : show line number + # -P : perl-style boundaries + # (?<![A-Za-z0-9_]) : skip matches embedded in longer identifiers + matches=$(grep -nP "(?<![A-Za-z0-9_])$func\s*\(" "$file" | grep -v "noqa: borrowed-ref" || true) + + if [[ -n "$matches" ]]; then + while IFS= read -r line; do + echo "Found suspicious use of $func in $file:" >> "$OUTPUT" + echo " -> $line" >> "$OUTPUT" + echo "Recommendation:" >> "$OUTPUT" + echo "If this use is intentional and safe, add '// noqa: borrowed-ref OK' on the same line to silence this warning." >> "$OUTPUT" + echo "Otherwise, consider replacing $func with a thread-safe API function." >> "$OUTPUT" + echo "" >> "$OUTPUT" + FAIL=1 + done <<< "$matches" + fi + done +done + +if [[ $FAIL -eq 1 ]]; then + echo "C API borrow-ref linter found issues." +else + echo "C API borrow-ref linter found no issues." > $OUTPUT +fi + +cat "$OUTPUT" +exit "$FAIL" diff --git a/tools/linter.py b/tools/linter.py index 1ce9ca763343..4b5115c97a2b 100644 --- a/tools/linter.py +++ b/tools/linter.py @@ -18,6 +18,7 @@ def run_ruff(self, fix: bool) -> tuple[int, str]: Unlike pycodestyle, ruff by itself is not capable of limiting its output to the given diff.
""" + print("Running Ruff Check...") command = ["ruff", "check"] if fix: command.append("--fix") @@ -31,12 +32,35 @@ def run_ruff(self, fix: bool) -> tuple[int, str]: return res.returncode, res.stdout def run_lint(self, fix: bool) -> None: - retcode, errors = self.run_ruff(fix) - errors and print(errors) + # Ruff Linter + retcode, ruff_errors = self.run_ruff(fix) + ruff_errors and print(ruff_errors) + + if retcode: + sys.exit(retcode) + + # C API Borrowed-ref Linter + retcode, c_API_errors = self.run_check_c_api() + c_API_errors and print(c_API_errors) sys.exit(retcode) + def run_check_c_api(self) -> tuple[int, str]: + # Running borrowed ref checker + print("Running C API borrow-reference linter...") + borrowed_ref_script = os.path.join(self.repository_root, "tools", "ci", + "check_c_api_usage.sh") + borrowed_res = subprocess.run( + ["bash", borrowed_ref_script], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + encoding="utf-8", + ) + + # Exit with non-zero if C API Check fails + return borrowed_res.returncode, borrowed_res.stdout + if __name__ == "__main__": parser = ArgumentParser() From d02611ad99488637b48f4be203f297ea7b29c95d Mon Sep 17 00:00:00 2001 From: hannah Date: Thu, 17 Jul 2025 05:43:20 -0400 Subject: [PATCH 0220/1018] Doc: use different data in stacked arrays (#29390) Improve docstring examples for dstack and column_stack --- numpy/lib/_shape_base_impl.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/numpy/lib/_shape_base_impl.py b/numpy/lib/_shape_base_impl.py index 89b86c80964d..c44d603611ee 100644 --- a/numpy/lib/_shape_base_impl.py +++ b/numpy/lib/_shape_base_impl.py @@ -653,11 +653,11 @@ def column_stack(tup): -------- >>> import numpy as np >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) + >>> b = np.array((4,5,6)) >>> np.column_stack((a,b)) - array([[1, 2], - [2, 3], - [3, 4]]) + array([[1, 4], + [2, 5], + [3, 6]]) """ arrays = [] @@ -713,18 +713,18 @@ def dstack(tup): -------- >>> import numpy as np >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) + >>> b = np.array((4,5,6)) >>> np.dstack((a,b)) - array([[[1, 2], - [2, 3], - [3, 4]]]) + array([[[1, 4], + [2, 5], + [3, 6]]]) >>> a = np.array([[1],[2],[3]]) - >>> b = np.array([[2],[3],[4]]) + >>> b = np.array([[4],[5],[6]]) >>> np.dstack((a,b)) - array([[[1, 2]], - [[2, 3]], - [[3, 4]]]) + array([[[1, 4]], + [[2, 5]], + [[3, 6]]]) """ arrs = atleast_3d(*tup) From bd5cb2a293c02b8a50864cec1a751afc54f2ff47 Mon Sep 17 00:00:00 2001 From: Dan Raviv Date: Sat, 19 Jul 2025 23:55:14 -0700 Subject: [PATCH 0221/1018] MNT: Cleanup infs handling in np.testing assertion utilities (#29321) * TST: Add failing test showing shape mismatch issue Recently, #29112 added showing first mismatches indices, but assert_array_almost_equal.compare trips it up by returning a shape different from its input, causing an IndexError: ``` <...> if invalids.ndim != 0: if flagged.ndim > 0: > positions = np.argwhere(np.asarray(~flagged))[invalids] E IndexError: boolean index did not match indexed array along axis 0; size of axis is 3 but size of corresponding boolean axis is 2 ``` (traceback shown using pytest --full-trace) * MNT: Remove assert_array_almost_equal old infs logic, but tests fail A nice cleanup (newer, similar inf handling already exists now in assert_array_compare), and resolves the shape mismatch issue. 
However, the removed logic was handling complex infs while the new one isn't, causing the new test and an existing one (TestInterp::test_complex_interp) to now fail with RuntimeWarnings attempting to subtract the complex infs. * MNT: assert_array_compare handles all inf values assert_array_compare now tests all inf values for matching position and value, including complex infs. Fixes the failing tests. * TST: Test array_allclose behavior for inf values The behavior for real infs is the same as before. For complex infs, the tests demonstrate that the behavior for mismatching values is now cleaner, showing a concise error message vs. previously displaying nan max errors. For complex infs with matching values, the behavior is the same as before, accepting them as equal (although internally they would now be filtered ahead of being passed to isclose, like real infs already had been). * TST: assert_allclose behavior with nans that are also infs * MNT: Extract robust_any_difference() helper to DRY --- numpy/testing/_private/utils.py | 84 ++++++++++++++++++------------- numpy/testing/tests/test_utils.py | 49 ++++++++++++++++++ 2 files changed, 98 insertions(+), 35 deletions(-) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index e868239e62ab..592940f3e328 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -25,7 +25,7 @@ import numpy as np import numpy.linalg._umath_linalg -from numpy import isfinite, isinf, isnan +from numpy import isfinite, isnan from numpy._core import arange, array, array_repr, empty, float32, intp, isnat, ndarray __all__ = [ @@ -758,17 +758,7 @@ def istime(x): def isvstring(x): return x.dtype.char == "T" - def func_assert_same_pos(x, y, func=isnan, hasval='nan'): - """Handling nan/inf. - - Combine results of running func on x and y, checking that they are True - at the same locations. - - """ - __tracebackhide__ = True # Hide traceback for py.test - - x_id = func(x) - y_id = func(y) + def robust_any_difference(x, y): # We include work-arounds here to handle three types of slightly # pathological ndarray subclasses: # (1) all() on fully masked arrays returns np.ma.masked, so we use != True # (2) __eq__ on some ndarray subclasses returns Python booleans # (3) subclasses with bare-bones __array_function__ implementations may # not implement np.all(), so favor using the .all() method # We are not committed to supporting cases (2) and (3), but it's nice to # support them if possible. - result = x_id == y_id + result = x == y if not hasattr(result, "all") or not callable(result.all): result = np.bool(result) - if result.all() != True: + return result.all() != True + + def func_assert_same_pos(x, y, func=isnan, hasval='nan'): + """Handling nan/inf. + + Combine results of running func on x and y, checking that they are True + at the same locations.
+ + """ + __tracebackhide__ = True # Hide traceback for py.test + + x_id = func(x) + y_id = func(y) + if robust_any_difference(x_id, y_id): msg = build_err_msg( [x, y], err_msg + '\n%s location mismatch:' @@ -804,6 +807,29 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): else: return y_id + def assert_same_inf_values(x, y, infs_mask): + """ + Verify all inf values match in the two arrays + """ + __tracebackhide__ = True # Hide traceback for py.test + + if not infs_mask.any(): + return + if x.ndim > 0 and y.ndim > 0: + x = x[infs_mask] + y = y[infs_mask] + else: + assert infs_mask.all() + + if robust_any_difference(x, y): + msg = build_err_msg( + [x, y], + err_msg + '\ninf values mismatch:', + verbose=verbose, header=header, + names=names, + precision=precision) + raise AssertionError(msg) + try: if strict: cond = x.shape == y.shape and x.dtype == y.dtype @@ -828,12 +854,15 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): flagged = func_assert_same_pos(x, y, func=isnan, hasval='nan') if equal_inf: - flagged |= func_assert_same_pos(x, y, - func=lambda xy: xy == +inf, - hasval='+inf') - flagged |= func_assert_same_pos(x, y, - func=lambda xy: xy == -inf, - hasval='-inf') + # If equal_nan=True, skip comparing nans below for equality if they are + # also infs (e.g. inf+nanj) since that would always fail. + isinf_func = lambda xy: np.logical_and(np.isinf(xy), np.invert(flagged)) + infs_mask = func_assert_same_pos( + x, y, + func=isinf_func, + hasval='inf') + assert_same_inf_values(x, y, infs_mask) + flagged |= infs_mask elif istime(x) and istime(y): # If one is datetime64 and the other timedelta64 there is no point @@ -1183,24 +1212,9 @@ def assert_array_almost_equal(actual, desired, decimal=6, err_msg='', """ __tracebackhide__ = True # Hide traceback for py.test from numpy._core import number, result_type - from numpy._core.fromnumeric import any as npany from numpy._core.numerictypes import issubdtype def compare(x, y): - try: - if npany(isinf(x)) or npany(isinf(y)): - xinfid = isinf(x) - yinfid = isinf(y) - if not (xinfid == yinfid).all(): - return False - # if one item, x and y is +- inf - if x.size == y.size == 1: - return x == y - x = x[~xinfid] - y = y[~yinfid] - except (TypeError, NotImplementedError): - pass - # make sure y is an inexact type to avoid abs(MIN_INT); will cause # casting of x later. dtype = result_type(y, 1.) diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index 243b8d420936..07f2c9da3005 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -612,6 +612,18 @@ def test_inf(self): assert_raises(AssertionError, lambda: self._assert_func(a, b)) + def test_complex_inf(self): + a = np.array([np.inf + 1.j, 2. + 1.j, 3. + 1.j]) + b = a.copy() + self._assert_func(a, b) + b[1] = 3. 
+ 1.j + expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n' + 'Mismatch at index:\n' + ' [1]: (2+1j) (ACTUAL), (3+1j) (DESIRED)\n' + 'Max absolute difference among violations: 1.\n') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(a, b) + def test_subclass(self): a = np.array([[1., 2.], [3., 4.]]) b = np.ma.masked_array([[1., 2.], [0., 4.]], @@ -1283,11 +1295,21 @@ def test_equal_nan(self): # Should not raise: assert_allclose(a, b, equal_nan=True) + a = np.array([complex(np.nan, np.inf)]) + b = np.array([complex(np.nan, np.inf)]) + assert_allclose(a, b, equal_nan=True) + b = np.array([complex(np.nan, -np.inf)]) + assert_allclose(a, b, equal_nan=True) + def test_not_equal_nan(self): a = np.array([np.nan]) b = np.array([np.nan]) assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False) + a = np.array([complex(np.nan, np.inf)]) + b = np.array([complex(np.nan, np.inf)]) + assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False) + def test_equal_nan_default(self): # Make sure equal_nan default behavior remains unchanged. (All # of these functions use assert_array_compare under the hood.) @@ -1336,6 +1358,33 @@ def test_strict(self): with pytest.raises(AssertionError): assert_allclose(x, x.astype(np.float32), strict=True) + def test_infs(self): + a = np.array([np.inf]) + b = np.array([np.inf]) + assert_allclose(a, b) + + b = np.array([3.]) + expected_msg = 'inf location mismatch:' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) + + b = np.array([-np.inf]) + expected_msg = 'inf values mismatch:' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) + b = np.array([complex(np.inf, 1.)]) + expected_msg = 'inf values mismatch:' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) + + a = np.array([complex(np.inf, 1.)]) + b = np.array([complex(np.inf, 1.)]) + assert_allclose(a, b) + + b = np.array([complex(np.inf, 2.)]) + expected_msg = 'inf values mismatch:' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) class TestArrayAlmostEqualNulp: From 96cf781e226900ba88592f3a20145d6af63e863a Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 15 May 2025 11:59:22 +0200 Subject: [PATCH 0222/1018] STY: Apply ruff/Perflint rule PERF102 PERF102 When using only the values of a dict use the `values()` method PERF102 When using only the keys of a dict use the `keys()` method --- benchmarks/benchmarks/bench_ufunc.py | 4 ++-- numpy/f2py/func2subr.py | 2 +- numpy/ma/testutils.py | 8 ++++---- numpy/testing/_private/utils.py | 6 +++--- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py index 7dc321ac2980..155e7d4f7421 100644 --- a/benchmarks/benchmarks/bench_ufunc.py +++ b/benchmarks/benchmarks/bench_ufunc.py @@ -53,7 +53,7 @@ def setup(self, ufuncname): except AttributeError: raise NotImplementedError self.args = [] - for _, aarg in get_squares_().items(): + for aarg in get_squares_().values(): arg = (aarg,) * 1 # no nin try: self.afdn(*arg) @@ -100,7 +100,7 @@ def setup(self, ufuncname): except AttributeError: raise NotImplementedError self.args = [] - for _, aarg in get_squares_().items(): + for aarg in get_squares_().values(): arg = (aarg,) * self.ufn.nin try: self.ufn(*arg) diff --git a/numpy/f2py/func2subr.py 
b/numpy/f2py/func2subr.py index 0a875006ed75..09b67f7c3085 100644 --- a/numpy/f2py/func2subr.py +++ b/numpy/f2py/func2subr.py @@ -77,7 +77,7 @@ def var2fixfortran(vars, a, fa=None, f90mode=None): def useiso_c_binding(rout): useisoc = False - for key, value in rout['vars'].items(): + for value in rout['vars'].values(): kind_value = value.get('kindselector', {}).get('kind') if kind_value in isoc_kindmap: return True diff --git a/numpy/ma/testutils.py b/numpy/ma/testutils.py index bffcc34b759c..4840b758663c 100644 --- a/numpy/ma/testutils.py +++ b/numpy/ma/testutils.py @@ -124,7 +124,7 @@ def assert_equal(actual, desired, err_msg=''): for k, i in desired.items(): if k not in actual: raise AssertionError(f"{k} not in {actual}") - assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}') + assert_equal(actual[k], i, f'key={k!r}\n{err_msg}') return # Case #2: lists ..... if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): @@ -162,12 +162,12 @@ def fail_if_equal(actual, desired, err_msg='',): for k, i in desired.items(): if k not in actual: raise AssertionError(repr(k)) - fail_if_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}') + fail_if_equal(actual[k], i, f'key={k!r}\n{err_msg}') return if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): fail_if_equal(len(actual), len(desired), err_msg) - for k in range(len(desired)): - fail_if_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}') + for k, i in enumerate(desired): + fail_if_equal(actual[k], i, f'item={k!r}\n{err_msg}') return if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray): return fail_if_array_equal(actual, desired, err_msg) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 592940f3e328..e7bc76c584d6 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -367,13 +367,13 @@ def assert_equal(actual, desired, err_msg='', verbose=True, *, strict=False): for k, i in desired.items(): if k not in actual: raise AssertionError(repr(k)) - assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}', + assert_equal(actual[k], i, f'key={k!r}\n{err_msg}', verbose) return if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): assert_equal(len(actual), len(desired), err_msg, verbose) - for k in range(len(desired)): - assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}', + for k, i in enumerate(desired): + assert_equal(actual[k], i, f'item={k!r}\n{err_msg}', verbose) return from numpy import imag, iscomplexobj, real From e8d5153caceed246c378de72e5ebde15b1fc0996 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 15 May 2025 12:00:41 +0200 Subject: [PATCH 0223/1018] STY: Apply ruff/Perflint rule PERF403 PERF403 Use `dict.update` instead of a for-loop PERF403 Use a dictionary comprehension instead of a for-loop --- numpy/_core/code_generators/genapi.py | 3 +-- numpy/lib/introspect.py | 9 +++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/_core/code_generators/genapi.py b/numpy/_core/code_generators/genapi.py index caeaf7a08532..b0036def7300 100644 --- a/numpy/_core/code_generators/genapi.py +++ b/numpy/_core/code_generators/genapi.py @@ -466,8 +466,7 @@ def _key(x): def merge_api_dicts(dicts): ret = {} for d in dicts: - for k, v in d.items(): - ret[k] = v + ret.update(dict(d.items())) return ret diff --git a/numpy/lib/introspect.py b/numpy/lib/introspect.py index 5526a332fead..7ea621b823c8 100644 
--- a/numpy/lib/introspect.py +++ b/numpy/lib/introspect.py @@ -80,13 +80,14 @@ def opt_func_info(func_name=None, signature=None): sig_pattern = re.compile(signature) matching_sigs = {} for k, v in matching_funcs.items(): - matching_chars = {} - for chars, targets in v.items(): + matching_chars = { + chars: targets + for chars, targets in v.items() if any( sig_pattern.search(c) or sig_pattern.search(dtype(c).name) for c in chars - ): - matching_chars[chars] = targets + ) + } if matching_chars: matching_sigs[k] = matching_chars else: From 4eeace478850ade16d74b9fb0db96b5342f73090 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 15 May 2025 12:03:27 +0200 Subject: [PATCH 0224/1018] MNT: Enforce ruff/Perflint rules (PERF) --- ruff.toml | 54 ++++++++++++++++++++++++++++-------------------------- 1 file changed, 28 insertions(+), 26 deletions(-) diff --git a/ruff.toml b/ruff.toml index ed6026ed0665..98b6b0c7244d 100644 --- a/ruff.toml +++ b/ruff.toml @@ -32,6 +32,7 @@ extend-select = [ "FLY", "I", "PD", + "PERF", "E", "W", "PGH", @@ -39,32 +40,33 @@ extend-select = [ "UP", ] ignore = [ - "B006", # Do not use mutable data structures for argument defaults - "B007", # Loop control variable not used within loop body - "B011", # Do not `assert False` (`python -O` removes these calls), raise `AssertionError()` - "B023", # Function definition does not bind loop variable - "B028", # No explicit `stacklevel` keyword argument found - "B904", # Within an `except` clause distinguish raised exceptions from errors in exception handling - "B905", #`zip()` without an explicit `strict=` parameter - "C408", # Unnecessary `dict()` call (rewrite as a literal) - "ISC002", # Implicitly concatenated string literals over multiple lines - "PIE790", # Unnecessary `pass` statement - "PD901", # Avoid using the generic variable name `df` for DataFrames - "E241", # Multiple spaces after comma - "E265", # Block comment should start with `# ` - "E266", # Too many leading `#` before block comment - "E302", # TODO: Expected 2 blank lines, found 1 - "E402", # Module level import not at top of file - "E712", # Avoid equality comparisons to `True` or `False` - "E721", # TODO: Use `is` and `is not` for type comparisons, or `isinstance()` for isinstance check - "E731", # Do not assign a `lambda` expression, use a `def` - "E741", # Ambiguous variable name - "F403", # `from ... 
import *` used; unable to detect undefined names - "F405", # may be undefined, or defined from star imports - "F821", # Undefined name - "F841", # Local variable is assigned to but never used - "UP015", # Unnecessary mode argument - "UP031", # TODO: Use format specifiers instead of percent format + "B006", # Do not use mutable data structures for argument defaults + "B007", # Loop control variable not used within loop body + "B011", # Do not `assert False` (`python -O` removes these calls), raise `AssertionError()` + "B023", # Function definition does not bind loop variable + "B028", # No explicit `stacklevel` keyword argument found + "B904", # Within an `except` clause distinguish raised exceptions from errors in exception handling + "B905", #`zip()` without an explicit `strict=` parameter + "C408", # Unnecessary `dict()` call (rewrite as a literal) + "ISC002", # Implicitly concatenated string literals over multiple lines + "PIE790", # Unnecessary `pass` statement + "PD901", # Avoid using the generic variable name `df` for DataFrames + "PERF401", # PERF401 Use a list comprehension to create a transformed list + "E241", # Multiple spaces after comma + "E265", # Block comment should start with `# ` + "E266", # Too many leading `#` before block comment + "E302", # TODO: Expected 2 blank lines, found 1 + "E402", # Module level import not at top of file + "E712", # Avoid equality comparisons to `True` or `False` + "E721", # TODO: Use `is` and `is not` for type comparisons, or `isinstance()` for isinstance check + "E731", # Do not assign a `lambda` expression, use a `def` + "E741", # Ambiguous variable name + "F403", # `from ... import *` used; unable to detect undefined names + "F405", # may be undefined, or defined from star imports + "F821", # Undefined name + "F841", # Local variable is assigned to but never used + "UP015" , # Unnecessary mode argument + "UP031", # TODO: Use format specifiers instead of percent format ] [lint.per-file-ignores] From adedeb093f0254c27e46da84c1139c1f702f6bab Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 15 May 2025 23:55:18 +0200 Subject: [PATCH 0225/1018] STY: better solution for PERF102 Co-authored-by: Pieter Eendebak --- numpy/ma/testutils.py | 12 ++++++------ numpy/testing/_private/utils.py | 8 ++++---- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/numpy/ma/testutils.py b/numpy/ma/testutils.py index 4840b758663c..0df3b1757fd6 100644 --- a/numpy/ma/testutils.py +++ b/numpy/ma/testutils.py @@ -121,10 +121,10 @@ def assert_equal(actual, desired, err_msg=''): if not isinstance(actual, dict): raise AssertionError(repr(type(actual))) assert_equal(len(actual), len(desired), err_msg) - for k, i in desired.items(): + for k in desired: if k not in actual: raise AssertionError(f"{k} not in {actual}") - assert_equal(actual[k], i, f'key={k!r}\n{err_msg}') + assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}') return # Case #2: lists ..... 
     if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
@@ -159,15 +159,15 @@ def fail_if_equal(actual, desired, err_msg='',):
         if not isinstance(actual, dict):
             raise AssertionError(repr(type(actual)))
         fail_if_equal(len(actual), len(desired), err_msg)
-        for k, i in desired.items():
+        for k in desired:
             if k not in actual:
                 raise AssertionError(repr(k))
-            fail_if_equal(actual[k], i, f'key={k!r}\n{err_msg}')
+            fail_if_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}')
         return
     if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
         fail_if_equal(len(actual), len(desired), err_msg)
-        for k, i in enumerate(desired):
-            fail_if_equal(actual[k], i, f'item={k!r}\n{err_msg}')
+        for k in range(len(desired)):
+            fail_if_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}')
         return
     if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray):
         return fail_if_array_equal(actual, desired, err_msg)
diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py
index e7bc76c584d6..e4a74e69afb0 100644
--- a/numpy/testing/_private/utils.py
+++ b/numpy/testing/_private/utils.py
@@ -364,16 +364,16 @@ def assert_equal(actual, desired, err_msg='', verbose=True, *, strict=False):
         if not isinstance(actual, dict):
             raise AssertionError(repr(type(actual)))
         assert_equal(len(actual), len(desired), err_msg, verbose)
-        for k, i in desired.items():
+        for k in desired:
             if k not in actual:
                 raise AssertionError(repr(k))
-            assert_equal(actual[k], i, f'key={k!r}\n{err_msg}',
+            assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}',
                          verbose)
         return
     if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
         assert_equal(len(actual), len(desired), err_msg, verbose)
-        for k, i in enumerate(desired):
-            assert_equal(actual[k], i, f'item={k!r}\n{err_msg}',
+        for k in range(len(desired)):
+            assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}',
                          verbose)
         return
     from numpy import imag, iscomplexobj, real

From c56f86d6a1c54267164cf67407db1a95b0307b20 Mon Sep 17 00:00:00 2001
From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com>
Date: Thu, 29 May 2025 00:27:18 +0200
Subject: [PATCH 0226/1018] STY: take reviewer's comments into account

Co-authored-by: Joren Hammudoglu
---
 numpy/_core/code_generators/genapi.py | 2 +-
 numpy/lib/introspect.py               | 9 ++++-----
 ruff.toml                             | 2 +-
 3 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/numpy/_core/code_generators/genapi.py b/numpy/_core/code_generators/genapi.py
index b0036def7300..6a370a7dc3cd 100644
--- a/numpy/_core/code_generators/genapi.py
+++ b/numpy/_core/code_generators/genapi.py
@@ -466,7 +466,7 @@ def _key(x):
 def merge_api_dicts(dicts):
     ret = {}
     for d in dicts:
-        ret.update(dict(d.items()))
+        ret.update(d)
     return ret
 
 
diff --git a/numpy/lib/introspect.py b/numpy/lib/introspect.py
index 7ea621b823c8..816c79a669b9 100644
--- a/numpy/lib/introspect.py
+++ b/numpy/lib/introspect.py
@@ -80,14 +80,13 @@ def opt_func_info(func_name=None, signature=None):
         sig_pattern = re.compile(signature)
         matching_sigs = {}
         for k, v in matching_funcs.items():
-            matching_chars = {
-                chars: targets
-                for chars, targets in v.items()
+            matching_chars = {}
+            for chars, targets in v.items():
                 if any(
                     sig_pattern.search(c) or sig_pattern.search(dtype(c).name)
                     for c in chars
-                )
-            }
+                ):
+                    matching_chars[chars] = targets  # noqa: PERF403
             if matching_chars:
                 matching_sigs[k] = matching_chars
     else:
diff --git a/ruff.toml b/ruff.toml
index 98b6b0c7244d..4666efdf39bc 100644
--- a/ruff.toml
+++ 
b/ruff.toml @@ -51,7 +51,7 @@ ignore = [ "ISC002", # Implicitly concatenated string literals over multiple lines "PIE790", # Unnecessary `pass` statement "PD901", # Avoid using the generic variable name `df` for DataFrames - "PERF401", # PERF401 Use a list comprehension to create a transformed list + "PERF401", # Use a list comprehension to create a transformed list "E241", # Multiple spaces after comma "E265", # Block comment should start with `# ` "E266", # Too many leading `#` before block comment From 5a031d9b8822551711973e6ae220cd00817c9523 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 21 Jul 2025 10:44:47 -0600 Subject: [PATCH 0227/1018] ENH: avoid thread safety issues around uses of `PySequence_Fast` (#29394) * BUG: prevent mutating multiter arguments while processing them * MAINT: also add locking for array creation from array-likes * MAINT: add lock nditer argument with a critical section * MAINT: give NO_BRACKETS variants a label argument * MAINT: more critical sections * MAINT: apply suggestions from Sam * MAINT: fix linter and GIL-enabled build compilation * MAINT: simplify error paths * Update numpy/_core/src/multiarray/iterators.c Co-authored-by: Sebastian Berg --------- Co-authored-by: Sebastian Berg --- numpy/_core/src/common/npy_pycompat.h | 52 ++++++++++++++++ numpy/_core/src/multiarray/ctors.c | 59 ++++++++++--------- numpy/_core/src/multiarray/iterators.c | 13 ++-- numpy/_core/src/multiarray/methods.c | 25 ++++---- numpy/_core/src/multiarray/multiarraymodule.c | 31 +++++----- numpy/_core/src/multiarray/nditer_pywrap.c | 38 +++++++++--- numpy/_core/tests/test_multithreading.py | 47 +++++++++++++++ 7 files changed, 200 insertions(+), 65 deletions(-) diff --git a/numpy/_core/src/common/npy_pycompat.h b/numpy/_core/src/common/npy_pycompat.h index 769b90215f2b..f751af666524 100644 --- a/numpy/_core/src/common/npy_pycompat.h +++ b/numpy/_core/src/common/npy_pycompat.h @@ -6,4 +6,56 @@ #define Npy_HashDouble _Py_HashDouble +#ifdef Py_GIL_DISABLED +// Specialized version of critical section locking to safely use +// PySequence_Fast APIs without the GIL. For performance, the argument *to* +// PySequence_Fast() is provided to the macro, not the *result* of +// PySequence_Fast(), which would require an extra test to determine if the +// lock must be acquired. +// +// These are tweaked versions of macros defined in CPython in +// pycore_critical_section.h, originally added in CPython commit baf347d91643. +// They're defined in terms of the NPY_*_CRITICAL_SECTION_NO_BRACKETS to avoid +// repition and should behave identically to the versions in CPython. Once the +// macros are expanded, The only difference relative to those versions is the +// use of public C API symbols that are equivalent to the ones used in the +// corresponding CPython definitions. +#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(original) \ + { \ + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS( \ + original, npy_cs_fast) +#define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST() \ + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(npy_cs_fast) \ + } + +// These macros are more flexible than the versions in the public CPython C API, +// but that comes at a cost. Here are some differences and limitations: +// +// * cs_name is a named label for the critical section. If you must nest +// critical sections, do *not* use the same name for multiple nesting +// critical sections. +// * The beginning and ending macros must happen within the same scope +// and the compiler won't necessarily enforce that. 
+// * The macros ending critical sections accept a named label. The label +// must match the opening critical section. +#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(original, cs_name) \ + PyObject *_##cs_name##_orig_seq = (PyObject *)(original); \ + const int _##cs_name##_should_lock_cs = \ + PyList_CheckExact(_##cs_name##_orig_seq); \ + PyCriticalSection _##cs_name; \ + if (_##cs_name##_should_lock_cs) { \ + PyCriticalSection_Begin(&_##cs_name, _##cs_name##_orig_seq); \ + } +#define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(cs_name) \ + if (_##cs_name##_should_lock_cs) { \ + PyCriticalSection_End(&_##cs_name); \ + } +#else +#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(original, cs_name) +#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(original) { +#define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(cs_name) +#define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST() } +#endif + + #endif /* NUMPY_CORE_SRC_COMMON_NPY_PYCOMPAT_H_ */ diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 4f466677c57c..498fa78118b3 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -1539,7 +1539,7 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, * This is the main code to make a NumPy array from a Python * Object. It is called from many different places. */ - PyArrayObject *arr = NULL, *ret; + PyArrayObject *arr = NULL, *ret = NULL; PyArray_Descr *dtype = NULL; coercion_cache_obj *cache = NULL; int ndim = 0; @@ -1560,12 +1560,14 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, copy = 1; } + Py_BEGIN_CRITICAL_SECTION(op); + ndim = PyArray_DiscoverDTypeAndShape( op, NPY_MAXDIMS, dims, &cache, in_DType, in_descr, &dtype, copy, &was_copied_by__array__); if (ndim < 0) { - return NULL; + goto cleanup; } /* If the cache is NULL, then the object is considered a scalar */ @@ -1578,16 +1580,14 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, if (min_depth != 0 && ndim < min_depth) { PyErr_SetString(PyExc_ValueError, "object of too small depth for desired array"); - Py_DECREF(dtype); npy_free_coercion_cache(cache); - return NULL; + goto cleanup; } if (max_depth != 0 && ndim > max_depth) { PyErr_SetString(PyExc_ValueError, "object too deep for desired array"); - Py_DECREF(dtype); npy_free_coercion_cache(cache); - return NULL; + goto cleanup; } /* Got the correct parameters, but the cache may already hold the result */ @@ -1602,9 +1602,11 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, if (was_copied_by__array__ == 1) { flags = flags & ~NPY_ARRAY_ENSURECOPY; } - PyObject *res = PyArray_FromArray(arr, dtype, flags); + // PyArray_FromArray steals a reference to the dtype + Py_INCREF(dtype); + ret = (PyArrayObject *)PyArray_FromArray(arr, dtype, flags); npy_unlink_coercion_cache(cache); - return res; + goto cleanup; } else if (cache == NULL && PyArray_IsScalar(op, Void) && !(((PyVoidScalarObject *)op)->flags & NPY_ARRAY_OWNDATA) && @@ -1619,13 +1621,15 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, * provide a dtype (newtype is NULL). 
*/ assert(ndim == 0); - - return PyArray_NewFromDescrAndBase( + // PyArray_NewFromDescrAndBase steals a reference to the dtype + Py_INCREF(dtype); + ret = (PyArrayObject *)PyArray_NewFromDescrAndBase( &PyArray_Type, dtype, 0, NULL, NULL, ((PyVoidScalarObject *)op)->obval, ((PyVoidScalarObject *)op)->flags, NULL, op); + goto cleanup; } /* * If we got this far, we definitely have to create a copy, since we are @@ -1633,9 +1637,8 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, */ if (flags & NPY_ARRAY_ENSURENOCOPY) { PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg); - Py_DECREF(dtype); npy_free_coercion_cache(cache); - return NULL; + goto cleanup; } if (cache == NULL && in_descr != NULL && @@ -1662,16 +1665,18 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, * have a better solution at some point): * https://github.com/pandas-dev/pandas/issues/35481 */ - return PyArray_FromScalar(op, dtype); + // PyArray_FromScalar steals a reference to dtype + Py_INCREF(dtype); + ret = (PyArrayObject *)PyArray_FromScalar(op, dtype); + goto cleanup; } /* There was no array (or array-like) passed in directly. */ if (flags & NPY_ARRAY_WRITEBACKIFCOPY) { PyErr_SetString(PyExc_TypeError, "WRITEBACKIFCOPY used for non-array input."); - Py_DECREF(dtype); npy_free_coercion_cache(cache); - return NULL; + goto cleanup; } /* Create a new array and copy the data */ @@ -1681,8 +1686,7 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, flags&NPY_ARRAY_F_CONTIGUOUS, NULL); if (ret == NULL) { npy_free_coercion_cache(cache); - Py_DECREF(dtype); - return NULL; + goto cleanup; } if (ndim == PyArray_NDIM(ret)) { /* @@ -1699,12 +1703,10 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, assert(ndim == 0); if (PyArray_Pack(dtype, PyArray_BYTES(ret), op) < 0) { - Py_DECREF(dtype); - Py_DECREF(ret); - return NULL; + Py_CLEAR(ret); + goto cleanup; } - Py_DECREF(dtype); - return (PyObject *)ret; + goto cleanup; } assert(ndim != 0); assert(op == cache->converted_obj); @@ -1717,15 +1719,18 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, ((PyArrayObject_fields *)ret)->descr = dtype; } - int success = PyArray_AssignFromCache(ret, cache); + int succeed = PyArray_AssignFromCache(ret, cache); ((PyArrayObject_fields *)ret)->nd = out_ndim; ((PyArrayObject_fields *)ret)->descr = out_descr; - Py_DECREF(dtype); - if (success < 0) { - Py_DECREF(ret); - return NULL; + if (succeed < 0) { + Py_CLEAR(ret); } + +cleanup:; + + Py_XDECREF(dtype); + Py_END_CRITICAL_SECTION(); return (PyObject *)ret; } diff --git a/numpy/_core/src/multiarray/iterators.c b/numpy/_core/src/multiarray/iterators.c index d83f0d7a3ac3..f45328a2bb75 100644 --- a/numpy/_core/src/multiarray/iterators.c +++ b/numpy/_core/src/multiarray/iterators.c @@ -23,6 +23,7 @@ #include "item_selection.h" #include "lowlevel_strided_loops.h" #include "array_assign.h" +#include "npy_pycompat.h" #define NEWAXIS_INDEX -1 #define ELLIPSIS_INDEX -2 @@ -1476,6 +1477,7 @@ PyArray_MultiIterNew(int n, ...) 
return multiiter_new_impl(n, args_impl); } + static PyObject* arraymultiter_new(PyTypeObject *NPY_UNUSED(subtype), PyObject *args, PyObject *kwds) @@ -1488,18 +1490,19 @@ arraymultiter_new(PyTypeObject *NPY_UNUSED(subtype), PyObject *args, "keyword arguments not accepted."); return NULL; } - - fast_seq = PySequence_Fast(args, ""); // needed for pypy // noqa: borrowed-ref - manual fix needed + fast_seq = PySequence_Fast(args, ""); // noqa: borrowed-ref OK if (fast_seq == NULL) { return NULL; } + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(fast_seq) n = PySequence_Fast_GET_SIZE(fast_seq); if (n > NPY_MAXARGS) { - Py_DECREF(fast_seq); - return multiiter_wrong_number_of_args(); + ret = multiiter_wrong_number_of_args(); + } else { + ret = multiiter_new_impl(n, PySequence_Fast_ITEMS(fast_seq)); } - ret = multiiter_new_impl(n, PySequence_Fast_ITEMS(fast_seq)); Py_DECREF(fast_seq); + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST() return ret; } diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 50f7f5f3c73b..7a9f5d83a57c 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -997,26 +997,26 @@ any_array_ufunc_overrides(PyObject *args, PyObject *kwds) int i; int nin, nout; PyObject *out_kwd_obj; - PyObject *fast; - PyObject **in_objs, **out_objs, *where_obj; + PyObject **out_objs, *where_obj; /* check inputs */ nin = PyTuple_Size(args); if (nin < 0) { return -1; } - fast = PySequence_Fast(args, "Could not convert object to sequence"); // noqa: borrowed-ref - manual fix needed - if (fast == NULL) { - return -1; - } - in_objs = PySequence_Fast_ITEMS(fast); for (i = 0; i < nin; ++i) { - if (PyUFunc_HasOverride(in_objs[i])) { - Py_DECREF(fast); +#if defined(PYPY_VERSION) || defined(Py_LIMITED_API) + PyObject *obj = PyTuple_GetItem(args, i); + if (obj == NULL) { + return -1; + } +#else + PyObject *obj = PyTuple_GET_ITEM(args, i); +#endif + if (PyUFunc_HasOverride(obj)) { return 1; } } - Py_DECREF(fast); if (kwds == NULL) { return 0; } @@ -1115,14 +1115,15 @@ array_function(PyArrayObject *NPY_UNUSED(self), PyObject *c_args, PyObject *c_kw PyErr_SetString(PyExc_TypeError, "kwargs must be a dict."); return NULL; } - types = PySequence_Fast( // noqa: borrowed-ref - manual fix needed + types = PySequence_Fast( // noqa: borrowed-ref OK types, "types argument to ndarray.__array_function__ must be iterable"); if (types == NULL) { return NULL; } - + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(types); result = array_function_method_impl(func, types, args, kwargs); + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST(); Py_DECREF(types); return result; } diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index b8ade23b6d76..732919c347b0 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -2760,15 +2760,18 @@ einsum_sub_op_from_str( static int einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) { - int ellipsis = 0, subindex = 0; + int ellipsis = 0, subindex = 0, ret = -1; npy_intp i, size; PyObject *item; - obj = PySequence_Fast(obj, "the subscripts for each operand must " // noqa: borrowed-ref - manual fix needed + obj = PySequence_Fast(obj, "the subscripts for each operand must " // noqa: borrowed-ref OK "be a list or a tuple"); if (obj == NULL) { return -1; } + + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(obj); + size = PySequence_Size(obj); for (i = 0; i < size; ++i) { @@ -2778,14 +2781,12 @@ 
einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) if (ellipsis) { PyErr_SetString(PyExc_ValueError, "each subscripts list may have only one ellipsis"); - Py_DECREF(obj); - return -1; + goto cleanup; } if (subindex + 3 >= subsize) { PyErr_SetString(PyExc_ValueError, "subscripts list is too long"); - Py_DECREF(obj); - return -1; + goto cleanup; } subscripts[subindex++] = '.'; subscripts[subindex++] = '.'; @@ -2800,16 +2801,14 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) PyErr_SetString(PyExc_TypeError, "each subscript must be either an integer " "or an ellipsis"); - Py_DECREF(obj); - return -1; + goto cleanup; } npy_bool bad_input = 0; if (subindex + 1 >= subsize) { PyErr_SetString(PyExc_ValueError, "subscripts list is too long"); - Py_DECREF(obj); - return -1; + goto cleanup; } if (s < 0) { @@ -2828,16 +2827,20 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) if (bad_input) { PyErr_SetString(PyExc_ValueError, "subscript is not within the valid range [0, 52)"); - Py_DECREF(obj); - return -1; + goto cleanup; } } - } + ret = subindex; + + cleanup:; + + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST(); Py_DECREF(obj); - return subindex; + return ret; + } /* diff --git a/numpy/_core/src/multiarray/nditer_pywrap.c b/numpy/_core/src/multiarray/nditer_pywrap.c index 0af2ee408b4b..86198e9cd9f4 100644 --- a/numpy/_core/src/multiarray/nditer_pywrap.c +++ b/numpy/_core/src/multiarray/nditer_pywrap.c @@ -20,6 +20,7 @@ #include "common.h" #include "conversion_utils.h" #include "ctors.h" +#include "npy_pycompat.h" /* Functions not part of the public NumPy C API */ npy_bool npyiter_has_writeback(NpyIter *iter); @@ -588,7 +589,7 @@ npyiter_prepare_ops(PyObject *op_in, PyObject **out_owner, PyObject ***out_objs) { /* Take ownership of op_in (either a tuple/list or single element): */ if (PyTuple_Check(op_in) || PyList_Check(op_in)) { - PyObject *seq = PySequence_Fast(op_in, "failed accessing item list"); // noqa: borrowed-ref - manual fix needed + PyObject *seq = PySequence_Fast(op_in, "failed accessing item list"); // noqa: borrowed-ref OK if (op_in == NULL) { Py_DECREF(op_in); return -1; @@ -719,19 +720,28 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds) /* Need nop to set up workspaces */ PyObject **op_objs = NULL; - PyObject *op_in_owned = NULL; /* Sequence/object owning op_objs. */ - int nop = npyiter_prepare_ops(op_in, &op_in_owned, &op_objs); + PyObject *op_in_owned = NULL; /* Sequence/object owning op_objs. */ + PyArray_Descr **op_request_dtypes = NULL; + int pre_alloc_fail = 0; + int post_alloc_fail = 0; + int nop; + + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(op_in, nditer_cs); + + nop = npyiter_prepare_ops(op_in, &op_in_owned, &op_objs); if (nop < 0) { - goto pre_alloc_fail; + pre_alloc_fail = 1; + goto cleanup; } /* allocate workspace for Python objects (operands and dtypes) */ NPY_ALLOC_WORKSPACE(op, PyArrayObject *, 2 * 8, 2 * nop); if (op == NULL) { - goto pre_alloc_fail; + pre_alloc_fail = 1; + goto cleanup; } memset(op, 0, sizeof(PyObject *) * 2 * nop); - PyArray_Descr **op_request_dtypes = (PyArray_Descr **)(op + nop); + op_request_dtypes = (PyArray_Descr **)(op + nop); /* And other workspaces (that do not need to clean up their content) */ NPY_ALLOC_WORKSPACE(op_flags, npy_uint32, 8, nop); @@ -743,11 +753,25 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds) * (NPY_ALLOC_WORKSPACE has to be done before a goto fail currently.) 
*/ if (op_flags == NULL || op_axes_storage == NULL || op_axes == NULL) { - goto finish; + post_alloc_fail = 1; + goto cleanup; } /* op and op_flags */ if (npyiter_convert_ops(nop, op_objs, op_flags_in, op, op_flags) != 1) { + post_alloc_fail = 1; + goto cleanup; + } + +cleanup: + + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(nditer_cs); + + if (pre_alloc_fail) { + goto pre_alloc_fail; + } + + if (post_alloc_fail) { goto finish; } diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 5f8a2f11ea1f..b81c989ae5c7 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -293,3 +293,50 @@ def func(index): assert expected_warning in str(ex) run_threaded(func, max_workers=10, pass_count=True, outer_iterations=5) + + +# These are all implemented using PySequence_Fast, which needs locking to be safe +def np_broadcast(arrs): + for i in range(100): + np.broadcast(arrs) + +def create_array(arrs): + for i in range(100): + np.array(arrs) + +def create_nditer(arrs): + for i in range(1000): + np.nditer(arrs) + +@pytest.mark.parametrize("kernel", (np_broadcast, create_array, create_nditer)) +def test_arg_locking(kernel): + # should complete without failing or generating an error about an array size + # changing + + b = threading.Barrier(5) + done = 0 + arrs = [] + + def read_arrs(): + nonlocal done + b.wait() + try: + kernel(arrs) + finally: + done += 1 + + def mutate_list(): + b.wait() + while done < 4: + if len(arrs) > 10: + arrs.pop(0) + elif len(arrs) <= 10: + arrs.extend([np.array([1, 2, 3]) for _ in range(1000)]) + + arrs = [np.array([1, 2, 3]) for _ in range(1000)] + + tasks = [threading.Thread(target=read_arrs) for _ in range(4)] + tasks.append(threading.Thread(target=mutate_list)) + + [t.start() for t in tasks] + [t.join() for t in tasks] From ec3ac3d31e244384856c3f1c6cc65a37ee3906f5 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Mon, 21 Jul 2025 19:01:42 +0100 Subject: [PATCH 0228/1018] TYP: Type ``MaskedArray.resize``, wrap ``NoReturn`` tests in functions (#29401) Co-authored-by: Joren Hammudoglu --- numpy/ma/core.pyi | 13 ++++- .../tests/data/reveal/lib_polynomial.pyi | 5 +- numpy/typing/tests/data/reveal/ma.pyi | 5 +- numpy/typing/tests/data/reveal/ufuncs.pyi | 57 ++++++++++++------- 4 files changed, 57 insertions(+), 23 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index ee3b90f2891d..a3d05556f7ce 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3,7 +3,16 @@ from _typeshed import Incomplete from collections.abc import Sequence -from typing import Any, Literal, NoReturn, Self, SupportsIndex, TypeAlias, overload +from typing import ( + Any, + Literal, + Never, + NoReturn, + Self, + SupportsIndex, + TypeAlias, + overload, +) from typing_extensions import TypeIs, TypeVar import numpy as np @@ -1073,7 +1082,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def ravel(self, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... def reshape(self, *s, **kwargs): ... - def resize(self, newshape, refcheck=..., order=...): ... + def resize(self, newshape: Never, refcheck: bool = True, order: bool = False) -> NoReturn: ... def put(self, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... def ids(self) -> tuple[int, int]: ... def iscontiguous(self) -> bool: ... 
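The `newshape: Never` annotation above is the idiom that makes every call to
`MaskedArray.resize` a static type error, matching the runtime method, which
unconditionally raises. A minimal standalone sketch of the idiom (the names
here are illustrative, not part of this patch; `typing.Never` requires
Python 3.11+):

    from typing import Never, NoReturn

    def resize(newshape: Never, refcheck: bool = True) -> NoReturn:
        raise ValueError("resize is not supported")

    # resize((1, 1))  # rejected by a type checker: no value is assignable to `Never`
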
diff --git a/numpy/typing/tests/data/reveal/lib_polynomial.pyi b/numpy/typing/tests/data/reveal/lib_polynomial.pyi index 8b0a9f3d22e7..b7cbadefc610 100644 --- a/numpy/typing/tests/data/reveal/lib_polynomial.pyi +++ b/numpy/typing/tests/data/reveal/lib_polynomial.pyi @@ -118,7 +118,10 @@ assert_type(np.polyadd(AR_O, AR_O), npt.NDArray[np.object_]) assert_type(np.polysub(poly_obj, AR_i8), np.poly1d) assert_type(np.polysub(AR_f8, poly_obj), np.poly1d) -assert_type(np.polysub(AR_b, AR_b), NoReturn) + +def test_invalid_polysub() -> None: + assert_type(np.polysub(AR_b, AR_b), NoReturn) + assert_type(np.polysub(AR_u4, AR_b), npt.NDArray[np.unsignedinteger]) assert_type(np.polysub(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) assert_type(np.polysub(AR_f8, AR_i8), npt.NDArray[np.floating]) diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index b5a292330208..1d44c3f8ba3c 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -1,4 +1,4 @@ -from typing import Any, Literal, TypeAlias, TypeVar, assert_type +from typing import Any, Literal, TypeAlias, TypeVar, assert_type, NoReturn import numpy as np from numpy import dtype, generic @@ -405,6 +405,9 @@ assert_type(MAR_f8.cumprod(out=MAR_subclass), MaskedArraySubclass) assert_type(MAR_f8.cumsum(), MaskedArray[Any]) assert_type(MAR_f8.cumsum(out=MAR_subclass), MaskedArraySubclass) +def invalid_resize() -> None: + assert_type(MAR_f8.resize((1,1)), NoReturn) # type: ignore[arg-type] + # Masked Array addition assert_type(MAR_b + AR_LIKE_u, MaskedArray[np.uint32]) diff --git a/numpy/typing/tests/data/reveal/ufuncs.pyi b/numpy/typing/tests/data/reveal/ufuncs.pyi index 93a8bfb15d06..0bfe4df9ad8d 100644 --- a/numpy/typing/tests/data/reveal/ufuncs.pyi +++ b/numpy/typing/tests/data/reveal/ufuncs.pyi @@ -98,26 +98,45 @@ assert_type(np.bitwise_count.identity, None) assert_type(np.bitwise_count(i8), Any) assert_type(np.bitwise_count(AR_i8), npt.NDArray[Any]) -assert_type(np.absolute.outer(), NoReturn) -assert_type(np.frexp.outer(), NoReturn) -assert_type(np.divmod.outer(), NoReturn) -assert_type(np.matmul.outer(), NoReturn) +def test_absolute_outer_invalid() -> None: + assert_type(np.absolute.outer(), NoReturn) +def test_frexp_outer_invalid() -> None: + assert_type(np.frexp.outer(), NoReturn) +def test_divmod_outer_invalid() -> None: + assert_type(np.divmod.outer(), NoReturn) +def test_matmul_outer_invalid() -> None: + assert_type(np.matmul.outer(), NoReturn) -assert_type(np.absolute.reduceat(), NoReturn) -assert_type(np.frexp.reduceat(), NoReturn) -assert_type(np.divmod.reduceat(), NoReturn) -assert_type(np.matmul.reduceat(), NoReturn) +def test_absolute_reduceat_invalid() -> None: + assert_type(np.absolute.reduceat(), NoReturn) +def test_frexp_reduceat_invalid() -> None: + assert_type(np.frexp.reduceat(), NoReturn) +def test_divmod_reduceat_invalid() -> None: + assert_type(np.divmod.reduceat(), NoReturn) +def test_matmul_reduceat_invalid() -> None: + assert_type(np.matmul.reduceat(), NoReturn) -assert_type(np.absolute.reduce(), NoReturn) -assert_type(np.frexp.reduce(), NoReturn) -assert_type(np.divmod.reduce(), NoReturn) -assert_type(np.matmul.reduce(), NoReturn) +def test_absolute_reduce_invalid() -> None: + assert_type(np.absolute.reduce(), NoReturn) +def test_frexp_reduce_invalid() -> None: + assert_type(np.frexp.reduce(), NoReturn) +def test_divmod_reduce_invalid() -> None: + assert_type(np.divmod.reduce(), NoReturn) +def test_matmul_reduce_invalid() -> None: + 
assert_type(np.matmul.reduce(), NoReturn) -assert_type(np.absolute.accumulate(), NoReturn) -assert_type(np.frexp.accumulate(), NoReturn) -assert_type(np.divmod.accumulate(), NoReturn) -assert_type(np.matmul.accumulate(), NoReturn) +def test_absolute_accumulate_invalid() -> None: + assert_type(np.absolute.accumulate(), NoReturn) +def test_frexp_accumulate_invalid() -> None: + assert_type(np.frexp.accumulate(), NoReturn) +def test_divmod_accumulate_invalid() -> None: + assert_type(np.divmod.accumulate(), NoReturn) +def test_matmul_accumulate_invalid() -> None: + assert_type(np.matmul.accumulate(), NoReturn) -assert_type(np.frexp.at(), NoReturn) -assert_type(np.divmod.at(), NoReturn) -assert_type(np.matmul.at(), NoReturn) +def test_frexp_at_invalid() -> None: + assert_type(np.frexp.at(), NoReturn) +def test_divmod_at_invalid() -> None: + assert_type(np.divmod.at(), NoReturn) +def test_matmul_at_invalid() -> None: + assert_type(np.matmul.at(), NoReturn) From c7a93c19765317464e3e1e301468a5c2ccd35737 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Jul 2025 19:13:28 +0000 Subject: [PATCH 0229/1018] MAINT: Bump github/codeql-action from 3.29.2 to 3.29.3 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.2 to 3.29.3. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/181d5eefc20863364f96762470ba6f862bdef56b...d6bbdef45e766d081b84a2def353b0055f728d3e) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index cbee65f0b713..71abe29f9d09 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 + uses: github/codeql-action/init@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 + uses: github/codeql-action/autobuild@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 + uses: github/codeql-action/analyze@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index aa3bf1aec7cc..de3d582f4750 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # v2.1.27 + uses: github/codeql-action/upload-sarif@d6bbdef45e766d081b84a2def353b0055f728d3e # v2.1.27 with: sarif_file: results.sarif From fcb82dfe9137890326ff18920442c75bd6dc488b Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Tue, 22 Jul 2025 08:00:54 +1000 Subject: [PATCH 0230/1018] use a stable pypy release in CI --- .github/workflows/linux.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 3452724841c3..7ac5ea673d61 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -82,7 +82,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: - python-version: 'pypy3.11-nightly' + python-version: 'pypy3.11-v7.3.20' - name: Setup using scipy-openblas run: | python -m pip install -r requirements/ci_requirements.txt From d2c0c106eab6bc79193964b2eab40298c7295094 Mon Sep 17 00:00:00 2001 From: riku-sakamoto <46015196+riku-sakamoto@users.noreply.github.com> Date: Tue, 22 Jul 2025 07:51:15 +0900 Subject: [PATCH 0231/1018] ENH: Show unit information in repr for datetime64("NaT") (#29396) --- .../upcoming_changes/29396.improvement.rst | 5 +++++ numpy/_core/src/multiarray/scalartypes.c.src | 21 ++++++++++++++----- numpy/_core/tests/test_datetime.py | 4 +++- 3 files changed, 24 insertions(+), 6 deletions(-) create mode 100644 doc/release/upcoming_changes/29396.improvement.rst diff --git a/doc/release/upcoming_changes/29396.improvement.rst b/doc/release/upcoming_changes/29396.improvement.rst new file mode 100644 index 000000000000..2cd3d81ad9d8 --- /dev/null +++ b/doc/release/upcoming_changes/29396.improvement.rst @@ -0,0 +1,5 @@ +Show unit information in ``__repr__`` for ``datetime64("NaT")`` +------------------------------------------------------------------ +When a `datetime64` object is "Not a Time" (NaT), its ``__repr__`` method now +includes the time unit of the datetime64 type. This makes it consistent with +the behavior of a `timedelta64` object. 
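+
+For example (the expected output below matches the tests updated in this
+patch)::
+
+    >>> import numpy as np
+    >>> np.datetime64("NaT")
+    np.datetime64('NaT','generic')
+    >>> np.datetime64("NaT").astype("datetime64[ns]")
+    np.datetime64('NaT','ns')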
diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 03165b10337e..5724afda7e47 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -939,12 +939,23 @@ datetimetype_repr(PyObject *self) if (legacy_print_mode == -1) { return NULL; } - if (legacy_print_mode > 125) { - ret = PyUnicode_FromFormat("np.datetime64('%s')", iso); - } - else { - ret = PyUnicode_FromFormat("numpy.datetime64('%s')", iso); + + PyObject *meta = metastr_to_unicode(&scal->obmeta, 1); + if((scal->obval == NPY_DATETIME_NAT) && (meta != NULL)){ + if (legacy_print_mode > 125) { + ret = PyUnicode_FromFormat("np.datetime64('%s','%S')", iso, meta); + } else { + ret = PyUnicode_FromFormat("numpy.datetime64('%s','%S')", iso, meta); + } + } else { + if (legacy_print_mode > 125) { + ret = PyUnicode_FromFormat("np.datetime64('%s')", iso); + } + else { + ret = PyUnicode_FromFormat("numpy.datetime64('%s')", iso); + } } + } else { PyObject *meta = metastr_to_unicode(&scal->obmeta, 1); diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index a10ca15bc373..888bb7db293b 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -264,10 +264,12 @@ def test_datetime_scalar_construction(self): # Some basic strings and repr assert_equal(str(np.datetime64('NaT')), 'NaT') assert_equal(repr(np.datetime64('NaT')), - "np.datetime64('NaT')") + "np.datetime64('NaT','generic')") assert_equal(str(np.datetime64('2011-02')), '2011-02') assert_equal(repr(np.datetime64('2011-02')), "np.datetime64('2011-02')") + assert_equal(repr(np.datetime64('NaT').astype(np.dtype("datetime64[ns]"))), + "np.datetime64('NaT','ns')") # None gets constructed as NaT assert_equal(np.datetime64(None), np.datetime64('NaT')) From f13cf6e5748c8282a161a990cc7abd0859faaf75 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 22 Jul 2025 02:17:53 -0600 Subject: [PATCH 0232/1018] MAINT/BUG: Followups for PySequence_Fast locking (#29405) * MAINT: break up NPY_ALLOC_WORKSPACE into sub-macros that define and then init * MAINT: remove now-unneeded NO_BRACKETS critical section variants * BUG: always pass the argument to PySequence_Fast to BEGIN_CRITICAL_SECTION_SEQUENCE_FAST --- numpy/_core/src/common/npy_pycompat.h | 43 ++++++------------- numpy/_core/src/multiarray/alloc.h | 9 +++- numpy/_core/src/multiarray/iterators.c | 2 +- numpy/_core/src/multiarray/multiarraymodule.c | 13 +++--- numpy/_core/src/multiarray/nditer_pywrap.c | 18 +++++--- 5 files changed, 37 insertions(+), 48 deletions(-) diff --git a/numpy/_core/src/common/npy_pycompat.h b/numpy/_core/src/common/npy_pycompat.h index f751af666524..605833a511b7 100644 --- a/numpy/_core/src/common/npy_pycompat.h +++ b/numpy/_core/src/common/npy_pycompat.h @@ -15,45 +15,26 @@ // // These are tweaked versions of macros defined in CPython in // pycore_critical_section.h, originally added in CPython commit baf347d91643. -// They're defined in terms of the NPY_*_CRITICAL_SECTION_NO_BRACKETS to avoid -// repition and should behave identically to the versions in CPython. Once the -// macros are expanded, The only difference relative to those versions is the +// They should behave identically to the versions in CPython. Once the +// macros are expanded, the only difference relative to those versions is the // use of public C API symbols that are equivalent to the ones used in the // corresponding CPython definitions. 
#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(original) \ { \ - NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS( \ - original, npy_cs_fast) + PyObject *_orig_seq = (PyObject *)(original); \ + const int _should_lock_cs = \ + PyList_CheckExact(_orig_seq); \ + PyCriticalSection _cs_fast; \ + if (_should_lock_cs) { \ + PyCriticalSection_Begin(&_cs_fast, _orig_seq); \ + } #define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST() \ - NPY_END_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(npy_cs_fast) \ - } - -// These macros are more flexible than the versions in the public CPython C API, -// but that comes at a cost. Here are some differences and limitations: -// -// * cs_name is a named label for the critical section. If you must nest -// critical sections, do *not* use the same name for multiple nesting -// critical sections. -// * The beginning and ending macros must happen within the same scope -// and the compiler won't necessarily enforce that. -// * The macros ending critical sections accept a named label. The label -// must match the opening critical section. -#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(original, cs_name) \ - PyObject *_##cs_name##_orig_seq = (PyObject *)(original); \ - const int _##cs_name##_should_lock_cs = \ - PyList_CheckExact(_##cs_name##_orig_seq); \ - PyCriticalSection _##cs_name; \ - if (_##cs_name##_should_lock_cs) { \ - PyCriticalSection_Begin(&_##cs_name, _##cs_name##_orig_seq); \ - } -#define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(cs_name) \ - if (_##cs_name##_should_lock_cs) { \ - PyCriticalSection_End(&_##cs_name); \ + if (_should_lock_cs) { \ + PyCriticalSection_End(&_cs_fast); \ + } \ } #else -#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(original, cs_name) #define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(original) { -#define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(cs_name) #define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST() } #endif diff --git a/numpy/_core/src/multiarray/alloc.h b/numpy/_core/src/multiarray/alloc.h index 8cd763f971ed..bef6407a28a3 100644 --- a/numpy/_core/src/multiarray/alloc.h +++ b/numpy/_core/src/multiarray/alloc.h @@ -93,11 +93,16 @@ _npy_init_workspace( * With some caches, it may be possible to malloc/calloc very quickly in which * case we should not hesitate to replace this pattern. 
*/ -#define NPY_ALLOC_WORKSPACE(NAME, TYPE, fixed_size, size) \ +#define NPY_DEFINE_WORKSPACE(NAME, TYPE, fixed_size) \ TYPE NAME##_static[fixed_size]; \ - TYPE *NAME; \ + TYPE *NAME; +#define NPY_INIT_WORKSPACE(NAME, TYPE, fixed_size, size) \ _npy_init_workspace((void **)&NAME, NAME##_static, (fixed_size), sizeof(TYPE), (size)) +#define NPY_ALLOC_WORKSPACE(NAME, TYPE, fixed_size, size) \ + NPY_DEFINE_WORKSPACE(NAME, TYPE, fixed_size) \ + NPY_INIT_WORKSPACE(NAME, TYPE, fixed_size, size) + static inline void _npy_free_workspace(void *buf, void *static_buf) diff --git a/numpy/_core/src/multiarray/iterators.c b/numpy/_core/src/multiarray/iterators.c index f45328a2bb75..7557662ad56c 100644 --- a/numpy/_core/src/multiarray/iterators.c +++ b/numpy/_core/src/multiarray/iterators.c @@ -1494,7 +1494,7 @@ arraymultiter_new(PyTypeObject *NPY_UNUSED(subtype), PyObject *args, if (fast_seq == NULL) { return NULL; } - NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(fast_seq) + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(args) n = PySequence_Fast_GET_SIZE(fast_seq); if (n > NPY_MAXARGS) { ret = multiiter_wrong_number_of_args(); diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 732919c347b0..3d82e6c7f448 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -2762,20 +2762,20 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) { int ellipsis = 0, subindex = 0, ret = -1; npy_intp i, size; - PyObject *item; + PyObject *item, *seq; - obj = PySequence_Fast(obj, "the subscripts for each operand must " // noqa: borrowed-ref OK + seq = PySequence_Fast(obj, "the subscripts for each operand must " // noqa: borrowed-ref OK "be a list or a tuple"); - if (obj == NULL) { + if (seq == NULL) { return -1; } NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(obj); - size = PySequence_Size(obj); + size = PySequence_Size(seq); for (i = 0; i < size; ++i) { - item = PySequence_Fast_GET_ITEM(obj, i); + item = PySequence_Fast_GET_ITEM(seq, i); /* Ellipsis */ if (item == Py_Ellipsis) { if (ellipsis) { @@ -2837,10 +2837,9 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) cleanup:; NPY_END_CRITICAL_SECTION_SEQUENCE_FAST(); - Py_DECREF(obj); + Py_DECREF(seq); return ret; - } /* diff --git a/numpy/_core/src/multiarray/nditer_pywrap.c b/numpy/_core/src/multiarray/nditer_pywrap.c index 86198e9cd9f4..b68f7ad9708d 100644 --- a/numpy/_core/src/multiarray/nditer_pywrap.c +++ b/numpy/_core/src/multiarray/nditer_pywrap.c @@ -725,8 +725,12 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds) int pre_alloc_fail = 0; int post_alloc_fail = 0; int nop; + NPY_DEFINE_WORKSPACE(op, PyArrayObject *, 2 * 8); + NPY_DEFINE_WORKSPACE(op_flags, npy_uint32, 8); + NPY_DEFINE_WORKSPACE(op_axes_storage, int, 8 * NPY_MAXDIMS); + NPY_DEFINE_WORKSPACE(op_axes, int *, 8); - NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(op_in, nditer_cs); + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(op_in); nop = npyiter_prepare_ops(op_in, &op_in_owned, &op_objs); if (nop < 0) { @@ -735,7 +739,7 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds) } /* allocate workspace for Python objects (operands and dtypes) */ - NPY_ALLOC_WORKSPACE(op, PyArrayObject *, 2 * 8, 2 * nop); + NPY_INIT_WORKSPACE(op, PyArrayObject *, 2 * 8, 2 * nop); if (op == NULL) { pre_alloc_fail = 1; goto cleanup; @@ -744,9 +748,9 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds) 
op_request_dtypes = (PyArray_Descr **)(op + nop); /* And other workspaces (that do not need to clean up their content) */ - NPY_ALLOC_WORKSPACE(op_flags, npy_uint32, 8, nop); - NPY_ALLOC_WORKSPACE(op_axes_storage, int, 8 * NPY_MAXDIMS, nop * NPY_MAXDIMS); - NPY_ALLOC_WORKSPACE(op_axes, int *, 8, nop); + NPY_INIT_WORKSPACE(op_flags, npy_uint32, 8, nop); + NPY_INIT_WORKSPACE(op_axes_storage, int, 8 * NPY_MAXDIMS, nop * NPY_MAXDIMS); + NPY_INIT_WORKSPACE(op_axes, int *, 8, nop); /* * Trying to allocate should be OK if one failed, check for error now * that we can use `goto finish` to clean up everything. @@ -763,9 +767,9 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds) goto cleanup; } -cleanup: +cleanup:; - NPY_END_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(nditer_cs); + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST(); if (pre_alloc_fail) { goto pre_alloc_fail; From 8f68377c0e90f8ff9a996e801ad0ba2c5971ec3b Mon Sep 17 00:00:00 2001 From: Maanas Arora Date: Tue, 22 Jul 2025 06:31:01 -0400 Subject: [PATCH 0233/1018] BUG: Any dtype should call `square` on `arr ** 2` (#29392) * BUG: update fast_scalar_power to handle special-case squaring for any array type except object arrays * BUG: fix missing declaration * TST: add test to ensure `arr**2` calls square for structured dtypes * STY: remove whitespace * BUG: replace new variable `is_square` with direct op comparison in `fast_scalar_power` function --- numpy/_core/src/multiarray/number.c | 8 +++++++- numpy/_core/tests/test_multiarray.py | 7 +++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/number.c b/numpy/_core/src/multiarray/number.c index b801d7e041e2..de4012641684 100644 --- a/numpy/_core/src/multiarray/number.c +++ b/numpy/_core/src/multiarray/number.c @@ -332,6 +332,7 @@ static int fast_scalar_power(PyObject *o1, PyObject *o2, int inplace, PyObject **result) { PyObject *fastop = NULL; + if (PyLong_CheckExact(o2)) { int overflow = 0; long exp = PyLong_AsLongAndOverflow(o2, &overflow); @@ -363,7 +364,12 @@ fast_scalar_power(PyObject *o1, PyObject *o2, int inplace, PyObject **result) } PyArrayObject *a1 = (PyArrayObject *)o1; - if (!(PyArray_ISFLOAT(a1) || PyArray_ISCOMPLEX(a1))) { + if (PyArray_ISOBJECT(a1)) { + return 1; + } + if (fastop != n_ops.square && !PyArray_ISFLOAT(a1) && !PyArray_ISCOMPLEX(a1)) { + // we special-case squaring for any array type + // gh-29388 return 1; } diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 04222025883e..e7647d2e23fe 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -4215,6 +4215,13 @@ def pow_for(exp, arr): assert_equal(obj_arr ** -1, pow_for(-1, obj_arr)) assert_equal(obj_arr ** 2, pow_for(2, obj_arr)) + def test_pow_calls_square_structured_dtype(self): + # gh-29388 + dt = np.dtype([('a', 'i4'), ('b', 'i4')]) + a = np.array([(1, 2), (3, 4)], dtype=dt) + with pytest.raises(TypeError, match="ufunc 'square' not supported"): + a ** 2 + def test_pos_array_ufunc_override(self): class A(np.ndarray): def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): From 1016f8a9abf715b5cd84440e2d27c36faf442046 Mon Sep 17 00:00:00 2001 From: Dan Raviv Date: Tue, 22 Jul 2025 12:53:49 -0700 Subject: [PATCH 0234/1018] DOC: Fix index name in notes for np.take --- numpy/_core/fromnumeric.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 9d01fca8aa32..1eeae1f33a01 100644 --- 
a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -170,7 +170,7 @@ def take(a, indices, axis=None, out=None, mode='raise'): Ni, Nk = a.shape[:axis], a.shape[axis+1:] for ii in ndindex(Ni): - for kk in ndindex(Nj): + for kk in ndindex(Nk): out[ii + s_[...,] + kk] = a[ii + s_[:,] + kk][indices] For this reason, it is equivalent to (but faster than) the following use From 436662ee6cd73e959d5eff375f3e8eec5e0be8a2 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 23 Jul 2025 13:57:43 +0100 Subject: [PATCH 0235/1018] TYP: Type ``MaskedArray.__deepcopy__`` and ``MaskedArray.argsort`` (#29418) --- numpy/__init__.pyi | 2 +- numpy/ma/core.py | 2 +- numpy/ma/core.pyi | 13 +++++++++++-- numpy/typing/tests/data/fail/ma.pyi | 2 ++ numpy/typing/tests/data/reveal/ma.pyi | 6 ++++++ numpy/typing/tests/data/reveal/ndarray_misc.pyi | 4 ++-- 6 files changed, 23 insertions(+), 6 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index d8c57c87cbbe..a196c000b6bc 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1721,7 +1721,7 @@ class _ArrayOrScalarCommon: order: str | Sequence[str] | None = ..., *, stable: bool | None = ..., - ) -> NDArray[Any]: ... + ) -> NDArray[intp]: ... @overload # axis=None (default), out=None (default), keepdims=False (default) def argmax(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ... diff --git a/numpy/ma/core.py b/numpy/ma/core.py index e7d8a20f6c6c..84e029439725 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -5628,7 +5628,7 @@ def argsort(self, axis=np._NoValue, kind=None, order=None, endwith=True, is used. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional The sorting algorithm used. - order : list, optional + order : str or list of str, optional When `a` is an array with fields defined, this argument specifies which fields to compare first, second, etc. Not all fields need be specified. diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index a3d05556f7ce..a4bfeb3ffab7 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1398,7 +1398,16 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def round(self, /, decimals: SupportsIndex = 0, *, out: _ArrayT) -> _ArrayT: ... - def argsort(self, axis=..., kind=..., order=..., endwith=..., fill_value=..., *, stable=...): ... + def argsort( + self, + axis: SupportsIndex | _NoValueType = ..., + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: bool = False, + ) -> _MaskedArray[intp]: ... # Keep in-sync with np.ma.argmin @overload # type: ignore[override] @@ -1703,7 +1712,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # def __reduce__(self): ... - def __deepcopy__(self, memo=...): ... + def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Self: ... 
# Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` @property diff --git a/numpy/typing/tests/data/fail/ma.pyi b/numpy/typing/tests/data/fail/ma.pyi index 5dc6706ebf81..160d30f885e3 100644 --- a/numpy/typing/tests/data/fail/ma.pyi +++ b/numpy/typing/tests/data/fail/ma.pyi @@ -141,3 +141,5 @@ MAR_c //= 2 # type: ignore[misc] MAR_td64 **= 2 # type: ignore[misc] MAR_1d_f8.swapaxes(axis1=1, axis2=0) # type: ignore[call-arg] + +MAR_1d_f8.argsort(axis=(1,0)) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 1d44c3f8ba3c..9aae83eda366 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -405,6 +405,12 @@ assert_type(MAR_f8.cumprod(out=MAR_subclass), MaskedArraySubclass) assert_type(MAR_f8.cumsum(), MaskedArray[Any]) assert_type(MAR_f8.cumsum(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.__deepcopy__(), MaskedArray[np.float64]) + +assert_type(MAR_f8.argsort(), MaskedArray[np.intp]) +assert_type(MAR_f8.argsort(axis=0, kind='heap', order=('x', 'y')), MaskedArray[np.intp]) +assert_type(MAR_f8.argsort(endwith=True, fill_value=1.5, stable=False), MaskedArray[np.intp]) + def invalid_resize() -> None: assert_type(MAR_f8.resize((1,1)), NoReturn) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index 7f0a214f8f52..b219f4e5bec2 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -70,8 +70,8 @@ assert_type(AR_f8.argmin(), np.intp) assert_type(AR_f8.argmin(axis=0), Any) assert_type(AR_f8.argmin(out=AR_i8), npt.NDArray[np.intp]) -assert_type(f8.argsort(), npt.NDArray[Any]) -assert_type(AR_f8.argsort(), npt.NDArray[Any]) +assert_type(f8.argsort(), npt.NDArray[np.intp]) +assert_type(AR_f8.argsort(), npt.NDArray[np.intp]) assert_type(f8.astype(np.int64).choose([()]), npt.NDArray[Any]) assert_type(AR_f8.choose([0]), npt.NDArray[Any]) From e766381f73c29c7ef4c0d7f8d58012c78914c15c Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 23 Jul 2025 16:31:36 +0100 Subject: [PATCH 0236/1018] TYP: Type ``MaskedArray.view`` (#29383) --- numpy/__init__.pyi | 4 +-- numpy/ma/core.pyi | 47 ++++++++++++++++++++++++++- numpy/typing/tests/data/reveal/ma.pyi | 12 +++++++ 3 files changed, 60 insertions(+), 3 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index a196c000b6bc..934fe7ff0287 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2575,14 +2575,14 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload # (dtype: T) def view(self, /, dtype: _DTypeT | _HasDType[_DTypeT]) -> ndarray[_ShapeT_co, _DTypeT]: ... @overload # (dtype: dtype[T]) - def view(self, /, dtype: _DTypeLike[_ScalarT]) -> NDArray[_ScalarT]: ... + def view(self, /, dtype: _DTypeLike[_ScalarT]) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... @overload # (type: T) def view(self, /, *, type: type[_ArrayT]) -> _ArrayT: ... @overload # (_: T) def view(self, /, dtype: type[_ArrayT]) -> _ArrayT: ... @overload # (dtype: ?) def view(self, /, dtype: DTypeLike) -> ndarray[_ShapeT_co, dtype]: ... - @overload # (dtype: ?, type: type[T]) + @overload # (dtype: ?, type: T) def view(self, /, dtype: DTypeLike, type: type[_ArrayT]) -> _ArrayT: ... def setfield(self, /, val: ArrayLike, dtype: DTypeLike, offset: SupportsIndex = 0) -> None: ... 
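With the `_DTypeLike[_ScalarT]` overload above now preserving the array's
shape type, a view keeps its dimensionality in the static types as well. A
sketch of what the reveal tests later in this patch check (`MAR_2d_f4` is a
declared-only placeholder):

    from typing import assert_type
    import numpy as np

    MAR_2d_f4: np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]

    assert_type(MAR_2d_f4.view(dtype=np.float16),
                np.ma.MaskedArray[tuple[int, int], np.dtype[np.float16]])
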
diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index a4bfeb3ffab7..205dc83fad67 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -17,6 +17,7 @@ from typing_extensions import TypeIs, TypeVar import numpy as np from numpy import ( + _HasDType, _HasDTypeWithRealAndImag, _ModeKind, _OrderKACF, @@ -76,12 +77,14 @@ from numpy._typing import ( _ArrayLikeString_co, _ArrayLikeTD64_co, _ArrayLikeUInt_co, + _DTypeLike, _DTypeLikeBool, _IntLike_co, _ScalarLike_co, _Shape, _ShapeLike, ) +from numpy._typing._dtype_like import _VoidDTypeLike __all__ = [ "MAError", @@ -453,7 +456,49 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __new__(cls, data=..., mask=..., dtype=..., copy=..., subok=..., ndmin=..., fill_value=..., keep_mask=..., hard_mask=..., shrink=..., order=...): ... def __array_finalize__(self, obj): ... def __array_wrap__(self, obj, context=..., return_scalar=...): ... - def view(self, dtype=..., type=..., fill_value=...): ... + + @overload # () + def view(self, /, dtype: None = None, type: None = None, fill_value: _ScalarLike_co | None = None) -> Self: ... + @overload # (dtype: DTypeT) + def view( + self, + /, + dtype: _DTypeT | _HasDType[_DTypeT], + type: None = None, + fill_value: _ScalarLike_co | None = None + ) -> MaskedArray[_ShapeT_co, _DTypeT]: ... + @overload # (dtype: dtype[ScalarT]) + def view( + self, + /, + dtype: _DTypeLike[_ScalarT], + type: None = None, + fill_value: _ScalarLike_co | None = None + ) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... + @overload # ([dtype: _, ]*, type: ArrayT) + def view( + self, + /, + dtype: DTypeLike | None = None, + *, + type: type[_ArrayT], + fill_value: _ScalarLike_co | None = None + ) -> _ArrayT: ... + @overload # (dtype: _, type: ArrayT) + def view(self, /, dtype: DTypeLike | None, type: type[_ArrayT], fill_value: _ScalarLike_co | None = None) -> _ArrayT: ... + @overload # (dtype: ArrayT, /) + def view(self, /, dtype: type[_ArrayT], type: None = None, fill_value: _ScalarLike_co | None = None) -> _ArrayT: ... + @overload # (dtype: ?) + def view( + self, + /, + # `_VoidDTypeLike | str | None` is like `DTypeLike` but without `_DTypeLike[Any]` to avoid + # overlaps with previous overloads. + dtype: _VoidDTypeLike | str | None, + type: None = None, + fill_value: _ScalarLike_co | None = None + ) -> MaskedArray[_ShapeT_co, dtype]: ... + def __getitem__(self, indx): ... def __setitem__(self, indx, value): ... 
@property diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 9aae83eda366..6cad138b9dc5 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -405,6 +405,18 @@ assert_type(MAR_f8.cumprod(out=MAR_subclass), MaskedArraySubclass) assert_type(MAR_f8.cumsum(), MaskedArray[Any]) assert_type(MAR_f8.cumsum(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.view(), MaskedArray[np.float64]) +assert_type(MAR_f8.view(dtype=np.float32), MaskedArray[np.float32]) +assert_type(MAR_f8.view(dtype=np.dtype(np.float32)), MaskedArray[np.float32]) +assert_type(MAR_f8.view(dtype=np.float32, fill_value=0), MaskedArray[np.float32]) +assert_type(MAR_f8.view(type=np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_f8.view(None, np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_f8.view(dtype=np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_f8.view(dtype='float32'), MaskedArray[Any]) +assert_type(MAR_f8.view(dtype='float32', type=np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_2d_f4.view(dtype=np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float16]]) +assert_type(MAR_2d_f4.view(dtype=np.dtype(np.float16)), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float16]]) + assert_type(MAR_f8.__deepcopy__(), MaskedArray[np.float64]) assert_type(MAR_f8.argsort(), MaskedArray[np.intp]) From 7845ef8a42eb8a87830b704b434f4939be0e7591 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 23 Jul 2025 17:18:56 +0000 Subject: [PATCH 0237/1018] MAINT: Bump github/codeql-action from 3.29.3 to 3.29.4 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.3 to 3.29.4. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/d6bbdef45e766d081b84a2def353b0055f728d3e...4e828ff8d448a8a6e532957b1811f387a63867e8) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.4 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 71abe29f9d09..1aea33f531f4 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 + uses: github/codeql-action/init@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 + uses: github/codeql-action/autobuild@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 + uses: github/codeql-action/analyze@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index de3d582f4750..84fbe1f03cb1 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@d6bbdef45e766d081b84a2def353b0055f728d3e # v2.1.27 + uses: github/codeql-action/upload-sarif@4e828ff8d448a8a6e532957b1811f387a63867e8 # v2.1.27 with: sarif_file: results.sarif From a0bb9edfb91c77b241294a5dba9e26158fb3ec1f Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 23 Jul 2025 19:22:38 +0200 Subject: [PATCH 0238/1018] DOC: Remove outdated `numpy.exceptions` compatibility note. The exceptions were removed from the global `numpy` namespace in 2.0 (#24316). --- numpy/exceptions.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/numpy/exceptions.py b/numpy/exceptions.py index 0e8688ae9eba..cf70b4a4ce3b 100644 --- a/numpy/exceptions.py +++ b/numpy/exceptions.py @@ -7,8 +7,7 @@ .. versionadded:: NumPy 1.25 - The exceptions module is new in NumPy 1.25. Older exceptions remain - available through the main NumPy namespace for compatibility. + The exceptions module is new in NumPy 1.25. .. currentmodule:: numpy.exceptions From 8b2321d03c1abce671a15f9d08a97249bd2d161d Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 23 Jul 2025 18:31:05 +0100 Subject: [PATCH 0239/1018] TYP: Type ``MaskedArray.reshape`` (#29404) Co-authored-by: Joren Hammudoglu --- numpy/__init__.pyi | 1 + numpy/ma/core.pyi | 87 ++++++++++++++++++++++++++- numpy/typing/tests/data/reveal/ma.pyi | 8 +++ 3 files changed, 95 insertions(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 934fe7ff0287..302f0cdf15fc 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2468,6 +2468,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ... def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ... + # Keep in sync with `MaskedArray.reshape` # NOTE: reshape also accepts negative integers, so we can't use integer literals @overload # (None) def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: builtins.bool | None = None) -> Self: ... diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 205dc83fad67..0a039831a7d4 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -17,9 +17,11 @@ from typing_extensions import TypeIs, TypeVar import numpy as np from numpy import ( + _AnyShapeT, _HasDType, _HasDTypeWithRealAndImag, _ModeKind, + _OrderACF, _OrderKACF, _PartitionKind, _SortKind, @@ -1126,7 +1128,90 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def count(self, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... def ravel(self, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... - def reshape(self, *s, **kwargs): ... 
+ + # Keep in sync with `ndarray.reshape` + # NOTE: reshape also accepts negative integers, so we can't use integer literals + @overload # (None) + def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: bool | None = None) -> Self: ... + @overload # (empty_sequence) + def reshape( # type: ignore[overload-overlap] # mypy false positive + self, + shape: Sequence[Never], + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[()], _DTypeT_co]: ... + @overload # (() | (int) | (int, int) | ....) # up to 8-d + def reshape( + self, + shape: _AnyShapeT, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[_AnyShapeT, _DTypeT_co]: ... + @overload # (index) + def reshape( + self, + size1: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[int], _DTypeT_co]: ... + @overload # (index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[int, int], _DTypeT_co]: ... + @overload # (index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[int, int, int], _DTypeT_co]: ... + @overload # (index, index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + size4: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[int, int, int, int], _DTypeT_co]: ... + @overload # (int, *(index, ...)) + def reshape( + self, + size0: SupportsIndex, + /, + *shape: SupportsIndex, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + @overload # (sequence[index]) + def reshape( + self, + shape: Sequence[SupportsIndex], + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + def resize(self, newshape: Never, refcheck: bool = True, order: bool = False) -> NoReturn: ... def put(self, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... def ids(self) -> tuple[int, int]: ... 
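A quick sketch of how a type checker resolves the reshape overloads above; it mirrors the reveal tests in the next diff, and the comments show the inferred static types, not runtime values:

    m: np.ma.MaskedArray[tuple[int, int], np.dtype[np.float64]]
    m.reshape(-1)      # MaskedArray[tuple[int], dtype[float64]]
    m.reshape(2, 3)    # MaskedArray[tuple[int, int], dtype[float64]]
    m.reshape((2, 3))  # MaskedArray[tuple[int, int], dtype[float64]]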
diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 6cad138b9dc5..f2b7aa215c7a 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -27,6 +27,7 @@ AR_LIKE_dt64: list[np.datetime64] AR_LIKE_o: list[np.object_] AR_number: NDArray[np.number] +MAR_c8: MaskedArray[np.complex64] MAR_c16: MaskedArray[np.complex128] MAR_b: MaskedArray[np.bool] MAR_f4: MaskedArray[np.float32] @@ -399,6 +400,13 @@ assert_type(MAR_f8.trace(out=MAR_subclass, dtype=None), MaskedArraySubclass) assert_type(MAR_f8.round(), MaskedArray[np.float64]) assert_type(MAR_f8.round(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_i8.reshape(None), MaskedArray[np.int64]) +assert_type(MAR_f8.reshape(-1), np.ma.MaskedArray[tuple[int], np.dtype[np.float64]]) +assert_type(MAR_c8.reshape(2, 3, 4, 5), np.ma.MaskedArray[tuple[int, int, int, int], np.dtype[np.complex64]]) +assert_type(MAR_td64.reshape(()), np.ma.MaskedArray[tuple[()], np.dtype[np.timedelta64]]) +assert_type(MAR_s.reshape([]), np.ma.MaskedArray[tuple[()], np.dtype[np.str_]]) +assert_type(MAR_V.reshape((480, 720, 4)), np.ma.MaskedArray[tuple[int, int, int], np.dtype[np.void]]) + assert_type(MAR_f8.cumprod(), MaskedArray[Any]) assert_type(MAR_f8.cumprod(out=MAR_subclass), MaskedArraySubclass) From eecfbc544dcaafa49003a8a2cd113ea46d763be4 Mon Sep 17 00:00:00 2001 From: ixgbe <1113177880@qq.com> Date: Thu, 24 Jul 2025 16:22:40 +0800 Subject: [PATCH 0240/1018] ENH: Enable RVV acceleration for auto-vectorization in RISC-V Signed-off-by: Wang Yang --- numpy/_core/meson.build | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 1b79aa39781c..ba482d0cf5a0 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1068,6 +1068,7 @@ foreach gen_mtargets : [ VSX2, VX, LSX, + RVV, ] ], [ From 66ffb4c45f366b57b43457d531eed6e256f32086 Mon Sep 17 00:00:00 2001 From: ixgbe <1113177880@qq.com> Date: Thu, 24 Jul 2025 17:02:52 +0800 Subject: [PATCH 0241/1018] ci: rerun workflow due to mirror sync issue --- numpy/_core/meson.build | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index ba482d0cf5a0..b4c769810ad8 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1068,7 +1068,7 @@ foreach gen_mtargets : [ VSX2, VX, LSX, - RVV, + RVV ] ], [ From 3702c9ab93c2b732b200dd19ac0670c68fb0702c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 24 Jul 2025 13:17:03 +0000 Subject: [PATCH 0242/1018] MAINT: Bump larsoner/circleci-artifacts-redirector-action Bumps [larsoner/circleci-artifacts-redirector-action](https://github.com/larsoner/circleci-artifacts-redirector-action) from 1.1.0 to 1.2.0. - [Release notes](https://github.com/larsoner/circleci-artifacts-redirector-action/releases) - [Commits](https://github.com/larsoner/circleci-artifacts-redirector-action/compare/7eafdb60666f57706a5525a2f5eb76224dc8779b...839631420e45a08af893032e5a5e8843bf47e8ff) --- updated-dependencies: - dependency-name: larsoner/circleci-artifacts-redirector-action dependency-version: 1.2.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/circleci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/circleci.yml b/.github/workflows/circleci.yml index 3c84ce3c6890..12c51735bf81 100644 --- a/.github/workflows/circleci.yml +++ b/.github/workflows/circleci.yml @@ -17,7 +17,7 @@ jobs: statuses: write steps: - name: GitHub Action step - uses: larsoner/circleci-artifacts-redirector-action@7eafdb60666f57706a5525a2f5eb76224dc8779b # master + uses: larsoner/circleci-artifacts-redirector-action@839631420e45a08af893032e5a5e8843bf47e8ff # master with: repo-token: ${{ secrets.GITHUB_TOKEN }} api-token: ${{ secrets.CIRCLE_TOKEN }} From f093acd2ef1804787b9976bd6d92bf75e8169e70 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 24 Jul 2025 13:17:08 +0000 Subject: [PATCH 0243/1018] MAINT: Bump pypa/cibuildwheel from 3.0.1 to 3.1.0 Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 3.0.1 to 3.1.0. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/95d2f3a92fbf80abe066b09418bbf128a8923df2...ffd835cef18fa11522f608fc0fa973b89f5ddc87) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-version: 3.1.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index ce0c2e803143..dbfb0ad48960 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -49,7 +49,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@95d2f3a92fbf80abe066b09418bbf128a8923df2 # v3.0.1 + - uses: pypa/cibuildwheel@ffd835cef18fa11522f608fc0fa973b89f5ddc87 # v3.1.0 env: CIBW_PLATFORM: pyodide diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index fd2047283a1f..ca41042e1ad3 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -174,7 +174,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@95d2f3a92fbf80abe066b09418bbf128a8923df2 # v3.0.1 + uses: pypa/cibuildwheel@ffd835cef18fa11522f608fc0fa973b89f5ddc87 # v3.1.0 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From 30310834a888aa10863dc57de44b547757dd4466 Mon Sep 17 00:00:00 2001 From: "Aleksandr A. Voyt" Date: Thu, 24 Jul 2025 15:18:57 +0300 Subject: [PATCH 0244/1018] BUG: Fix test_configtool_pkgconfigdir to resolve PKG_CONFIG_DIR symlink (#29434) Ensure `TestNumpyConfig.test_configtool_pkgconfigdir` resolves `PKG_CONFIG_DIR` using `.resolve()` to match the resolved path from `_configtool.py`. 
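For illustration, the pathlib behaviour the fix accounts for (hypothetical macOS path, where /var is a symlink to /private/var):

    import pathlib
    p = pathlib.Path("/var/tmp")
    p.resolve()        # PosixPath('/private/var/tmp'), the symlink is followed
    p == p.resolve()   # False: Path equality is purely lexical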
--- numpy/tests/test_configtool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/tests/test_configtool.py b/numpy/tests/test_configtool.py index a9c23b5cc007..c4e9a9551c0c 100644 --- a/numpy/tests/test_configtool.py +++ b/numpy/tests/test_configtool.py @@ -35,7 +35,7 @@ def test_configtool_cflags(self): def test_configtool_pkgconfigdir(self): stdout = self.check_numpyconfig('--pkgconfigdir') - assert pathlib.Path(stdout) == PKG_CONFIG_DIR + assert pathlib.Path(stdout) == PKG_CONFIG_DIR.resolve() @pytest.mark.skipif(not IS_INSTALLED, From 439f7632ab2a828d70a30d16d3104f8c9f5c1f13 Mon Sep 17 00:00:00 2001 From: kostayScr <11485271+kostayScr@users.noreply.github.com> Date: Thu, 24 Jul 2025 17:59:28 +0300 Subject: [PATCH 0245/1018] BUG: fix datetime/timedelta hash memory leak (#29411) * BUG: fix datetime/timedelta hash memory leak * get metadata directly from scalar object Co-authored-by: Nathan Goldbaum * BUG: remove unnecessary Py_DECREF Co-authored-by: Nathan Goldbaum * BUG: remove dtype variable declaration that is not used anymore --------- Co-authored-by: Nathan Goldbaum --- numpy/_core/src/multiarray/scalartypes.c.src | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 5724afda7e47..5e3a3ba71d3e 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -3911,7 +3911,6 @@ static npy_hash_t @lname@_arrtype_hash(PyObject *obj) { PyArray_DatetimeMetaData *meta; - PyArray_Descr *dtype; npy_@lname@ val = PyArrayScalar_VAL(obj, @name@); if (val == NPY_DATETIME_NAT) { @@ -3919,10 +3918,10 @@ static npy_hash_t return PyBaseObject_Type.tp_hash(obj); } - dtype = PyArray_DescrFromScalar(obj); - meta = get_datetime_metadata_from_dtype(dtype); + meta = &((PyDatetimeScalarObject *)obj)->obmeta; - return @lname@_hash(meta, val); + npy_hash_t res = @lname@_hash(meta, val); + return res; } /**end repeat**/ From a4906a744754260e2ad166c14ca1b9ff8926cb6c Mon Sep 17 00:00:00 2001 From: Zebreus Date: Thu, 24 Jul 2025 17:32:17 +0200 Subject: [PATCH 0246/1018] BLD: allow targeting webassembly without emscripten --- numpy/_core/src/multiarray/lowlevel_strided_loops.c.src | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src index 0c4eb3dd9a8d..0a69a08e678e 100644 --- a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src +++ b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src @@ -850,7 +850,7 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * // Enable auto-vectorization for floating point casts with clang #if @is_native_half1@ || @is_float1@ || @is_double1@ #if @is_native_half2@ || @is_float2@ || @is_double2@ - #if defined(__clang__) && !defined(__EMSCRIPTEN__) + #if defined(__clang__) && !defined(__EMSCRIPTEN__) && !defined(__wasm__) #if __clang_major__ >= 12 _Pragma("clang fp exceptions(ignore)") #endif @@ -965,7 +965,7 @@ static GCC_CAST_OPT_LEVEL int #if @is_native_half1@ || @is_float1@ || @is_double1@ #if @is_native_half2@ || @is_float2@ || @is_double2@ - #if defined(__clang__) && !defined(__EMSCRIPTEN__) + #if defined(__clang__) && !defined(__EMSCRIPTEN__) && !defined(__wasm__) #if __clang_major__ >= 12 _Pragma("clang fp exceptions(strict)") #endif From 9812a3f701abc1030f93037a1165b2c0a9c8d101 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli 
<33491632+MarcoGorelli@users.noreply.github.com>
Date: Thu, 24 Jul 2025 22:30:57 +0100
Subject: [PATCH 0247/1018] DOC: Correct more ndarray defaults (#29402)

---
 numpy/_core/_add_newdocs.py | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py
index 90d33d4b810a..ed8cf50ee360 100644
--- a/numpy/_core/_add_newdocs.py
+++ b/numpy/_core/_add_newdocs.py
@@ -3080,7 +3080,7 @@

 add_newdoc('numpy._core.multiarray', 'ndarray', ('all',
     """
-    a.all(axis=None, out=None, keepdims=False, *, where=True)
+    a.all(axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue)

     Returns True if all elements evaluate to True.

@@ -3095,7 +3095,7 @@

 add_newdoc('numpy._core.multiarray', 'ndarray', ('any',
     """
-    a.any(axis=None, out=None, keepdims=False, *, where=True)
+    a.any(axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue)

     Returns True if any of the elements of `a` evaluate to True.

@@ -3303,7 +3303,7 @@

 add_newdoc('numpy._core.multiarray', 'ndarray', ('clip',
     """
-    a.clip(min=None, max=None, out=None, **kwargs)
+    a.clip(min=np._NoValue, max=np._NoValue, out=None, **kwargs)

     Return an array whose values are limited to ``[min, max]``.
     One of max or min must be given.

@@ -3708,7 +3708,7 @@

 add_newdoc('numpy._core.multiarray', 'ndarray', ('max',
     """
-    a.max(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
+    a.max(axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, where=np._NoValue)

     Return the maximum along a given axis.

@@ -3723,7 +3723,7 @@

 add_newdoc('numpy._core.multiarray', 'ndarray', ('mean',
     """
-    a.mean(axis=None, dtype=None, out=None, keepdims=False, *, where=True)
+    a.mean(axis=None, dtype=None, out=None, keepdims=np._NoValue, *, where=np._NoValue)

     Returns the average of the array elements along given axis.

@@ -3738,7 +3738,7 @@

 add_newdoc('numpy._core.multiarray', 'ndarray', ('min',
     """
-    a.min(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
+    a.min(axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, where=np._NoValue)

     Return the minimum along a given axis.

@@ -3768,8 +3768,8 @@

 add_newdoc('numpy._core.multiarray', 'ndarray', ('prod',
     """
-    a.prod(axis=None, dtype=None, out=None, keepdims=False,
-           initial=1, where=True)
+    a.prod(axis=None, dtype=None, out=None, keepdims=np._NoValue,
+           initial=np._NoValue, where=np._NoValue)

     Return the product of the array elements over the given axis

@@ -4240,7 +4240,7 @@

 add_newdoc('numpy._core.multiarray', 'ndarray', ('std',
     """
-    a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True, mean=np._NoValue)
+    a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, where=np._NoValue, mean=np._NoValue)

     Returns the standard deviation of the array elements along given axis.

@@ -4255,7 +4255,7 @@

 add_newdoc('numpy._core.multiarray', 'ndarray', ('sum',
     """
-    a.sum(axis=None, dtype=None, out=None, keepdims=False, initial=0, where=True)
+    a.sum(axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._NoValue, where=np._NoValue)

     Return the sum of the array elements over the given axis.

@@ -4518,7 +4518,7 @@

 add_newdoc('numpy._core.multiarray', 'ndarray', ('var',
     """
-    a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True, mean=np._NoValue)
+    a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, where=np._NoValue, mean=np._NoValue)

     Returns the variance of the array elements, along given axis.
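The np._NoValue default documented above is a sentinel object; a minimal sketch of the pattern (illustrative only, not NumPy's actual implementation; plain_reduce is a hypothetical helper):

    _NoValue = object()   # unique marker meaning "argument was not passed"

    def amax(a, initial=_NoValue):
        # Forward `initial` only when the caller actually supplied one,
        # so explicit values such as 0 or None stay distinguishable.
        if initial is _NoValue:
            return plain_reduce(a)             # hypothetical helper
        return plain_reduce(a, start=initial)  # hypothetical helper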
From ec816336bcc042794418693508e678164afd3b25 Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Thu, 24 Jul 2025 16:34:43 -0600
Subject: [PATCH 0248/1018] MAINT: Update main after 2.3.2 release.

- Forward port 2.3.2-changelog.rst,
- Forward port 2.3.2-notes.rst,
- Update release.rst

[skip azp] [skip cirrus] [skip actions]
---
 doc/changelog/2.3.2-changelog.rst  | 38 ++++++++++++++++++++
 doc/source/release.rst             |  1 +
 doc/source/release/2.3.2-notes.rst | 56 ++++++++++++++++++++++++++++++
 3 files changed, 95 insertions(+)
 create mode 100644 doc/changelog/2.3.2-changelog.rst
 create mode 100644 doc/source/release/2.3.2-notes.rst

diff --git a/doc/changelog/2.3.2-changelog.rst b/doc/changelog/2.3.2-changelog.rst
new file mode 100644
index 000000000000..5c893a510ae7
--- /dev/null
+++ b/doc/changelog/2.3.2-changelog.rst
@@ -0,0 +1,38 @@
+
+Contributors
+============
+
+A total of 9 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* !DWesl
+* Charles Harris
+* Joren Hammudoglu
+* Maanas Arora
+* Marco Edward Gorelli
+* Matti Picus
+* Nathan Goldbaum
+* Sebastian Berg
+* kostayScr
+
+
+Pull requests merged
+====================
+
+A total of 16 pull requests were merged for this release.
+
+* `#29256 <https://github.com/numpy/numpy/pull/29256>`__: MAINT: Prepare 2.3.x for further development
+* `#29283 <https://github.com/numpy/numpy/pull/29283>`__: TYP: Work around a mypy issue with bool arrays (#29248)
+* `#29284 <https://github.com/numpy/numpy/pull/29284>`__: BUG: fix fencepost error in StringDType internals
+* `#29287 <https://github.com/numpy/numpy/pull/29287>`__: BUG: handle case in mapiter where descriptors might get replaced...
+* `#29350 <https://github.com/numpy/numpy/pull/29350>`__: BUG: Fix shape error path in array-interface
+* `#29412 <https://github.com/numpy/numpy/pull/29412>`__: BUG: Allow reading non-npy files in npz and add test
+* `#29413 <https://github.com/numpy/numpy/pull/29413>`__: TST: Avoid uninitialized values in test (#29341)
+* `#29414 <https://github.com/numpy/numpy/pull/29414>`__: BUG: Fix reference leakage for output arrays in reduction functions
+* `#29415 <https://github.com/numpy/numpy/pull/29415>`__: BUG: fix casting issue in center, ljust, rjust, and zfill (#29369)
+* `#29416 <https://github.com/numpy/numpy/pull/29416>`__: TYP: Fix overloads in ``np.char.array`` and ``np.char.asarray``...
+* `#29417 <https://github.com/numpy/numpy/pull/29417>`__: BUG: Any dtype should call ``square`` on ``arr \*\* 2`` (#29392)
+* `#29424 <https://github.com/numpy/numpy/pull/29424>`__: MAINT: use a stable pypy release in CI
+* `#29425 <https://github.com/numpy/numpy/pull/29425>`__: MAINT: Support python 314rc1
+* `#29429 <https://github.com/numpy/numpy/pull/29429>`__: MAINT: Update highway to match main.
+* `#29430 <https://github.com/numpy/numpy/pull/29430>`__: BLD: use github to build macos-arm64 wheels with OpenBLAS and...
+* `#29437 <https://github.com/numpy/numpy/pull/29437>`__: BUG: fix datetime/timedelta hash memory leak (#29411)

diff --git a/doc/source/release.rst b/doc/source/release.rst
index 59e6dd07b002..3af644a57562 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -6,6 +6,7 @@ Release notes
    :maxdepth: 2

    2.4.0
+   2.3.2
    2.3.1
    2.3.0
    2.2.6

diff --git a/doc/source/release/2.3.2-notes.rst b/doc/source/release/2.3.2-notes.rst
new file mode 100644
index 000000000000..2acc400c89fe
--- /dev/null
+++ b/doc/source/release/2.3.2-notes.rst
@@ -0,0 +1,56 @@
+.. currentmodule:: numpy
+
+=========================
+NumPy 2.3.2 Release Notes
+=========================
+
+The NumPy 2.3.2 release is a patch release with a number of bug fixes and
+maintenance updates. The highlights are:
+
+- Wheels for Python 3.14.0rc1
+- PyPy updated to the latest stable release
+- OpenBLAS updated to 0.3.30
+
+This release supports Python versions 3.11-3.14
+
+
+Contributors
+============
+
+A total of 9 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* !DWesl
+* Charles Harris
+* Joren Hammudoglu
+* Maanas Arora
+* Marco Edward Gorelli
+* Matti Picus
+* Nathan Goldbaum
+* Sebastian Berg
+* kostayScr
+
+
+
+Pull requests merged
+====================
+
+A total of 16 pull requests were merged for this release.
+
+* `#29256 <https://github.com/numpy/numpy/pull/29256>`__: MAINT: Prepare 2.3.x for further development
+* `#29283 <https://github.com/numpy/numpy/pull/29283>`__: TYP: Work around a mypy issue with bool arrays (#29248)
+* `#29284 <https://github.com/numpy/numpy/pull/29284>`__: BUG: fix fencepost error in StringDType internals
+* `#29287 <https://github.com/numpy/numpy/pull/29287>`__: BUG: handle case in mapiter where descriptors might get replaced...
+* `#29350 <https://github.com/numpy/numpy/pull/29350>`__: BUG: Fix shape error path in array-interface
+* `#29412 <https://github.com/numpy/numpy/pull/29412>`__: BUG: Allow reading non-npy files in npz and add test
+* `#29413 <https://github.com/numpy/numpy/pull/29413>`__: TST: Avoid uninitialized values in test (#29341)
+* `#29414 <https://github.com/numpy/numpy/pull/29414>`__: BUG: Fix reference leakage for output arrays in reduction functions
+* `#29415 <https://github.com/numpy/numpy/pull/29415>`__: BUG: fix casting issue in center, ljust, rjust, and zfill (#29369)
+* `#29416 <https://github.com/numpy/numpy/pull/29416>`__: TYP: Fix overloads in ``np.char.array`` and ``np.char.asarray``...
+* `#29417 <https://github.com/numpy/numpy/pull/29417>`__: BUG: Any dtype should call ``square`` on ``arr \*\* 2`` (#29392)
+* `#29424 <https://github.com/numpy/numpy/pull/29424>`__: MAINT: use a stable pypy release in CI
+* `#29425 <https://github.com/numpy/numpy/pull/29425>`__: MAINT: Support python 314rc1
+* `#29429 <https://github.com/numpy/numpy/pull/29429>`__: MAINT: Update highway to match main.
+* `#29430 <https://github.com/numpy/numpy/pull/29430>`__: BLD: use github to build macos-arm64 wheels with OpenBLAS and...
+* `#29437 <https://github.com/numpy/numpy/pull/29437>`__: BUG: fix datetime/timedelta hash memory leak (#29411)
+
+

From 7c00e3ea28a556a9d6fcc69baac67d7896fbc0e1 Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Fri, 25 Jul 2025 08:02:18 -0600
Subject: [PATCH 0249/1018] MAINT: Add Python 3.14 to classifier.

[skip azp] [skip cirrus] [skip actions]
---
 pyproject.toml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pyproject.toml b/pyproject.toml
index b0e58705ebd1..f9cfeadee599 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -29,6 +29,7 @@ classifiers = [
     'Programming Language :: Python :: 3.11',
     'Programming Language :: Python :: 3.12',
     'Programming Language :: Python :: 3.13',
+    'Programming Language :: Python :: 3.14',
     'Programming Language :: Python :: 3 :: Only',
     'Programming Language :: Python :: Implementation :: CPython',
     'Topic :: Software Development',

From c1bd73c9066db667d7b9138e1d3cb0c8db413559 Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Fri, 25 Jul 2025 09:09:12 -0600
Subject: [PATCH 0250/1018] DOC: Update RELEASE_WALKTHROUGH.rst

Update the release documentation to reflect current practice.

[skip azp] [skip cirrus] [skip actions]
---
 doc/RELEASE_WALKTHROUGH.rst | 67 +++++++++++++++++--------------------
 1 file changed, 31 insertions(+), 36 deletions(-)

diff --git a/doc/RELEASE_WALKTHROUGH.rst b/doc/RELEASE_WALKTHROUGH.rst
index 6d2194b5c4e6..c80c7d0463eb 100644
--- a/doc/RELEASE_WALKTHROUGH.rst
+++ b/doc/RELEASE_WALKTHROUGH.rst
@@ -26,18 +26,16 @@ Prior to release
 Add/drop Python versions
 ------------------------

-When adding or dropping Python versions, three files need to be edited:
+When adding or dropping Python versions, two files need to be edited:

 - .github/workflows/wheels.yml  # for github cibuildwheel
-- tools/ci/cirrus_wheels.yml  # for cibuildwheel aarch64/arm64 builds
 - pyproject.toml  # for classifier and minimum version check.

 Make these changes in an ordinary PR against main and backport if necessary.
 Add ``[wheel build]`` at the end of the title line of the commit summary so
 that wheel builds will be run to test the changes. We currently release wheels
 for new Python versions after the first Python rc once manylinux and
-cibuildwheel support it.
For Python 3.11 we were able to release within a week -of the rc1 announcement. +cibuildwheel support it. Backport pull requests @@ -46,6 +44,7 @@ Backport pull requests Changes that have been marked for this release must be backported to the maintenance/2.1.x branch. + Update 2.1.0 milestones ----------------------- @@ -79,7 +78,8 @@ commit message might be something like:: Set the release version ----------------------- -Check the ``pyproject.toml`` file and set the release version if needed:: +Check the ``pyproject.toml`` file and set the release version and update the +classifier if needed:: $ gvim pyproject.toml @@ -89,7 +89,7 @@ Check the ``pavement.py`` and ``doc/source/release.rst`` files Check that the ``pavement.py`` file points to the correct release notes. It should have been updated after the last release, but if not, fix it now. Also make -sure that the notes have an entry in the ``release.rst`` file:: +sure that the release notes have an entry in the ``release.rst`` file:: $ gvim pavement.py doc/source/release.rst @@ -102,13 +102,12 @@ The changelog is generated using the changelog tool:: $ spin changelog $GITHUB v2.0.0..maintenance/2.1.x > doc/changelog/2.1.0-changelog.rst where ``GITHUB`` contains your GitHub access token. The text will need to be -checked for non-standard contributor names and dependabot entries removed. It -is also a good idea to remove any links that may be present in the PR titles -as they don't translate well to markdown, replace them with monospaced text. The -non-standard contributor names should be fixed by updating the ``.mailmap`` -file, which is a lot of work. It is best to make several trial runs before -reaching this point and ping the malefactors using a GitHub issue to get the -needed information. +checked for non-standard contributor names. It is also a good idea to remove +any links that may be present in the PR titles as they don't translate well to +markdown, replace them with monospaced text. The non-standard contributor names +should be fixed by updating the ``.mailmap`` file, which is a lot of work. It +is best to make several trial runs before reaching this point and ping the +malefactors using a GitHub issue to get the needed information. Finish the release notes @@ -170,26 +169,23 @@ If you need to delete the tag due to error:: --------------- Tagging the build at the beginning of this process will trigger a wheel build -via cibuildwheel and upload wheels and an sdist to the staging repo. The CI run -on github actions (for all x86-based and macOS arm64 wheels) takes about 1 1/4 -hours. The CI runs on cirrus (for aarch64 and M1) take less time. You can check -for uploaded files at the `staging repository`_, but note that it is not -closely synched with what you see of the running jobs. +via cibuildwheel and upload wheels and an sdist to the staging repo. All wheels +are currently built on GitHub actions and take about 1 1/4 hours to build. If you wish to manually trigger a wheel build, you can do so: -- On github actions -> `Wheel builder`_ there is a "Run workflow" button, click +- On GitHub actions -> `Wheel builder`_ there is a "Run workflow" button, click on it and choose the tag to build -- On Cirrus we don't currently have an easy way to manually trigger builds and - uploads. 
-If a wheel build fails for unrelated reasons, you can rerun it individually: +If some wheel builds fail for unrelated reasons, you can re-run them: + +- On GitHub actions select `Wheel builder`_ click on the task that contains + the build you want to re-run, it will have the tag as the branch. On the + upper right will be a re-run button, hit it and select "re-run failed" -- On github actions select `Wheel builder`_ click on the commit that contains - the build you want to rerun. On the left there is a list of wheel builds, - select the one you want to rerun and on the resulting page hit the - counterclockwise arrows button. -- On cirrus, log into cirrusci, look for the v2.1.0 tag and rerun the failed jobs. +If some wheels fail to upload to anaconda, you can select those builds in the +`Wheel builder`_ and manually download the build artifact. This is a temporary +workaround, but sometimes the quickest way to get a release out. .. _`staging repository`: https://anaconda.org/multibuild-wheels-staging/numpy/files .. _`Wheel builder`: https://github.com/numpy/numpy/actions/workflows/wheels.yml @@ -218,20 +214,19 @@ file is updated for continued development:: 5. Upload to PyPI ----------------- -Upload to PyPI using ``twine``. A recent version of ``twine`` of is needed -after recent PyPI changes, version ``3.4.1`` was used here:: +Upload to PyPI using ``twine``:: $ cd ../numpy $ twine upload release/installers/*.whl $ twine upload release/installers/*.gz # Upload last. -If one of the commands breaks in the middle, you may need to selectively upload -the remaining files because PyPI does not allow the same file to be uploaded -twice. The source file should be uploaded last to avoid synchronization -problems that might occur if pip users access the files while this is in -process, causing pip to build from source rather than downloading a binary -wheel. PyPI only allows a single source distribution, here we have -chosen the zip archive. +The source file should be uploaded last to avoid synchronization problems that +might occur if pip users access the files while this is in process, causing pip +to build from source rather than downloading a binary wheel. PyPI only allows a +single source distribution, here we have chosen the gz version. If the +uploading breaks because of network related reasons, you can try re-running the +commands, possibly after a fix. Twine will now handle the error generated by +PyPI when the same file is uploaded twice. 6. Upload files to GitHub From 3203001d84ad387253697f1ba4af805093dc338e Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 27 Jul 2025 15:39:25 -0600 Subject: [PATCH 0251/1018] MAINT: Replace pavement.py Replace `pavement.py` functionality with a `write_release.py` script. This gets rid of a paver dependency and simplifies the release process. 
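Typical invocation once the wheels and sdist have been downloaded into release/installers, matching the docstring of the new script:

    $ python tools/write_release.py 2.1.0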
- Add tools/write_release.py - Remove pavement.py [skip azp] [skip cirrus] [skip actions] --- doc/RELEASE_WALKTHROUGH.rst | 12 +- pavement.py | 184 -------------------------- requirements/release_requirements.txt | 3 - tools/write_release.py | 121 +++++++++++++++++ 4 files changed, 126 insertions(+), 194 deletions(-) delete mode 100644 pavement.py create mode 100644 tools/write_release.py diff --git a/doc/RELEASE_WALKTHROUGH.rst b/doc/RELEASE_WALKTHROUGH.rst index c80c7d0463eb..a3787ae95ee5 100644 --- a/doc/RELEASE_WALKTHROUGH.rst +++ b/doc/RELEASE_WALKTHROUGH.rst @@ -84,14 +84,12 @@ classifier if needed:: $ gvim pyproject.toml -Check the ``pavement.py`` and ``doc/source/release.rst`` files --------------------------------------------------------------- +Check the ``doc/source/release.rst`` file +----------------------------------------- -Check that the ``pavement.py`` file points to the correct release notes. It should -have been updated after the last release, but if not, fix it now. Also make -sure that the release notes have an entry in the ``release.rst`` file:: +make sure that the release notes have an entry in the ``release.rst`` file:: - $ gvim pavement.py doc/source/release.rst + $ gvim doc/source/release.rst Generate the changelog @@ -208,7 +206,7 @@ Anaconda staging directory using the ``tools/download-wheels.py`` script:: This needs to be done after all installers are downloaded, but before the pavement file is updated for continued development:: - $ paver write_release + $ python write_release 2.1.0 5. Upload to PyPI diff --git a/pavement.py b/pavement.py deleted file mode 100644 index 369b8703b0ba..000000000000 --- a/pavement.py +++ /dev/null @@ -1,184 +0,0 @@ -r""" -This paver file is intended to help with the release process as much as -possible. It relies on virtualenv to generate 'bootstrap' environments as -independent from the user system as possible (e.g. to make sure the sphinx doc -is built against the built numpy, not an installed one). - -Building changelog + notes -========================== - -Assumes you have git and the binaries/tarballs in installers/:: - - paver write_release - paver write_note - -This automatically put the checksum into README.rst, and writes the Changelog. - -TODO -==== - - the script is messy, lots of global variables - - make it more easily customizable (through command line args) - - missing targets: install & test, sdist test, debian packaging - - fix bdist_mpkg: we build the same source twice -> how to make sure we use - the same underlying python for egg install in venv and for bdist_mpkg -""" -import hashlib -import os -import textwrap - -# The paver package needs to be installed to run tasks -import paver -from paver.easy import Bunch, options, sh, task - -#----------------------------------- -# Things to be changed for a release -#----------------------------------- - -# Path to the release notes -RELEASE_NOTES = 'doc/source/release/2.4.0-notes.rst' - - -#------------------------------------------------------- -# Hardcoded build/install dirs, virtualenv options, etc. -#------------------------------------------------------- - -# Where to put the release installers -options(installers=Bunch(releasedir="release", - installersdir=os.path.join("release", "installers")),) - - -#------------- -# README stuff -#------------- - -def _compute_hash(idirs, hashfunc): - """Hash files using given hashfunc. - - Parameters - ---------- - idirs : directory path - Directory containing files to be hashed. 
- hashfunc : hash function - Function to be used to hash the files. - - """ - released = paver.path.path(idirs).listdir() - checksums = [] - for fpath in sorted(released): - with open(fpath, 'rb') as fin: - fhash = hashfunc(fin.read()) - checksums.append( - f'{fhash.hexdigest()} {os.path.basename(fpath)}') - return checksums - - -def compute_md5(idirs): - """Compute md5 hash of files in idirs. - - Parameters - ---------- - idirs : directory path - Directory containing files to be hashed. - - """ - return _compute_hash(idirs, hashlib.md5) - - -def compute_sha256(idirs): - """Compute sha256 hash of files in idirs. - - Parameters - ---------- - idirs : directory path - Directory containing files to be hashed. - - """ - # better checksum so gpg signed README.rst containing the sums can be used - # to verify the binaries instead of signing all binaries - return _compute_hash(idirs, hashlib.sha256) - - -def write_release_task(options, filename='README'): - """Append hashes of release files to release notes. - - This appends file hashes to the release notes and creates - four README files of the result in various formats: - - - README.rst - - README.rst.gpg - - README.md - - README.md.gpg - - The md file are created using `pandoc` so that the links are - properly updated. The gpg files are kept separate, so that - the unsigned files may be edited before signing if needed. - - Parameters - ---------- - options : - Set by ``task`` decorator. - filename : str - Filename of the modified notes. The file is written - in the release directory. - - """ - idirs = options.installers.installersdir - notes = paver.path.path(RELEASE_NOTES) - rst_readme = paver.path.path(filename + '.rst') - md_readme = paver.path.path(filename + '.md') - - # append hashes - with open(rst_readme, 'w') as freadme: - with open(notes) as fnotes: - freadme.write(fnotes.read()) - - freadme.writelines(textwrap.dedent( - """ - Checksums - ========= - - MD5 - --- - :: - - """)) - freadme.writelines([f' {c}\n' for c in compute_md5(idirs)]) - - freadme.writelines(textwrap.dedent( - """ - SHA256 - ------ - :: - - """)) - freadme.writelines([f' {c}\n' for c in compute_sha256(idirs)]) - - # generate md file using pandoc before signing - sh(f"pandoc -s -o {md_readme} {rst_readme}") - - # Sign files - if hasattr(options, 'gpg_key'): - cmd = f'gpg --clearsign --armor --default_key {options.gpg_key}' - else: - cmd = 'gpg --clearsign --armor' - - sh(cmd + f' --output {rst_readme}.gpg {rst_readme}') - sh(cmd + f' --output {md_readme}.gpg {md_readme}') - - -@task -def write_release(options): - """Write the README files. - - Two README files are generated from the release notes, one in ``rst`` - markup for the general release, the other in ``md`` markup for the github - release notes. - - Parameters - ---------- - options : - Set by ``task`` decorator. 
- - """ - rdir = options.installers.releasedir - write_release_task(options, os.path.join(rdir, 'README')) diff --git a/requirements/release_requirements.txt b/requirements/release_requirements.txt index d23e69fa1fa8..eaa092560d2d 100644 --- a/requirements/release_requirements.txt +++ b/requirements/release_requirements.txt @@ -12,8 +12,5 @@ gitpython>=3.1.30 # uploading wheels twine -# building and notes -Paver - # uploading release documentation packaging diff --git a/tools/write_release.py b/tools/write_release.py new file mode 100644 index 000000000000..f86113acde8e --- /dev/null +++ b/tools/write_release.py @@ -0,0 +1,121 @@ +""" +Standalone script for writing release doc:: + + python tools/write_release + +Example:: + + python tools/write_release.py 1.7.0 + +Needs to be run from the root of the repository and assumes +that the output is in `release` and wheels and sdist in +`release/installers`. + +Translation from rst to md markdown requires Pandoc, you +will need to rely on your distribution to provide that. + +""" +import os +import subprocess +import textwrap +import argparse +from hashlib import md5 +from hashlib import sha256 +from pathlib import Path + +# Name of the notes directory +NOTES_DIR = "doc/source/release" +# Name of the output directory +OUTPUT_DIR = "release" +# Output base name, `.rst` or `.md` will be appended +OUTPUT_FILE = "README" + +def compute_hash(wheel_dir, hash_func): + """ + Compute hashes of files in wheel_dir. + + Parameters + ---------- + wheel_dir: str + Path to wheel directory from repo root. + hash_func: function + Hash function, i.e., md5, sha256, etc. + + Returns + ------- + list_of_strings: list + List of of strings. Each string is the hash + followed by the file basename. + + """ + released = os.listdir(wheel_dir) + checksums = [] + for fn in sorted(released): + fn_path = Path(f"{wheel_dir}/{fn}") + with open(fn_path, 'rb') as f: + m = hash_func(f.read()) + checksums.append(f"{m.hexdigest()} {fn}") + return checksums + + +def write_release(version): + """ + Copy the -notes.rst file to the OUTPUT_DIR, append + the md5 and sha256 hashes of the wheels and sdist, and produce + README.rst and README.md files. + + Parameters + ---------- + version: str + Release version, e.g., '2.3.2', etc. + + Returns + ------- + None. + + """ + notes = Path(f"{NOTES_DIR}/{version}-notes.rst") + wheel_dir = Path(f"{OUTPUT_DIR}/installers") + target_md = Path(f"{OUTPUT_DIR}/{OUTPUT_FILE}.md") + target_rst = Path(f"{OUTPUT_DIR}/{OUTPUT_FILE}.rst") + + os.system(f"cp {notes} {target_rst}") + + with open(str(target_rst), 'a') as f: + f.writelines(textwrap.dedent( + """ + Checksums + ========= + + MD5 + --- + :: + + """)) + f.writelines([f' {c}\n' for c in compute_hash(wheel_dir, md5)]) + + f.writelines(textwrap.dedent( + """ + SHA256 + ------ + :: + + """)) + f.writelines([f' {c}\n' for c in compute_hash(wheel_dir, sha256)]) + + # translate README.rst to md for posting on GitHub + rst_to_md = subprocess.Popen( + ["pandoc", "-s", "-o", f"{target_md}", f"{target_rst}", "--wrap=preserve"], + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + output, error = rst_to_md.communicate() + if not rst_to_md.returncode == 0: + raise RuntimeError(f"{error} failed") + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument( + "version", + help="NumPy version of the release, e.g. 
2.3.2, etc.") + + args = parser.parse_args() + write_release(args.version) From d9acbceec552ecf6e737821b3c2385cfef590b77 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 28 Jul 2025 11:33:55 -0600 Subject: [PATCH 0252/1018] MAINT: Update tools/write_release.py Co-authored-by: Joren Hammudoglu --- tools/write_release.py | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/tools/write_release.py b/tools/write_release.py index f86113acde8e..5e976fe564f4 100644 --- a/tools/write_release.py +++ b/tools/write_release.py @@ -52,8 +52,7 @@ def compute_hash(wheel_dir, hash_func): checksums = [] for fn in sorted(released): fn_path = Path(f"{wheel_dir}/{fn}") - with open(fn_path, 'rb') as f: - m = hash_func(f.read()) + m = hash_func(fn_path.read_bytes()) checksums.append(f"{m.hexdigest()} {fn}") return checksums @@ -74,10 +73,10 @@ def write_release(version): None. """ - notes = Path(f"{NOTES_DIR}/{version}-notes.rst") - wheel_dir = Path(f"{OUTPUT_DIR}/installers") - target_md = Path(f"{OUTPUT_DIR}/{OUTPUT_FILE}.md") - target_rst = Path(f"{OUTPUT_DIR}/{OUTPUT_FILE}.rst") + notes = Path(NOTES_DIR) / f"{version}-notes.rst" + wheel_dir = Path(OUTPUT_DIR) / "installers" + target_md = Path(OUTPUT_DIR) / f"{OUTPUT_FILE}.md" + target_rst = Path(OUTPUT_DIR) / f"{OUTPUT_FILE}.rst" os.system(f"cp {notes} {target_rst}") @@ -104,12 +103,10 @@ def write_release(version): f.writelines([f' {c}\n' for c in compute_hash(wheel_dir, sha256)]) # translate README.rst to md for posting on GitHub - rst_to_md = subprocess.Popen( - ["pandoc", "-s", "-o", f"{target_md}", f"{target_rst}", "--wrap=preserve"], - stdout=subprocess.PIPE, stderr=subprocess.PIPE) - output, error = rst_to_md.communicate() - if not rst_to_md.returncode == 0: - raise RuntimeError(f"{error} failed") + subprocess.run( + ["pandoc", "-s", "-o", str(target_md), str(target_rst), "--wrap=preserve"], + check=True, + ) if __name__ == '__main__': parser = argparse.ArgumentParser() From 6bab2a5606094b47f96d9047caa67565793561ab Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 28 Jul 2025 11:57:44 -0600 Subject: [PATCH 0253/1018] MAINT: Fix linting errors. --- tools/write_release.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tools/write_release.py b/tools/write_release.py index 5e976fe564f4..7662eb7b1288 100644 --- a/tools/write_release.py +++ b/tools/write_release.py @@ -11,16 +11,15 @@ that the output is in `release` and wheels and sdist in `release/installers`. -Translation from rst to md markdown requires Pandoc, you +Translation from rst to md markdown requires Pandoc, you will need to rely on your distribution to provide that. 
""" +import argparse import os import subprocess import textwrap -import argparse -from hashlib import md5 -from hashlib import sha256 +from hashlib import md5, sha256 from pathlib import Path # Name of the notes directory @@ -51,7 +50,7 @@ def compute_hash(wheel_dir, hash_func): released = os.listdir(wheel_dir) checksums = [] for fn in sorted(released): - fn_path = Path(f"{wheel_dir}/{fn}") + fn_path = Path(f"{wheel_dir}/{fn}") m = hash_func(fn_path.read_bytes()) checksums.append(f"{m.hexdigest()} {fn}") return checksums @@ -77,7 +76,7 @@ def write_release(version): wheel_dir = Path(OUTPUT_DIR) / "installers" target_md = Path(OUTPUT_DIR) / f"{OUTPUT_FILE}.md" target_rst = Path(OUTPUT_DIR) / f"{OUTPUT_FILE}.rst" - + os.system(f"cp {notes} {target_rst}") with open(str(target_rst), 'a') as f: @@ -107,7 +106,8 @@ def write_release(version): ["pandoc", "-s", "-o", str(target_md), str(target_rst), "--wrap=preserve"], check=True, ) - + + if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( From 5591f1109b45a482bdaea3d3e44a80212a188edd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20G=C3=B3rny?= Date: Mon, 28 Jul 2025 21:33:18 +0200 Subject: [PATCH 0254/1018] BLD: provide explicit control over cpu-baseline detection MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a new `cpu-baseline-detect` feature flag that can be used to more precisely control the use of CPU baseline detection. This can be used by packages to more precisely control used SIMD code independently of compiler flags specified. The option follows typical feature semantics -- with `auto` preserving the current behavior of enabling when relevant compiler flags are found, `enabled` forcing it on based on the implicit compiler defaults, and `disabled` forcing it off. 
Signed-off-by: Michał Górny --- meson.options | 2 ++ meson_cpu/meson.build | 22 ++++++++++++---------- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/meson.options b/meson.options index b09992fe9b91..f17f9901664a 100644 --- a/meson.options +++ b/meson.options @@ -28,6 +28,8 @@ option('disable-optimization', type: 'boolean', value: false, description: 'Disable CPU optimized code (dispatch,simd,unroll...)') option('cpu-baseline', type: 'string', value: 'min', description: 'Minimal set of required CPU features') +option('cpu-baseline-detect', type: 'feature', value: 'auto', + description: 'Detect CPU baseline from the compiler flags') option('cpu-dispatch', type: 'string', value: 'max -xop -fma4', description: 'Dispatched set of additional CPU features') option('test-simd', type: 'array', diff --git a/meson_cpu/meson.build b/meson_cpu/meson.build index e5b6d0fbe7be..1c4c6eecb308 100644 --- a/meson_cpu/meson.build +++ b/meson_cpu/meson.build @@ -46,20 +46,22 @@ if get_option('disable-optimization') CPU_CONF_BASELINE = 'none' CPU_CONF_DISPATCH = 'none' else - baseline_detect = false + baseline_detect = get_option('cpu-baseline-detect').enabled() c_args = get_option('c_args') - foreach arg : c_args - foreach carch : ['-march', '-mcpu', '-xhost', '/QxHost'] - if arg.contains(carch) - message('Appending option "detect" to "cpu-baseline" due to detecting global architecture c_arg "' + arg + '"') - baseline_detect = true + if get_option('cpu-baseline-detect').auto() + foreach arg : c_args + foreach carch : ['-march', '-mcpu', '-xhost', '/QxHost'] + if arg.contains(carch) + message('Appending option "detect" to "cpu-baseline" due to detecting global architecture c_arg "' + arg + '"') + baseline_detect = true + break + endif + endforeach + if baseline_detect break endif endforeach - if baseline_detect - break - endif - endforeach + endif # The required minimal set of required CPU features. CPU_CONF_BASELINE = get_option('cpu-baseline') if baseline_detect From bb7dd1ba604ba6c1766c6ca72fc21d378609f485 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Tue, 29 Jul 2025 11:57:28 +0100 Subject: [PATCH 0255/1018] TYP: Add test which hits `np.array` constructor overload with `object: _SupportsArray[_ArrayT]` (#29428) Co-authored-by: Joren Hammudoglu --- numpy/typing/tests/data/reveal/array_constructors.pyi | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 766547e54b60..99073ac4543a 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -1,7 +1,7 @@ import sys from collections import deque from pathlib import Path -from typing import Any, TypeVar, assert_type +from typing import Any, Generic, TypeVar, assert_type import numpy as np import numpy.typing as npt @@ -10,12 +10,16 @@ _ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) class SubClass(npt.NDArray[_ScalarT_co]): ... +class IntoSubClass(Generic[_ScalarT_co]): + def __array__(self) -> SubClass[_ScalarT_co]: ... 
+ i8: np.int64 A: npt.NDArray[np.float64] B: SubClass[np.float64] C: list[int] D: SubClass[np.float64 | np.int64] +E: IntoSubClass[np.float64 | np.int64] mixed_shape: tuple[int, np.int64] @@ -39,6 +43,7 @@ assert_type(np.array(B, subok=True), SubClass[np.float64]) assert_type(np.array(B, subok=True, ndmin=0), SubClass[np.float64]) assert_type(np.array(B, subok=True, ndmin=1), SubClass[np.float64]) assert_type(np.array(D), npt.NDArray[np.float64 | np.int64]) +assert_type(np.array(E, subok=True), SubClass[np.float64 | np.int64]) # https://github.com/numpy/numpy/issues/29245 assert_type(np.array([], dtype=np.bool), npt.NDArray[np.bool]) From c6471c59bbf1cb95b50eae91773ccc2f5fe1436e Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Tue, 29 Jul 2025 15:44:00 +0100 Subject: [PATCH 0256/1018] TYP: Type ``MaskedArray.flat`` (#29466) Co-authored-by: Joren Hammudoglu --- numpy/ma/core.pyi | 4 ++-- numpy/typing/tests/data/pass/ma.py | 2 ++ numpy/typing/tests/data/reveal/ma.pyi | 2 ++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 0a039831a7d4..09edffc889a5 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -528,9 +528,9 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def baseclass(self) -> type[NDArray[Any]]: ... data: Any @property - def flat(self): ... + def flat(self) -> MaskedIterator: ... @flat.setter - def flat(self, value): ... + def flat(self, value: ArrayLike, /) -> None: ... @property def fill_value(self): ... @fill_value.setter diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py index b9be2b2e4384..793f7097ee81 100644 --- a/numpy/typing/tests/data/pass/ma.py +++ b/numpy/typing/tests/data/pass/ma.py @@ -34,6 +34,8 @@ MAR_f.mask = AR_b MAR_f.mask = np.False_ +MAR_f.flat = [9] + # Inplace addition MAR_b += AR_LIKE_b diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index f2b7aa215c7a..4f9d971759e6 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -431,6 +431,8 @@ assert_type(MAR_f8.argsort(), MaskedArray[np.intp]) assert_type(MAR_f8.argsort(axis=0, kind='heap', order=('x', 'y')), MaskedArray[np.intp]) assert_type(MAR_f8.argsort(endwith=True, fill_value=1.5, stable=False), MaskedArray[np.intp]) +assert_type(MAR_f8.flat, np.ma.core.MaskedIterator) + def invalid_resize() -> None: assert_type(MAR_f8.resize((1,1)), NoReturn) # type: ignore[arg-type] From bb31564192e4789b94bd7cc4c703b2beb8ce0806 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 29 Jul 2025 12:24:57 -0600 Subject: [PATCH 0257/1018] MAINT: Bump pypa/cibuildwheel from 3.1.0 to 3.1.2 (#29471) Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 3.1.0 to 3.1.2. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/ffd835cef18fa11522f608fc0fa973b89f5ddc87...9e4e50bd76b3190f55304387e333f6234823ea9b) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-version: 3.1.2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index dbfb0ad48960..5c1b35653d68 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -49,7 +49,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@ffd835cef18fa11522f608fc0fa973b89f5ddc87 # v3.1.0 + - uses: pypa/cibuildwheel@9e4e50bd76b3190f55304387e333f6234823ea9b # v3.1.2 env: CIBW_PLATFORM: pyodide diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index ca41042e1ad3..3fd37bb60ee5 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -174,7 +174,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@ffd835cef18fa11522f608fc0fa973b89f5ddc87 # v3.1.0 + uses: pypa/cibuildwheel@9e4e50bd76b3190f55304387e333f6234823ea9b # v3.1.2 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From d621a3162e7a14e986e2dee29bbf9ff05f7b728d Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 30 Jul 2025 00:49:32 +0100 Subject: [PATCH 0258/1018] TYP: Type ``MaskedArray.recordmask`` (#29467) --- numpy/ma/core.pyi | 6 +++--- numpy/typing/tests/data/fail/ma.pyi | 5 +++++ numpy/typing/tests/data/reveal/ma.pyi | 3 +++ 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 09edffc889a5..764138048e1b 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -509,13 +509,13 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def shape(self: MaskedArray[_ShapeT, Any], shape: _ShapeT, /) -> None: ... def __setmask__(self, mask: _ArrayLikeBool_co, copy: bool = False) -> None: ... @property - def mask(self) -> NDArray[MaskType] | MaskType: ... + def mask(self) -> np.ndarray[_ShapeT_co, dtype[MaskType]] | MaskType: ... @mask.setter def mask(self, value: _ArrayLikeBool_co, /) -> None: ... @property - def recordmask(self): ... + def recordmask(self) -> np.ndarray[_ShapeT_co, dtype[MaskType]] | MaskType: ... @recordmask.setter - def recordmask(self, mask): ... + def recordmask(self, mask: Never, /) -> NoReturn: ... def harden_mask(self) -> Self: ... def soften_mask(self) -> Self: ... @property diff --git a/numpy/typing/tests/data/fail/ma.pyi b/numpy/typing/tests/data/fail/ma.pyi index 160d30f885e3..9354084fead0 100644 --- a/numpy/typing/tests/data/fail/ma.pyi +++ b/numpy/typing/tests/data/fail/ma.pyi @@ -17,6 +17,11 @@ AR_b: npt.NDArray[np.bool] MAR_1d_f8.shape = (3, 1) # type: ignore[assignment] MAR_1d_f8.dtype = np.bool # type: ignore[assignment] +def invalid_recordmask_setter() -> None: + # We make an inner function for this one to avoid the + # `NoReturn` causing an early exit for type checkers. 
+ MAR_1d_f8.recordmask = [True] # type: ignore[assignment] + np.ma.min(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] np.ma.min(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] np.ma.min(MAR_1d_f8, out=1.0) # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 4f9d971759e6..d708eeed45e2 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -374,6 +374,9 @@ assert_type(MAR_b.shrink_mask(), MaskedArray[np.bool_]) assert_type(MAR_i8.hardmask, bool) assert_type(MAR_i8.sharedmask, bool) +assert_type(MAR_i8.recordmask, np.ma.MaskType | NDArray[np.ma.MaskType]) +assert_type(MAR_2d_f4.recordmask, np.ma.MaskType | np.ndarray[tuple[int, int], np.dtype[np.ma.MaskType]]) + assert_type(MAR_2d_f4.anom(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) assert_type(MAR_2d_f4.anom(axis=0, dtype=np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype]) assert_type(MAR_2d_f4.anom(0, np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype]) From e4aee500a2bf497457efd825b7d578257c0e04f5 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 30 Jul 2025 13:30:15 +0100 Subject: [PATCH 0259/1018] TYP: Type ``MaskedArray.__new__`` (#29457) Co-authored-by: Joren Hammudoglu --- numpy/ma/core.pyi | 63 +++++++++++++- numpy/typing/tests/data/fail/ma.pyi | 5 ++ numpy/typing/tests/data/reveal/ma.pyi | 114 +++++++++++++++----------- 3 files changed, 132 insertions(+), 50 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 764138048e1b..824ceef18822 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -455,7 +455,68 @@ class MaskedIterator: class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): __array_priority__: Any - def __new__(cls, data=..., mask=..., dtype=..., copy=..., subok=..., ndmin=..., fill_value=..., keep_mask=..., hard_mask=..., shrink=..., order=...): ... + @overload + def __new__( + cls, + data: _ArrayLike[_ScalarT], + mask: _ArrayLikeBool_co = nomask, + dtype: None = None, + copy: bool = False, + subok: bool = True, + ndmin: int = 0, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool | None = None, + shrink: bool = True, + order: _OrderKACF | None = None, + ) -> _MaskedArray[_ScalarT]: ... + @overload + def __new__( + cls, + data: object, + mask: _ArrayLikeBool_co, + dtype: _DTypeLike[_ScalarT], + copy: bool = False, + subok: bool = True, + ndmin: int = 0, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool | None = None, + shrink: bool = True, + order: _OrderKACF | None = None, + ) -> _MaskedArray[_ScalarT]: ... + @overload + def __new__( + cls, + data: object, + mask: _ArrayLikeBool_co = nomask, + *, + dtype: _DTypeLike[_ScalarT], + copy: bool = False, + subok: bool = True, + ndmin: int = 0, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool | None = None, + shrink: bool = True, + order: _OrderKACF | None = None, + ) -> _MaskedArray[_ScalarT]: ... + @overload + def __new__( + cls, + data: object, + mask: _ArrayLikeBool_co = nomask, + dtype: DTypeLike | None = None, + copy: bool = False, + subok: bool = True, + ndmin: int = 0, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool | None = None, + shrink: bool = True, + order: _OrderKACF | None = None, + ) -> _MaskedArray[Any]: ... + def __array_finalize__(self, obj): ... 
def __array_wrap__(self, obj, context=..., return_scalar=...): ... diff --git a/numpy/typing/tests/data/fail/ma.pyi b/numpy/typing/tests/data/fail/ma.pyi index 9354084fead0..ed973462e2d4 100644 --- a/numpy/typing/tests/data/fail/ma.pyi +++ b/numpy/typing/tests/data/fail/ma.pyi @@ -148,3 +148,8 @@ MAR_td64 **= 2 # type: ignore[misc] MAR_1d_f8.swapaxes(axis1=1, axis2=0) # type: ignore[call-arg] MAR_1d_f8.argsort(axis=(1,0)) # type: ignore[arg-type] + +np.ma.MaskedArray(np.array([1,2,3]), keep_mask='yes') # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1,2,3]), subok=None) # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1,2,3]), ndim=None) # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1,2,3]), order='Corinthian') # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index d708eeed45e2..3edd300c0b5a 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -1,14 +1,20 @@ -from typing import Any, Literal, TypeAlias, TypeVar, assert_type, NoReturn +from typing import Any, Generic, Literal, NoReturn, TypeAlias, TypeVar, assert_type import numpy as np from numpy import dtype, generic from numpy._typing import NDArray, _AnyShape _ScalarT = TypeVar("_ScalarT", bound=generic) +_ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) MaskedArray: TypeAlias = np.ma.MaskedArray[_AnyShape, dtype[_ScalarT]] _Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] -class MaskedArraySubclass(MaskedArray[np.complex128]): ... +class MaskedArraySubclass(MaskedArray[_ScalarT_co]): ... + +class IntoMaskedArraySubClass(Generic[_ScalarT_co]): + def __array__(self) -> MaskedArraySubclass[_ScalarT_co]: ... + +MaskedArraySubclassC: TypeAlias = MaskedArraySubclass[np.complex128] AR_b: NDArray[np.bool] AR_f4: NDArray[np.float32] @@ -43,7 +49,8 @@ MAR_V: MaskedArray[np.void] MAR_floating: MaskedArray[np.floating] MAR_number: MaskedArray[np.number] -MAR_subclass: MaskedArraySubclass +MAR_subclass: MaskedArraySubclassC +MAR_into_subclass: IntoMaskedArraySubClass[np.float32] MAR_1d: np.ma.MaskedArray[tuple[int], np.dtype] MAR_2d_f4: np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]] @@ -66,9 +73,9 @@ assert_type(np.ma.min(MAR_b, axis=0), Any) assert_type(np.ma.min(MAR_f4, axis=0), Any) assert_type(np.ma.min(MAR_b, keepdims=True), Any) assert_type(np.ma.min(MAR_f4, keepdims=True), Any) -assert_type(np.ma.min(MAR_f4, out=MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.min(MAR_f4, 0, MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.min(MAR_f4, None, MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.min(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.min(MAR_f4, 0, MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.min(MAR_f4, None, MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.min(), np.bool) assert_type(MAR_f4.min(), np.float32) @@ -76,9 +83,9 @@ assert_type(MAR_b.min(axis=0), Any) assert_type(MAR_f4.min(axis=0), Any) assert_type(MAR_b.min(keepdims=True), Any) assert_type(MAR_f4.min(keepdims=True), Any) -assert_type(MAR_f4.min(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.min(0, MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.min(None, MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.min(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.min(0, MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.min(None, MAR_subclass), MaskedArraySubclassC) 
assert_type(np.ma.max(MAR_b), np.bool) assert_type(np.ma.max(MAR_f4), np.float32) @@ -86,9 +93,9 @@ assert_type(np.ma.max(MAR_b, axis=0), Any) assert_type(np.ma.max(MAR_f4, axis=0), Any) assert_type(np.ma.max(MAR_b, keepdims=True), Any) assert_type(np.ma.max(MAR_f4, keepdims=True), Any) -assert_type(np.ma.max(MAR_f4, out=MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.max(MAR_f4, 0, MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.max(MAR_f4, None, MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.max(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.max(MAR_f4, 0, MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.max(MAR_f4, None, MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.max(), np.bool) assert_type(MAR_f4.max(), np.float32) @@ -96,9 +103,9 @@ assert_type(MAR_b.max(axis=0), Any) assert_type(MAR_f4.max(axis=0), Any) assert_type(MAR_b.max(keepdims=True), Any) assert_type(MAR_f4.max(keepdims=True), Any) -assert_type(MAR_f4.max(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.max(0, MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.max(None, MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.max(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.max(0, MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.max(None, MAR_subclass), MaskedArraySubclassC) assert_type(np.ma.ptp(MAR_b), np.bool) assert_type(np.ma.ptp(MAR_f4), np.float32) @@ -106,9 +113,9 @@ assert_type(np.ma.ptp(MAR_b, axis=0), Any) assert_type(np.ma.ptp(MAR_f4, axis=0), Any) assert_type(np.ma.ptp(MAR_b, keepdims=True), Any) assert_type(np.ma.ptp(MAR_f4, keepdims=True), Any) -assert_type(np.ma.ptp(MAR_f4, out=MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.ptp(MAR_f4, 0, MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.ptp(MAR_f4, None, MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.ptp(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.ptp(MAR_f4, 0, MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.ptp(MAR_f4, None, MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.ptp(), np.bool) assert_type(MAR_f4.ptp(), np.float32) @@ -116,9 +123,9 @@ assert_type(MAR_b.ptp(axis=0), Any) assert_type(MAR_f4.ptp(axis=0), Any) assert_type(MAR_b.ptp(keepdims=True), Any) assert_type(MAR_f4.ptp(keepdims=True), Any) -assert_type(MAR_f4.ptp(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.ptp(0, MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.ptp(None, MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.ptp(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.ptp(0, MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.ptp(None, MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.argmin(), np.intp) assert_type(MAR_f4.argmin(), np.intp) @@ -126,8 +133,8 @@ assert_type(MAR_f4.argmax(fill_value=6.28318, keepdims=False), np.intp) assert_type(MAR_b.argmin(axis=0), Any) assert_type(MAR_f4.argmin(axis=0), Any) assert_type(MAR_b.argmin(keepdims=True), Any) -assert_type(MAR_f4.argmin(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.argmin(None, None, out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.argmin(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.argmin(None, None, out=MAR_subclass), MaskedArraySubclassC) assert_type(np.ma.argmin(MAR_b), np.intp) assert_type(np.ma.argmin(MAR_f4), np.intp) @@ -135,8 +142,8 @@ assert_type(np.ma.argmin(MAR_f4, fill_value=6.28318, keepdims=False), np.intp) assert_type(np.ma.argmin(MAR_b, axis=0), Any) 
assert_type(np.ma.argmin(MAR_f4, axis=0), Any) assert_type(np.ma.argmin(MAR_b, keepdims=True), Any) -assert_type(np.ma.argmin(MAR_f4, out=MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.argmin(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.argmin(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.argmin(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.argmax(), np.intp) assert_type(MAR_f4.argmax(), np.intp) @@ -144,8 +151,8 @@ assert_type(MAR_f4.argmax(fill_value=6.28318, keepdims=False), np.intp) assert_type(MAR_b.argmax(axis=0), Any) assert_type(MAR_f4.argmax(axis=0), Any) assert_type(MAR_b.argmax(keepdims=True), Any) -assert_type(MAR_f4.argmax(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.argmax(None, None, out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.argmax(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.argmax(None, None, out=MAR_subclass), MaskedArraySubclassC) assert_type(np.ma.argmax(MAR_b), np.intp) assert_type(np.ma.argmax(MAR_f4), np.intp) @@ -153,8 +160,8 @@ assert_type(np.ma.argmax(MAR_f4, fill_value=6.28318, keepdims=False), np.intp) assert_type(np.ma.argmax(MAR_b, axis=0), Any) assert_type(np.ma.argmax(MAR_f4, axis=0), Any) assert_type(np.ma.argmax(MAR_b, keepdims=True), Any) -assert_type(np.ma.argmax(MAR_f4, out=MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.argmax(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.argmax(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.argmax(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.all(), np.bool) assert_type(MAR_f4.all(), np.bool) @@ -164,8 +171,8 @@ assert_type(MAR_b.all(axis=0, keepdims=True), MaskedArray[np.bool]) assert_type(MAR_b.all(0, None, True), MaskedArray[np.bool]) assert_type(MAR_f4.all(axis=0), np.bool | MaskedArray[np.bool]) assert_type(MAR_b.all(keepdims=True), MaskedArray[np.bool]) -assert_type(MAR_f4.all(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.all(None, out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.all(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.all(None, out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.any(), np.bool) assert_type(MAR_f4.any(), np.bool) @@ -175,22 +182,22 @@ assert_type(MAR_b.any(axis=0, keepdims=True), MaskedArray[np.bool]) assert_type(MAR_b.any(0, None, True), MaskedArray[np.bool]) assert_type(MAR_f4.any(axis=0), np.bool | MaskedArray[np.bool]) assert_type(MAR_b.any(keepdims=True), MaskedArray[np.bool]) -assert_type(MAR_f4.any(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.any(None, out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.any(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.any(None, out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_f4.sort(), None) assert_type(MAR_f4.sort(axis=0, kind='quicksort', order='K', endwith=False, fill_value=42., stable=False), None) assert_type(np.ma.sort(MAR_f4), MaskedArray[np.float32]) -assert_type(np.ma.sort(MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.sort(MAR_subclass), MaskedArraySubclassC) assert_type(np.ma.sort([[0, 1], [2, 3]]), NDArray[Any]) assert_type(np.ma.sort(AR_f4), NDArray[np.float32]) assert_type(MAR_f8.take(0), np.float64) assert_type(MAR_1d.take(0), Any) assert_type(MAR_f8.take([0]), MaskedArray[np.float64]) -assert_type(MAR_f8.take(0, out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f8.take([0], 
out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.take(0, out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f8.take([0], out=MAR_subclass), MaskedArraySubclassC) assert_type(np.ma.take(f, 0), Any) assert_type(np.ma.take(f4, 0), np.float32) @@ -199,8 +206,8 @@ assert_type(np.ma.take(AR_f4, 0), np.float32) assert_type(np.ma.take(MAR_1d, 0), Any) assert_type(np.ma.take(MAR_f8, [0]), MaskedArray[np.float64]) assert_type(np.ma.take(AR_f4, [0]), MaskedArray[np.float32]) -assert_type(np.ma.take(MAR_f8, 0, out=MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.take(MAR_f8, [0], out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.take(MAR_f8, 0, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.take(MAR_f8, [0], out=MAR_subclass), MaskedArraySubclassC) assert_type(np.ma.take([1], [0]), MaskedArray[Any]) assert_type(np.ma.take(np.eye(2), 1, axis=0), MaskedArray[np.float64]) @@ -391,17 +398,17 @@ assert_type(MAR_2d_f4.T, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32] assert_type(MAR_2d_f4.dot(1), MaskedArray[Any]) assert_type(MAR_2d_f4.dot([1]), MaskedArray[Any]) -assert_type(MAR_2d_f4.dot(1, out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_2d_f4.dot(1, out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], ...]) assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) assert_type(MAR_f8.trace(), Any) -assert_type(MAR_f8.trace(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f8.trace(out=MAR_subclass, dtype=None), MaskedArraySubclass) +assert_type(MAR_f8.trace(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f8.trace(out=MAR_subclass, dtype=None), MaskedArraySubclassC) assert_type(MAR_f8.round(), MaskedArray[np.float64]) -assert_type(MAR_f8.round(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.round(out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_i8.reshape(None), MaskedArray[np.int64]) assert_type(MAR_f8.reshape(-1), np.ma.MaskedArray[tuple[int], np.dtype[np.float64]]) @@ -411,10 +418,10 @@ assert_type(MAR_s.reshape([]), np.ma.MaskedArray[tuple[()], np.dtype[np.str_]]) assert_type(MAR_V.reshape((480, 720, 4)), np.ma.MaskedArray[tuple[int, int, int], np.dtype[np.void]]) assert_type(MAR_f8.cumprod(), MaskedArray[Any]) -assert_type(MAR_f8.cumprod(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.cumprod(out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_f8.cumsum(), MaskedArray[Any]) -assert_type(MAR_f8.cumsum(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.cumsum(out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_f8.view(), MaskedArray[np.float64]) assert_type(MAR_f8.view(dtype=np.float32), MaskedArray[np.float32]) @@ -439,6 +446,15 @@ assert_type(MAR_f8.flat, np.ma.core.MaskedIterator) def invalid_resize() -> None: assert_type(MAR_f8.resize((1,1)), NoReturn) # type: ignore[arg-type] +assert_type(np.ma.MaskedArray(AR_f4), MaskedArray[np.float32]) +assert_type(np.ma.MaskedArray(np.array([1,2,3]), [True, True, False], np.float16), MaskedArray[np.float16]) +assert_type(np.ma.MaskedArray(np.array([1,2,3]), dtype=np.float16), MaskedArray[np.float16]) +assert_type(np.ma.MaskedArray(np.array([1,2,3]), copy=True), MaskedArray[Any]) +# TODO: This one could be made more precise, the return type could be `MaskedArraySubclassC` +assert_type(np.ma.MaskedArray(MAR_subclass), MaskedArray[np.complex128]) +# TODO: This one could be made more precise, the return type could be `MaskedArraySubclass[np.float32]` 
+assert_type(np.ma.MaskedArray(MAR_into_subclass), MaskedArray[np.float32]) + # Masked Array addition assert_type(MAR_b + AR_LIKE_u, MaskedArray[np.uint32]) @@ -780,27 +796,27 @@ assert_type(AR_LIKE_o * MAR_o, Any) # type: ignore[assert-type] assert_type(MAR_f8.sum(), Any) assert_type(MAR_f8.sum(axis=0), Any) assert_type(MAR_f8.sum(keepdims=True), Any) -assert_type(MAR_f8.sum(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.sum(out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_f8.std(), Any) assert_type(MAR_f8.std(axis=0), Any) assert_type(MAR_f8.std(keepdims=True, mean=0.), Any) -assert_type(MAR_f8.std(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.std(out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_f8.var(), Any) assert_type(MAR_f8.var(axis=0), Any) assert_type(MAR_f8.var(keepdims=True, mean=0.), Any) -assert_type(MAR_f8.var(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.var(out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_f8.mean(), Any) assert_type(MAR_f8.mean(axis=0), Any) assert_type(MAR_f8.mean(keepdims=True), Any) -assert_type(MAR_f8.mean(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.mean(out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_f8.prod(), Any) assert_type(MAR_f8.prod(axis=0), Any) assert_type(MAR_f8.prod(keepdims=True), Any) -assert_type(MAR_f8.prod(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.prod(out=MAR_subclass), MaskedArraySubclassC) # MaskedArray "true" division From 75169a9cba27bd28bb19c36e8d0cff6a49f35bf0 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 30 Jul 2025 13:34:37 +0100 Subject: [PATCH 0260/1018] TYP: Remove ``MaskedArray.__reduce__``, and punt on ``MaskedArray.__{eq,ne}__`` (#29470) --- numpy/ma/core.pyi | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 824ceef18822..79503caa7821 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -601,8 +601,12 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def filled(self, /, fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... def compressed(self) -> ndarray[tuple[int], _DTypeT_co]: ... def compress(self, condition, axis=..., out=...): ... - def __eq__(self, other): ... - def __ne__(self, other): ... + + # TODO: How to deal with the non-commutative nature of `==` and `!=`? + # xref numpy/numpy#17368 + def __eq__(self, other: Incomplete, /) -> Incomplete: ... + def __ne__(self, other: Incomplete, /) -> Incomplete: ... + def __ge__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] def __gt__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] def __le__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] @@ -1902,7 +1906,6 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def tofile(self, /, fid: Incomplete, sep: str = "", format: str = "%s") -> Incomplete: ... # - def __reduce__(self): ... def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Self: ... 
# Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` From 00bbfefed97140b3c493b9154d4db62dfe2cf787 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 30 Jul 2025 13:36:48 +0100 Subject: [PATCH 0261/1018] TYP: Type ``MaskedArray.__getitem__`` (#29472) --- numpy/__init__.pyi | 1 + numpy/ma/core.pyi | 16 +++++++++++++++- numpy/typing/tests/data/reveal/ma.pyi | 12 ++++++++++++ 3 files changed, 28 insertions(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 302f0cdf15fc..643327f29d1b 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2106,6 +2106,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, ) -> ndarray[_ShapeT, _DTypeT]: ... + # Keep in sync with `MaskedArray.__getitem__` @overload def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 79503caa7821..51038178ee6f 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -25,6 +25,7 @@ from numpy import ( _OrderKACF, _PartitionKind, _SortKind, + _ToIndices, amax, amin, bool_, @@ -54,6 +55,7 @@ from numpy import ( str_, timedelta64, unsignedinteger, + void, ) from numpy._globals import _NoValueType from numpy._typing import ( @@ -290,6 +292,7 @@ _MaskedArrayComplex_co: TypeAlias = _MaskedArray[inexact | integer | np.bool] _MaskedArrayNumber_co: TypeAlias = _MaskedArray[number | np.bool] _MaskedArrayTD64_co: TypeAlias = _MaskedArray[timedelta64 | integer | np.bool] +_ArrayInt_co: TypeAlias = NDArray[integer | bool_] _Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] MaskType = bool_ @@ -562,7 +565,18 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): fill_value: _ScalarLike_co | None = None ) -> MaskedArray[_ShapeT_co, dtype]: ... - def __getitem__(self, indx): ... + # Keep in sync with `ndarray.__getitem__` + @overload + def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self, key: SupportsIndex | tuple[SupportsIndex, ...], /) -> Any: ... + @overload + def __getitem__(self, key: _ToIndices, /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self: _MaskedArray[void], indx: str, /) -> MaskedArray[_ShapeT_co, dtype]: ... + @overload + def __getitem__(self: _MaskedArray[void], indx: list[str], /) -> MaskedArray[_ShapeT_co, dtype[void]]: ... + def __setitem__(self, indx, value): ... @property def shape(self) -> _ShapeT_co: ... 
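(Editorial note, not part of the patch: a quick sketch of what these
`__getitem__` overloads give a type checker; `m` and `m_void` are assumed
names for a 2-D float32 masked array and a 2-D void masked array. The reveal
tests in the next hunk assert exactly these results.)

    m[1:]                # slice index           -> MaskedArray of float32
    m[np.array([0, 1])]  # integer-array index   -> MaskedArray of float32
    m[0, 0]              # full scalar index     -> Any
    m_void['field_0']    # field access on void  -> MaskedArray, unspecified dtype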
diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 3edd300c0b5a..344a9ef70160 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -18,6 +18,7 @@ MaskedArraySubclassC: TypeAlias = MaskedArraySubclass[np.complex128] AR_b: NDArray[np.bool] AR_f4: NDArray[np.float32] +AR_i8: NDArray[np.int64] AR_u4: NDArray[np.uint32] AR_dt64: NDArray[np.datetime64] AR_td64: NDArray[np.timedelta64] @@ -54,6 +55,7 @@ MAR_into_subclass: IntoMaskedArraySubClass[np.float32] MAR_1d: np.ma.MaskedArray[tuple[int], np.dtype] MAR_2d_f4: np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]] +MAR_2d_V: np.ma.MaskedArray[tuple[int, int], np.dtype[np.void]] b: np.bool f4: np.float32 @@ -365,6 +367,16 @@ assert_type(MAR_2d_f4.baseclass, type[NDArray[Any]]) assert_type(MAR_b.swapaxes(0, 1), MaskedArray[np.bool]) assert_type(MAR_2d_f4.swapaxes(1, 0), MaskedArray[np.float32]) +assert_type(MAR_2d_f4[AR_i8], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[[1, 2, 3]], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[1:], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[:], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[0, 0], Any) +assert_type(MAR_2d_f4[:, np.newaxis], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[..., -1], MaskedArray[np.float32]) +assert_type(MAR_2d_V['field_0'], np.ma.MaskedArray[tuple[int, int], np.dtype]) +assert_type(MAR_2d_V[['field_0', 'field_1']], np.ma.MaskedArray[tuple[int, int], np.dtype[np.void]]) + assert_type(np.ma.nomask, np.bool[Literal[False]]) assert_type(np.ma.MaskType, type[np.bool]) From d53fe4aa45f44f52835e4b1946434b1a4acc76c8 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 30 Jul 2025 15:44:04 +0100 Subject: [PATCH 0262/1018] TYP: Type ``MaskedArray.fill_value`` (#29468) --- numpy/ma/core.pyi | 4 ++-- numpy/typing/tests/data/pass/ma.py | 2 ++ numpy/typing/tests/data/reveal/ma.pyi | 2 ++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 51038178ee6f..d79b4695bb32 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -607,9 +607,9 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @flat.setter def flat(self, value: ArrayLike, /) -> None: ... @property - def fill_value(self): ... + def fill_value(self: _MaskedArray[_ScalarT]) -> _ScalarT: ... @fill_value.setter - def fill_value(self, value=...): ... + def fill_value(self, value: _ScalarLike_co | None = None) -> None: ... get_fill_value: Any set_fill_value: Any def filled(self, /, fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
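(Editorial note, not part of the patch: a minimal sketch of the typed
`fill_value` property; the pass and reveal tests below exercise both
directions.)

    marr = np.ma.MaskedArray(np.array([1, 2, 3], dtype=np.int64))
    marr.fill_value        # a checker using these stubs infers np.int64
    marr.fill_value = 0    # the setter accepts any scalar-like value, or None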
diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py index 793f7097ee81..02655d7f88c7 100644 --- a/numpy/typing/tests/data/pass/ma.py +++ b/numpy/typing/tests/data/pass/ma.py @@ -34,6 +34,8 @@ MAR_f.mask = AR_b MAR_f.mask = np.False_ +MAR_i.fill_value = 0 + MAR_f.flat = [9] # Inplace addition diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 344a9ef70160..52c0dfa6ab75 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -401,6 +401,8 @@ assert_type(MAR_2d_f4.anom(axis=0, dtype=np.float16), np.ma.MaskedArray[tuple[in assert_type(MAR_2d_f4.anom(0, np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype]) assert_type(MAR_2d_f4.anom(0, 'float16'), np.ma.MaskedArray[tuple[int, int], np.dtype]) +assert_type(MAR_i8.fill_value, np.int64) + assert_type(MAR_b.transpose(), MaskedArray[np.bool]) assert_type(MAR_2d_f4.transpose(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) assert_type(MAR_2d_f4.transpose(1, 0), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) From 97c6b8602791e266b0e073914e25121df0bb4936 Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Wed, 30 Jul 2025 08:44:48 -0700 Subject: [PATCH 0263/1018] Add .file entry to all .s SVML files --- numpy/_core/src/umath/svml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/umath/svml b/numpy/_core/src/umath/svml index 32bf2a984207..3a713b130183 160000 --- a/numpy/_core/src/umath/svml +++ b/numpy/_core/src/umath/svml @@ -1 +1 @@ -Subproject commit 32bf2a98420762a63ab418aaa0a7d6e17eb9627a +Subproject commit 3a713b13018325451c1b939d3914ceff5ec68e19 From 19574af8decf1285cd4a5ca10af8fe81fd30f6bc Mon Sep 17 00:00:00 2001 From: Bernard Roesler Date: Wed, 30 Jul 2025 12:17:09 -0400 Subject: [PATCH 0264/1018] BUG: Always return a real dtype from linalg.cond (gh-18304) (#29333) * BUG: Always return a real dtype from linalg.cond. Addresses gh-18304. The condition number of a matrix is the product of two norms, which are always non-negative and real-valued, so the condition number itself should be non-negative and real-valued. This commit returns the proper real dtype from `linalg.cond`, and includes tests for the condition number of a complex matrix in various norms. * ENH: Change type of complex results only. This commit addresses a reviewer comment on the blanket application of `abs(r)`. It specifically ensures the return type of complex-valued matrices will be the corresponding real type. --- numpy/linalg/_linalg.py | 1 + numpy/linalg/tests/test_linalg.py | 15 ++++++++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index 52a2ffb8f50b..7471db7e1fe2 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -2011,6 +2011,7 @@ def cond(x, p=None): # contain nans in the entries where inversion failed. 
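    # (Editorial illustration, not part of this diff: with the fix two lines
    # below, the non-SVD branch returns a real dtype even for complex input,
    # e.g. np.linalg.cond(np.eye(3, dtype=np.complex64), 1).dtype becomes
    # float32 where it previously came back as complex64.)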
 _assert_stacked_square(x)
     t, result_t = _commonType(x)
+    result_t = _realType(result_t)  # condition number is always real
     signature = 'D->D' if isComplexType(t) else 'd->d'
     with errstate(all='ignore'):
         invx = _umath_linalg.inv(x, signature=signature)
diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py
index 8ad1c3ed6d16..4cc9ac7a5496 100644
--- a/numpy/linalg/tests/test_linalg.py
+++ b/numpy/linalg/tests/test_linalg.py
@@ -793,15 +793,28 @@ def do(self, a, b, tags):

 class TestCond(CondCases):
-    def test_basic_nonsvd(self):
+    @pytest.mark.parametrize('is_complex', [False, True])
+    def test_basic_nonsvd(self, is_complex):
         # Smoketest the non-svd norms
         A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]])
+        if is_complex:
+            # Since A is linearly scaled, the condition number should not change
+            A = A * (1 + 1j)
         assert_almost_equal(linalg.cond(A, inf), 4)
         assert_almost_equal(linalg.cond(A, -inf), 2 / 3)
         assert_almost_equal(linalg.cond(A, 1), 4)
         assert_almost_equal(linalg.cond(A, -1), 0.5)
         assert_almost_equal(linalg.cond(A, 'fro'), np.sqrt(265 / 12))

+    @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
+    @pytest.mark.parametrize('norm_ord', [1, -1, 2, -2, 'fro', np.inf, -np.inf])
+    def test_cond_dtypes(self, dtype, norm_ord):
+        # Check that the condition number is returned in the real dtype
+        # corresponding to the input dtype
+        A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]], dtype=dtype)
+        out_type = get_real_dtype(dtype)
+        assert_equal(linalg.cond(A, p=norm_ord).dtype, out_type)
+
     def test_singular(self):
         # Singular matrices have infinite condition number for
         # positive norms, and negative norms shouldn't raise

From b4586aea61e94589e3c62e1ac6d2adc05e5a40f6 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 30 Jul 2025 18:02:32 +0000
Subject: [PATCH 0265/1018] MAINT: Bump github/codeql-action from 3.29.4 to 3.29.5

Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.4 to 3.29.5.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](https://github.com/github/codeql-action/compare/4e828ff8d448a8a6e532957b1811f387a63867e8...51f77329afa6477de8c49fc9c7046c15b9a4e79d)

---
updated-dependencies:
- dependency-name: github/codeql-action
  dependency-version: 3.29.5
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
---
 .github/workflows/codeql.yml     | 6 +++---
 .github/workflows/scorecards.yml | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index 1aea33f531f4..2d5fdb08a9d4 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -47,7 +47,7 @@ jobs:

     # Initializes the CodeQL tools for scanning.
     - name: Initialize CodeQL
-      uses: github/codeql-action/init@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4
+      uses: github/codeql-action/init@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5
       with:
         languages: ${{ matrix.language }}
         # If you wish to specify custom queries, you can do so here or in a config file.
@@ -57,7 +57,7 @@ jobs:

     # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
     - name: Autobuild
-      uses: github/codeql-action/autobuild@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4
+      uses: github/codeql-action/autobuild@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5

     # ℹ️ Command-line programs to run using the OS shell.
     # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun

@@ -70,6 +70,6 @@ jobs:
     #   ./location_of_script_within_repo/buildscript.sh

     - name: Perform CodeQL Analysis
-      uses: github/codeql-action/analyze@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4
+      uses: github/codeql-action/analyze@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5
       with:
         category: "/language:${{matrix.language}}"
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index 84fbe1f03cb1..079813c3aea3 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -50,6 +50,6 @@ jobs:

     # Upload the results to GitHub's code scanning dashboard.
     - name: "Upload to code-scanning"
-      uses: github/codeql-action/upload-sarif@4e828ff8d448a8a6e532957b1811f387a63867e8 # v2.1.27
+      uses: github/codeql-action/upload-sarif@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v2.1.27
       with:
         sarif_file: results.sarif

From 46268caa719db95ab2198bcd619f0e3120d7c52e Mon Sep 17 00:00:00 2001
From: riku-sakamoto <46015196+riku-sakamoto@users.noreply.github.com>
Date: Thu, 31 Jul 2025 05:09:16 +0900
Subject: [PATCH 0266/1018] DOC: Clarify that `numpy.printoptions` applies
 only to `ndarray`, not… (#29450)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* DOC: Clarify that `numpy.printoptions` applies only to `ndarray`, not scalars

As discussed in #29409, numpy.printoptions applies only to `numpy.ndarray`,
not to scalars. A note describing this behavior has been added to the
docstring.

* Update numpy/_core/arrayprint.py

[skip cirrus] [skip actions] [skip azp]

---------

Co-authored-by: Nathan Goldbaum
---
 numpy/_core/arrayprint.py | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py
index 2a684280610b..9eda8db40dfc 100644
--- a/numpy/_core/arrayprint.py
+++ b/numpy/_core/arrayprint.py
@@ -250,9 +250,10 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None,

     Notes
     -----
-    `formatter` is always reset with a call to `set_printoptions`.
-    Use `printoptions` as a context manager to set the values temporarily.
+    * ``formatter`` is always reset with a call to `set_printoptions`.
+    * Use `printoptions` as a context manager to set the values temporarily.
+    * These print options apply only to NumPy ndarrays, not to scalars.

     Examples
     --------
@@ -352,6 +353,10 @@ def get_printoptions():

     For a full description of these options, see `set_printoptions`.

+    Notes
+    -----
+    These print options apply only to NumPy ndarrays, not to scalars.
+
     See Also
     --------
     set_printoptions, printoptions
@@ -410,6 +415,10 @@ def printoptions(*args, **kwargs):
     --------
     set_printoptions, get_printoptions

+    Notes
+    -----
+    These print options apply only to NumPy ndarrays, not to scalars.
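(Editorial illustration, not part of this diff: the behavior the new note
documents, runnable at a prompt with numpy imported as np.)

    with np.printoptions(precision=2):
        print(np.array([1 / 3]))   # [0.33]             -- the ndarray honors the option
        print(np.float64(1 / 3))   # 0.3333333333333333 -- the scalar does not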
+ """ token = _set_printoptions(*args, **kwargs) From 4126291bc7062cbe636edf810b006653b693f78d Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 31 Jul 2025 09:59:40 +0100 Subject: [PATCH 0267/1018] MAINT: remove unnecessary `kwargs` update in `MaskedArray.reshape` (#29403) --- numpy/ma/core.py | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 84e029439725..9b705460dd85 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -4818,7 +4818,6 @@ def reshape(self, *s, **kwargs): fill_value=999999) """ - kwargs.update(order=kwargs.get('order', 'C')) result = self._data.reshape(*s, **kwargs).view(type(self)) result._update_from(self) mask = self._mask From a80542541130a2dc47eab2a6972019453cc0c518 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Thu, 31 Jul 2025 11:03:20 +0200 Subject: [PATCH 0268/1018] MAINT: Replace setting of array shape by reshape operation (#29314) This PR we replace some explicit setting of the shape of an array in favor of using reshape. Some cases are left out (e.g. masked arrays, tests to check the .shape setting works). This is because explicit shape setting is generally unsafe mutation that we would like to avoid/get rid of. --- numpy/_core/numeric.py | 5 +- numpy/_core/tests/test_einsum.py | 70 ++++++++++++--------------- numpy/_core/tests/test_multiarray.py | 22 ++++----- numpy/lib/_format_impl.py | 4 +- numpy/lib/_function_base_impl.py | 5 +- numpy/lib/tests/test_arrayterator.py | 3 +- numpy/linalg/_linalg.py | 9 ++-- numpy/linalg/tests/test_linalg.py | 9 ++-- numpy/linalg/tests/test_regression.py | 2 +- numpy/ma/tests/test_core.py | 23 +++++---- numpy/ma/tests/test_extras.py | 2 +- tools/swig/test/testFlat.py | 6 +-- 12 files changed, 74 insertions(+), 86 deletions(-) diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index 4886468f0864..ec2cbf6dd7fc 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -1106,10 +1106,9 @@ def tensordot(a, b, axes=2): An extended example taking advantage of the overloading of + and \\*: - >>> a = np.array(range(1, 9)) - >>> a.shape = (2, 2, 2) + >>> a = np.array(range(1, 9)).reshape((2, 2, 2)) >>> A = np.array(('a', 'b', 'c', 'd'), dtype=object) - >>> A.shape = (2, 2) + >>> A = A.reshape((2, 2)) >>> a; A array([[[1, 2], [3, 4]], diff --git a/numpy/_core/tests/test_einsum.py b/numpy/_core/tests/test_einsum.py index 84d4af1707b6..82789eae0679 100644 --- a/numpy/_core/tests/test_einsum.py +++ b/numpy/_core/tests/test_einsum.py @@ -231,21 +231,20 @@ def __rmul__(self, other): def test_einsum_views(self): # pass-through for do_opt in [True, False]: - a = np.arange(6) - a.shape = (2, 3) + a = np.arange(6).reshape((2, 3)) b = np.einsum("...", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) b = np.einsum(a, [Ellipsis], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) b = np.einsum("ij", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a) b = np.einsum(a, [0, 1], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a) # output is writeable whenever input is writeable @@ -256,115 +255,110 @@ def test_einsum_views(self): assert_(not b.flags['WRITEABLE']) # transpose - a = np.arange(6) - a.shape = (2, 3) + a = np.arange(6).reshape((2, 3)) b = np.einsum("ji", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a.T) b = np.einsum(a, [1, 0], optimize=do_opt) - 
assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a.T) # diagonal - a = np.arange(9) - a.shape = (3, 3) + a = np.arange(9).reshape((3, 3)) b = np.einsum("ii->i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[i, i] for i in range(3)]) b = np.einsum(a, [0, 0], [0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[i, i] for i in range(3)]) # diagonal with various ways of broadcasting an additional dimension - a = np.arange(27) - a.shape = (3, 3, 3) + a = np.arange(27).reshape((3, 3, 3)) b = np.einsum("...ii->...i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) b = np.einsum("ii...->...i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(2, 0, 1)]) b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(2, 0, 1)]) b = np.einsum("...ii->i...", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum("jii->ij", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum(a, [1, 0, 0], [0, 1], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum("ii...->i...", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) b = np.einsum("i...i->i...", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) b = np.einsum("i...i->...i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(1, 0, 2)]) b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(1, 0, 2)]) # triple diagonal - a = np.arange(27) - a.shape = (3, 3, 3) + a = np.arange(27).reshape((3, 3, 3)) b = np.einsum("iii->i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[i, i, i] for i in range(3)]) b = np.einsum(a, [0, 0, 0], [0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[i, i, i] for i in range(3)]) # swap axes - a = np.arange(24) - a.shape = (2, 3, 4) + a = np.arange(24).reshape((2, 3, 4)) b = np.einsum("ijk->jik", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is 
a.base) assert_equal(b, a.swapaxes(0, 1)) b = np.einsum(a, [0, 1, 2], [1, 0, 2], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a.swapaxes(0, 1)) def check_einsum_sums(self, dtype, do_opt=False): diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index e7647d2e23fe..cf2f899b7991 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -2279,8 +2279,7 @@ def test_sort_axis(self): def test_sort_size_0(self): # check axis handling for multidimensional empty arrays - a = np.array([]) - a.shape = (3, 2, 1, 0) + a = np.array([]).reshape((3, 2, 1, 0)) for axis in range(-a.ndim, a.ndim): msg = f'test empty array sort with axis={axis}' assert_equal(np.sort(a, axis=axis), a, msg) @@ -2541,8 +2540,7 @@ def test_argsort(self): assert_equal(a.copy().argsort(), c) # check axis handling for multidimensional empty arrays - a = np.array([]) - a.shape = (3, 2, 1, 0) + a = np.array([]).reshape((3, 2, 1, 0)) for axis in range(-a.ndim, a.ndim): msg = f'test empty array argsort with axis={axis}' assert_equal(np.argsort(a, axis=axis), @@ -2859,8 +2857,7 @@ def test_partition_integer(self): def test_partition_empty_array(self, kth_dtype): # check axis handling for multidimensional empty arrays kth = np.array(0, dtype=kth_dtype)[()] - a = np.array([]) - a.shape = (3, 2, 1, 0) + a = np.array([]).reshape((3, 2, 1, 0)) for axis in range(-a.ndim, a.ndim): msg = f'test empty array partition with axis={axis}' assert_equal(np.partition(a, kth, axis=axis), a, msg) @@ -2871,8 +2868,7 @@ def test_partition_empty_array(self, kth_dtype): def test_argpartition_empty_array(self, kth_dtype): # check axis handling for multidimensional empty arrays kth = np.array(0, dtype=kth_dtype)[()] - a = np.array([]) - a.shape = (3, 2, 1, 0) + a = np.array([]).reshape((3, 2, 1, 0)) for axis in range(-a.ndim, a.ndim): msg = f'test empty array argpartition with axis={axis}' assert_equal(np.partition(a, kth, axis=axis), @@ -5367,7 +5363,7 @@ def test_ip_types(self): unchecked_types = [bytes, str, np.void] x = np.random.random(24) * 100 - x.shape = 2, 3, 4 + x = x.reshape((2, 3, 4)) for types in np._core.sctypes.values(): for T in types: if T not in unchecked_types: @@ -5378,20 +5374,20 @@ def test_ip_types(self): def test_raise(self): x = np.random.random(24) * 100 - x.shape = 2, 3, 4 + x = x.reshape((2, 3, 4)) assert_raises(IndexError, x.take, [0, 1, 2], axis=0) assert_raises(IndexError, x.take, [-3], axis=0) assert_array_equal(x.take([-1], axis=0)[0], x[1]) def test_clip(self): x = np.random.random(24) * 100 - x.shape = 2, 3, 4 + x = x.reshape((2, 3, 4)) assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0]) assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1]) def test_wrap(self): x = np.random.random(24) * 100 - x.shape = 2, 3, 4 + x = x.reshape((2, 3, 4)) assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1]) assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0]) assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1]) @@ -5971,7 +5967,7 @@ class TestFlat: def setup_method(self): a0 = np.arange(20.0) a = a0.reshape(4, 5) - a0.shape = (4, 5) + a0 = a0.reshape((4, 5)) a.flags.writeable = False self.a = a self.b = a[::2, ::2] diff --git a/numpy/lib/_format_impl.py b/numpy/lib/_format_impl.py index 7378ba554810..bfda7fec73b6 100644 --- a/numpy/lib/_format_impl.py +++ b/numpy/lib/_format_impl.py @@ -879,10 +879,10 @@ def read_array(fp, allow_pickle=False, pickle_kwargs=None, *, ) if fortran_order: 
- array.shape = shape[::-1] + array = array.reshape(shape[::-1]) array = array.transpose() else: - array.shape = shape + array = array.reshape(shape) return array diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index f5690a829fc6..05823ca3741f 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -1317,7 +1317,10 @@ def gradient(f, *varargs, axis=None, edge_order=1): # fix the shape for broadcasting shape = np.ones(N, dtype=int) shape[axis] = -1 - a.shape = b.shape = c.shape = shape + + a = a.reshape(shape) + b = b.reshape(shape) + c = c.reshape(shape) # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:] out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] \ + c * f[tuple(slice4)] diff --git a/numpy/lib/tests/test_arrayterator.py b/numpy/lib/tests/test_arrayterator.py index 800c9a2a5f77..42a85e58ff62 100644 --- a/numpy/lib/tests/test_arrayterator.py +++ b/numpy/lib/tests/test_arrayterator.py @@ -14,8 +14,7 @@ def test(): ndims = randint(5) + 1 shape = tuple(randint(10) + 1 for dim in range(ndims)) els = reduce(mul, shape) - a = np.arange(els) - a.shape = shape + a = np.arange(els).reshape(shape) buf_size = randint(2 * els) b = Arrayterator(a, buf_size) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index 7471db7e1fe2..7e2f1ebf531b 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -317,8 +317,7 @@ def tensorsolve(a, b, axes=None): Examples -------- >>> import numpy as np - >>> a = np.eye(2*3*4) - >>> a.shape = (2*3, 4, 2, 3, 4) + >>> a = np.eye(2*3*4).reshape((2*3, 4, 2, 3, 4)) >>> rng = np.random.default_rng() >>> b = rng.normal(size=(2*3, 4)) >>> x = np.linalg.tensorsolve(a, b) @@ -495,8 +494,7 @@ def tensorinv(a, ind=2): Examples -------- >>> import numpy as np - >>> a = np.eye(4*6) - >>> a.shape = (4, 6, 8, 3) + >>> a = np.eye(4*6).reshape((4, 6, 8, 3)) >>> ainv = np.linalg.tensorinv(a, ind=2) >>> ainv.shape (8, 3, 4, 6) @@ -505,8 +503,7 @@ def tensorinv(a, ind=2): >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b)) True - >>> a = np.eye(4*6) - >>> a.shape = (24, 8, 3) + >>> a = np.eye(4*6).reshape((24, 8, 3)) >>> ainv = np.linalg.tensorinv(a, ind=1) >>> ainv.shape (8, 3, 24) diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index 4cc9ac7a5496..b3744024fd88 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -2218,8 +2218,7 @@ def test_non_square_handling(self, arr, ind): ((24, 8, 3), 1), ]) def test_tensorinv_shape(self, shape, ind): - a = np.eye(24) - a.shape = shape + a = np.eye(24).reshape(shape) ainv = linalg.tensorinv(a=a, ind=ind) expected = a.shape[ind:] + a.shape[:ind] actual = ainv.shape @@ -2229,15 +2228,13 @@ def test_tensorinv_shape(self, shape, ind): 0, -2, ]) def test_tensorinv_ind_limit(self, ind): - a = np.eye(24) - a.shape = (4, 6, 8, 3) + a = np.eye(24).reshape((4, 6, 8, 3)) with assert_raises(ValueError): linalg.tensorinv(a=a, ind=ind) def test_tensorinv_result(self): # mimic a docstring example - a = np.eye(24) - a.shape = (24, 8, 3) + a = np.eye(24).reshape((24, 8, 3)) ainv = linalg.tensorinv(a, ind=1) b = np.ones(24) assert_allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) diff --git a/numpy/linalg/tests/test_regression.py b/numpy/linalg/tests/test_regression.py index c46f83adb0af..e02e955cfa40 100644 --- a/numpy/linalg/tests/test_regression.py +++ b/numpy/linalg/tests/test_regression.py @@ -33,7 +33,7 @@ def test_eig_build(self): 
1.51971555e-15 + 0.j, -1.51308713e-15 + 0.j]) a = arange(13 * 13, dtype=float64) - a.shape = (13, 13) + a = a.reshape((13, 13)) a = a % 17 va, ve = linalg.eig(a) va.sort() diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 2704857308a3..5327963999e0 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -216,11 +216,11 @@ def test_basic2d(self): # Test of basic array creation and properties in 2 dimensions. (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d for s in [(4, 3), (6, 2)]: - x.shape = s - y.shape = s - xm.shape = s - ym.shape = s - xf.shape = s + x = x.reshape(s) + y = y.reshape(s) + xm = xm.reshape(s) + ym = ym.reshape(s) + xf = xf.reshape(s) assert_(not isMaskedArray(x)) assert_(isMaskedArray(xm)) @@ -246,7 +246,12 @@ def test_concatenate_alongaxis(self): (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d # Concatenation along an axis s = (3, 4) - x.shape = y.shape = xm.shape = ym.shape = s + x = x.reshape(s) + y = y.reshape(s) + xm = xm.reshape(s) + ym = ym.reshape(s) + xf = xf.reshape(s) + assert_equal(xm.mask, np.reshape(m1, s)) assert_equal(ym.mask, np.reshape(m2, s)) xmym = concatenate((xm, ym), 1) @@ -762,8 +767,7 @@ def test_pickling_wstructured(self): def test_pickling_keepalignment(self): # Tests pickling w/ F_CONTIGUOUS arrays - a = arange(10) - a.shape = (-1, 2) + a = arange(10).reshape( (-1, 2)) b = a.T for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): test = pickle.loads(pickle.dumps(b, protocol=proto)) @@ -1176,8 +1180,7 @@ def test_basic_arithmetic(self): assert_equal(np.divide(x, y), divide(xm, ym)) def test_divide_on_different_shapes(self): - x = arange(6, dtype=float) - x.shape = (2, 3) + x = arange(6, dtype=float).reshape((2, 3)) y = arange(3, dtype=float) z = x / y diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index 34a032087536..f9deb33fc2c5 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -1077,7 +1077,7 @@ def test_3d(self): x = np.ma.arange(24).reshape(3, 4, 2) x[x % 3 == 0] = masked assert_equal(median(x, 0), [[12, 9], [6, 15], [12, 9], [18, 15]]) - x.shape = (4, 3, 2) + x = x.reshape((4, 3, 2)) assert_equal(median(x, 0), [[99, 10], [11, 99], [13, 14]]) x = np.ma.arange(24).reshape(4, 3, 2) x[x % 5 == 0] = masked diff --git a/tools/swig/test/testFlat.py b/tools/swig/test/testFlat.py index ce6f74819e86..543ee0c41d9f 100755 --- a/tools/swig/test/testFlat.py +++ b/tools/swig/test/testFlat.py @@ -43,7 +43,7 @@ def testProcess3D(self): for i in range(24): pack_output += struct.pack(self.typeCode, i) x = np.frombuffer(pack_output, dtype=self.typeCode) - x.shape = (2, 3, 4) + x = x.reshape((2, 3, 4)) y = x.copy() process(y) self.assertEqual(np.all((x + 1) == y), True) @@ -56,7 +56,7 @@ def testProcess3DTranspose(self): for i in range(24): pack_output += struct.pack(self.typeCode, i) x = np.frombuffer(pack_output, dtype=self.typeCode) - x.shape = (2, 3, 4) + x = x.reshape((2, 3, 4)) y = x.copy() process(y.T) self.assertEqual(np.all((x.T + 1) == y.T), True) @@ -69,7 +69,7 @@ def testProcessNoncontiguous(self): for i in range(24): pack_output += struct.pack(self.typeCode, i) x = np.frombuffer(pack_output, dtype=self.typeCode) - x.shape = (2, 3, 4) + x = x.reshape((2, 3, 4)) self.assertRaises(TypeError, process, x[:, :, 0]) From bac0a8e844a6cf18335177e6203e0e8a92cddeca Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Thu, 31 Jul 2025 09:52:27 -0400 Subject: [PATCH 0269/1018] DOC: Add 'See Also' refs for sign, copysign and signbit. 
(#29489) [skip azp] [skip cirrus] [skip actions] --- numpy/_core/code_generators/ufunc_docstrings.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index ddae87bd6012..e976be723287 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -3792,6 +3792,11 @@ def add_newdoc(place, name, doc): The sign of `x`. $OUT_SCALAR_1 + See Also + -------- + signbit + copysign + Notes ----- There is more than one definition of sign in common use for complex @@ -3828,6 +3833,11 @@ def add_newdoc(place, name, doc): Output array, or reference to `out` if that was supplied. $OUT_SCALAR_1 + See Also + -------- + sign + copysign + Examples -------- >>> import numpy as np @@ -3859,6 +3869,11 @@ def add_newdoc(place, name, doc): The values of `x1` with the sign of `x2`. $OUT_SCALAR_2 + See Also + -------- + sign + signbit + Examples -------- >>> import numpy as np From 4ab0c51c5203ef58dfae19ecb90ef3f6ae79126a Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 31 Jul 2025 16:23:11 +0100 Subject: [PATCH 0270/1018] MAINT: Do not exclude `typing/tests/data` from ruff (#29479) --- numpy/typing/tests/data/fail/arithmetic.pyi | 2 +- numpy/typing/tests/data/fail/bitwise_ops.pyi | 2 +- numpy/typing/tests/data/fail/chararray.pyi | 1 + numpy/typing/tests/data/fail/comparisons.pyi | 8 ++-- numpy/typing/tests/data/fail/datasource.pyi | 1 + numpy/typing/tests/data/fail/fromnumeric.pyi | 6 +-- numpy/typing/tests/data/fail/ma.pyi | 34 +++++++------- .../tests/data/fail/nested_sequence.pyi | 1 + numpy/typing/tests/data/fail/npyio.pyi | 2 +- numpy/typing/tests/data/fail/scalars.pyi | 1 - numpy/typing/tests/data/fail/shape.pyi | 1 + numpy/typing/tests/data/fail/twodim_base.pyi | 2 +- numpy/typing/tests/data/fail/type_check.pyi | 1 - .../tests/data/misc/extended_precision.pyi | 4 +- numpy/typing/tests/data/pass/arithmetic.py | 6 ++- .../tests/data/pass/array_constructors.py | 1 + numpy/typing/tests/data/pass/array_like.py | 2 +- numpy/typing/tests/data/pass/arrayterator.py | 1 + numpy/typing/tests/data/pass/bitwise_ops.py | 2 +- numpy/typing/tests/data/pass/comparisons.py | 9 ++-- numpy/typing/tests/data/pass/index_tricks.py | 2 + numpy/typing/tests/data/pass/literal.py | 3 +- numpy/typing/tests/data/pass/mod.py | 2 +- numpy/typing/tests/data/pass/ndarray_misc.py | 4 +- numpy/typing/tests/data/pass/numeric.py | 2 + numpy/typing/tests/data/pass/random.py | 1 + numpy/typing/tests/data/pass/recfunctions.py | 18 ++++---- numpy/typing/tests/data/pass/scalars.py | 1 + numpy/typing/tests/data/pass/shape.py | 2 +- numpy/typing/tests/data/pass/simple.py | 2 +- numpy/typing/tests/data/pass/ufunclike.py | 2 + numpy/typing/tests/data/reveal/arithmetic.pyi | 3 +- numpy/typing/tests/data/reveal/arrayprint.pyi | 2 +- .../typing/tests/data/reveal/bitwise_ops.pyi | 3 +- .../typing/tests/data/reveal/comparisons.pyi | 8 ++-- .../typing/tests/data/reveal/fromnumeric.pyi | 1 - numpy/typing/tests/data/reveal/ma.pyi | 44 +++++++++---------- numpy/typing/tests/data/reveal/mod.pyi | 3 +- .../tests/data/reveal/ndarray_conversion.pyi | 1 - .../typing/tests/data/reveal/ndarray_misc.pyi | 3 +- .../tests/data/reveal/polynomial_polybase.pyi | 3 +- .../data/reveal/polynomial_polyutils.pyi | 3 +- .../tests/data/reveal/polynomial_series.pyi | 2 +- .../typing/tests/data/reveal/ufunc_config.pyi | 3 +- 
numpy/typing/tests/data/reveal/ufunclike.pyi | 2 +- ruff.toml | 3 +- 46 files changed, 110 insertions(+), 100 deletions(-) diff --git a/numpy/typing/tests/data/fail/arithmetic.pyi b/numpy/typing/tests/data/fail/arithmetic.pyi index e696083b8614..d62906ae87d9 100644 --- a/numpy/typing/tests/data/fail/arithmetic.pyi +++ b/numpy/typing/tests/data/fail/arithmetic.pyi @@ -30,7 +30,7 @@ AR_LIKE_M: list[np.datetime64] # NOTE: mypys `NoReturn` errors are, unfortunately, not that great _1 = AR_b - AR_LIKE_b # type: ignore[var-annotated] _2 = AR_LIKE_b - AR_b # type: ignore[var-annotated] -AR_i - bytes() # type: ignore[operator] +AR_i - b"" # type: ignore[operator] AR_f - AR_LIKE_m # type: ignore[operator] AR_f - AR_LIKE_M # type: ignore[operator] diff --git a/numpy/typing/tests/data/fail/bitwise_ops.pyi b/numpy/typing/tests/data/fail/bitwise_ops.pyi index 3538ec7d64c7..29dfe79287ad 100644 --- a/numpy/typing/tests/data/fail/bitwise_ops.pyi +++ b/numpy/typing/tests/data/fail/bitwise_ops.pyi @@ -4,7 +4,7 @@ i8 = np.int64() i4 = np.int32() u8 = np.uint64() b_ = np.bool() -i = int() +i = 0 f8 = np.float64() diff --git a/numpy/typing/tests/data/fail/chararray.pyi b/numpy/typing/tests/data/fail/chararray.pyi index fb52f7349dd1..f1b7009439d1 100644 --- a/numpy/typing/tests/data/fail/chararray.pyi +++ b/numpy/typing/tests/data/fail/chararray.pyi @@ -1,4 +1,5 @@ from typing import Any + import numpy as np AR_U: np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] diff --git a/numpy/typing/tests/data/fail/comparisons.pyi b/numpy/typing/tests/data/fail/comparisons.pyi index 3c8a94bff240..d2965b5c1a91 100644 --- a/numpy/typing/tests/data/fail/comparisons.pyi +++ b/numpy/typing/tests/data/fail/comparisons.pyi @@ -21,7 +21,7 @@ AR_M > AR_i # type: ignore[operator] AR_M > AR_f # type: ignore[operator] AR_M > AR_m # type: ignore[operator] -AR_i > str() # type: ignore[operator] -AR_i > bytes() # type: ignore[operator] -str() > AR_M # type: ignore[operator] -bytes() > AR_M # type: ignore[operator] +AR_i > "" # type: ignore[operator] +AR_i > b"" # type: ignore[operator] +"" > AR_M # type: ignore[operator] +b"" > AR_M # type: ignore[operator] diff --git a/numpy/typing/tests/data/fail/datasource.pyi b/numpy/typing/tests/data/fail/datasource.pyi index 267b672baea7..4c603cf693a1 100644 --- a/numpy/typing/tests/data/fail/datasource.pyi +++ b/numpy/typing/tests/data/fail/datasource.pyi @@ -1,4 +1,5 @@ from pathlib import Path + import numpy as np path: Path diff --git a/numpy/typing/tests/data/fail/fromnumeric.pyi b/numpy/typing/tests/data/fail/fromnumeric.pyi index 51ef26810e21..d0a81f0bfbfe 100644 --- a/numpy/typing/tests/data/fail/fromnumeric.pyi +++ b/numpy/typing/tests/data/fail/fromnumeric.pyi @@ -32,18 +32,18 @@ np.swapaxes(A, 1, [0]) # type: ignore[call-overload] np.transpose(A, axes=1.0) # type: ignore[call-overload] np.partition(a, None) # type: ignore[call-overload] -np.partition(a, 0, axis="bob") # type: ignore[call-overload] +np.partition(a, 0, axis="bob") # type: ignore[call-overload] np.partition(A, 0, kind="bob") # type: ignore[call-overload] np.partition(A, 0, order=range(5)) # type: ignore[arg-type] np.argpartition(a, None) # type: ignore[arg-type] np.argpartition(a, 0, axis="bob") # type: ignore[arg-type] -np.argpartition(A, 0, kind="bob") # type: ignore[arg-type] +np.argpartition(A, 0, kind="bob") # type: ignore[arg-type] np.argpartition(A, 0, order=range(5)) # type: ignore[arg-type] np.sort(A, axis="bob") # type: ignore[call-overload] np.sort(A, kind="bob") # type: ignore[call-overload] -np.sort(A, 
order=range(5)) # type: ignore[arg-type] +np.sort(A, order=range(5)) # type: ignore[arg-type] np.argsort(A, axis="bob") # type: ignore[arg-type] np.argsort(A, kind="bob") # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/ma.pyi b/numpy/typing/tests/data/fail/ma.pyi index ed973462e2d4..b5921660d617 100644 --- a/numpy/typing/tests/data/fail/ma.pyi +++ b/numpy/typing/tests/data/fail/ma.pyi @@ -82,7 +82,7 @@ MAR_1d_f8.any(axis=1.0) # type: ignore[call-overload] MAR_1d_f8.any(keepdims=1.0) # type: ignore[call-overload] MAR_1d_f8.any(out=1.0) # type: ignore[call-overload] -MAR_1d_f8.sort(axis=(0,1)) # type: ignore[arg-type] +MAR_1d_f8.sort(axis=(0, 1)) # type: ignore[arg-type] MAR_1d_f8.sort(axis=None) # type: ignore[arg-type] MAR_1d_f8.sort(kind='cabbage') # type: ignore[arg-type] MAR_1d_f8.sort(order=lambda: 'cabbage') # type: ignore[arg-type] @@ -101,13 +101,13 @@ np.ma.take(out=1) # type: ignore[call-overload] np.ma.take(mode="bob") # type: ignore[call-overload] MAR_1d_f8.partition(['cabbage']) # type: ignore[arg-type] -MAR_1d_f8.partition(axis=(0,1)) # type: ignore[arg-type, call-arg] +MAR_1d_f8.partition(axis=(0, 1)) # type: ignore[arg-type, call-arg] MAR_1d_f8.partition(kind='cabbage') # type: ignore[arg-type, call-arg] MAR_1d_f8.partition(order=lambda: 'cabbage') # type: ignore[arg-type, call-arg] MAR_1d_f8.partition(AR_b) # type: ignore[arg-type] MAR_1d_f8.argpartition(['cabbage']) # type: ignore[arg-type] -MAR_1d_f8.argpartition(axis=(0,1)) # type: ignore[arg-type, call-arg] +MAR_1d_f8.argpartition(axis=(0, 1)) # type: ignore[arg-type, call-arg] MAR_1d_f8.argpartition(kind='cabbage') # type: ignore[arg-type, call-arg] MAR_1d_f8.argpartition(order=lambda: 'cabbage') # type: ignore[arg-type, call-arg] MAR_1d_f8.argpartition(AR_b) # type: ignore[arg-type] @@ -116,10 +116,10 @@ np.ma.ndim(lambda: 'lambda') # type: ignore[arg-type] np.ma.size(AR_b, axis='0') # type: ignore[arg-type] -MAR_1d_f8 >= (lambda x: 'mango') # type: ignore[operator] -MAR_1d_f8 > (lambda x: 'mango') # type: ignore[operator] -MAR_1d_f8 <= (lambda x: 'mango') # type: ignore[operator] -MAR_1d_f8 < (lambda x: 'mango') # type: ignore[operator] +MAR_1d_f8 >= (lambda x: 'mango') # type: ignore[operator] +MAR_1d_f8 > (lambda x: 'mango') # type: ignore[operator] +MAR_1d_f8 <= (lambda x: 'mango') # type: ignore[operator] +MAR_1d_f8 < (lambda x: 'mango') # type: ignore[operator] MAR_1d_f8.count(axis=0.) 
# type: ignore[call-overload] @@ -129,15 +129,15 @@ MAR_1d_f8.put(4, 999, mode='flip') # type: ignore[arg-type] np.ma.put(MAR_1d_f8, 4, 999, mode='flip') # type: ignore[arg-type] -np.ma.put([1,1,3], 0, 999) # type: ignore[arg-type] +np.ma.put([1, 1, 3], 0, 999) # type: ignore[arg-type] np.ma.compressed(lambda: 'compress me') # type: ignore[call-overload] -np.ma.allequal(MAR_1d_f8, [1,2,3], fill_value=1.5) # type: ignore[arg-type] +np.ma.allequal(MAR_1d_f8, [1, 2, 3], fill_value=1.5) # type: ignore[arg-type] -np.ma.allclose(MAR_1d_f8, [1,2,3], masked_equal=4.5) # type: ignore[arg-type] -np.ma.allclose(MAR_1d_f8, [1,2,3], rtol='.4') # type: ignore[arg-type] -np.ma.allclose(MAR_1d_f8, [1,2,3], atol='.5') # type: ignore[arg-type] +np.ma.allclose(MAR_1d_f8, [1, 2, 3], masked_equal=4.5) # type: ignore[arg-type] +np.ma.allclose(MAR_1d_f8, [1, 2, 3], rtol='.4') # type: ignore[arg-type] +np.ma.allclose(MAR_1d_f8, [1, 2, 3], atol='.5') # type: ignore[arg-type] MAR_1d_f8.__setmask__('mask') # type: ignore[arg-type] @@ -147,9 +147,9 @@ MAR_td64 **= 2 # type: ignore[misc] MAR_1d_f8.swapaxes(axis1=1, axis2=0) # type: ignore[call-arg] -MAR_1d_f8.argsort(axis=(1,0)) # type: ignore[arg-type] +MAR_1d_f8.argsort(axis=(1, 0)) # type: ignore[arg-type] -np.ma.MaskedArray(np.array([1,2,3]), keep_mask='yes') # type: ignore[call-overload] -np.ma.MaskedArray(np.array([1,2,3]), subok=None) # type: ignore[call-overload] -np.ma.MaskedArray(np.array([1,2,3]), ndim=None) # type: ignore[call-overload] -np.ma.MaskedArray(np.array([1,2,3]), order='Corinthian') # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1, 2, 3]), keep_mask='yes') # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1, 2, 3]), subok=None) # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1, 2, 3]), ndim=None) # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1, 2, 3]), order='Corinthian') # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/fail/nested_sequence.pyi b/numpy/typing/tests/data/fail/nested_sequence.pyi index a28d3df3c749..1004a36accc7 100644 --- a/numpy/typing/tests/data/fail/nested_sequence.pyi +++ b/numpy/typing/tests/data/fail/nested_sequence.pyi @@ -1,4 +1,5 @@ from collections.abc import Sequence + from numpy._typing import _NestedSequence a: Sequence[float] diff --git a/numpy/typing/tests/data/fail/npyio.pyi b/numpy/typing/tests/data/fail/npyio.pyi index e204566a5877..d57ebe3a2e3e 100644 --- a/numpy/typing/tests/data/fail/npyio.pyi +++ b/numpy/typing/tests/data/fail/npyio.pyi @@ -1,8 +1,8 @@ import pathlib from typing import IO -import numpy.typing as npt import numpy as np +import numpy.typing as npt str_path: str bytes_path: bytes diff --git a/numpy/typing/tests/data/fail/scalars.pyi b/numpy/typing/tests/data/fail/scalars.pyi index bfbe9125e529..0560a855a80f 100644 --- a/numpy/typing/tests/data/fail/scalars.pyi +++ b/numpy/typing/tests/data/fail/scalars.pyi @@ -1,4 +1,3 @@ -import sys import numpy as np f2: np.float16 diff --git a/numpy/typing/tests/data/fail/shape.pyi b/numpy/typing/tests/data/fail/shape.pyi index fea055583073..a024d7bda273 100644 --- a/numpy/typing/tests/data/fail/shape.pyi +++ b/numpy/typing/tests/data/fail/shape.pyi @@ -1,4 +1,5 @@ from typing import Any + import numpy as np # test bounds of _ShapeT_co diff --git a/numpy/typing/tests/data/fail/twodim_base.pyi b/numpy/typing/tests/data/fail/twodim_base.pyi index d0f2b7ad8322..e146d68c7418 100644 --- a/numpy/typing/tests/data/fail/twodim_base.pyi +++ b/numpy/typing/tests/data/fail/twodim_base.pyi @@ -1,4 +1,4 @@ 
-from typing import Any, TypeVar +from typing import Any import numpy as np import numpy.typing as npt diff --git a/numpy/typing/tests/data/fail/type_check.pyi b/numpy/typing/tests/data/fail/type_check.pyi index 94b6ee425af5..8b68e996304c 100644 --- a/numpy/typing/tests/data/fail/type_check.pyi +++ b/numpy/typing/tests/data/fail/type_check.pyi @@ -1,5 +1,4 @@ import numpy as np -import numpy.typing as npt DTYPE_i8: np.dtype[np.int64] diff --git a/numpy/typing/tests/data/misc/extended_precision.pyi b/numpy/typing/tests/data/misc/extended_precision.pyi index 84b5f516bdde..7978faf4d5bd 100644 --- a/numpy/typing/tests/data/misc/extended_precision.pyi +++ b/numpy/typing/tests/data/misc/extended_precision.pyi @@ -1,8 +1,8 @@ +from typing import assert_type + import numpy as np from numpy._typing import _96Bit, _128Bit -from typing import assert_type - assert_type(np.float96(), np.floating[_96Bit]) assert_type(np.float128(), np.floating[_128Bit]) assert_type(np.complex192(), np.complexfloating[_96Bit, _96Bit]) diff --git a/numpy/typing/tests/data/pass/arithmetic.py b/numpy/typing/tests/data/pass/arithmetic.py index 3b2901cf2b51..62c978a2fc51 100644 --- a/numpy/typing/tests/data/pass/arithmetic.py +++ b/numpy/typing/tests/data/pass/arithmetic.py @@ -1,9 +1,11 @@ from __future__ import annotations from typing import Any, cast + +import pytest + import numpy as np import numpy.typing as npt -import pytest c16 = np.complex128(1) f8 = np.float64(1) @@ -23,7 +25,7 @@ b = bool(1) c = complex(1) f = float(1) -i = int(1) +i = 1 class Object: diff --git a/numpy/typing/tests/data/pass/array_constructors.py b/numpy/typing/tests/data/pass/array_constructors.py index 17b6fab93ad8..d91d257cb17c 100644 --- a/numpy/typing/tests/data/pass/array_constructors.py +++ b/numpy/typing/tests/data/pass/array_constructors.py @@ -3,6 +3,7 @@ import numpy as np import numpy.typing as npt + class Index: def __index__(self) -> int: return 0 diff --git a/numpy/typing/tests/data/pass/array_like.py b/numpy/typing/tests/data/pass/array_like.py index 264ec55da053..f922beae34ce 100644 --- a/numpy/typing/tests/data/pass/array_like.py +++ b/numpy/typing/tests/data/pass/array_like.py @@ -5,7 +5,7 @@ import numpy as np if TYPE_CHECKING: - from numpy._typing import NDArray, ArrayLike, _SupportsArray + from numpy._typing import ArrayLike, NDArray, _SupportsArray x1: ArrayLike = True x2: ArrayLike = 5 diff --git a/numpy/typing/tests/data/pass/arrayterator.py b/numpy/typing/tests/data/pass/arrayterator.py index 572be5e2fe29..a99c09a25231 100644 --- a/numpy/typing/tests/data/pass/arrayterator.py +++ b/numpy/typing/tests/data/pass/arrayterator.py @@ -2,6 +2,7 @@ from __future__ import annotations from typing import Any + import numpy as np AR_i8: np.ndarray[Any, np.dtype[np.int_]] = np.arange(10) diff --git a/numpy/typing/tests/data/pass/bitwise_ops.py b/numpy/typing/tests/data/pass/bitwise_ops.py index 22a245d21809..2d4815b0d940 100644 --- a/numpy/typing/tests/data/pass/bitwise_ops.py +++ b/numpy/typing/tests/data/pass/bitwise_ops.py @@ -9,7 +9,7 @@ b_ = np.bool(1) b = bool(1) -i = int(1) +i = 1 AR = np.array([0, 1, 2], dtype=np.int32) AR.setflags(write=False) diff --git a/numpy/typing/tests/data/pass/comparisons.py b/numpy/typing/tests/data/pass/comparisons.py index a461d8b660da..b2e52762c7a8 100644 --- a/numpy/typing/tests/data/pass/comparisons.py +++ b/numpy/typing/tests/data/pass/comparisons.py @@ -1,6 +1,7 @@ from __future__ import annotations -from typing import cast, Any +from typing import Any, cast + import numpy as np c16 = 
np.complex128() @@ -18,10 +19,10 @@ b_ = np.bool() -b = bool() +b = False c = complex() -f = float() -i = int() +f = 0.0 +i = 0 SEQ = (0, 1, 2, 3, 4) diff --git a/numpy/typing/tests/data/pass/index_tricks.py b/numpy/typing/tests/data/pass/index_tricks.py index dfc4ff2f314a..ea98156a8f2e 100644 --- a/numpy/typing/tests/data/pass/index_tricks.py +++ b/numpy/typing/tests/data/pass/index_tricks.py @@ -1,5 +1,7 @@ from __future__ import annotations + from typing import Any + import numpy as np AR_LIKE_b = [[True, True], [True, True]] diff --git a/numpy/typing/tests/data/pass/literal.py b/numpy/typing/tests/data/pass/literal.py index c8fa476210e3..f1e0cb2a69d3 100644 --- a/numpy/typing/tests/data/pass/literal.py +++ b/numpy/typing/tests/data/pass/literal.py @@ -1,9 +1,10 @@ from __future__ import annotations -from typing import Any, TYPE_CHECKING from functools import partial +from typing import TYPE_CHECKING, Any import pytest + import numpy as np if TYPE_CHECKING: diff --git a/numpy/typing/tests/data/pass/mod.py b/numpy/typing/tests/data/pass/mod.py index 2b7e6cd85c73..464326486fa2 100644 --- a/numpy/typing/tests/data/pass/mod.py +++ b/numpy/typing/tests/data/pass/mod.py @@ -13,7 +13,7 @@ b = bool(1) f = float(1) -i = int(1) +i = 1 AR = np.array([1], dtype=np.bool) AR.setflags(write=False) diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py index b3428f6b48c1..30ad270edd6e 100644 --- a/numpy/typing/tests/data/pass/ndarray_misc.py +++ b/numpy/typing/tests/data/pass/ndarray_misc.py @@ -9,14 +9,16 @@ from __future__ import annotations import operator -from typing import cast, Any +from typing import Any, cast import numpy as np import numpy.typing as npt + class SubClass(npt.NDArray[np.float64]): ... class IntSubClass(npt.NDArray[np.intp]): ... + i4 = np.int32(1) A: np.ndarray[Any, np.dtype[np.int32]] = np.array([[1]], dtype=np.int32) B0 = np.empty((), dtype=np.int32).view(SubClass) diff --git a/numpy/typing/tests/data/pass/numeric.py b/numpy/typing/tests/data/pass/numeric.py index 1eb14cf3a2a2..4c1ffa0b2776 100644 --- a/numpy/typing/tests/data/pass/numeric.py +++ b/numpy/typing/tests/data/pass/numeric.py @@ -6,11 +6,13 @@ """ from __future__ import annotations + from typing import cast import numpy as np import numpy.typing as npt + class SubClass(npt.NDArray[np.float64]): ... 
diff --git a/numpy/typing/tests/data/pass/random.py b/numpy/typing/tests/data/pass/random.py index bce204a7378e..fd07e378e553 100644 --- a/numpy/typing/tests/data/pass/random.py +++ b/numpy/typing/tests/data/pass/random.py @@ -1,6 +1,7 @@ from __future__ import annotations from typing import Any + import numpy as np SEED_NONE = None diff --git a/numpy/typing/tests/data/pass/recfunctions.py b/numpy/typing/tests/data/pass/recfunctions.py index 52a3d78a7622..cca0c3988708 100644 --- a/numpy/typing/tests/data/pass/recfunctions.py +++ b/numpy/typing/tests/data/pass/recfunctions.py @@ -12,7 +12,7 @@ def test_recursive_fill_fields() -> None: [(1, 10.0), (2, 20.0)], dtype=[("A", np.int64), ("B", np.float64)], ) - b = np.zeros((int(3),), dtype=a.dtype) + b = np.zeros((3,), dtype=a.dtype) out = rfn.recursive_fill_fields(a, b) assert_type(out, np.ndarray[tuple[int], np.dtype[np.void]]) @@ -51,8 +51,8 @@ def test_get_fieldstructure() -> None: def test_merge_arrays() -> None: assert_type( rfn.merge_arrays(( - np.ones((int(2),), np.int_), - np.ones((int(3),), np.float64), + np.ones((2,), np.int_), + np.ones((3,), np.float64), )), np.recarray[tuple[int], np.dtype[np.void]], ) @@ -60,7 +60,7 @@ def test_merge_arrays() -> None: def test_drop_fields() -> None: ndtype = [("a", np.int64), ("b", [("b_a", np.double), ("b_b", np.int64)])] - a = np.ones((int(3),), dtype=ndtype) + a = np.ones((3,), dtype=ndtype) assert_type( rfn.drop_fields(a, "a"), @@ -78,7 +78,7 @@ def test_drop_fields() -> None: def test_rename_fields() -> None: ndtype = [("a", np.int64), ("b", [("b_a", np.double), ("b_b", np.int64)])] - a = np.ones((int(3),), dtype=ndtype) + a = np.ones((3,), dtype=ndtype) assert_type( rfn.rename_fields(a, {"a": "A", "b_b": "B_B"}), @@ -92,7 +92,7 @@ def test_repack_fields() -> None: assert_type(rfn.repack_fields(dt), np.dtype[np.void]) assert_type(rfn.repack_fields(dt.type(0)), np.void) assert_type( - rfn.repack_fields(np.ones((int(3),), dtype=dt)), + rfn.repack_fields(np.ones((3,), dtype=dt)), np.ndarray[tuple[int], np.dtype[np.void]], ) @@ -133,14 +133,14 @@ def test_require_fields() -> None: def test_stack_arrays() -> None: - x = np.zeros((int(2),), np.int32) + x = np.zeros((2,), np.int32) assert_type( rfn.stack_arrays(x), np.ndarray[tuple[int], np.dtype[np.int32]], ) - z = np.ones((int(2),), [("A", "|S3"), ("B", float)]) - zz = np.ones((int(2),), [("A", "|S3"), ("B", np.float64), ("C", np.float64)]) + z = np.ones((2,), [("A", "|S3"), ("B", float)]) + zz = np.ones((2,), [("A", "|S3"), ("B", np.float64), ("C", np.float64)]) assert_type( rfn.stack_arrays((z, zz)), np.ma.MaskedArray[tuple[Any, ...], np.dtype[np.void]], diff --git a/numpy/typing/tests/data/pass/scalars.py b/numpy/typing/tests/data/pass/scalars.py index 655903a50bce..eeb707b255e1 100644 --- a/numpy/typing/tests/data/pass/scalars.py +++ b/numpy/typing/tests/data/pass/scalars.py @@ -1,6 +1,7 @@ import datetime as dt import pytest + import numpy as np b = np.bool() diff --git a/numpy/typing/tests/data/pass/shape.py b/numpy/typing/tests/data/pass/shape.py index 286c8a81dacf..e3b497bc0310 100644 --- a/numpy/typing/tests/data/pass/shape.py +++ b/numpy/typing/tests/data/pass/shape.py @@ -1,4 +1,4 @@ -from typing import Any, NamedTuple, cast +from typing import Any, NamedTuple import numpy as np diff --git a/numpy/typing/tests/data/pass/simple.py b/numpy/typing/tests/data/pass/simple.py index 408d2482b0ef..003e9ee58bb1 100644 --- a/numpy/typing/tests/data/pass/simple.py +++ b/numpy/typing/tests/data/pass/simple.py @@ -1,9 +1,9 @@ """Simple expression 
that should pass with mypy.""" import operator +from collections.abc import Iterable import numpy as np import numpy.typing as npt -from collections.abc import Iterable # Basic checks array = np.array([1, 2]) diff --git a/numpy/typing/tests/data/pass/ufunclike.py b/numpy/typing/tests/data/pass/ufunclike.py index f993939ddba1..c02a68cc062c 100644 --- a/numpy/typing/tests/data/pass/ufunclike.py +++ b/numpy/typing/tests/data/pass/ufunclike.py @@ -1,5 +1,7 @@ from __future__ import annotations + from typing import Any + import numpy as np diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index 5dd78a197b8f..81a98a9d96d2 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -64,7 +64,6 @@ AR_LIKE_m: list[np.timedelta64] AR_LIKE_M: list[np.datetime64] AR_LIKE_O: list[np.object_] - # Array subtraction assert_type(AR_number - AR_number, npt.NDArray[np.number]) @@ -556,7 +555,7 @@ assert_type(f16 + f8, np.floating[_128Bit] | np.float64) assert_type(f8 + f8, np.float64) assert_type(i8 + f8, np.float64) assert_type(f4 + f8, np.float32 | np.float64) -assert_type(i4 + f8,np.float64) +assert_type(i4 + f8, np.float64) assert_type(b_ + f8, np.float64) assert_type(b + f8, np.float64) assert_type(c + f8, np.complex128 | np.float64) diff --git a/numpy/typing/tests/data/reveal/arrayprint.pyi b/numpy/typing/tests/data/reveal/arrayprint.pyi index 3b339edced32..de4077654bea 100644 --- a/numpy/typing/tests/data/reveal/arrayprint.pyi +++ b/numpy/typing/tests/data/reveal/arrayprint.pyi @@ -1,6 +1,6 @@ import contextlib from collections.abc import Callable -from typing import Any, assert_type +from typing import assert_type import numpy as np import numpy.typing as npt diff --git a/numpy/typing/tests/data/reveal/bitwise_ops.pyi b/numpy/typing/tests/data/reveal/bitwise_ops.pyi index 6c6b56197546..bef7c6739605 100644 --- a/numpy/typing/tests/data/reveal/bitwise_ops.pyi +++ b/numpy/typing/tests/data/reveal/bitwise_ops.pyi @@ -1,5 +1,4 @@ -from typing import Any, TypeAlias, assert_type -from typing import Literal as L +from typing import Literal as L, TypeAlias, assert_type import numpy as np import numpy.typing as npt diff --git a/numpy/typing/tests/data/reveal/comparisons.pyi b/numpy/typing/tests/data/reveal/comparisons.pyi index 2165d17fce34..6df5a3d94314 100644 --- a/numpy/typing/tests/data/reveal/comparisons.pyi +++ b/numpy/typing/tests/data/reveal/comparisons.pyi @@ -1,6 +1,6 @@ import decimal import fractions -from typing import Any, assert_type +from typing import assert_type import numpy as np import numpy.typing as npt @@ -20,10 +20,10 @@ td = np.timedelta64(0, "D") b_ = np.bool() -b = bool() +b = False c = complex() -f = float() -i = int() +f = 0.0 +i = 0 AR = np.array([0], dtype=np.int64) AR.setflags(write=False) diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index a2ba83e6c4c8..f6cc9b9fe0d7 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -1,7 +1,6 @@ """Tests for :mod:`_core.fromnumeric`.""" from typing import Any, assert_type -from typing import Literal as L import numpy as np import numpy.typing as npt diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 52c0dfa6ab75..40caad61f24c 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -286,7 +286,7 @@ assert_type(MAR_byte <= 
MAR_byte, MaskedArray[np.bool]) assert_type(MAR_byte.count(), int) assert_type(MAR_f4.count(axis=None), int) assert_type(MAR_f4.count(axis=0), NDArray[np.int_]) -assert_type(MAR_b.count(axis=(0,1)), NDArray[np.int_]) +assert_type(MAR_b.count(axis=(0, 1)), NDArray[np.int_]) assert_type(MAR_o.count(keepdims=True), NDArray[np.int_]) assert_type(MAR_o.count(axis=None, keepdims=True), NDArray[np.int_]) assert_type(MAR_o.count(None, True), NDArray[np.int_]) @@ -294,7 +294,7 @@ assert_type(MAR_o.count(None, True), NDArray[np.int_]) assert_type(np.ma.count(MAR_byte), int) assert_type(np.ma.count(MAR_byte, axis=None), int) assert_type(np.ma.count(MAR_f4, axis=0), NDArray[np.int_]) -assert_type(np.ma.count(MAR_b, axis=(0,1)), NDArray[np.int_]) +assert_type(np.ma.count(MAR_b, axis=(0, 1)), NDArray[np.int_]) assert_type(np.ma.count(MAR_o, keepdims=True), NDArray[np.int_]) assert_type(np.ma.count(MAR_o, axis=None, keepdims=True), NDArray[np.int_]) assert_type(np.ma.count(MAR_o, None, True), NDArray[np.int_]) @@ -302,13 +302,13 @@ assert_type(np.ma.count(MAR_o, None, True), NDArray[np.int_]) assert_type(MAR_f4.compressed(), np.ndarray[tuple[int], np.dtype[np.float32]]) assert_type(np.ma.compressed(MAR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) -assert_type(np.ma.compressed([[1,2,3]]), np.ndarray[tuple[int], np.dtype]) +assert_type(np.ma.compressed([[1, 2, 3]]), np.ndarray[tuple[int], np.dtype]) -assert_type(MAR_f4.put([0,4,8], [10,20,30]), None) +assert_type(MAR_f4.put([0, 4, 8], [10, 20, 30]), None) assert_type(MAR_f4.put(4, 999), None) assert_type(MAR_f4.put(4, 999, mode='clip'), None) -assert_type(np.ma.put(MAR_f4, [0,4,8], [10,20,30]), None) +assert_type(np.ma.put(MAR_f4, [0, 4, 8], [10, 20, 30]), None) assert_type(np.ma.put(MAR_f4, 4, 999), None) assert_type(np.ma.put(MAR_f4, 4, 999, mode='clip'), None) @@ -320,7 +320,7 @@ assert_type(MAR_i8.filled(), NDArray[np.int64]) assert_type(MAR_1d.filled(), np.ndarray[tuple[int], np.dtype]) assert_type(np.ma.filled(MAR_f4, float('nan')), NDArray[np.float32]) -assert_type(np.ma.filled([[1,2,3]]), NDArray[Any]) +assert_type(np.ma.filled([[1, 2, 3]]), NDArray[Any]) # PyRight detects this one correctly, but mypy doesn't. 
# https://github.com/numpy/numpy/pull/28742#discussion_r2048968375 assert_type(np.ma.filled(MAR_1d), np.ndarray[tuple[int], np.dtype]) # type: ignore[assert-type] @@ -345,7 +345,7 @@ assert_type(np.ma.getmask(MAR_f4), NDArray[np.bool] | np.bool) # `Revealed type is "Union[numpy.ndarray[Any, Any], numpy.bool[Any]]"` assert_type(np.ma.getmask(MAR_1d), np.ndarray[tuple[int], np.dtype[np.bool]] | np.bool) # type: ignore[assert-type] assert_type(np.ma.getmask(MAR_2d_f4), np.ndarray[tuple[int, int], np.dtype[np.bool]] | np.bool) -assert_type(np.ma.getmask([1,2]), NDArray[np.bool] | np.bool) +assert_type(np.ma.getmask([1, 2]), NDArray[np.bool] | np.bool) assert_type(np.ma.getmask(np.int64(1)), np.bool) assert_type(np.ma.is_mask(MAR_1d), bool) @@ -458,12 +458,12 @@ assert_type(MAR_f8.argsort(endwith=True, fill_value=1.5, stable=False), MaskedAr assert_type(MAR_f8.flat, np.ma.core.MaskedIterator) def invalid_resize() -> None: - assert_type(MAR_f8.resize((1,1)), NoReturn) # type: ignore[arg-type] + assert_type(MAR_f8.resize((1, 1)), NoReturn) # type: ignore[arg-type] assert_type(np.ma.MaskedArray(AR_f4), MaskedArray[np.float32]) -assert_type(np.ma.MaskedArray(np.array([1,2,3]), [True, True, False], np.float16), MaskedArray[np.float16]) -assert_type(np.ma.MaskedArray(np.array([1,2,3]), dtype=np.float16), MaskedArray[np.float16]) -assert_type(np.ma.MaskedArray(np.array([1,2,3]), copy=True), MaskedArray[Any]) +assert_type(np.ma.MaskedArray(np.array([1, 2, 3]), [True, True, False], np.float16), MaskedArray[np.float16]) +assert_type(np.ma.MaskedArray(np.array([1, 2, 3]), dtype=np.float16), MaskedArray[np.float16]) +assert_type(np.ma.MaskedArray(np.array([1, 2, 3]), copy=True), MaskedArray[Any]) # TODO: This one could be made more precise, the return type could be `MaskedArraySubclassC` assert_type(np.ma.MaskedArray(MAR_subclass), MaskedArray[np.complex128]) # TODO: This one could be made more precise, the return type could be `MaskedArraySubclass[np.float32]` @@ -1017,12 +1017,12 @@ assert_type(MAR_u4 ** AR_LIKE_f, MaskedArray[np.floating]) assert_type(MAR_u4 ** AR_LIKE_c, MaskedArray[np.complexfloating]) assert_type(MAR_u4 ** AR_LIKE_o, Any) -assert_type(AR_LIKE_b ** MAR_u4 , MaskedArray[np.uint32]) -assert_type(AR_LIKE_u ** MAR_u4 , MaskedArray[np.unsignedinteger]) -assert_type(AR_LIKE_i ** MAR_u4 , MaskedArray[np.signedinteger]) -assert_type(AR_LIKE_f ** MAR_u4 , MaskedArray[np.floating]) -assert_type(AR_LIKE_c ** MAR_u4 , MaskedArray[np.complexfloating]) -assert_type(AR_LIKE_o ** MAR_u4 , Any) +assert_type(AR_LIKE_b ** MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u ** MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i ** MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f ** MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_c ** MAR_u4, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_u4, Any) assert_type(MAR_i8 ** AR_LIKE_b, MaskedArray[np.int64]) assert_type(MAR_i8 ** AR_LIKE_u, MaskedArray[np.signedinteger]) @@ -1032,11 +1032,11 @@ assert_type(MAR_i8 ** AR_LIKE_c, MaskedArray[np.complexfloating]) assert_type(MAR_i8 ** AR_LIKE_o, Any) assert_type(MAR_i8 ** AR_LIKE_b, MaskedArray[np.int64]) -assert_type(AR_LIKE_u ** MAR_i8 , MaskedArray[np.signedinteger]) -assert_type(AR_LIKE_i ** MAR_i8 , MaskedArray[np.signedinteger]) -assert_type(AR_LIKE_f ** MAR_i8 , MaskedArray[np.floating]) -assert_type(AR_LIKE_c ** MAR_i8 , MaskedArray[np.complexfloating]) -assert_type(AR_LIKE_o ** MAR_i8 , Any) +assert_type(AR_LIKE_u ** MAR_i8, MaskedArray[np.signedinteger]) 
+assert_type(AR_LIKE_i ** MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f ** MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_c ** MAR_i8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_i8, Any) assert_type(MAR_f8 ** AR_LIKE_b, MaskedArray[np.float64]) assert_type(MAR_f8 ** AR_LIKE_u, MaskedArray[np.float64]) diff --git a/numpy/typing/tests/data/reveal/mod.pyi b/numpy/typing/tests/data/reveal/mod.pyi index 59a6a1016479..9bbbd5c52d7f 100644 --- a/numpy/typing/tests/data/reveal/mod.pyi +++ b/numpy/typing/tests/data/reveal/mod.pyi @@ -1,6 +1,5 @@ import datetime as dt -from typing import Literal as L -from typing import assert_type +from typing import Literal as L, assert_type import numpy as np import numpy.typing as npt diff --git a/numpy/typing/tests/data/reveal/ndarray_conversion.pyi b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi index bbd42573a774..5cb9b98029dd 100644 --- a/numpy/typing/tests/data/reveal/ndarray_conversion.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi @@ -37,7 +37,6 @@ any_sctype: np.ndarray[Any, Any] assert_type(any_dtype.tolist(), Any) assert_type(any_sctype.tolist(), Any) - # itemset does not return a value # tobytes is pretty simple # tofile does not return a value diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index b219f4e5bec2..2972a58c328f 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -6,12 +6,11 @@ function-based counterpart in `../from_numeric.py`. """ -from collections.abc import Iterator import ctypes as ct import operator +from collections.abc import Iterator from types import ModuleType from typing import Any, Literal, assert_type - from typing_extensions import CapsuleType import numpy as np diff --git a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi index bb927035e40c..11265b92ff67 100644 --- a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi @@ -1,8 +1,7 @@ from collections.abc import Sequence from decimal import Decimal from fractions import Fraction -from typing import Any, LiteralString, TypeAlias, TypeVar, assert_type -from typing import Literal as L +from typing import Any, Literal as L, LiteralString, TypeAlias, TypeVar, assert_type import numpy as np import numpy.polynomial as npp diff --git a/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi b/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi index 45522e72102f..07d6c9d1af65 100644 --- a/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi @@ -1,8 +1,7 @@ from collections.abc import Sequence from decimal import Decimal from fractions import Fraction -from typing import Any, TypeAlias, assert_type -from typing import Literal as L +from typing import Literal as L, TypeAlias, assert_type import numpy as np import numpy.polynomial.polyutils as pu diff --git a/numpy/typing/tests/data/reveal/polynomial_series.pyi b/numpy/typing/tests/data/reveal/polynomial_series.pyi index 93f0799c818d..0f4a9e09f2e7 100644 --- a/numpy/typing/tests/data/reveal/polynomial_series.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_series.pyi @@ -1,5 +1,5 @@ from collections.abc import Sequence -from typing import Any, TypeAlias, assert_type +from typing import TypeAlias, assert_type import numpy as np import 
numpy.polynomial as npp diff --git a/numpy/typing/tests/data/reveal/ufunc_config.pyi b/numpy/typing/tests/data/reveal/ufunc_config.pyi index 748507530aa1..77c27eb3b4ca 100644 --- a/numpy/typing/tests/data/reveal/ufunc_config.pyi +++ b/numpy/typing/tests/data/reveal/ufunc_config.pyi @@ -1,10 +1,9 @@ """Typing tests for `_core._ufunc_config`.""" +from _typeshed import SupportsWrite from collections.abc import Callable from typing import Any, assert_type -from _typeshed import SupportsWrite - import numpy as np def func(a: str, b: int) -> None: ... diff --git a/numpy/typing/tests/data/reveal/ufunclike.pyi b/numpy/typing/tests/data/reveal/ufunclike.pyi index a0ede60e0158..aaae5e80e470 100644 --- a/numpy/typing/tests/data/reveal/ufunclike.pyi +++ b/numpy/typing/tests/data/reveal/ufunclike.pyi @@ -1,4 +1,4 @@ -from typing import Any, assert_type +from typing import assert_type import numpy as np import numpy.typing as npt diff --git a/ruff.toml b/ruff.toml index 4666efdf39bc..b3cfea15b190 100644 --- a/ruff.toml +++ b/ruff.toml @@ -2,7 +2,6 @@ extend-exclude = [ "numpy/__config__.py", "numpy/distutils", "numpy/typing/_char_codes.py", - "numpy/typing/tests/data", "spin/cmds.py", # Submodules. "doc/source/_static/scipy-mathjax", @@ -96,6 +95,8 @@ ignore = [ "numpy/_typing/_array_like.py" = ["E501"] "numpy/_typing/_dtype_like.py" = ["E501"] "numpy*pyi" = ["E501"] +# "useless assignments" aren't so useless when you're testing that they don't make type checkers scream +"numpy/typing/tests/data/*" = ["B015", "B018", "E501"] "__init__.py" = ["F401", "F403", "F405"] "__init__.pyi" = ["F401"] From 3231ee85e69f222dad7c39b3e662d87d486a069c Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 31 Jul 2025 16:24:56 +0100 Subject: [PATCH 0271/1018] TYP: Type ``MaskedArray.compress`` (#29480) --- numpy/ma/core.pyi | 31 ++++++++++++++++++++++++++- numpy/typing/tests/data/reveal/ma.pyi | 5 +++++ 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index d79b4695bb32..70088a44a000 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -614,7 +614,36 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): set_fill_value: Any def filled(self, /, fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... def compressed(self) -> ndarray[tuple[int], _DTypeT_co]: ... - def compress(self, condition, axis=..., out=...): ... + + @overload + def compress( + self, + condition: _ArrayLikeBool_co, + axis: _ShapeLike | None, + out: _ArrayT + ) -> _ArrayT: ... + @overload + def compress( + self, + condition: _ArrayLikeBool_co, + axis: _ShapeLike | None = None, + *, + out: _ArrayT + ) -> _ArrayT: ... + @overload + def compress( + self, + condition: _ArrayLikeBool_co, + axis: None = None, + out: None = None + ) -> MaskedArray[tuple[int], _DTypeT_co]: ... + @overload + def compress( + self, + condition: _ArrayLikeBool_co, + axis: _ShapeLike | None = None, + out: None = None + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... # TODO: How to deal with the non-commutative nature of `==` and `!=`? 
# xref numpy/numpy#17368 diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 40caad61f24c..441d39296ffa 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -301,6 +301,11 @@ assert_type(np.ma.count(MAR_o, None, True), NDArray[np.int_]) assert_type(MAR_f4.compressed(), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_f4.compress([True, False]), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_f4.compress([True, False], axis=0), MaskedArray[np.float32]) +assert_type(MAR_f4.compress([True, False], axis=0, out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.compress([True, False], 0, MAR_subclass), MaskedArraySubclassC) + assert_type(np.ma.compressed(MAR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) assert_type(np.ma.compressed([[1, 2, 3]]), np.ndarray[tuple[int], np.dtype]) From 0fbd49aad6d941008ccbb32c919186770b153483 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 31 Jul 2025 16:25:36 +0100 Subject: [PATCH 0272/1018] TYP: Type ``MaskedArray.__setitem__`` (#29478) --- numpy/ma/core.pyi | 1 - numpy/typing/tests/data/pass/ma.py | 32 +++++++++++++++++++++--------- 2 files changed, 23 insertions(+), 10 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 70088a44a000..c1e98a61b4de 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -577,7 +577,6 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __getitem__(self: _MaskedArray[void], indx: list[str], /) -> MaskedArray[_ShapeT_co, dtype[void]]: ... - def __setitem__(self, indx, value): ... @property def shape(self) -> _ShapeT_co: ... @shape.setter diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py index 02655d7f88c7..2e2246fb88d2 100644 --- a/numpy/typing/tests/data/pass/ma.py +++ b/numpy/typing/tests/data/pass/ma.py @@ -1,3 +1,4 @@ +import datetime as dt from typing import Any, TypeAlias, TypeVar, cast import numpy as np @@ -15,11 +16,16 @@ MAR_f: MaskedArray[np.float64] = np.ma.MaskedArray([1.0]) MAR_c: MaskedArray[np.complex128] = np.ma.MaskedArray([1j]) MAR_td64: MaskedArray[np.timedelta64] = np.ma.MaskedArray([np.timedelta64(1, "D")]) -MAR_M_dt64: MaskedArray[np.datetime64] = np.ma.MaskedArray([np.datetime64(1, "D")]) +MAR_dt64: MaskedArray[np.datetime64] = np.ma.MaskedArray([np.datetime64(1, "D")]) MAR_S: MaskedArray[np.bytes_] = np.ma.MaskedArray([b'foo'], dtype=np.bytes_) MAR_U: MaskedArray[np.str_] = np.ma.MaskedArray(['foo'], dtype=np.str_) MAR_T = cast(np.ma.MaskedArray[Any, np.dtypes.StringDType], np.ma.MaskedArray(["a"], dtype="T")) +MAR_V: MaskedArray[np.void] = np.ma.MaskedArray( + [(1, 1)], + mask=[(False, False)], + dtype=[('a', int), ('b', int)] +) AR_b: npt.NDArray[np.bool] = np.array([True, False, True]) @@ -38,6 +44,14 @@ MAR_f.flat = [9] +MAR_b[MAR_i > 0] = False +MAR_i[:] = 1 +MAR_f[[0]] = AR_LIKE_f +MAR_c[[0, 0]] = [3, 4 + 3j] +MAR_td64[0] = dt.timedelta(1) +MAR_dt64[0] = dt.datetime(2020, 1, 1) +MAR_V['a'] = [2] + # Inplace addition MAR_b += AR_LIKE_b @@ -65,10 +79,10 @@ MAR_td64 += AR_LIKE_u MAR_td64 += AR_LIKE_i MAR_td64 += AR_LIKE_m -MAR_M_dt64 += AR_LIKE_b -MAR_M_dt64 += AR_LIKE_u -MAR_M_dt64 += AR_LIKE_i -MAR_M_dt64 += AR_LIKE_m +MAR_dt64 += AR_LIKE_b +MAR_dt64 += AR_LIKE_u +MAR_dt64 += AR_LIKE_i +MAR_dt64 += AR_LIKE_m MAR_S += b'snakes' MAR_U += 'snakes' @@ -97,10 +111,10 @@ MAR_td64 -= AR_LIKE_u MAR_td64 -= AR_LIKE_i MAR_td64 -= AR_LIKE_m 
-MAR_M_dt64 -= AR_LIKE_b -MAR_M_dt64 -= AR_LIKE_u -MAR_M_dt64 -= AR_LIKE_i -MAR_M_dt64 -= AR_LIKE_m +MAR_dt64 -= AR_LIKE_b +MAR_dt64 -= AR_LIKE_u +MAR_dt64 -= AR_LIKE_i +MAR_dt64 -= AR_LIKE_m # Inplace floor division From 9da497f5c88a81669c954c3a9e328a15ce0a7220 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Fri, 1 Aug 2025 01:57:37 +0200 Subject: [PATCH 0273/1018] MAINT: bump `mypy` to `1.17.1` (#29493) --- environment.yml | 2 +- requirements/test_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index 17de8d3eeb5e..dd7fa1a83e5b 100644 --- a/environment.yml +++ b/environment.yml @@ -25,7 +25,7 @@ dependencies: - hypothesis # For type annotations - typing_extensions>=4.5.0 - - mypy=1.16.1 + - mypy=1.17.1 - orjson # makes mypy faster # For building docs - sphinx>=4.5.0 diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index e50919ef4f7b..b26a5f27bb05 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -12,7 +12,7 @@ pytest-timeout # For testing types. Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # NOTE: Keep mypy in sync with environment.yml -mypy==1.16.1; platform_python_implementation != "PyPy" +mypy==1.17.1; platform_python_implementation != "PyPy" typing_extensions>=4.5.0 # for optional f2py encoding detection charset-normalizer From 60ad38e990073ee7bb6ca657139e67ea2bc8ad18 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Aug 2025 17:13:36 +0000 Subject: [PATCH 0274/1018] MAINT: Bump pypa/cibuildwheel from 3.1.2 to 3.1.3 Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 3.1.2 to 3.1.3. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/9e4e50bd76b3190f55304387e333f6234823ea9b...352e01339f0a173aa2a3eb57f01492e341e83865) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-version: 3.1.3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 5c1b35653d68..2b2ab586ba18 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -49,7 +49,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@9e4e50bd76b3190f55304387e333f6234823ea9b # v3.1.2 + - uses: pypa/cibuildwheel@352e01339f0a173aa2a3eb57f01492e341e83865 # v3.1.3 env: CIBW_PLATFORM: pyodide diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 3fd37bb60ee5..4fcd8a58f53a 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -174,7 +174,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@9e4e50bd76b3190f55304387e333f6234823ea9b # v3.1.2 + uses: pypa/cibuildwheel@352e01339f0a173aa2a3eb57f01492e341e83865 # v3.1.3 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From f5a6af86acab7fcd1644fad76b5fbe466a0a98dd Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Sun, 3 Aug 2025 03:56:15 +0100 Subject: [PATCH 0275/1018] TYP: Type ``MaskedArray.__array_finalize__`` and ``MaskedArray.__array_wrap__`` (#29483) --- numpy/ma/core.pyi | 9 +++++++-- numpy/typing/tests/data/reveal/ma.pyi | 2 ++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index c1e98a61b4de..e99e0a527773 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -54,6 +54,7 @@ from numpy import ( signedinteger, str_, timedelta64, + ufunc, unsignedinteger, void, ) @@ -520,8 +521,12 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): order: _OrderKACF | None = None, ) -> _MaskedArray[Any]: ... - def __array_finalize__(self, obj): ... - def __array_wrap__(self, obj, context=..., return_scalar=...): ... + def __array_wrap__( + self, + obj: ndarray[_ShapeT, _DTypeT], + context: tuple[ufunc, tuple[Any, ...], int] | None = None, + return_scalar: bool = False, + ) -> MaskedArray[_ShapeT, _DTypeT]: ... @overload # () def view(self, /, dtype: None = None, type: None = None, fill_value: _ScalarLike_co | None = None) -> Self: ... 
diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi
index 441d39296ffa..a09fb2d75997 100644
--- a/numpy/typing/tests/data/reveal/ma.pyi
+++ b/numpy/typing/tests/data/reveal/ma.pyi
@@ -313,6 +313,8 @@ assert_type(MAR_f4.put([0, 4, 8], [10, 20, 30]), None)
 assert_type(MAR_f4.put(4, 999), None)
 assert_type(MAR_f4.put(4, 999, mode='clip'), None)
 
+assert_type(MAR_c8.__array_wrap__(AR_b), MaskedArray[np.bool])
+
 assert_type(np.ma.put(MAR_f4, [0, 4, 8], [10, 20, 30]), None)
 assert_type(np.ma.put(MAR_f4, 4, 999), None)
 assert_type(np.ma.put(MAR_f4, 4, 999, mode='clip'), None)

From 4bb30632369548e1e91f37ecfd7eab4d9815b24f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C5=82=20G=C3=B3rny?=
Date: Sun, 3 Aug 2025 05:07:01 +0200
Subject: [PATCH 0276/1018] Document `cpu-baseline-detect`

---
 doc/source/reference/simd/build-options.rst | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/doc/source/reference/simd/build-options.rst b/doc/source/reference/simd/build-options.rst
index 2b7039136e75..1f105e27077d 100644
--- a/doc/source/reference/simd/build-options.rst
+++ b/doc/source/reference/simd/build-options.rst
@@ -17,6 +17,11 @@ that target certain CPU features:
   During the runtime, NumPy modules will fail to load if any of specified
   features are not supported by the target CPU (raises Python runtime error).
 
+- ``cpu-baseline-detect``: controls detection of the CPU baseline from compiler
+  flags. The default value is ``auto``, which enables detection when ``-march=``
+  or a similar compiler flag is used. The other possible values are ``enabled``
+  and ``disabled``, which respectively enable or disable it unconditionally.
+
 - ``cpu-dispatch``: dispatched set of additional CPU features.
   Default value is ``max -xop -fma4`` which enables all CPU features,
   except for AMD legacy features (in case of X86).
@@ -182,7 +187,7 @@ Behaviors
 
 - ``cpu-baseline`` will be treated as "native" if compiler native flag
   ``-march=native`` or ``-xHost`` or ``/QxHost`` is enabled through environment variable
-  ``CFLAGS``::
+  ``CFLAGS`` and ``cpu-baseline-detect`` is not ``disabled``::
 
     export CFLAGS="-march=native"
     pip install .

From ae772111208d2e0e536dac94830d13b13198b99e Mon Sep 17 00:00:00 2001
From: ixgbe <1113177880@qq.com>
Date: Tue, 5 Aug 2025 00:17:45 +0800
Subject: [PATCH 0277/1018] ENH: Use extern C in arraytypes.h.src file for cpp
 files (#29504)

Signed-off-by: Wang Yang
---
 numpy/_core/src/multiarray/arraytypes.h.src | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/numpy/_core/src/multiarray/arraytypes.h.src b/numpy/_core/src/multiarray/arraytypes.h.src
index a5613aa8dad6..ca8dbeaa67eb 100644
--- a/numpy/_core/src/multiarray/arraytypes.h.src
+++ b/numpy/_core/src/multiarray/arraytypes.h.src
@@ -1,6 +1,10 @@
 #ifndef NUMPY_CORE_SRC_MULTIARRAY_ARRAYTYPES_H_
 #define NUMPY_CORE_SRC_MULTIARRAY_ARRAYTYPES_H_
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 #include "common.h"
 
 NPY_NO_EXPORT int
@@ -165,4 +169,8 @@ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT int BOOL_argmax,
 NPY_NO_EXPORT npy_intp
 count_nonzero_trivial_dispatcher(npy_intp count, const char* data, npy_intp stride, int dtype_num);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif  /* NUMPY_CORE_SRC_MULTIARRAY_ARRAYTYPES_H_ */

From 02d0964feeec4af27787c02242eff26961200705 Mon Sep 17 00:00:00 2001
From: Warren Weckesser
Date: Mon, 4 Aug 2025 14:22:59 -0400
Subject: [PATCH 0278/1018] BUG: Casting from one timedelta64 to another
 didn't handle NAT.

Closes gh-29497.
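A minimal sketch of the problem (a hypothetical session; it assumes the
scenario described in gh-29497, where the unit conversion was applied to
the NaT sentinel instead of propagating it):

    import numpy as np

    # Casting NaT between two timedelta64 units should preserve NaT.
    nat = np.timedelta64("NaT", "ms")
    # Expected output: NaT. Prior to this change, the conversion could
    # rescale the internal NPY_DATETIME_NAT sentinel value instead.
    print(nat.astype("timedelta64[s]"))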
--- numpy/_core/src/multiarray/datetime.c | 15 +++++++++------ numpy/_core/tests/test_datetime.py | 15 +++++++++++++++ 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index d820474532ca..0dac36a0903b 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -3118,15 +3118,18 @@ cast_datetime_to_datetime(PyArray_DatetimeMetaData *src_meta, */ NPY_NO_EXPORT int cast_timedelta_to_timedelta(PyArray_DatetimeMetaData *src_meta, - PyArray_DatetimeMetaData *dst_meta, - npy_timedelta src_dt, - npy_timedelta *dst_dt) + PyArray_DatetimeMetaData *dst_meta, + npy_timedelta src_dt, + npy_timedelta *dst_dt) { npy_int64 num = 0, denom = 0; - /* If the metadata is the same, short-circuit the conversion */ - if (src_meta->base == dst_meta->base && - src_meta->num == dst_meta->num) { + /* + * If the metadata is the same or if src_dt is NAT, short-circuit + * the conversion. + */ + if ((src_meta->base == dst_meta->base && src_meta->num == dst_meta->num) + || src_dt == NPY_DATETIME_NAT) { *dst_dt = src_dt; return 0; } diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index 888bb7db293b..70b55c500f3d 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -844,6 +844,21 @@ def test_timedelta_array_str(self): a = np.array([-1, 'NaT', 1234567], dtype=' Date: Tue, 5 Aug 2025 13:27:04 +0200 Subject: [PATCH 0279/1018] BLD: update vendored Meson to 1.8.3 (#29509) --- vendored-meson/meson | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendored-meson/meson b/vendored-meson/meson index f754c4258805..e72c717199fa 160000 --- a/vendored-meson/meson +++ b/vendored-meson/meson @@ -1 +1 @@ -Subproject commit f754c4258805056ed7be09830d96af45215d341b +Subproject commit e72c717199fa18d34020c7c97f9de3f388c5e055 From 287cf8f7edadc99999da64f8be053f55b9c5d63e Mon Sep 17 00:00:00 2001 From: riku-sakamoto <46015196+riku-sakamoto@users.noreply.github.com> Date: Tue, 5 Aug 2025 22:05:55 +0900 Subject: [PATCH 0280/1018] DOC: Add narrative documentation for printing NumPy arrays (#29502) * DOC: Add narrative documentation for printing NumPy arrays Resolves #29476. This commit adds a narrative guide on how to print NumPy arrays to the "how-to" section. [skip cirrus] [skip actions] [skip azp] * DOC: apply suggested changes to narrative documentation [skip cirrus] [skip actions] [skip azp] --- doc/source/user/how-to-print.rst | 112 +++++++++++++++++++++++++++++++ doc/source/user/howtos_index.rst | 1 + 2 files changed, 113 insertions(+) create mode 100644 doc/source/user/how-to-print.rst diff --git a/doc/source/user/how-to-print.rst b/doc/source/user/how-to-print.rst new file mode 100644 index 000000000000..6195b6ed4c70 --- /dev/null +++ b/doc/source/user/how-to-print.rst @@ -0,0 +1,112 @@ +.. _how-to-print: + +======================= + Printing NumPy Arrays +======================= + + +This page explains how to control the formatting of printed NumPy arrays. +Note that these printing options apply only to arrays, not to scalars. + +Defining printing options +========================= + +Applying settings globally +-------------------------- + +Use :func:`numpy.set_printoptions` to change printing options for the entire runtime session. 
To inspect current print settings, use :func:`numpy.get_printoptions`:
+
+    >>> np.set_printoptions(precision=2)
+    >>> np.get_printoptions()
+    {'edgeitems': 3, 'threshold': 1000, 'floatmode': 'maxprec', 'precision': 2, 'suppress': False, 'linewidth': 75, 'nanstr': 'nan', 'infstr': 'inf', 'sign': '-', 'formatter': None, 'legacy': False, 'override_repr': None}
+
+To restore the default settings, use:
+
+    >>> np.set_printoptions(edgeitems=3, infstr='inf',
+    ...                     linewidth=75, nanstr='nan', precision=8,
+    ...                     suppress=False, threshold=1000, formatter=None)
+
+
+Applying settings temporarily
+-----------------------------
+
+Use :func:`numpy.printoptions` as a context manager to temporarily override print settings within a specific scope:
+
+
+    >>> arr = np.array([0.155, 0.184, 0.173])
+    >>> with np.printoptions(precision=2):
+    ...     print(arr)
+    [0.15 0.18 0.17]
+
+
+All keywords that apply to :func:`numpy.set_printoptions` also apply to :func:`numpy.printoptions`.
+
+
+Changing the number of digits of precision
+==========================================
+
+The default number of fractional digits displayed is 8. You can change this number using the ``precision`` keyword.
+
+    >>> arr = np.array([0.1, 0.184, 0.17322])
+    >>> with np.printoptions(precision=2):
+    ...     print(arr)
+    [0.1  0.18 0.17]
+
+
+The ``floatmode`` option determines how the ``precision`` setting is interpreted.
+The default, ``floatmode='maxprec'``, prints at most ``precision`` fractional digits,
+letting an element be shown with fewer digits when fewer are enough to represent it uniquely.
+If you want to show exactly the number of digits specified by ``precision``, use ``floatmode='fixed'``:
+
+    >>> arr = np.array([0.1, 0.184, 0.173], dtype=np.float32)
+    >>> with np.printoptions(precision=2, floatmode="fixed"):
+    ...     print(arr)
+    [0.10 0.18 0.17]
+
+
+Changing how `nan` and `inf` are displayed
+==========================================
+
+By default, `numpy.nan` is displayed as `nan` and `numpy.inf` is displayed as `inf`.
+You can override these representations using the ``nanstr`` and ``infstr`` options:
+
+    >>> arr = np.array([np.inf, np.nan, 0])
+    >>> with np.printoptions(nanstr="NAN", infstr="INF"):
+    ...     print(arr)
+    [INF NAN  0.]
+
+
+Controlling scientific notation
+===============================
+
+By default, NumPy uses scientific notation when:
+
+- The absolute value of the smallest number is less than ``1e-4``, or
+- The ratio of the largest to the smallest absolute value is greater than ``1e3``
+
+    >>> arr = np.array([0.00002, 210000.0, 3.14])
+    >>> print(arr)
+    [2.00e-05 2.10e+05 3.14e+00]
+
+To suppress scientific notation and always use fixed-point notation, set ``suppress=True``:
+
+    >>> arr = np.array([0.00002, 210000.0, 3.14])
+    >>> with np.printoptions(suppress=True):
+    ...     print(arr)
+    [     0.00002 210000.           3.14   ]
+
+
+
+Applying custom formatting functions
+====================================
+
+You can apply custom formatting functions to specific or all data types using the ``formatter`` keyword.
+See :func:`numpy.set_printoptions` for more details on supported format keys.
+
+For example, to format `datetime64` values with a custom function:
+
+    >>> arr = np.array([np.datetime64("2025-01-01"), np.datetime64("2024-01-01")])
+    >>> with np.printoptions(formatter={"datetime":lambda x: f"(Year: {x.item().year}, Month: {x.item().month})"}):
+    ...     print(arr)
+    [(Year: 2025, Month: 1) (Year: 2024, Month: 1)]
diff --git a/doc/source/user/howtos_index.rst b/doc/source/user/howtos_index.rst
index ca30f7e9115d..a8a8229dd7dd 100644
--- a/doc/source/user/howtos_index.rst
+++ b/doc/source/user/howtos_index.rst
@@ -16,3 +16,4 @@ the package, see the :ref:`API reference <reference>`.
    how-to-index
    how-to-verify-bug
    how-to-partition
+   how-to-print

From 6ad69259282d007bdfd15f12ea638b50b7e95c76 Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Tue, 5 Aug 2025 10:44:27 -0600
Subject: [PATCH 0281/1018] TST: don't explicitly specify -j in TSAN build

[skip cirrus] [skip azp] [skip circleci]
---
 .github/workflows/compiler_sanitizers.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml
index 0581f7fc591b..a02f6f0c1609 100644
--- a/.github/workflows/compiler_sanitizers.yml
+++ b/.github/workflows/compiler_sanitizers.yml
@@ -89,7 +89,7 @@ jobs:
       run: pip uninstall -y pytest-xdist
 
     - name: Build NumPy with ThreadSanitizer
-      run: python -m spin build -j2 -- -Db_sanitize=thread
+      run: python -m spin build -- -Db_sanitize=thread
 
     - name: Run tests under prebuilt TSAN container
       run: |

From d1a2ccc53b8c4ac13f1d10c6c1db4d5be1eecadd Mon Sep 17 00:00:00 2001
From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com>
Date: Tue, 5 Aug 2025 14:52:40 -0400
Subject: [PATCH 0282/1018] MAINT: Bump hypothesis to 6.137.1 (#29513)

---
 requirements/test_requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt
index b26a5f27bb05..44f73a844591 100644
--- a/requirements/test_requirements.txt
+++ b/requirements/test_requirements.txt
@@ -2,7 +2,7 @@ Cython
 wheel==0.38.1
 setuptools==65.5.1 ; python_version < '3.12'
 setuptools ; python_version >= '3.12'
-hypothesis==6.104.1
+hypothesis==6.137.1
 pytest==7.4.0
 pytest-cov==4.1.0
 meson

From 72574772c1cefe1ccdabc254994a4b45602f977f Mon Sep 17 00:00:00 2001
From: Warren Weckesser
Date: Wed, 6 Aug 2025 13:20:28 -0400
Subject: [PATCH 0283/1018] BUG: random: Fix handling of very small p in
 Generator.binomial.

Use `log1p(-p)` in place of `log(1 - p)` so that the term is computed
with full precision even when `p` is very small.

To avoid changing the legacy interface, a copy of the unaltered function
`random_binomial_inversion` is used in the legacy distributions code.
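A sketch of the precision issue (illustrative only): for very small `p`,
`1.0 - p` rounds to exactly `1.0` in double precision, so `log(1.0 - p)`
evaluates to `0.0` and the inversion sampler degenerates, while
`log1p(-p)` retains the leading `-p` term:

    import numpy as np

    p = 5e-17
    # 1.0 - p rounds to 1.0, so the tiny probability is lost entirely.
    print(np.log(1.0 - p))  # 0.0
    # log1p avoids forming 1.0 - p explicitly and keeps full precision.
    print(np.log1p(-p))     # -5e-17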
--- .../random/src/distributions/distributions.c | 2 +- .../random/src/legacy/legacy-distributions.c | 42 ++++++++++++++++++- numpy/random/tests/test_generator_mt19937.py | 18 ++++++++ 3 files changed, 59 insertions(+), 3 deletions(-) diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c index aa81a4a173d4..5ff694b2cde9 100644 --- a/numpy/random/src/distributions/distributions.c +++ b/numpy/random/src/distributions/distributions.c @@ -770,7 +770,7 @@ RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state, RAND_INT_TYPE n, binomial->psave = p; binomial->has_binomial = 1; binomial->q = q = 1.0 - p; - binomial->r = qn = exp(n * log(q)); + binomial->r = qn = exp(n * log1p(-p)); binomial->c = np = n * p; binomial->m = bound = (RAND_INT_TYPE)MIN(n, np + 10.0 * sqrt(np * q + 1)); } else { diff --git a/numpy/random/src/legacy/legacy-distributions.c b/numpy/random/src/legacy/legacy-distributions.c index 14d9ce25f255..385c4c239a57 100644 --- a/numpy/random/src/legacy/legacy-distributions.c +++ b/numpy/random/src/legacy/legacy-distributions.c @@ -228,6 +228,44 @@ double legacy_exponential(aug_bitgen_t *aug_state, double scale) { return scale * legacy_standard_exponential(aug_state); } +static RAND_INT_TYPE legacy_random_binomial_inversion( + bitgen_t *bitgen_state, RAND_INT_TYPE n, double p, binomial_t *binomial +) +{ + double q, qn, np, px, U; + RAND_INT_TYPE X, bound; + + if (!(binomial->has_binomial) || (binomial->nsave != n) || + (binomial->psave != p)) { + binomial->nsave = n; + binomial->psave = p; + binomial->has_binomial = 1; + binomial->q = q = 1.0 - p; + binomial->r = qn = exp(n * log(q)); + binomial->c = np = n * p; + binomial->m = bound = (RAND_INT_TYPE)MIN(n, np + 10.0 * sqrt(np * q + 1)); + } else { + q = binomial->q; + qn = binomial->r; + np = binomial->c; + bound = binomial->m; + } + X = 0; + px = qn; + U = next_double(bitgen_state); + while (U > px) { + X++; + if (X > bound) { + X = 0; + px = qn; + U = next_double(bitgen_state); + } else { + U -= px; + px = ((n - X + 1) * p * px) / (X * q); + } + } + return X; +} static RAND_INT_TYPE legacy_random_binomial_original(bitgen_t *bitgen_state, double p, @@ -237,14 +275,14 @@ static RAND_INT_TYPE legacy_random_binomial_original(bitgen_t *bitgen_state, if (p <= 0.5) { if (p * n <= 30.0) { - return random_binomial_inversion(bitgen_state, n, p, binomial); + return legacy_random_binomial_inversion(bitgen_state, n, p, binomial); } else { return random_binomial_btpe(bitgen_state, n, p, binomial); } } else { q = 1.0 - p; if (q * n <= 30.0) { - return n - random_binomial_inversion(bitgen_state, n, q, binomial); + return n - legacy_random_binomial_inversion(bitgen_state, n, q, binomial); } else { return n - random_binomial_btpe(bitgen_state, n, q, binomial); } diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index 50c232d4a8e7..c32612218fcf 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -99,6 +99,24 @@ def test_p_is_nan(self): # Issue #4571. assert_raises(ValueError, random.binomial, 1, np.nan) + def test_p_extremely_small(self): + n = 50000000000 + p = 5e-17 + sample_size = 20000000 + x = random.binomial(n, p, size=sample_size) + sample_mean = x.mean() + expected_mean = n*p + sigma = np.sqrt(n*p*(1 - p)/sample_size) + # Note: the parameters were chosen so that expected_mean - 6*sigma + # is a positive value. 
The first `assert` below validates that + # assumption (in case someone edits the parameters in the future). + # The second `assert` is the actual test. + low_bound = expected_mean - 6*sigma + assert low_bound > 0, "bad test params: 6-sigma lower bound is negative" + test_msg = (f"sample mean {sample_mean} deviates from the expected mean " + f"{expected_mean} by more than 6*sigma") + assert abs(expected_mean - sample_mean) < 6*sigma, test_msg + class TestMultinomial: def test_basic(self): From 3f7f06fd9a22bbe1c1450dc993fdb3fd57d91dbf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 Aug 2025 17:34:59 +0000 Subject: [PATCH 0284/1018] MAINT: Bump actions/download-artifact from 4.3.0 to 5.0.0 Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 4.3.0 to 5.0.0. - [Release notes](https://github.com/actions/download-artifact/releases) - [Commits](https://github.com/actions/download-artifact/compare/d3f86a106a0bac45b974a628896c90dbdf5c8093...634f93cb2916e3fdff6788551b99b062d0335ce0) --- updated-dependencies: - dependency-name: actions/download-artifact dependency-version: 5.0.0 dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/emscripten.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 2b2ab586ba18..8269a8dcd705 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -73,7 +73,7 @@ jobs: (github.event_name == 'schedule') steps: - name: Download wheel artifact(s) - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 with: path: wheelhouse/ merge-multiple: true From 8a153faba14b7e65583682bf6af7ecf12f173c70 Mon Sep 17 00:00:00 2001 From: Maryanne Wachter Date: Wed, 6 Aug 2025 10:58:42 -0700 Subject: [PATCH 0285/1018] CI: Add UBSAN CI jobs for macOS arm64 and Linux x86-64 (#29487) --- .github/workflows/compiler_sanitizers.yml | 51 ++++++++++++++++++++--- tools/ci/ubsan_suppressions_arm64.txt | 51 +++++++++++++++++++++++ tools/ci/ubsan_suppressions_x86_64.txt | 31 ++++++++++++++ 3 files changed, 128 insertions(+), 5 deletions(-) create mode 100644 tools/ci/ubsan_suppressions_arm64.txt create mode 100644 tools/ci/ubsan_suppressions_x86_64.txt diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index a02f6f0c1609..dc65449ba752 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -21,7 +21,7 @@ permissions: contents: read # to fetch code (actions/checkout) jobs: - clang_ASAN: + clang_ASAN_UBSAN: # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' runs-on: macos-latest @@ -64,12 +64,14 @@ jobs: pip uninstall -y pytest-xdist - name: Build run: - python -m spin build -j2 -- -Db_sanitize=address + python -m spin build -- -Db_sanitize=address,undefined -Db_lundef=false - name: Test run: | # pass -s to pytest to see ASAN errors and warnings, otherwise pytest captures them + # Ignore test_casting_floatingpoint_errors on macOS for now - causes crash inside UBSAN ASAN_OPTIONS=detect_leaks=0:symbolize=1:strict_init_order=true:allocator_may_return_null=1 \ - python -m spin test -- -v -s --timeout=600 --durations=10 + 
UBSAN_OPTIONS=halt_on_error=1:suppressions=${GITHUB_WORKSPACE}/tools/ci/ubsan_suppressions_arm64.txt \ + python -m spin test -- -k "not test_casting_floatingpoint_errors" -v -s --timeout=600 --durations=10 clang_TSAN: # To enable this workflow on a fork, comment out: @@ -78,9 +80,9 @@ jobs: container: image: ghcr.io/nascheme/numpy-tsan:3.14t options: --shm-size=2g # increase memory for large matrix ops - + steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Trust working directory and initialize submodules run: | git config --global --add safe.directory /__w/numpy/numpy @@ -98,3 +100,42 @@ jobs: python -m spin test \ `find numpy -name "test*.py" | xargs grep -l "import threading" | tr '\n' ' '` \ -- -v -s --timeout=600 --durations=10 + + ubuntu_UBSAN: + # To enable this workflow on a fork, comment out: + if: github.repository == 'numpy/numpy' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + - name: Set up pyenv + run: | + git clone https://github.com/pyenv/pyenv.git "$HOME/.pyenv" + PYENV_ROOT="$HOME/.pyenv" + PYENV_BIN="$PYENV_ROOT/bin" + PYENV_SHIMS="$PYENV_ROOT/shims" + echo "$PYENV_BIN" >> $GITHUB_PATH + echo "$PYENV_SHIMS" >> $GITHUB_PATH + echo "PYENV_ROOT=$PYENV_ROOT" >> $GITHUB_ENV + - name: Check pyenv is working + run: pyenv --version + - name: Build python with address sanitizer + run: | + CONFIGURE_OPTS="--with-address-sanitizer" pyenv install 3.14t + pyenv global 3.14t + - name: Install dependencies + run: | + pip install -r requirements/build_requirements.txt + pip install -r requirements/ci_requirements.txt + pip install -r requirements/test_requirements.txt + - name: Build numpy with UndefinedBehaviorSanitizer + run: python -m spin build -- -Db_sanitize=address,undefined -Db_lundef=false + - name: Test + run: | + # pass -s to pytest to see UBSAN errors and warnings, otherwise pytest captures them + ASAN_OPTIONS=detect_leaks=0:symbolize=1:strict_init_order=true:allocator_may_return_null=1 \ + UBSAN_OPTIONS=halt_on_error=1:suppressions=${GITHUB_WORKSPACE}/tools/ci/ubsan_suppressions_x86_64.txt \ + spin test -- -v -s --timeout=600 --durations=10 diff --git a/tools/ci/ubsan_suppressions_arm64.txt b/tools/ci/ubsan_suppressions_arm64.txt new file mode 100644 index 000000000000..69de4a4c425f --- /dev/null +++ b/tools/ci/ubsan_suppressions_arm64.txt @@ -0,0 +1,51 @@ +# This file contains suppressions for the default (with GIL) build to prevent runtime errors +# when numpy is built with -Db_sanitize=undefined for arm64 architecture +# +# reference: https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html#available-checks + +# Per this prior discussion, integer overflow is not a concern +# https://github.com/numpy/numpy/issues/24209#issuecomment-2160154181 +signed-integer-overflow:* + +# all alignment runtime errors ignored in favor of this being tracked via TypeSanitizer +# otherwise ubsan may detect system file alignment errors outside numpy +alignment:* + +# suggested fix for runtime error: use INT_MIN constant +shift-base:_core/src/umath/_rational_tests.c +# suggested fix for runtime error: check for overflow if signed +shift-base:_core/src/npymath/npy_math_internal.h + +# suggested fix for runtime error: null check before loop +pointer-overflow:_core/src/common/simd/neon/memory.h +pointer-overflow:_core/src/multiarray/datetime_busdaycal.c 
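+# (UBSAN's pointer-overflow check fires when pointer arithmetic wraps around
+# or applies a non-zero offset to a null pointer, e.g. computing an end
+# pointer as base + stride * n with a null base for an empty buffer;
+# hence the suggested fix of a null check before the loop.)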
+pointer-overflow:_core/src/multiarray/nditer_templ.c +pointer-overflow:_core/src/multiarray/nditer_constr.c +pointer-overflow:_core/src/umath/loops_arithm_fp.dispatch.c.src +pointer-overflow:_core/src/umath/loops_unary.dispatch.c.src +pointer-overflow:_core/src/umath/loops_unary_complex.dispatch.c.src +pointer-overflow:_core/src/umath/loops_unary_fp_le.dispatch.c.src +pointer-overflow:_core/src/umath/string_buffer.h +pointer-overflow:linalg/umath_linalg.cpp +pointer-overflow:numpy/random/bit_generator.pyx.c + +float-cast-overflow:_core/src/multiarray/lowlevel_strided_loops.c.src + +# flagged in CI - call to function through pointer to incorrect function type +# Many functions in the modules/files listed below cause undefined behavior in CI +# generally disable this check until further investigation, but keep the specific files +# as a starting point for resolving the checks later +function:_core/src/* +function:numpy/random/* +# function:_core/src/common/cblasfunc.c +# function:_core/src/common/npy_argparse.c +# function:_core/src/multiarray/number.c +# function:_core/src/multiarray/ctors.c +# function:_core/src/multiarray/convert_datatype.c +# function:_core/src/multiarray/dtype_transfer.c +# function:_core/src/multiarray/dtype_traversal.c +# function:_core/src/multiarray/getset.c +# function:_core/src/multiarray/scalarapi.c +# function:_core/src/multiarray/scalartypes.c.src +# function:_core/src/umath/* +# function:numpy/random/* diff --git a/tools/ci/ubsan_suppressions_x86_64.txt b/tools/ci/ubsan_suppressions_x86_64.txt new file mode 100644 index 000000000000..d9872a691a81 --- /dev/null +++ b/tools/ci/ubsan_suppressions_x86_64.txt @@ -0,0 +1,31 @@ +# This file contains suppressions for the default (with GIL) build to prevent runtime errors +# when numpy is built with -Db_sanitize=undefined for x86_64 architecture +# +# reference: https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html#available-checks + +# Per this prior discussion, integer overflow is not a concern +# https://github.com/numpy/numpy/issues/24209#issuecomment-2160154181 +signed-integer-overflow:* + +# all alignment runtime errors ignored in favor of this being tracked via TypeSanitizer +# otherwise ubsan may detect system file alignment errors outside numpy +alignment:* + +# suggested fix for runtime error: replace left bit shift with LLONG_MIN constant +shift-base:_core/src/common/simd/sse/arithmetic.h +shift-base:_core/src/common/simd/avx2/arithmetic.h +# suggested fix for runtime error: use INT_MIN constant +shift-base:_core/src/umath/_rational_tests.c +# suggested fix for runtime error: check for overflow if signed +shift-base:_core/src/npymath/npy_math_internal.h + + +# suggested fix for runtime error: check that pointer is not null before calling function +nonnull-attribute:_core/src/multiarray/array_coercion.c +nonnull-attribute:_core/src/multiarray/ctors.c +nonnull-attribute:_core/src/multiarray/datetime_busdaycal.c +nonnull-attribute:_core/src/multiarray/scalarapi.c +nonnull-attribute:_core/src/multiarray/calculation.c + +# suggested fix for runtime error: null check before loop +pointer-overflow:_core/src/multiarray/nditer_templ.c From 7ffe59ff1585de25b5f4e4da92fd7efc93aca2c8 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Wed, 6 Aug 2025 17:00:15 -0400 Subject: [PATCH 0286/1018] STY: Add whitespace to appease ruff (rule E226).
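The lines reformatted below belong to the accuracy test for the earlier binomial fix, which replaced exp(n * log(q)) with exp(n * log1p(-p)). A minimal sketch of why that rewrite matters, using the test's own parameters (outputs shown are approximate):

    import numpy as np

    n, p = 50_000_000_000, 5e-17

    # 1.0 - p rounds to exactly 1.0 in double precision (p < eps / 2), so the
    # old expression exp(n * log(1.0 - p)) collapses to exp(0.0) == 1.0, and
    # the inversion sampler would then always return 0.
    print(np.exp(n * np.log(1.0 - p)))   # 1.0

    # log1p(-p) preserves the tiny magnitude, so q**n is computed correctly.
    print(np.exp(n * np.log1p(-p)))      # ~0.9999975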
--- numpy/random/tests/test_generator_mt19937.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index c32612218fcf..a06212efce0d 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -105,17 +105,17 @@ def test_p_extremely_small(self): sample_size = 20000000 x = random.binomial(n, p, size=sample_size) sample_mean = x.mean() - expected_mean = n*p - sigma = np.sqrt(n*p*(1 - p)/sample_size) + expected_mean = n * p + sigma = np.sqrt(n * p * (1 - p) / sample_size) # Note: the parameters were chosen so that expected_mean - 6*sigma # is a positive value. The first `assert` below validates that # assumption (in case someone edits the parameters in the future). # The second `assert` is the actual test. - low_bound = expected_mean - 6*sigma + low_bound = expected_mean - 6 * sigma assert low_bound > 0, "bad test params: 6-sigma lower bound is negative" test_msg = (f"sample mean {sample_mean} deviates from the expected mean " f"{expected_mean} by more than 6*sigma") - assert abs(expected_mean - sample_mean) < 6*sigma, test_msg + assert abs(expected_mean - sample_mean) < 6 * sigma, test_msg class TestMultinomial: From 0a7162cbe2bee464cf6a3ab4efc4b013f7467691 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 7 Aug 2025 17:51:24 +0000 Subject: [PATCH 0287/1018] MAINT: Bump actions/cache from 4.2.3 to 4.2.4 Bumps [actions/cache](https://github.com/actions/cache) from 4.2.3 to 4.2.4. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/5a3ec84eff668545956fd18022155c47e93e2684...0400d5f644dc74513175e3cd8d07132dd4860809) --- updated-dependencies: - dependency-name: actions/cache dependency-version: 4.2.4 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/linux_qemu.yml | 4 ++-- .github/workflows/macos.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index 1293e9c37c2f..84e221439aa6 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -108,7 +108,7 @@ jobs: sudo apt install -y ninja-build gcc-${TOOLCHAIN_NAME} g++-${TOOLCHAIN_NAME} gfortran-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} @@ -213,7 +213,7 @@ jobs: sudo apt install -y ninja-build gcc-14-${TOOLCHAIN_NAME} g++-14-${TOOLCHAIN_NAME} gfortran-14-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 418dc7d52fc1..8931b76c5dee 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -46,7 +46,7 @@ jobs: echo "today=$(/bin/date -u '+%Y%m%d')" >> $GITHUB_OUTPUT - name: Setup compiler cache - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 id: cache-ccache with: path: ${{ steps.prep-ccache.outputs.dir }} @@ -70,7 +70,7 @@ jobs: # ensure we re-solve once a day (since we don't lock versions). Could be # replaced by a conda-lock based approach in the future. - name: Cache conda environment - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 env: # Increase this value to reset cache if environment.yml has not changed CACHE_NUMBER: 1 From bd6507407a335486e1bee38b6c0039d98fe3115c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 7 Aug 2025 17:51:37 +0000 Subject: [PATCH 0288/1018] MAINT: Bump github/codeql-action from 3.29.5 to 3.29.6 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.5 to 3.29.6. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/51f77329afa6477de8c49fc9c7046c15b9a4e79d...a4e1a019f5e24960714ff6296aee04b736cbc3cf) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.6 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 2d5fdb08a9d4..59489ea79e65 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 + uses: github/codeql-action/init@a4e1a019f5e24960714ff6296aee04b736cbc3cf # v3.29.6 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 + uses: github/codeql-action/autobuild@a4e1a019f5e24960714ff6296aee04b736cbc3cf # v3.29.6 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 + uses: github/codeql-action/analyze@a4e1a019f5e24960714ff6296aee04b736cbc3cf # v3.29.6 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 079813c3aea3..e2508a09bba7 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v2.1.27 + uses: github/codeql-action/upload-sarif@a4e1a019f5e24960714ff6296aee04b736cbc3cf # v2.1.27 with: sarif_file: results.sarif From ed46cdeb9bc788c8fdd8c6c318e9af933b7e0edb Mon Sep 17 00:00:00 2001 From: Amelia Thurdekoos Date: Thu, 7 Aug 2025 16:14:24 -0700 Subject: [PATCH 0289/1018] DOC:clarify build compatibility to dev depending page Review page to make distinction clearer per GH discussion Standardized the use of C-API vs C API Closes #27265 --- doc/source/dev/depending_on_numpy.rst | 33 ++++++++++++++------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/doc/source/dev/depending_on_numpy.rst b/doc/source/dev/depending_on_numpy.rst index 70476a3cc1b3..fd5c13885a74 100644 --- a/doc/source/dev/depending_on_numpy.rst +++ b/doc/source/dev/depending_on_numpy.rst @@ -26,12 +26,13 @@ well known scientific Python projects, does **not** use semantic versioning. Instead, backwards incompatible API changes require deprecation warnings for at least two releases. For more details, see :ref:`NEP23`. -NumPy has both a Python API and a C API. The C API can be used directly or via -Cython, f2py, or other such tools. If your package uses the C API, then ABI -(application binary interface) stability of NumPy is important. NumPy's ABI is -forward but not backward compatible. This means: binaries compiled against a -given target version of NumPy's C API will still run correctly with newer NumPy -versions, but not with older versions. +NumPy provides both a Python API and a C-API. The C-API can be accessed +directly or through tools like Cython or f2py. If your package uses the +C-API, it's important to understand NumPy's application binary interface +(ABI) compatibility: NumPy's ABI is forward compatible but not backward +compatible. 
This means that binaries compiled against an older version of +NumPy will still work with newer versions, but binaries compiled against a +newer version will not necessarily work with older ones. Modules can also be safely built against NumPy 2.0 or later in :ref:`CPython's abi3 mode `, which allows @@ -87,16 +88,16 @@ Build-time dependency `__. -If a package either uses the NumPy C API directly or it uses some other tool +If a package either uses the NumPy C-API directly or it uses some other tool that depends on it like Cython or Pythran, NumPy is a *build-time* dependency of the package. -By default, NumPy will expose an API that is backwards compatible with the -oldest NumPy version that supports the currently oldest compatible Python -version. NumPy 1.25.0 supports Python 3.9 and higher and NumPy 1.19 is the -first version to support Python 3.9. Thus, we guarantee that, when using -defaults, NumPy 1.25 will expose a C-API compatible with NumPy 1.19. -(the exact version is set within NumPy-internal header files). +By default, NumPy exposes an API that is backward compatible with the earliest +NumPy version that supports the oldest Python version currently supported by +NumPy. For example, NumPy 1.25.0 supports Python 3.9 and above; and the +earliest NumPy version to support Python 3.9 was 1.19. Therefore we guarantee +NumPy 1.25 will, when using defaults, expose a C API compatible with NumPy +1.19. (the exact version is set within NumPy-internal header files). NumPy is also forward compatible for all minor releases, but a major release will require recompilation (see NumPy 2.0-specific advice further down). @@ -157,7 +158,7 @@ frequently, (b) use a large part of NumPy's API surface, and (c) is worried that changes in NumPy may break your code, you can set an upper bound of ``=2.0`` (or go. We'll focus on the "keep compatibility with 1.xx and 2.x" now, which is a little more involved. *Example for a package using the NumPy C API (via C/Cython/etc.) which wants to support +*Example for a package using the NumPy C-API (via C/Cython/etc.) which wants to support NumPy 1.23.5 and up*: .. code:: ini From 4640d9317f76dcef11f1d9f92d35218ec3b32fd7 Mon Sep 17 00:00:00 2001 From: Amelia Thurdekoos Date: Thu, 7 Aug 2025 16:45:46 -0700 Subject: [PATCH 0290/1018] Update depending_on_numpy.rst --- doc/source/dev/depending_on_numpy.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/dev/depending_on_numpy.rst b/doc/source/dev/depending_on_numpy.rst index fd5c13885a74..e3c03b0fea65 100644 --- a/doc/source/dev/depending_on_numpy.rst +++ b/doc/source/dev/depending_on_numpy.rst @@ -96,7 +96,7 @@ By default, NumPy exposes an API that is backward compatible with the earliest NumPy version that supports the oldest Python version currently supported by NumPy. For example, NumPy 1.25.0 supports Python 3.9 and above; and the earliest NumPy version to support Python 3.9 was 1.19. Therefore we guarantee -NumPy 1.25 will, when using defaults, expose a C API compatible with NumPy +NumPy 1.25 will, when using defaults, expose a C-API compatible with NumPy 1.19. (the exact version is set within NumPy-internal header files).
NumPy is also forward compatible for all minor releases, but a major release From a0515ad027cc61100987bbd8c783d1c121fbd803 Mon Sep 17 00:00:00 2001 From: Lysandros Nikolaou Date: Fri, 8 Aug 2025 17:57:02 +0300 Subject: [PATCH 0291/1018] ENH: Use array indexing preparation routines for flatiter objects (#28590) * [ENH] Use array indexing preparation routines for flatiter objects * Fix assign subscript and add tests for it * Fix regression test * Remove unnecessary dims array * Add release note * Remove unnecessary branch from iter_subscript * Add more benchmarks * Add special cases to fix performance regression * Address some feedback - Add test with int16 index - Cast integer index array to intp before using it - Remove unused functions - Change error message back to array - Move docstring to prepare_index_noarray * Address more feedback * Address more feedback; raise error on non-array index * Fix linting errors * Update changelog item * Fix tests * Fix typing tests * Address review feedback * Fix linter errors * Address feedback * Remove wrong comments * Fix linting errors * Fix test_deprecations * Apply suggestions from code review * Update numpy/_core/tests/test_indexing.py --------- Co-authored-by: Sebastian Berg --- benchmarks/benchmarks/bench_indexing.py | 19 + .../upcoming_changes/28590.improvement.rst | 33 ++ numpy/_core/src/multiarray/iterators.c | 524 +++++++----------- numpy/_core/src/multiarray/mapping.c | 133 +++-- numpy/_core/src/multiarray/mapping.h | 22 + numpy/_core/tests/test_deprecations.py | 36 ++ numpy/_core/tests/test_indexing.py | 245 +++++++- numpy/_core/tests/test_regression.py | 4 +- numpy/lib/_shape_base_impl.py | 13 +- numpy/matrixlib/tests/test_masked_matrix.py | 2 +- numpy/typing/tests/data/fail/flatiter.pyi | 2 + numpy/typing/tests/data/pass/flatiter.py | 1 - 12 files changed, 624 insertions(+), 410 deletions(-) create mode 100644 doc/release/upcoming_changes/28590.improvement.rst diff --git a/benchmarks/benchmarks/bench_indexing.py b/benchmarks/benchmarks/bench_indexing.py index 6ac124cac88d..81c812e81e19 100644 --- a/benchmarks/benchmarks/bench_indexing.py +++ b/benchmarks/benchmarks/bench_indexing.py @@ -134,6 +134,7 @@ def setup(self): self.m_half = np.copy(self.m_all) self.m_half[::2] = False self.m_none = np.repeat(False, 200 * 50000) + self.m_index_2d = np.arange(200 * 50000).reshape((100, 100000)) def time_flat_bool_index_none(self): self.a.flat[self.m_none] @@ -143,3 +144,21 @@ def time_flat_bool_index_half(self): def time_flat_bool_index_all(self): self.a.flat[self.m_all] + + def time_flat_fancy_index_2d(self): + self.a.flat[self.m_index_2d] + + def time_flat_empty_tuple_index(self): + self.a.flat[()] + + def time_flat_ellipsis_index(self): + self.a.flat[...] + + def time_flat_bool_index_0d(self): + self.a.flat[True] + + def time_flat_int_index(self): + self.a.flat[1_000_000] + + def time_flat_slice_index(self): + self.a.flat[1_000_000:2_000_000] diff --git a/doc/release/upcoming_changes/28590.improvement.rst b/doc/release/upcoming_changes/28590.improvement.rst new file mode 100644 index 000000000000..35f5cb3c2ad2 --- /dev/null +++ b/doc/release/upcoming_changes/28590.improvement.rst @@ -0,0 +1,33 @@ +Fix ``flatiter`` indexing edge cases +------------------------------------ + +The ``flatiter`` object now shares the same index preparation logic as +``ndarray``, ensuring consistent behavior and fixing several issues where +invalid indices were previously accepted or misinterpreted. 
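+
+For instance, under the stricter validation (sketch of a session; the
+exception text is abbreviated)::
+
+    >>> import numpy as np
+    >>> a = np.arange(9).reshape(3, 3)
+    >>> a.flat[np.array([1, 3, 5])]   # integer array indices: unchanged
+    array([1, 3, 5])
+    >>> a.flat[[True, True]]          # was treated as a.flat[[1, 1]] before
+    Traceback (most recent call last):
+      ...
+    IndexError: boolean indices for iterators are not supported ...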
+ +Key fixes and improvements: + +* Stricter index validation + + - Boolean non-array indices like ``arr.flat[[True, True]]`` were + incorrectly treated as ``arr.flat[np.array([1, 1], dtype=int)]``. + They now raise an index error. Note that indices that match the + iterator's shape are expected to not raise in the future and be + handled as regular boolean indices. Use ``np.asarray()`` if + you want to match that behavior. + - Float non-array indices were also cast to integer and incorrectly + treated as ``arr.flat[np.array([1.0, 1.0], dtype=int)]``. This is now + deprecated and will be removed in a future version. + - 0-dimensional boolean indices like ``arr.flat[True]`` are also + deprecated and will be removed in a future version. + +* Consistent error types: + + Certain invalid `flatiter` indices that previously raised `ValueError` + now correctly raise `IndexError`, aligning with `ndarray` behavior. + +* Improved error messages: + + The error message for unsupported index operations now provides more + specific details, including explicitly listing the valid index types, + instead of the generic ``IndexError: unsupported index operation``. diff --git a/numpy/_core/src/multiarray/iterators.c b/numpy/_core/src/multiarray/iterators.c index 7557662ad56c..704fd4738589 100644 --- a/numpy/_core/src/multiarray/iterators.c +++ b/numpy/_core/src/multiarray/iterators.c @@ -29,78 +29,6 @@ #define ELLIPSIS_INDEX -2 #define SINGLE_INDEX -3 -/* - * Tries to convert 'o' into an npy_intp interpreted as an - * index. Returns 1 if it was successful, 0 otherwise. Does - * not set an exception. - */ -static int -coerce_index(PyObject *o, npy_intp *v) -{ - *v = PyArray_PyIntAsIntp(o); - - if ((*v) == -1 && PyErr_Occurred()) { - PyErr_Clear(); - return 0; - } - return 1; -} - -/* - * This function converts one element of the indexing tuple - * into a step size and a number of steps, returning the - * starting index. Non-slices are signalled in 'n_steps', - * as NEWAXIS_INDEX, ELLIPSIS_INDEX, or SINGLE_INDEX. - */ -NPY_NO_EXPORT npy_intp -parse_index_entry(PyObject *op, npy_intp *step_size, - npy_intp *n_steps, npy_intp max, - int axis, int check_index) -{ - npy_intp i; - - if (op == Py_None) { - *n_steps = NEWAXIS_INDEX; - i = 0; - } - else if (op == Py_Ellipsis) { - *n_steps = ELLIPSIS_INDEX; - i = 0; - } - else if (PySlice_Check(op)) { - npy_intp stop; - if (PySlice_GetIndicesEx(op, max, &i, &stop, step_size, n_steps) < 0) { - goto fail; - } - if (*n_steps <= 0) { - *n_steps = 0; - *step_size = 1; - i = 0; - } - } - else if (coerce_index(op, &i)) { - *n_steps = SINGLE_INDEX; - *step_size = 0; - if (check_index) { - if (check_and_adjust_index(&i, max, axis, NULL) < 0) { - goto fail; - } - } - } - else { - PyErr_SetString(PyExc_IndexError, - "each index entry must be either a " - "slice, an integer, Ellipsis, or " - "newaxis"); - goto fail; - } - return i; - - fail: - return -1; -} - - /*********************** Element-wise Array Iterator ***********************/ /* Aided by Peter J. 
Verveer's nd_image package and numpy's arraymap ****/ /* and Python's array iterator ***/ @@ -427,7 +355,7 @@ iter_length(PyArrayIterObject *self) } -static PyArrayObject * +static PyObject * iter_subscript_Bool(PyArrayIterObject *self, PyArrayObject *ind, NPY_cast_info *cast_info) { @@ -484,7 +412,7 @@ iter_subscript_Bool(PyArrayIterObject *self, PyArrayObject *ind, } PyArray_ITER_RESET(self); } - return ret; + return (PyObject *) ret; } static PyObject * @@ -562,195 +490,154 @@ iter_subscript_int(PyArrayIterObject *self, PyArrayObject *ind, NPY_NO_EXPORT PyObject * iter_subscript(PyArrayIterObject *self, PyObject *ind) { - PyArray_Descr *indtype = NULL; - PyArray_Descr *dtype; - npy_intp start, step_size; - npy_intp n_steps; - PyArrayObject *ret; - char *dptr; - int size; - PyObject *obj = NULL; - PyObject *new; + PyObject *ret = NULL; + + int index_type; + int index_num = -1; + int ndim, fancy_ndim; + npy_intp start, stop, step, n_steps; + npy_index_info indices[NPY_MAXDIMS * 2 + 1]; + + PyArray_Descr *dtype = PyArray_DESCR(self->ao); + npy_intp dtype_size = dtype->elsize; NPY_cast_info cast_info = {.func = NULL}; - if (ind == Py_Ellipsis) { - ind = PySlice_New(NULL, NULL, NULL); - obj = iter_subscript(self, ind); - Py_DECREF(ind); - return obj; + /* Prepare the indices */ + index_type = prepare_index_noarray(1, &self->size, ind, indices, &index_num, + &ndim, &fancy_ndim, 1, 1); + + if (index_type < 0) { + return NULL; } - if (PyTuple_Check(ind)) { - int len; - len = PyTuple_GET_SIZE(ind); - if (len > 1) { - goto fail; - } - if (len == 0) { - Py_INCREF(self->ao); - return (PyObject *)self->ao; - } - ind = PyTuple_GET_ITEM(ind, 0); + + else if (indices[0].type == HAS_NEWAXIS) { + PyErr_SetString(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`) and integer or boolean " + "arrays are valid indices" + ); + goto finish; } - /* - * Tuples >1d not accepted --- i.e. no newaxis - * Could implement this with adjusted strides and dimensions in iterator - * Check for Boolean -- this is first because Bool is a subclass of Int - */ - PyArray_ITER_RESET(self); + // Single ellipsis index + else if (index_type == HAS_ELLIPSIS) { + ind = PySlice_New(NULL, NULL, NULL); + if (ind == NULL) { + goto finish; + } - if (PyBool_Check(ind)) { - int istrue = PyObject_IsTrue(ind); - if (istrue == -1) { - goto fail; + ret = iter_subscript(self, ind); + Py_DECREF(ind); + goto finish; + } + + // Single boolean index + else if (indices[0].type == HAS_0D_BOOL) { + /* Deprecated 2025-07, NumPy 2.4 */ + if (DEPRECATE("Indexing flat iterators with a 0-dimensional boolean index is deprecated " + "and may be removed in a future version. 
(Deprecated NumPy 2.4)") < 0) { + goto finish; } - if (istrue) { - return PyArray_ToScalar(self->dataptr, self->ao); + if (indices[0].value) { + ret = PyArray_ToScalar(self->dataptr, self->ao); + goto finish; } else { /* empty array */ npy_intp ii = 0; - dtype = PyArray_DESCR(self->ao); Py_INCREF(dtype); - ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(self->ao), - dtype, - 1, &ii, - NULL, NULL, 0, - (PyObject *)self->ao); - return (PyObject *)ret; + ret = PyArray_NewFromDescr(Py_TYPE(self->ao), + dtype, + 1, &ii, + NULL, NULL, 0, + (PyObject *)self->ao); + goto finish; } } - dtype = PyArray_DESCR(self->ao); - size = dtype->elsize; + PyArray_ITER_RESET(self); - /* set up a cast to handle item copying */ + if (index_type == HAS_INTEGER) { + if (check_and_adjust_index(&indices[0].value, self->size, -1, NULL) < 0) { + goto finish; + } + PyArray_ITER_GOTO1D(self, indices[0].value); + ret = PyArray_ToScalar(self->dataptr, self->ao); + PyArray_ITER_RESET(self); + goto finish; + } + /* set up a cast to handle item copying */ NPY_ARRAYMETHOD_FLAGS transfer_flags = 0; npy_intp one = 1; + /* We can assume the newly allocated output array is aligned */ int is_aligned = IsUintAligned(self->ao); if (PyArray_GetDTypeTransferFunction( - is_aligned, size, size, dtype, dtype, 0, &cast_info, + is_aligned, dtype_size, dtype_size, dtype, dtype, 0, &cast_info, &transfer_flags) < 0) { - goto fail; + goto finish; } - /* Check for Integer or Slice */ - if (PyLong_Check(ind) || PySlice_Check(ind)) { - start = parse_index_entry(ind, &step_size, &n_steps, - self->size, 0, 1); - if (start == -1) { - goto fail; - } - if (n_steps == ELLIPSIS_INDEX || n_steps == NEWAXIS_INDEX) { - PyErr_SetString(PyExc_IndexError, - "cannot use Ellipsis or newaxes here"); - goto fail; + if (index_type == HAS_SLICE) { + if (PySlice_GetIndicesEx(indices[0].object, + self->size, + &start, &stop, &step, &n_steps) < 0) { + goto finish; } + PyArray_ITER_GOTO1D(self, start); - if (n_steps == SINGLE_INDEX) { /* Integer */ - PyObject *tmp; - tmp = PyArray_ToScalar(self->dataptr, self->ao); - PyArray_ITER_RESET(self); - NPY_cast_info_xfree(&cast_info); - return tmp; - } Py_INCREF(dtype); - ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(self->ao), + ret = PyArray_NewFromDescr(Py_TYPE(self->ao), dtype, 1, &n_steps, NULL, NULL, 0, (PyObject *)self->ao); if (ret == NULL) { - goto fail; + goto finish; } - dptr = PyArray_DATA(ret); + + char *dptr = PyArray_DATA((PyArrayObject *) ret); while (n_steps--) { char *args[2] = {self->dataptr, dptr}; - npy_intp transfer_strides[2] = {size, size}; + npy_intp transfer_strides[2] = {dtype_size, dtype_size}; if (cast_info.func(&cast_info.context, args, &one, transfer_strides, cast_info.auxdata) < 0) { - goto fail; + goto finish; } - start += step_size; + start += step; PyArray_ITER_GOTO1D(self, start); - dptr += size; + dptr += dtype_size; } PyArray_ITER_RESET(self); - NPY_cast_info_xfree(&cast_info); - return (PyObject *)ret; + goto finish; } - /* convert to INTP array if Integer array scalar or List */ - indtype = PyArray_DescrFromType(NPY_INTP); - if (PyArray_IsScalar(ind, Integer) || PyList_Check(ind)) { - Py_INCREF(indtype); - obj = PyArray_FromAny(ind, indtype, 0, 0, NPY_ARRAY_FORCECAST, NULL); - if (obj == NULL) { - goto fail; - } - } - else { - Py_INCREF(ind); - obj = ind; + if (index_type == HAS_BOOL) { + ret = iter_subscript_Bool(self, (PyArrayObject *) indices[0].object, &cast_info); + goto finish; } - if (!PyArray_Check(obj)) { - PyArrayObject *tmp_arr = (PyArrayObject *) PyArray_FROM_O(obj); 
- if (tmp_arr == NULL) { - goto fail; - } - - if (PyArray_SIZE(tmp_arr) == 0) { - PyArray_Descr *indtype = PyArray_DescrFromType(NPY_INTP); - Py_SETREF(obj, PyArray_FromArray(tmp_arr, indtype, NPY_ARRAY_FORCECAST)); - Py_DECREF(tmp_arr); - if (obj == NULL) { - goto fail; - } - } - else { - Py_SETREF(obj, (PyObject *) tmp_arr); + if (index_type == HAS_FANCY) { + PyArray_Descr *indtype = PyArray_DescrFromType(NPY_INTP); + PyArrayObject *cast_array = (PyArrayObject *) + PyArray_FromArray((PyArrayObject *) indices[0].object, indtype, NPY_ARRAY_FORCECAST); + if (cast_array == NULL) { + goto finish; } - } - - /* Check for Boolean array */ - if (PyArray_TYPE((PyArrayObject *)obj) == NPY_BOOL) { - ret = iter_subscript_Bool(self, (PyArrayObject *)obj, &cast_info); + ret = iter_subscript_int(self, cast_array, &cast_info); + Py_DECREF(cast_array); goto finish; } - /* Only integer arrays left */ - if (!PyArray_ISINTEGER((PyArrayObject *)obj)) { - goto fail; - } - - Py_INCREF(indtype); - new = PyArray_FromAny(obj, indtype, 0, 0, - NPY_ARRAY_FORCECAST | NPY_ARRAY_ALIGNED, NULL); - if (new == NULL) { - goto fail; - } - ret = (PyArrayObject *)iter_subscript_int(self, (PyArrayObject *)new, - &cast_info); - Py_DECREF(new); - - finish: - Py_DECREF(indtype); - Py_DECREF(obj); + PyErr_SetString(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`) and integer or boolean " + "arrays are valid indices" + ); +finish: NPY_cast_info_xfree(&cast_info); - return (PyObject *)ret; - - fail: - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_IndexError, "unsupported iterator index"); + for (int i = 0; i < index_num; i++) { + Py_XDECREF(indices[i].object); } - Py_XDECREF(indtype); - Py_XDECREF(obj); - NPY_cast_info_xfree(&cast_info); - - return NULL; - + return ret; } @@ -858,140 +745,132 @@ iter_ass_sub_int(PyArrayIterObject *self, PyArrayObject *ind, NPY_NO_EXPORT int iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val) { - PyArrayObject *arrval = NULL; - PyArrayIterObject *val_it = NULL; - PyArray_Descr *type; - PyArray_Descr *indtype = NULL; - int retval = -1; - npy_intp start, step_size; - npy_intp n_steps; - PyObject *obj = NULL; - NPY_cast_info cast_info = {.func = NULL}; - if (val == NULL) { PyErr_SetString(PyExc_TypeError, "Cannot delete iterator elements"); return -1; } - if (PyArray_FailUnlessWriteable(self->ao, "underlying array") < 0) + if (PyArray_FailUnlessWriteable(self->ao, "underlying array") < 0) { return -1; + } - if (ind == Py_Ellipsis) { - ind = PySlice_New(NULL, NULL, NULL); - retval = iter_ass_subscript(self, ind, val); - Py_DECREF(ind); - return retval; + int ret = -1; + + int index_type; + int index_num = -1; + int ndim, fancy_ndim; + npy_intp start, stop, step, n_steps; + npy_index_info indices[NPY_MAXDIMS * 2 + 1]; + + PyArray_Descr *dtype = PyArray_DESCR(self->ao); + npy_intp dtype_size = dtype->elsize; + NPY_cast_info cast_info = {.func = NULL}; + + /* Prepare the indices */ + index_type = prepare_index_noarray(1, &self->size, ind, indices, &index_num, + &ndim, &fancy_ndim, 1, 1); + + if (index_type < 0) { + goto finish; + } + else if (indices[0].type == HAS_NEWAXIS) { + PyErr_SetString(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`) and integer or boolean " + "arrays are valid indices" + ); + goto finish; } - if (PyTuple_Check(ind)) { - int len; - len = PyTuple_GET_SIZE(ind); - if (len > 1) { + // Single ellipsis index + else if (index_type == HAS_ELLIPSIS) { + if (PyTuple_Check(ind)) { + PyErr_SetString(PyExc_IndexError, "Assigning to a 
flat iterator with a 0-D index is not supported"); + goto finish; + } + + ind = PySlice_New(NULL, NULL, NULL); + if (ind == NULL) { goto finish; } - ind = PyTuple_GET_ITEM(ind, 0); - } - type = PyArray_DESCR(self->ao); + ret = iter_ass_subscript(self, ind, val); + Py_DECREF(ind); + goto finish; + } - /* - * Check for Boolean -- this is first because - * Bool is a subclass of Int - */ - - if (PyBool_Check(ind)) { - retval = 0; - int istrue = PyObject_IsTrue(ind); - if (istrue == -1) { - return -1; + // Single boolean index + else if (indices[0].type == HAS_0D_BOOL) { + /* Deprecated 2025-07, NumPy 2.4 */ + if (DEPRECATE("Indexing flat iterators with a 0-dimensional boolean index is deprecated " + "and may be removed in a future version. (Deprecated NumPy 2.4)") < 0) { + goto finish; } - if (istrue) { - retval = PyArray_Pack( - PyArray_DESCR(self->ao), self->dataptr, val); + ret = 0; + if (indices[0].value) { + ret = PyArray_Pack(PyArray_DESCR(self->ao), self->dataptr, val); } goto finish; } - if (PySequence_Check(ind) || PySlice_Check(ind)) { - goto skip; - } - start = PyArray_PyIntAsIntp(ind); - if (error_converting(start)) { - PyErr_Clear(); - } - else { - if (check_and_adjust_index(&start, self->size, -1, NULL) < 0) { + PyArray_ITER_RESET(self); + + if (index_type == HAS_INTEGER) { + if (check_and_adjust_index(&indices[0].value, self->size, -1, NULL) < 0) { goto finish; } - PyArray_ITER_GOTO1D(self, start); - retval = PyArray_Pack(PyArray_DESCR(self->ao), self->dataptr, val); + PyArray_ITER_GOTO1D(self, indices[0].value); + ret = PyArray_Pack(PyArray_DESCR(self->ao), self->dataptr, val); PyArray_ITER_RESET(self); - if (retval < 0) { + if (ret < 0) { PyErr_SetString(PyExc_ValueError, "Error setting single item of array."); } goto finish; } - skip: - Py_INCREF(type); - arrval = (PyArrayObject *)PyArray_FromAny(val, type, 0, 0, + Py_INCREF(dtype); + PyArrayObject *arrval = (PyArrayObject *)PyArray_FromAny(val, dtype, 0, 0, NPY_ARRAY_FORCECAST, NULL); if (arrval == NULL) { - return -1; + goto finish; } - val_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)arrval); + PyArrayIterObject *val_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)arrval); if (val_it == NULL) { goto finish; } if (val_it->size == 0) { - retval = 0; + ret = 0; goto finish; } /* set up cast to handle single-element copies into arrval */ NPY_ARRAYMETHOD_FLAGS transfer_flags = 0; npy_intp one = 1; - int itemsize = type->elsize; /* We can assume the newly allocated array is aligned */ int is_aligned = IsUintAligned(self->ao); if (PyArray_GetDTypeTransferFunction( - is_aligned, itemsize, itemsize, type, type, 0, + is_aligned, dtype_size, dtype_size, dtype, dtype, 0, &cast_info, &transfer_flags) < 0) { goto finish; } - /* Check Slice */ - if (PySlice_Check(ind)) { - start = parse_index_entry(ind, &step_size, &n_steps, self->size, 0, 0); - if (start == -1) { - goto finish; - } - if (n_steps == ELLIPSIS_INDEX || n_steps == NEWAXIS_INDEX) { - PyErr_SetString(PyExc_IndexError, - "cannot use Ellipsis or newaxes here"); + if (index_type == HAS_SLICE) { + if (PySlice_GetIndicesEx(indices[0].object, + self->size, + &start, &stop, &step, &n_steps) < 0) { goto finish; } + PyArray_ITER_GOTO1D(self, start); - npy_intp transfer_strides[2] = {itemsize, itemsize}; - if (n_steps == SINGLE_INDEX) { - char *args[2] = {PyArray_DATA(arrval), self->dataptr}; - if (cast_info.func(&cast_info.context, args, &one, - transfer_strides, cast_info.auxdata) < 0) { - goto finish; - } - PyArray_ITER_RESET(self); - retval = 0; - goto finish; - } + 
npy_intp transfer_strides[2] = {dtype_size, dtype_size}; while (n_steps--) { char *args[2] = {val_it->dataptr, self->dataptr}; if (cast_info.func(&cast_info.context, args, &one, transfer_strides, cast_info.auxdata) < 0) { goto finish; } - start += step_size; + start += step; PyArray_ITER_GOTO1D(self, start); PyArray_ITER_NEXT(val_it); if (val_it->index == val_it->size) { @@ -999,60 +878,37 @@ iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val) } } PyArray_ITER_RESET(self); - retval = 0; + ret = 0; goto finish; } - /* convert to INTP array if Integer array scalar or List */ - indtype = PyArray_DescrFromType(NPY_INTP); - if (PyList_Check(ind)) { - Py_INCREF(indtype); - obj = PyArray_FromAny(ind, indtype, 0, 0, NPY_ARRAY_FORCECAST, NULL); - } - else { - Py_INCREF(ind); - obj = ind; + if (index_type == HAS_BOOL) { + ret = iter_ass_sub_Bool(self, (PyArrayObject *) indices[0].object, val_it, &cast_info); + goto finish; } - if (obj != NULL && PyArray_Check(obj)) { - /* Check for Boolean object */ - if (PyArray_TYPE((PyArrayObject *)obj)==NPY_BOOL) { - if (iter_ass_sub_Bool(self, (PyArrayObject *)obj, - val_it, &cast_info) < 0) { - goto finish; - } - retval=0; - } - /* Check for integer array */ - else if (PyArray_ISINTEGER((PyArrayObject *)obj)) { - PyObject *new; - Py_INCREF(indtype); - new = PyArray_CheckFromAny(obj, indtype, 0, 0, - NPY_ARRAY_FORCECAST | NPY_ARRAY_BEHAVED_NS, NULL); - Py_DECREF(obj); - obj = new; - if (new == NULL) { - goto finish; - } - if (iter_ass_sub_int(self, (PyArrayObject *)obj, - val_it, &cast_info) < 0) { - goto finish; - } - retval = 0; + if (index_type == HAS_FANCY) { + PyArray_Descr *indtype = PyArray_DescrFromType(NPY_INTP); + PyArrayObject *cast_array = (PyArrayObject *) + PyArray_FromArray((PyArrayObject *) indices[0].object, indtype, NPY_ARRAY_FORCECAST); + if (cast_array == NULL) { + goto finish; } + ret = iter_ass_sub_int(self, cast_array, val_it, &cast_info); + Py_DECREF(cast_array); + goto finish; } - finish: - if (!PyErr_Occurred() && retval < 0) { - PyErr_SetString(PyExc_IndexError, "unsupported iterator index"); - } - Py_XDECREF(indtype); - Py_XDECREF(obj); - Py_XDECREF(val_it); - Py_XDECREF(arrval); + PyErr_SetString(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`) and integer or boolean " + "arrays are valid indices" + ); +finish: NPY_cast_info_xfree(&cast_info); - return retval; - + for (int i = 0; i < index_num; i++) { + Py_XDECREF(indices[i].object); + } + return ret; } diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index 12f25534f1d0..28d6a5a26938 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -33,23 +33,6 @@ #include "umathmodule.h" -#define HAS_INTEGER 1 -#define HAS_NEWAXIS 2 -#define HAS_SLICE 4 -#define HAS_ELLIPSIS 8 -/* HAS_FANCY can be mixed with HAS_0D_BOOL, be careful when to use & or == */ -#define HAS_FANCY 16 -#define HAS_BOOL 32 -/* NOTE: Only set if it is neither fancy nor purely integer index! */ -#define HAS_SCALAR_ARRAY 64 -/* - * Indicate that this is a fancy index that comes from a 0d boolean. - * This means that the index does not operate along a real axis. The - * corresponding index type is just HAS_FANCY. - */ -#define HAS_0D_BOOL (HAS_FANCY | 128) - - static int _nonzero_indices(PyObject *myBool, PyArrayObject **arrays); @@ -263,20 +246,22 @@ unpack_indices(PyObject *index, PyObject **result, npy_intp result_n) * * Checks everything but the bounds. 
* - * @param self the array being indexed + * @param array_ndims The number of dimensions of the array being indexed (1 for iterators) + * @param array_dims The dimensions of the array being indexed (self->size for iterators) * @param index the index object * @param indices index info struct being filled (size of NPY_MAXDIMS * 2 + 1) * @param num number of indices found * @param ndim dimension of the indexing result * @param out_fancy_ndim dimension of the fancy/advanced indices part * @param allow_boolean whether to allow the boolean special case + * @param is_flatiter_object Whether the object indexed is an iterator * * @returns the index_type or -1 on failure and fills the number of indices. */ NPY_NO_EXPORT int -prepare_index(PyArrayObject *self, PyObject *index, - npy_index_info *indices, - int *num, int *ndim, int *out_fancy_ndim, int allow_boolean) +prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, + npy_index_info *indices, int *num, int *ndim, int *out_fancy_ndim, + int allow_boolean, int is_flatiter_object) { int new_ndim, fancy_ndim, used_ndim, index_ndim; int curr_idx, get_idx; @@ -314,8 +299,8 @@ prepare_index(PyArrayObject *self, PyObject *index, while (get_idx < index_ndim) { if (curr_idx > NPY_MAXDIMS * 2) { - PyErr_SetString(PyExc_IndexError, - "too many indices for array"); + PyErr_Format(PyExc_IndexError, + "too many indices for %s", is_flatiter_object ? "flat iterator" : "array"); goto failed_building_indices; } @@ -377,7 +362,7 @@ prepare_index(PyArrayObject *self, PyObject *index, * Since this is always an error if it was not a boolean, we can * allow the 0-d special case before the rest. */ - else if (PyArray_NDIM(self) != 0) { + else if (array_ndims != 0) { /* * Single integer index, there are two cases here. * It could be an array, a 0-d array is handled @@ -418,17 +403,55 @@ prepare_index(PyArrayObject *self, PyObject *index, goto failed_building_indices; } + // We raise here because we changed the behavior for boolean + // indices for flat iterators from being handled as integers + // to being regular boolean indices. + // TODO: This should go away fairly soon and lists of booleans + // should be handled as regular boolean indices. + if (is_flatiter_object && PyArray_ISBOOL(tmp_arr) && !PyBool_Check(index)) { + Py_DECREF(tmp_arr); + PyErr_Format(PyExc_IndexError, + "boolean indices for iterators are not supported because " + "of previous behavior that was confusing (valid boolean " + "indices are expected to work in the future)" + ); + goto failed_building_indices; + } + /* * For example an empty list can be cast to an integer array, * however it will default to a float one. */ - if (PyArray_SIZE(tmp_arr) == 0) { - PyArray_Descr *indtype = PyArray_DescrFromType(NPY_INTP); + if (PyArray_SIZE(tmp_arr) == 0 + || (is_flatiter_object && !PyArray_ISINTEGER(tmp_arr) && !PyArray_ISBOOL(tmp_arr))) { + PyArray_Descr *indtype = PyArray_DescrFromType(NPY_INTP); arr = (PyArrayObject *)PyArray_FromArray(tmp_arr, indtype, NPY_ARRAY_FORCECAST); + + // If the cast succeeded (which means that the previous flat iterator + // indexing routine would have succeeded as well), we need to issue a + // deprecation warning. + if (arr + && is_flatiter_object + && PyArray_SIZE(tmp_arr) != 0 + && !PyArray_ISINTEGER(tmp_arr) + && !PyArray_ISBOOL(tmp_arr) + && DEPRECATE("Invalid non-array indices for iterator objects are deprecated and will be " + "removed in a future version. 
(Deprecated NumPy 2.4)") < 0) { + Py_DECREF(tmp_arr); + goto failed_building_indices; + } Py_DECREF(tmp_arr); if (arr == NULL) { + // Raise a helpful error if this was a ValueError (i.e. could not cast) + if (PyErr_ExceptionMatches(PyExc_ValueError)) { + PyErr_Format(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`)%s and integer or boolean " + "arrays are valid indices", + is_flatiter_object ? "" : ", numpy.newaxis (`None`)" + ); + } goto failed_building_indices; } } @@ -458,9 +481,9 @@ prepare_index(PyArrayObject *self, PyObject *index, * this is always an error. The check ensures that these errors are raised * and match those of the generic path. */ - if ((PyArray_NDIM(arr) == PyArray_NDIM(self)) + if ((PyArray_NDIM(arr) == array_ndims) && PyArray_CompareLists(PyArray_DIMS(arr), - PyArray_DIMS(self), + array_dims, PyArray_NDIM(arr))) { index_type = HAS_BOOL; @@ -468,8 +491,8 @@ prepare_index(PyArrayObject *self, PyObject *index, indices[curr_idx].object = (PyObject *)arr; /* keep track anyway, just to be complete */ - used_ndim = PyArray_NDIM(self); - fancy_ndim = PyArray_NDIM(self); + used_ndim = array_ndims; + fancy_ndim = array_ndims; curr_idx += 1; break; } @@ -524,8 +547,8 @@ prepare_index(PyArrayObject *self, PyObject *index, /* Check that we will not run out of indices to store new ones */ if (curr_idx + n >= NPY_MAXDIMS * 2) { - PyErr_SetString(PyExc_IndexError, - "too many indices for array"); + PyErr_Format(PyExc_IndexError, + "too many indices for %s", is_flatiter_object ? "flat iterator" : "array"); for (i=0; i < n; i++) { Py_DECREF(nonzero_result[i]); } @@ -603,10 +626,11 @@ prepare_index(PyArrayObject *self, PyObject *index, } else { /* The input was not an array, so give a general error message */ - PyErr_SetString(PyExc_IndexError, - "only integers, slices (`:`), ellipsis (`...`), " - "numpy.newaxis (`None`) and integer or boolean " - "arrays are valid indices"); + PyErr_Format(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`)%s and integer or boolean " + "arrays are valid indices", + is_flatiter_object ? "" : ", numpy.newaxis (`None`)" + ); } Py_DECREF(arr); goto failed_building_indices; @@ -616,10 +640,10 @@ prepare_index(PyArrayObject *self, PyObject *index, * Compare dimension of the index to the real ndim. this is * to find the ellipsis value or append an ellipsis if necessary. */ - if (used_ndim < PyArray_NDIM(self)) { + if (used_ndim < array_ndims) { if (index_type & HAS_ELLIPSIS) { - indices[ellipsis_pos].value = PyArray_NDIM(self) - used_ndim; - used_ndim = PyArray_NDIM(self); + indices[ellipsis_pos].value = array_ndims - used_ndim; + used_ndim = array_ndims; new_ndim += indices[ellipsis_pos].value; } else { @@ -630,19 +654,21 @@ prepare_index(PyArrayObject *self, PyObject *index, index_type |= HAS_ELLIPSIS; indices[curr_idx].object = NULL; indices[curr_idx].type = HAS_ELLIPSIS; - indices[curr_idx].value = PyArray_NDIM(self) - used_ndim; + indices[curr_idx].value = array_ndims - used_ndim; ellipsis_pos = curr_idx; - used_ndim = PyArray_NDIM(self); + used_ndim = array_ndims; new_ndim += indices[curr_idx].value; curr_idx += 1; } } - else if (used_ndim > PyArray_NDIM(self)) { + else if (used_ndim > array_ndims) { PyErr_Format(PyExc_IndexError, - "too many indices for array: " - "array is %d-dimensional, but %d were indexed", - PyArray_NDIM(self), + "too many indices for %s: " + "%s is %d-dimensional, but %d were indexed", + is_flatiter_object ? "flat iterator" : "array", + is_flatiter_object ? 
"flat iterator" : "array", + array_ndims, used_ndim); goto failed_building_indices; } @@ -697,14 +723,15 @@ prepare_index(PyArrayObject *self, PyObject *index, used_ndim = 0; for (i = 0; i < curr_idx; i++) { if ((indices[i].type == HAS_FANCY) && indices[i].value > 0) { - if (indices[i].value != PyArray_DIM(self, used_ndim)) { + if (indices[i].value != array_dims[used_ndim]) { char err_msg[174]; PyOS_snprintf(err_msg, sizeof(err_msg), - "boolean index did not match indexed array along " + "boolean index did not match indexed %s along " "axis %d; size of axis is %" NPY_INTP_FMT " but size of corresponding boolean axis is %" NPY_INTP_FMT, - used_ndim, PyArray_DIM(self, used_ndim), + is_flatiter_object ? "flat iterator" : "array", + used_ndim, array_dims[used_ndim], indices[i].value); PyErr_SetString(PyExc_IndexError, err_msg); goto failed_building_indices; @@ -740,6 +767,16 @@ prepare_index(PyArrayObject *self, PyObject *index, return -1; } +NPY_NO_EXPORT int +prepare_index(PyArrayObject *self, PyObject *index, + npy_index_info *indices, + int *num, int *ndim, int *out_fancy_ndim, int allow_boolean) +{ + return prepare_index_noarray(PyArray_NDIM(self), PyArray_DIMS(self), + index, indices, num, ndim, out_fancy_ndim, + allow_boolean, 0); +} + /** * Check if self has memory overlap with one of the index arrays, or with extra_op. diff --git a/numpy/_core/src/multiarray/mapping.h b/numpy/_core/src/multiarray/mapping.h index 528cb6604892..d4577c78fdbb 100644 --- a/numpy/_core/src/multiarray/mapping.h +++ b/numpy/_core/src/multiarray/mapping.h @@ -3,6 +3,23 @@ extern NPY_NO_EXPORT PyMappingMethods array_as_mapping; +/* Indexing types */ +#define HAS_INTEGER 1 +#define HAS_NEWAXIS 2 +#define HAS_SLICE 4 +#define HAS_ELLIPSIS 8 +/* HAS_FANCY can be mixed with HAS_0D_BOOL, be careful when to use & or == */ +#define HAS_FANCY 16 +#define HAS_BOOL 32 +/* NOTE: Only set if it is neither fancy nor purely integer index! */ +#define HAS_SCALAR_ARRAY 64 +/* + * Indicate that this is a fancy index that comes from a 0d boolean. + * This means that the index does not operate along a real axis. The + * corresponding index type is just HAS_FANCY. + */ +#define HAS_0D_BOOL (HAS_FANCY | 128) + /* * Object to store information needed for advanced (also fancy) indexing. @@ -113,6 +130,11 @@ typedef struct { } npy_index_info; +NPY_NO_EXPORT int +prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, + npy_index_info *indices, int *num, int *ndim, int *out_fancy_ndim, + int allow_boolean, int is_flatiter_object); + NPY_NO_EXPORT Py_ssize_t array_length(PyArrayObject *self); diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index d4f2e9984266..75f092a51808 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -480,3 +480,39 @@ def test_deprecated(self): def test_not_deprecated(self, align): # if the user passes a bool, it is accepted. 
self.assert_not_deprecated(lambda: np.dtype("f8", align=align)) + + +class TestFlatiterIndexing0dBoolIndex(_DeprecationTestCase): + # Deprecated in Numpy 2.4, 2025-07 + message = r"Indexing flat iterators with a 0-dimensional boolean index" + + def test_0d_boolean_index_deprecated(self): + arr = np.arange(3) + # 0d boolean indices on flat iterators are deprecated + self.assert_deprecated(lambda: arr.flat[True]) + + def test_0d_boolean_assign_index_deprecated(self): + arr = np.arange(3) + + def assign_to_index(): + arr.flat[True] = 10 + + self.assert_deprecated(assign_to_index) + + +class TestFlatiterIndexingFloatIndex(_DeprecationTestCase): + # Deprecated in NumPy 2.4, 2025-07 + message = r"Invalid non-array indices for iterator objects" + + def test_float_index_deprecated(self): + arr = np.arange(3) + # float indices on flat iterators are deprecated + self.assert_deprecated(lambda: arr.flat[[1.]]) + + def test_float_assign_index_deprecated(self): + arr = np.arange(3) + + def assign_to_index(): + arr.flat[[1.]] = 10 + + self.assert_deprecated(assign_to_index) diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index 2a8a669d0787..c65468ebd24a 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -614,22 +614,6 @@ def test_nontuple_ndindex(self): assert_equal(a[[0, 1], [0, 1]], np.array([0, 6])) assert_raises(IndexError, a.__getitem__, [slice(None)]) - def test_flat_index_on_flatiter(self): - a = np.arange(9).reshape((3, 3)) - b = np.array([0, 5, 6]) - assert_equal(a.flat[b.flat], np.array([0, 5, 6])) - - def test_empty_string_flat_index_on_flatiter(self): - a = np.arange(9).reshape((3, 3)) - b = np.array([], dtype="S") - assert_equal(a.flat[b.flat], np.array([])) - - def test_nonempty_string_flat_index_on_flatiter(self): - a = np.arange(9).reshape((3, 3)) - b = np.array(["a"], dtype="S") - with pytest.raises(IndexError, match="unsupported iterator index"): - a.flat[b.flat] - class TestFieldIndexing: def test_scalar_return_type(self): @@ -1453,3 +1437,232 @@ def test_setitem(self): a = a.reshape(5, 2) assign(a, 4, 10) assert_array_equal(a[-1], [10, 10]) + + +class TestFlatiterIndexing: + def test_flatiter_indexing_single_integer(self): + a = np.arange(9).reshape((3, 3)) + assert_array_equal(a.flat[0], 0) + assert_array_equal(a.flat[4], 4) + assert_array_equal(a.flat[-1], 8) + + with pytest.raises(IndexError, match="index 9 is out of bounds"): + a.flat[9] + + def test_flatiter_indexing_slice(self): + a = np.arange(9).reshape((3, 3)) + assert_array_equal(a.flat[:], np.arange(9)) + assert_array_equal(a.flat[:5], np.arange(5)) + assert_array_equal(a.flat[5:10], np.arange(5, 9)) + assert_array_equal(a.flat[::2], np.arange(0, 9, 2)) + assert_array_equal(a.flat[::-1], np.arange(8, -1, -1)) + assert_array_equal(a.flat[10:5], np.array([])) + + assert_array_equal(a.flat[()], np.arange(9)) + assert_array_equal(a.flat[...], np.arange(9)) + + def test_flatiter_indexing_boolean(self): + a = np.arange(9).reshape((3, 3)) + + with pytest.warns(DeprecationWarning, match="0-dimensional boolean index"): + assert_array_equal(a.flat[True], 0) + with pytest.warns(DeprecationWarning, match="0-dimensional boolean index"): + assert_array_equal(a.flat[False], np.array([])) + + mask = np.zeros(len(a.flat), dtype=bool) + mask[::2] = True + assert_array_equal(a.flat[mask], np.arange(0, 9, 2)) + + wrong_mask = np.zeros(len(a.flat) + 1, dtype=bool) + with pytest.raises(IndexError, + match="boolean index did not match indexed flat iterator"): + 
a.flat[wrong_mask] + + def test_flatiter_indexing_fancy(self): + a = np.arange(9).reshape((3, 3)) + + indices = np.array([1, 3, 5]) + assert_array_equal(a.flat[indices], indices) + + assert_array_equal(a.flat[[-1, -2]], np.array([8, 7])) + + indices_2d = np.array([[1, 2], [3, 4]]) + assert_array_equal(a.flat[indices_2d], indices_2d) + + assert_array_equal(a.flat[[True, 1]], np.array([1, 1])) + + assert_array_equal(a.flat[[]], np.array([], dtype=a.dtype)) + + with pytest.raises(IndexError, + match="boolean indices for iterators are not supported"): + a.flat[[True, True]] + + a = np.arange(3) + with pytest.raises(IndexError, + match="boolean indices for iterators are not supported"): + a.flat[[True, False, True]] + assert_array_equal(a.flat[np.asarray([True, False, True])], np.array([0, 2])) + + def test_flatiter_indexing_not_supported_newaxis_mutlidimensional_float(self): + a = np.arange(9).reshape((3, 3)) + with pytest.raises(IndexError, + match=r"only integers, slices \(`:`\), " + r"ellipsis \(`\.\.\.`\) and " + r"integer or boolean arrays are valid indices"): + a.flat[None] + + with pytest.raises(IndexError, + match=r"too many indices for flat iterator: flat iterator " + r"is 1-dimensional, but 2 were indexed"): + a.flat[1, 2] + + with pytest.warns(DeprecationWarning, + match="Invalid non-array indices for iterator objects are " + "deprecated"): + assert_array_equal(a.flat[[1.0, 2.0]], np.array([1, 2])) + + def test_flatiter_assign_single_integer(self): + a = np.arange(9).reshape((3, 3)) + + a.flat[0] = 10 + assert_array_equal(a, np.array([[10, 1, 2], [3, 4, 5], [6, 7, 8]])) + + a.flat[4] = 20 + assert_array_equal(a, np.array([[10, 1, 2], [3, 20, 5], [6, 7, 8]])) + + a.flat[-1] = 30 + assert_array_equal(a, np.array([[10, 1, 2], [3, 20, 5], [6, 7, 30]])) + + with pytest.raises(IndexError, match="index 9 is out of bounds"): + a.flat[9] = 40 + + def test_flatiter_indexing_slice_assign(self): + a = np.arange(9).reshape((3, 3)) + a.flat[:] = 10 + assert_array_equal(a, np.full((3, 3), 10)) + + a = np.arange(9).reshape((3, 3)) + a.flat[:5] = 20 + assert_array_equal(a, np.array([[20, 20, 20], [20, 20, 5], [6, 7, 8]])) + + a = np.arange(9).reshape((3, 3)) + a.flat[5:10] = 30 + assert_array_equal(a, np.array([[0, 1, 2], [3, 4, 30], [30, 30, 30]])) + + a = np.arange(9).reshape((3, 3)) + a.flat[::2] = 40 + assert_array_equal(a, np.array([[40, 1, 40], [3, 40, 5], [40, 7, 40]])) + + a = np.arange(9).reshape((3, 3)) + a.flat[::-1] = 50 + assert_array_equal(a, np.full((3, 3), 50)) + + a = np.arange(9).reshape((3, 3)) + a.flat[10:5] = 60 + assert_array_equal(a, np.arange(9).reshape((3, 3))) + + a = np.arange(9).reshape((3, 3)) + with pytest.raises(IndexError, + match="Assigning to a flat iterator with a 0-D index"): + a.flat[()] = 70 + + a = np.arange(9).reshape((3, 3)) + a.flat[...] 
+        assert_array_equal(a, np.full((3, 3), 80))
+
+    def test_flatiter_indexing_boolean_assign(self):
+        a = np.arange(9).reshape((3, 3))
+        with pytest.warns(DeprecationWarning, match="0-dimensional boolean index"):
+            a.flat[True] = 10
+        assert_array_equal(a, np.array([[10, 1, 2], [3, 4, 5], [6, 7, 8]]))
+
+        a = np.arange(9).reshape((3, 3))
+        with pytest.warns(DeprecationWarning, match="0-dimensional boolean index"):
+            a.flat[False] = 20
+        assert_array_equal(a, np.arange(9).reshape((3, 3)))
+
+        a = np.arange(9).reshape((3, 3))
+        mask = np.zeros(len(a.flat), dtype=bool)
+        mask[::2] = True
+        a.flat[mask] = 30
+        assert_array_equal(a, np.array([[30, 1, 30], [3, 30, 5], [30, 7, 30]]))
+
+        wrong_mask = np.zeros(len(a.flat) + 1, dtype=bool)
+        with pytest.raises(IndexError,
+                           match="boolean index did not match indexed flat iterator"):
+            a.flat[wrong_mask] = 40
+
+    def test_flatiter_indexing_fancy_assign(self):
+        a = np.arange(9).reshape((3, 3))
+        indices = np.array([1, 3, 5])
+        a.flat[indices] = 10
+        assert_array_equal(a, np.array([[0, 10, 2], [10, 4, 10], [6, 7, 8]]))
+
+        a.flat[[-1, -2]] = 20
+        assert_array_equal(a, np.array([[0, 10, 2], [10, 4, 10], [6, 20, 20]]))
+
+        a = np.arange(9).reshape((3, 3))
+        indices_2d = np.array([[1, 2], [3, 4]])
+        a.flat[indices_2d] = 30
+        assert_array_equal(a, np.array([[0, 30, 30], [30, 30, 5], [6, 7, 8]]))
+
+        a.flat[[True, 1]] = 40
+        assert_array_equal(a, np.array([[0, 40, 30], [30, 30, 5], [6, 7, 8]]))
+
+        with pytest.raises(IndexError,
+                           match="boolean indices for iterators are not supported"):
+            a.flat[[True, True]] = 50
+
+        a = np.arange(3)
+        with pytest.raises(IndexError,
+                           match="boolean indices for iterators are not supported"):
+            a.flat[[True, False, True]] = 20
+        a.flat[np.asarray([True, False, True])] = 20
+        assert_array_equal(a, np.array([20, 1, 20]))
+
+    def test_flatiter_indexing_fancy_int16_dtype(self):
+        a = np.arange(9).reshape((3, 3))
+        indices = np.array([1, 3, 5], dtype=np.int16)
+        assert_array_equal(a.flat[indices], np.array([1, 3, 5]))
+
+        a.flat[indices] = 10
+        assert_array_equal(a, np.array([[0, 10, 2], [10, 4, 10], [6, 7, 8]]))
+
+    def test_flatiter_indexing_not_supported_newaxis_multid_float_assign(self):
+        a = np.arange(9).reshape((3, 3))
+        with pytest.raises(IndexError,
+                           match=r"only integers, slices \(`:`\), "
+                                 r"ellipsis \(`\.\.\.`\) and "
+                                 r"integer or boolean arrays are valid indices"):
+            a.flat[None] = 10
+
+        a.flat[[1, 2]] = 10
+        assert_array_equal(a, np.array([[0, 10, 10], [3, 4, 5], [6, 7, 8]]))
+
+        with pytest.warns(DeprecationWarning,
+                          match="Invalid non-array indices for iterator objects are "
+                                "deprecated"):
+            a.flat[[1.0, 2.0]] = 20
+        assert_array_equal(a, np.array([[0, 20, 20], [3, 4, 5], [6, 7, 8]]))
+
+    def test_flat_index_on_flatiter(self):
+        a = np.arange(9).reshape((3, 3))
+        b = np.array([0, 5, 6])
+        assert_equal(a.flat[b.flat], np.array([0, 5, 6]))
+
+    def test_empty_string_flat_index_on_flatiter(self):
+        a = np.arange(9).reshape((3, 3))
+        b = np.array([], dtype="S")
+        # This is arguably incorrect, and should be removed (ideally with
+        # deprecation). But it matches the array path and comes from not
+        # distinguishing `arr[np.array([]).flat]` and `arr[[]]` and the latter
+        # must pass.
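+        # (For comparison, the array path also accepts an empty fancy index:
+        # `np.arange(9)[[]]` returns an empty array rather than raising.)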
+ assert_equal(a.flat[b.flat], np.array([])) + + def test_nonempty_string_flat_index_on_flatiter(self): + a = np.arange(9).reshape((3, 3)) + b = np.array(["a"], dtype="S") + with pytest.raises(IndexError, + match=r"only integers, slices \(`:`\), ellipsis \(`\.\.\.`\) " + r"and integer or boolean arrays are valid indices"): + a.flat[b.flat] diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index 2aeb29a1320d..8ad9a26fcc9a 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -851,8 +851,8 @@ def ia(x, s, v): assert_raises(IndexError, ia, x, s, np.zeros(11, dtype=float)) # Old special case (different code path): - assert_raises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float)) - assert_raises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float)) + assert_raises(IndexError, ia, x.flat, s, np.zeros(9, dtype=float)) + assert_raises(IndexError, ia, x.flat, s, np.zeros(11, dtype=float)) def test_mem_scalar_indexing(self): # Ticket #603 diff --git a/numpy/lib/_shape_base_impl.py b/numpy/lib/_shape_base_impl.py index c44d603611ee..8b200cd8daa4 100644 --- a/numpy/lib/_shape_base_impl.py +++ b/numpy/lib/_shape_base_impl.py @@ -1,6 +1,7 @@ import functools import warnings +import numpy as np import numpy._core.numeric as _nx from numpy._core import atleast_3d, overrides, vstack from numpy._core._multiarray_umath import _array_converter @@ -171,15 +172,13 @@ def take_along_axis(arr, indices, axis=-1): if indices.ndim != 1: raise ValueError( 'when axis=None, `indices` must have a single dimension.') - arr = arr.flat - arr_shape = (len(arr),) # flatiter has no .shape + arr = np.array(arr.flat) axis = 0 else: axis = normalize_axis_index(axis, arr.ndim) - arr_shape = arr.shape # use the fancy index - return arr[_make_along_axis_idx(arr_shape, indices, axis)] + return arr[_make_along_axis_idx(arr.shape, indices, axis)] def _put_along_axis_dispatcher(arr, indices, values, axis): @@ -263,15 +262,13 @@ def put_along_axis(arr, indices, values, axis): if indices.ndim != 1: raise ValueError( 'when axis=None, `indices` must have a single dimension.') - arr = arr.flat + arr = np.array(arr.flat) axis = 0 - arr_shape = (len(arr),) # flatiter has no .shape else: axis = normalize_axis_index(axis, arr.ndim) - arr_shape = arr.shape # use the fancy index - arr[_make_along_axis_idx(arr_shape, indices, axis)] = values + arr[_make_along_axis_idx(arr.shape, indices, axis)] = values def _apply_along_axis_dispatcher(func1d, axis, arr, *args, **kwargs): diff --git a/numpy/matrixlib/tests/test_masked_matrix.py b/numpy/matrixlib/tests/test_masked_matrix.py index e6df047ee6ca..3f9414ff7d30 100644 --- a/numpy/matrixlib/tests/test_masked_matrix.py +++ b/numpy/matrixlib/tests/test_masked_matrix.py @@ -116,7 +116,7 @@ def test_flat(self): # Test setting test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) testflat = test.flat - testflat[:] = testflat[[2, 1, 0]] + testflat[:] = testflat[np.array([2, 1, 0])] assert_equal(test, control) testflat[0] = 9 # test that matrices keep the correct shape (#4615) diff --git a/numpy/typing/tests/data/fail/flatiter.pyi b/numpy/typing/tests/data/fail/flatiter.pyi index 06e23fed9e3f..cdf2a8b78b7b 100644 --- a/numpy/typing/tests/data/fail/flatiter.pyi +++ b/numpy/typing/tests/data/fail/flatiter.pyi @@ -18,3 +18,5 @@ a.copy(order='C') # type: ignore[call-arg] a[np.bool()] # type: ignore[index] a[Index()] # type: ignore[call-overload] a[supports_array] # type: ignore[index] + +a[[0, 1, 2]] diff --git 
a/numpy/typing/tests/data/pass/flatiter.py b/numpy/typing/tests/data/pass/flatiter.py index e64e4261b8e7..cc7c6069a89a 100644 --- a/numpy/typing/tests/data/pass/flatiter.py +++ b/numpy/typing/tests/data/pass/flatiter.py @@ -9,7 +9,6 @@ iter(a) next(a) a[0] -a[[0, 1, 2]] a[...] a[:] a.__array__() From c7aae66faf994facdf74f648cd94554f11740a5c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 8 Aug 2025 17:34:04 +0000 Subject: [PATCH 0292/1018] MAINT: Bump github/codeql-action from 3.29.6 to 3.29.8 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.6 to 3.29.8. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/a4e1a019f5e24960714ff6296aee04b736cbc3cf...76621b61decf072c1cee8dd1ce2d2a82d33c17ed) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.8 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 59489ea79e65..cbf79b02142b 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@a4e1a019f5e24960714ff6296aee04b736cbc3cf # v3.29.6 + uses: github/codeql-action/init@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.8 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@a4e1a019f5e24960714ff6296aee04b736cbc3cf # v3.29.6 + uses: github/codeql-action/autobuild@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.8 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@a4e1a019f5e24960714ff6296aee04b736cbc3cf # v3.29.6 + uses: github/codeql-action/analyze@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.8 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index e2508a09bba7..cf637aa947a0 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning"
-        uses: github/codeql-action/upload-sarif@a4e1a019f5e24960714ff6296aee04b736cbc3cf # v2.1.27
+        uses: github/codeql-action/upload-sarif@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.8
       with:
         sarif_file: results.sarif

From e11fea7c7b0567b9b0e5558d295a1cbca4877be7 Mon Sep 17 00:00:00 2001
From: Zhi Li
Date: Fri, 8 Aug 2025 16:35:54 -0400
Subject: [PATCH 0293/1018] DOC: Add 'today' string to datetime64 documentation
 (#29514)

---
 doc/source/reference/arrays.datetime.rst | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst
index 8dbff88c918e..2095dc3aea49 100644
--- a/doc/source/reference/arrays.datetime.rst
+++ b/doc/source/reference/arrays.datetime.rst
@@ -53,7 +53,8 @@ months ('M'), weeks ('W'), and days ('D'), while the time units are
 hours ('h'), minutes ('m'), seconds ('s'), milliseconds ('ms'), and
 some additional SI-prefix seconds-based units. The `datetime64` data
 type also accepts the string "NAT", in any combination of lowercase/uppercase
-letters, for a "Not A Time" value.
+letters, for a "Not A Time" value. The string "today" is also supported and
+returns the current UTC date with day precision.
 
 .. admonition:: Example
 
@@ -91,6 +92,11 @@ letters, for a "Not A Time" value.
    >>> np.datetime64('nat')
    np.datetime64('NaT')
 
+   The current date:
+
+   >>> np.datetime64('today')
+   np.datetime64('2025-08-05')    # result will depend on the current date
+
 When creating an array of datetimes from a string, it is still possible
 to automatically select the unit from the inputs, by using the
 datetime type with generic units.

From 4d7161dc035a2666eb005dbe4256534d8747912e Mon Sep 17 00:00:00 2001
From: Zhi Li
Date: Fri, 8 Aug 2025 22:17:44 -0400
Subject: [PATCH 0294/1018] DOC: Add 'now' string to datetime64 documentation

[skip azp] [skip actions] [skip cirrus]
---
 doc/source/reference/arrays.datetime.rst | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst
index 2095dc3aea49..ac402c973fd8 100644
--- a/doc/source/reference/arrays.datetime.rst
+++ b/doc/source/reference/arrays.datetime.rst
@@ -53,8 +53,13 @@ months ('M'), weeks ('W'), and days ('D'), while the time units are
 hours ('h'), minutes ('m'), seconds ('s'), milliseconds ('ms'), and
 some additional SI-prefix seconds-based units. The `datetime64` data
 type also accepts the string "NAT", in any combination of lowercase/uppercase
-letters, for a "Not A Time" value. The string "today" is also supported and
-returns the current UTC date with day precision.
+letters, for a "Not A Time" value. The string "now" is also supported and
+returns the current UTC time. By default, it uses second ('s') precision, but
+you can specify a different unit (e.g., 'M', 'D', 'h') to truncate the result
+to that precision. Units finer than seconds (such as 'ms' or 'ns') are
+supported but will show fractional parts as zeros, effectively truncating to
+whole seconds. The string "today" is also supported and returns the current UTC
+date with day precision.
 
 .. admonition:: Example
 
@@ -92,6 +97,17 @@
>>> np.datetime64('nat') np.datetime64('NaT') + The current time (UTC, default second precision): + + >>> np.datetime64('now') + np.datetime64('2025-08-05T02:22:14') # result will depend on the current time + + >>> np.datetime64('now', 'D') + np.datetime64('2025-08-05') + + >>> np.datetime64('now', 'ms') + np.datetime64('2025-08-05T02:22:14.000') + The current date: >>> np.datetime64('today') From 0f39fd27ff149b25f74fc58d589c92b4fd881226 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 29 Oct 2024 20:43:03 +0100 Subject: [PATCH 0295/1018] BLD: update licensing metadata to use PEP 639 --- LICENSES_bundled.txt | 36 ------------- meson.build | 9 ++-- .../_core/src/multiarray/dragon4_LICENSE.txt | 27 ++++++++++ pyproject.toml | 52 +++++++++++++++++-- tools/wheels/check_license.py | 2 +- tools/wheels/cibw_before_build.sh | 1 - 6 files changed, 78 insertions(+), 49 deletions(-) delete mode 100644 LICENSES_bundled.txt create mode 100644 numpy/_core/src/multiarray/dragon4_LICENSE.txt diff --git a/LICENSES_bundled.txt b/LICENSES_bundled.txt deleted file mode 100644 index b3d8aa8bed06..000000000000 --- a/LICENSES_bundled.txt +++ /dev/null @@ -1,36 +0,0 @@ -The NumPy repository and source distributions bundle several libraries that are -compatibly licensed. We list these here. - -Name: lapack-lite -Files: numpy/linalg/lapack_lite/* -License: BSD-3-Clause - For details, see numpy/linalg/lapack_lite/LICENSE.txt - -Name: dragon4 -Files: numpy/_core/src/multiarray/dragon4.c -License: MIT - For license text, see numpy/_core/src/multiarray/dragon4.c - -Name: libdivide -Files: numpy/_core/include/numpy/libdivide/* -License: Zlib - For license text, see numpy/_core/include/numpy/libdivide/LICENSE.txt - - -Note that the following files are vendored in the repository and sdist but not -installed in built numpy packages: - -Name: Meson -Files: vendored-meson/meson/* -License: Apache 2.0 - For license text, see vendored-meson/meson/COPYING - -Name: spin -Files: .spin/cmds.py -License: BSD-3 - For license text, see .spin/LICENSE - -Name: tempita -Files: numpy/_build_utils/tempita/* -License: MIT - For details, see numpy/_build_utils/tempita/LICENCE.txt diff --git a/meson.build b/meson.build index 0d436352cbbd..2cb7ce987ad5 100644 --- a/meson.build +++ b/meson.build @@ -1,12 +1,9 @@ project( 'NumPy', 'c', 'cpp', 'cython', - version: run_command( - # This should become `numpy/_version.py` in NumPy 2.0 - ['numpy/_build_utils/gitversion.py'], - check: true).stdout().strip(), - license: 'BSD-3', - meson_version: '>=1.5.2', # version in vendored-meson is 1.5.2 + version: run_command(['numpy/_build_utils/gitversion.py'], check: true).stdout().strip(), + license: 'BSD-3-Clause AND 0BSD AND MIT AND Zlib AND CC0-1.0', + meson_version: '>=1.8.3', # version in vendored-meson default_options: [ 'buildtype=debugoptimized', 'b_ndebug=if-release', diff --git a/numpy/_core/src/multiarray/dragon4_LICENSE.txt b/numpy/_core/src/multiarray/dragon4_LICENSE.txt new file mode 100644 index 000000000000..7bd49e7074a8 --- /dev/null +++ b/numpy/_core/src/multiarray/dragon4_LICENSE.txt @@ -0,0 +1,27 @@ +Copyright (c) 2014 Ryan Juckett + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, 
subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+IN THE SOFTWARE.
+
+dragon4.c|h contains a modified version of Ryan Juckett's Dragon4
+implementation, obtained from https://www.ryanjuckett.com,
+which has been ported from C++ to C and which has
+modifications specific to printing floats in numpy.
+
+Ryan Juckett's original code was under the Zlib license; he gave numpy
+permission to include it under the MIT license instead.
diff --git a/pyproject.toml b/pyproject.toml
index f9cfeadee599..b678be83a486 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,16 +1,13 @@
 [build-system]
 build-backend = "mesonpy"
 requires = [
-    "meson-python>=0.15.0",
+    "meson-python>=0.18.0",
     "Cython>=3.0.6",  # keep in sync with version check in meson.build
 ]
 
 [project]
 name = "numpy"
 version = "2.4.0.dev0"
-# TODO: add `license-files` once PEP 639 is accepted (see meson-python#88)
-license = {file = "LICENSE.txt"}
-
 description = "Fundamental package for array computing in Python"
 authors = [{name = "Travis E. Oliphant et al."}]
 maintainers = [
@@ -22,7 +19,6 @@ classifiers = [
     'Development Status :: 5 - Production/Stable',
     'Intended Audience :: Science/Research',
     'Intended Audience :: Developers',
-    'License :: OSI Approved :: BSD License',
     'Programming Language :: C',
     'Programming Language :: Python',
     'Programming Language :: Python :: 3',
@@ -40,6 +36,52 @@ classifiers = [
     'Operating System :: Unix',
     'Operating System :: MacOS',
 ]
+# License info:
+# - The main NumPy project license is BSD-3-Clause.
+# - The SPDX license expression below reflects installed numpy packages when
+#   built from source (e.g., with `python -m build --wheel`), with no vendoring.
+# - That SPDX expression is therefore incomplete for:
+#   (a) sdists - see the comment below `license-files` for other licenses
+#       included in the sdist
+#   (b) wheels on PyPI - most wheels include vendored libraries with additional licenses:
+#       - libopenblas : BSD-3-Clause AND BSD-3-Clause-Attribution (all except arm64 macOS>=14)
+#       - libgfortran : GPL-3.0-with-GCC-exception (all except arm64 macOS>=14)
+#       - libquadmath : LGPL-2.1-or-later (all except arm64 macOS>=14 and Windows)
+#     The licenses for these vendored components are dynamically included
+#     in the build process for PyPI wheels.
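+#   (An illustrative check, assuming an installed wheel built from this file:
+#   `python -c "import importlib.metadata as im; print(im.metadata('numpy')['License-Expression'])"`
+#   should print the SPDX expression declared below.)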
+license = 'BSD-3-Clause AND 0BSD AND MIT AND Zlib AND CC0-1.0' +license-files = [ + 'LICENSE.txt', # BSD-3-Clause + 'numpy/_core/include/numpy/libdivide/LICENSE.txt', # Zlib + 'numpy/_core/src/common/pythoncapi-compat/COPYING', # 0BSD + 'numpy/_core/src/highway/LICENSE-BSD3', # BSD-3-Clause + 'numpy/_core/src/multiarray/dragon4_LICENSE.txt', # MIT + 'numpy/_core/src/npysort/x86-simd-sort/LICENSE.md', # BSD-3-Clause + 'numpy/_core/src/umath/svml/LICENSE', # BSD-3-Clause + 'numpy/fft/pocketfft/LICENSE.md', # BSD-3-Clause + 'numpy/ma/LICENSE', # BSD-3-Clause + 'numpy/linalg/lapack_lite/LICENSE.txt', # BSD-3-Clause + 'numpy/random/LICENSE.md', # BSD-3-Clause + 'numpy/random/src/distributions/LICENSE.md', # BSD-3-Clause AND MIT + 'numpy/random/src/mt19937/LICENSE.md', # BSD-3-Clause AND MIT + 'numpy/random/src/pcg64/LICENSE.md', # MIT + 'numpy/random/src/philox/LICENSE.md', # BSD-3-Clause + 'numpy/random/src/sfc64/LICENSE.md', # MIT + 'numpy/random/src/splitmix64/LICENSE.md', # CC0-1.0 +] +# The license files below apply only to files in the repo and sdist, not to +# installed `numpy` packages or wheels (build/doc tools don't affect the +# license of the installed package). We have to make a choice whether to add +# those to the SPDX expression above since PEP 639 is unclear on the +# differences; we choose to make the SPDX expression reflect *a wheel built +# from the sources*. +# '.spin/LICENSE', # BSD-3-Clause +# 'doc/source/_static/scipy-mathjax/LICENSE', # Apache-2.0 +# 'numpy/_build_utils/tempita/LICENSE.txt', # BSD-3-Clause +# 'vendored-meson/meson/COPYING', # Apache-2.0 +# Note that the commented out license files are still included in the sdist, +# just not in Core Metadata and in the .dist-info directory. + [project.scripts] f2py = 'numpy.f2py.f2py2e:main' diff --git a/tools/wheels/check_license.py b/tools/wheels/check_license.py index 572295b4ca2f..9aa50015d00b 100644 --- a/tools/wheels/check_license.py +++ b/tools/wheels/check_license.py @@ -38,7 +38,7 @@ def main(): distinfo_path = next(iter(sitepkgs.glob("numpy-*.dist-info"))) # Check license text - license_txt = distinfo_path / "LICENSE.txt" + license_txt = distinfo_path / "licenses" / "LICENSE.txt" with open(license_txt, encoding="utf-8") as f: text = f.read() diff --git a/tools/wheels/cibw_before_build.sh b/tools/wheels/cibw_before_build.sh index e41e5d37316b..ed2640471fed 100644 --- a/tools/wheels/cibw_before_build.sh +++ b/tools/wheels/cibw_before_build.sh @@ -10,7 +10,6 @@ rm -rf build echo "" >> $PROJECT_DIR/LICENSE.txt echo "----" >> $PROJECT_DIR/LICENSE.txt echo "" >> $PROJECT_DIR/LICENSE.txt -cat $PROJECT_DIR/LICENSES_bundled.txt >> $PROJECT_DIR/LICENSE.txt if [[ $RUNNER_OS == "Linux" ]] ; then cat $PROJECT_DIR/tools/wheels/LICENSE_linux.txt >> $PROJECT_DIR/LICENSE.txt elif [[ $RUNNER_OS == "macOS" ]]; then From 7c34da750a1f46de91ac66d34e5feed62de2d018 Mon Sep 17 00:00:00 2001 From: Maryanne Wachter Date: Mon, 11 Aug 2025 13:03:15 -0700 Subject: [PATCH 0296/1018] BUG: left bit shift undefined behavior (#29539) --- numpy/_core/src/common/simd/avx2/arithmetic.h | 6 +++--- numpy/_core/src/common/simd/sse/arithmetic.h | 6 +++--- tools/ci/ubsan_suppressions_x86_64.txt | 3 --- 3 files changed, 6 insertions(+), 9 deletions(-) diff --git a/numpy/_core/src/common/simd/avx2/arithmetic.h b/numpy/_core/src/common/simd/avx2/arithmetic.h index 58d842a6d3a4..15b9be85dc51 100644 --- a/numpy/_core/src/common/simd/avx2/arithmetic.h +++ b/numpy/_core/src/common/simd/avx2/arithmetic.h @@ -215,9 +215,9 @@ NPY_FINLINE npyv_s64 
npyv_divc_s64(npyv_s64 a, const npyv_s64x3 divisor) // q = (a + mulhi) >> sh __m256i q = _mm256_add_epi64(a, mulhi); // emulate arithmetic right shift - const __m256i sigb = npyv_setall_s64(1LL << 63); - q = _mm256_srl_epi64(_mm256_add_epi64(q, sigb), shf1); - q = _mm256_sub_epi64(q, _mm256_srl_epi64(sigb, shf1)); + const __m256i sbit = npyv_setall_s64(0x8000000000000000); + q = _mm256_srl_epi64(_mm256_add_epi64(q, sbit), shf1); + q = _mm256_sub_epi64(q, _mm256_srl_epi64(sbit, shf1)); // q = q - XSIGN(a) // trunc(a/d) = (q ^ dsign) - dsign q = _mm256_sub_epi64(q, asign); diff --git a/numpy/_core/src/common/simd/sse/arithmetic.h b/numpy/_core/src/common/simd/sse/arithmetic.h index 357b136d25cd..b50942ab75ad 100644 --- a/numpy/_core/src/common/simd/sse/arithmetic.h +++ b/numpy/_core/src/common/simd/sse/arithmetic.h @@ -251,9 +251,9 @@ NPY_FINLINE npyv_s64 npyv_divc_s64(npyv_s64 a, const npyv_s64x3 divisor) // q = (a + mulhi) >> sh __m128i q = _mm_add_epi64(a, mulhi); // emulate arithmetic right shift - const __m128i sigb = npyv_setall_s64(1LL << 63); - q = _mm_srl_epi64(_mm_add_epi64(q, sigb), divisor.val[1]); - q = _mm_sub_epi64(q, _mm_srl_epi64(sigb, divisor.val[1])); + const __m128i sbit = npyv_setall_s64(0x8000000000000000); + q = _mm_srl_epi64(_mm_add_epi64(q, sbit), divisor.val[1]); + q = _mm_sub_epi64(q, _mm_srl_epi64(sbit, divisor.val[1])); // q = q - XSIGN(a) // trunc(a/d) = (q ^ dsign) - dsign q = _mm_sub_epi64(q, asign); diff --git a/tools/ci/ubsan_suppressions_x86_64.txt b/tools/ci/ubsan_suppressions_x86_64.txt index d9872a691a81..5e4316ce3715 100644 --- a/tools/ci/ubsan_suppressions_x86_64.txt +++ b/tools/ci/ubsan_suppressions_x86_64.txt @@ -11,9 +11,6 @@ signed-integer-overflow:* # otherwise ubsan may detect system file alignment errors outside numpy alignment:* -# suggested fix for runtime error: replace left bit shift with LLONG_MIN constant -shift-base:_core/src/common/simd/sse/arithmetic.h -shift-base:_core/src/common/simd/avx2/arithmetic.h # suggested fix for runtime error: use INT_MIN constant shift-base:_core/src/umath/_rational_tests.c # suggested fix for runtime error: check for overflow if signed From a2f142b6039413dfb43f79aee16dfa3b19c37d1f Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Mon, 11 Aug 2025 21:29:06 +0100 Subject: [PATCH 0297/1018] TYP: Type ``MaskedIterator`` (#29526) --- numpy/ma/core.pyi | 85 ++++++++++++++++++++++++--- numpy/typing/tests/data/pass/ma.py | 7 ++- numpy/typing/tests/data/reveal/ma.pyi | 10 +++- 3 files changed, 91 insertions(+), 11 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index e99e0a527773..55e5867247bb 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1,15 +1,20 @@ # pyright: reportIncompatibleMethodOverride=false # ruff: noqa: ANN001, ANN002, ANN003, ANN201, ANN202 ANN204, ANN401 +import datetime as dt from _typeshed import Incomplete -from collections.abc import Sequence +from collections.abc import Iterator, Sequence from typing import ( Any, + Generic, Literal, Never, NoReturn, Self, + SupportsComplex, + SupportsFloat, SupportsIndex, + SupportsInt, TypeAlias, overload, ) @@ -37,6 +42,7 @@ from numpy import ( dtype, dtypes, expand_dims, + flexible, float16, float32, float64, @@ -82,9 +88,11 @@ from numpy._typing import ( _ArrayLikeString_co, _ArrayLikeTD64_co, _ArrayLikeUInt_co, + _CharLike_co, _DTypeLike, _DTypeLikeBool, _IntLike_co, + _NestedSequence, _ScalarLike_co, _Shape, _ShapeLike, @@ -296,6 +304,12 @@ _MaskedArrayTD64_co: TypeAlias = 
_MaskedArray[timedelta64 | integer | np.bool] _ArrayInt_co: TypeAlias = NDArray[integer | bool_] _Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] +_ConvertibleToInt: TypeAlias = SupportsInt | SupportsIndex | _CharLike_co +_ConvertibleToFloat: TypeAlias = SupportsFloat | SupportsIndex | _CharLike_co +_ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co +_ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None +_ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None + MaskType = bool_ nomask: bool_[Literal[False]] @@ -447,15 +461,68 @@ masked_print_option: _MaskedPrintOption def flatten_structured_array(a): ... -class MaskedIterator: - ma: Any +class MaskedIterator(Generic[_ShapeT_co, _DTypeT_co]): + ma: MaskedArray[_ShapeT_co, _DTypeT_co] dataiter: Any maskiter: Any - def __init__(self, ma): ... - def __iter__(self): ... - def __getitem__(self, indx): ... - def __setitem__(self, index, value): ... - def __next__(self): ... + def __init__(self, ma: MaskedArray[_ShapeT_co, _DTypeT_co]) -> None: ... + def __iter__(self) -> Iterator[Any]: ... + + # Similar to `MaskedArray.__getitem__` but without the `void` case. + @overload + def __getitem__(self, indx: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self, indx: SupportsIndex | tuple[SupportsIndex, ...], /) -> Any: ... + @overload + def __getitem__(self, indx: _ToIndices, /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + + # Similar to `ndarray.__setitem__` but without the `void` case. + @overload # flexible | object_ | bool + def __setitem__( + self: MaskedIterator[Any, dtype[flexible | object_ | np.bool] | dtypes.StringDType], + index: _ToIndices, + value: object, + /, + ) -> None: ... + @overload # integer + def __setitem__( + self: MaskedIterator[Any, dtype[integer]], + index: _ToIndices, + value: _ConvertibleToInt | _NestedSequence[_ConvertibleToInt] | _ArrayLikeInt_co, + /, + ) -> None: ... + @overload # floating + def __setitem__( + self: MaskedIterator[Any, dtype[floating]], + index: _ToIndices, + value: _ConvertibleToFloat | _NestedSequence[_ConvertibleToFloat | None] | _ArrayLikeFloat_co | None, + /, + ) -> None: ... + @overload # complexfloating + def __setitem__( + self: MaskedIterator[Any, dtype[complexfloating]], + index: _ToIndices, + value: _ConvertibleToComplex | _NestedSequence[_ConvertibleToComplex | None] | _ArrayLikeNumber_co | None, + /, + ) -> None: ... + @overload # timedelta64 + def __setitem__( + self: MaskedIterator[Any, dtype[timedelta64]], + index: _ToIndices, + value: _ConvertibleToTD64 | _NestedSequence[_ConvertibleToTD64], + /, + ) -> None: ... + @overload # datetime64 + def __setitem__( + self: MaskedIterator[Any, dtype[datetime64]], + index: _ToIndices, + value: _ConvertibleToDT64 | _NestedSequence[_ConvertibleToDT64], + /, + ) -> None: ... + @overload # catch-all + def __setitem__(self, index: _ToIndices, value: ArrayLike, /) -> None: ... + + def __next__(self) -> Any: ... class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): __array_priority__: Any @@ -607,7 +674,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def baseclass(self) -> type[NDArray[Any]]: ... data: Any @property - def flat(self) -> MaskedIterator: ... + def flat(self) -> MaskedIterator[_ShapeT_co, _DTypeT_co]: ... @flat.setter def flat(self, value: ArrayLike, /) -> None: ... 
@property diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py index 2e2246fb88d2..3ccea66861eb 100644 --- a/numpy/typing/tests/data/pass/ma.py +++ b/numpy/typing/tests/data/pass/ma.py @@ -42,7 +42,12 @@ MAR_i.fill_value = 0 -MAR_f.flat = [9] +MAR_b.flat[MAR_i > 0] = False +MAR_i.flat[:] = 1 +MAR_f.flat[[0]] = AR_LIKE_f +MAR_c.flat[[0, 0]] = [3, 4 + 3j] +MAR_td64.flat[0] = dt.timedelta(1) +MAR_dt64.flat[0] = dt.datetime(2020, 1, 1) MAR_b[MAR_i > 0] = False MAR_i[:] = 1 diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index a09fb2d75997..bd4e4e98373a 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -462,7 +462,15 @@ assert_type(MAR_f8.argsort(), MaskedArray[np.intp]) assert_type(MAR_f8.argsort(axis=0, kind='heap', order=('x', 'y')), MaskedArray[np.intp]) assert_type(MAR_f8.argsort(endwith=True, fill_value=1.5, stable=False), MaskedArray[np.intp]) -assert_type(MAR_f8.flat, np.ma.core.MaskedIterator) +assert_type(MAR_2d_f4.flat, np.ma.core.MaskedIterator[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.flat.ma, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.flat[AR_i8], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[[1, 2, 3]], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[1:], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[:], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[0, 0], Any) +assert_type(MAR_2d_f4.flat[:, np.newaxis], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[..., -1], MaskedArray[np.float32]) def invalid_resize() -> None: assert_type(MAR_f8.resize((1, 1)), NoReturn) # type: ignore[arg-type] From dbae155b78e2dd47dbf3697d40732ddc75bac1ea Mon Sep 17 00:00:00 2001 From: MyUserNameWasTakenLinux Date: Mon, 11 Aug 2025 15:36:40 -0600 Subject: [PATCH 0298/1018] STY: fix typo in dtypemeta.c [skip azp][skip actions] --- numpy/_core/src/multiarray/dtypemeta.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 5ac7ef3b2320..293d7dfee1b3 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -1398,7 +1398,7 @@ PyArray_DTypeMeta *_Void_dtype = NULL; /*NUMPY_API - * Fetch the ArrFuncs struct which new lives on the DType and not the + * Fetch the ArrFuncs struct which now lives on the DType and not the * descriptor. Use of this struct should be avoided but remains necessary * for certain functionality. * From 05dbc23b5245b4f0e6fec7ef2329cffb85d3c62e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Aug 2025 01:44:01 +0000 Subject: [PATCH 0299/1018] MAINT: Bump actions/checkout from 4.2.2 to 5.0.0 Bumps [actions/checkout](https://github.com/actions/checkout) from 4.2.2 to 5.0.0. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/11bd71901bbe5b1630ceea73d27597364c9af683...08c6903cd8c0fde910a37f88322edcfb5dd907a8) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: 5.0.0 dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 2 +- .github/workflows/compiler_sanitizers.yml | 6 +++--- .github/workflows/cygwin.yml | 2 +- .github/workflows/dependency-review.yml | 2 +- .github/workflows/emscripten.yml | 2 +- .github/workflows/linux.yml | 24 +++++++++++------------ .github/workflows/linux_blas.yml | 18 ++++++++--------- .github/workflows/linux_qemu.yml | 4 ++-- .github/workflows/linux_simd.yml | 12 ++++++------ .github/workflows/macos.yml | 4 ++-- .github/workflows/mypy.yml | 2 +- .github/workflows/mypy_primer.yml | 2 +- .github/workflows/scorecards.yml | 2 +- .github/workflows/wheels.yml | 6 +++--- .github/workflows/windows.yml | 6 +++--- 15 files changed, 47 insertions(+), 47 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index cbf79b02142b..267d4c5b768f 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -41,7 +41,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: persist-credentials: false diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index dc65449ba752..e91468b85068 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -26,7 +26,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: macos-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -82,7 +82,7 @@ jobs: options: --shm-size=2g # increase memory for large matrix ops steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Trust working directory and initialize submodules run: | git config --global --add safe.directory /__w/numpy/numpy @@ -106,7 +106,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index 174d04efb567..9d95da102fee 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -18,7 +18,7 @@ jobs: # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 5036a94ce399..158a826825f1 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - name: 'Checkout Repository' - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: persist-credentials: false - name: 'Dependency Review' diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 8269a8dcd705..a1d5b399988e 100644 --- a/.github/workflows/emscripten.yml +++ 
b/.github/workflows/emscripten.yml @@ -43,7 +43,7 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout NumPy - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 7ac5ea673d61..40e372cbf3e4 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -33,7 +33,7 @@ jobs: runs-on: ubuntu-latest continue-on-error: true steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-depth: 0 @@ -60,7 +60,7 @@ jobs: matrix: version: ["3.11", "3.12", "3.13", "3.14-dev", "3.14t-dev"] steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -75,7 +75,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -94,7 +94,7 @@ jobs: runs-on: ubuntu-24.04 if: github.event_name != 'push' steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -124,7 +124,7 @@ jobs: needs: [smoke_test] runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -163,7 +163,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: ubuntu-22.04-arm steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -199,7 +199,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: ubuntu-22.04-arm steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -241,7 +241,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -280,7 +280,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -315,13 +315,13 @@ jobs: if: github.event_name != 'push' steps: - name: Checkout NumPy - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true persist-credentials: false - name: Checkout array-api-tests - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: 
actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: repository: data-apis/array-api-tests ref: '3c273cd34d51c64ed893737306d36adab23a94a1' # v2025.05.23 @@ -353,7 +353,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index 54d217cc12fb..633de1fbb84c 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -65,7 +65,7 @@ jobs: USE_NIGHTLY_OPENBLAS: ${{ matrix.USE_NIGHTLY_OPENBLAS }} name: "Test Linux (${{ matrix.USE_NIGHTLY_OPENBLAS && 'nightly' || 'stable' }} OpenBLAS)" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -127,7 +127,7 @@ jobs: run: | dnf install git gcc-gfortran g++ python3-devel openblas-devel -y - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -163,7 +163,7 @@ jobs: run: | dnf install git gcc-gfortran g++ python3-devel flexiblas-devel -y - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -194,7 +194,7 @@ jobs: runs-on: ubuntu-latest name: "OpenBLAS with CMake" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -223,7 +223,7 @@ jobs: runs-on: ubuntu-latest name: "Debian libblas/liblapack" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -260,7 +260,7 @@ jobs: # If it is needed in the future, use install name `pkgconf-pkg-config` zypper install -y git gcc-c++ python3-pip python3-devel blas cblas lapack - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -285,7 +285,7 @@ jobs: runs-on: ubuntu-latest name: "MKL (LP64, ILP64, SDL)" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -349,7 +349,7 @@ jobs: runs-on: ubuntu-latest name: "BLIS" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -386,7 +386,7 @@ jobs: runs-on: ubuntu-latest name: "ATLAS" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index 84e221439aa6..53780ae097a3 100644 --- a/.github/workflows/linux_qemu.yml 
+++ b/.github/workflows/linux_qemu.yml @@ -91,7 +91,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -198,7 +198,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index a9f065e25cc0..dcc689739d20 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -58,7 +58,7 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true -Dcpu-dispatch=none" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -76,7 +76,7 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -123,7 +123,7 @@ jobs: args: "-Dallow-noblas=true -Dcpu-baseline=native -Dcpu-dispatch=none" name: "ARM64 SIMD - ${{ matrix.config.name }}" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -176,7 +176,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -191,7 +191,7 @@ jobs: needs: [baseline_only] runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -242,7 +242,7 @@ jobs: needs: [baseline_only] runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 8931b76c5dee..da0972678b15 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -29,7 +29,7 @@ jobs: python-version: ["3.12"] steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -118,7 +118,7 @@ jobs: version: ["3.11", "3.14t-dev"] steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 81fa57239b9b..e362a29f628a 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -50,7 +50,7 @@ jobs: - [ubuntu-latest, '3.12'] - [windows-latest, '3.11'] steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: 
actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml index bfbf34fa7817..544c568a522d 100644 --- a/.github/workflows/mypy_primer.yml +++ b/.github/workflows/mypy_primer.yml @@ -24,7 +24,7 @@ jobs: shard-index: [0] # e.g. change this to [0, 1, 2] and --num-shards below to 3 fail-fast: false steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: path: numpy_to_test fetch-depth: 0 diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index cf637aa947a0..50fb7a12f432 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -25,7 +25,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v3.1.0 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v3.1.0 with: persist-credentials: false diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 4fcd8a58f53a..e87af415d3c5 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -49,7 +49,7 @@ jobs: message: ${{ steps.commit_message.outputs.message }} steps: - name: Checkout numpy - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 # Gets the correct commit message for pull request with: ref: ${{ github.event.pull_request.head.sha }} @@ -115,7 +115,7 @@ jobs: IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }} steps: - name: Checkout numpy - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: true persist-credentials: false @@ -245,7 +245,7 @@ jobs: # IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }} steps: - name: Checkout numpy - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: true persist-credentials: false diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index e723b787a5de..dba4f74496b2 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -28,7 +28,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -99,7 +99,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -173,7 +173,7 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true From 8276cb4cb3f3031bad510a776b6be8b3ffc1695f Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Tue, 12 Aug 2025 19:18:07 +0900 Subject: [PATCH 0300/1018] fix: File exists error --- tools/ci/check_c_api_usage.sh | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/tools/ci/check_c_api_usage.sh b/tools/ci/check_c_api_usage.sh index 9f4203284823..b04410d7667d 100644 --- a/tools/ci/check_c_api_usage.sh +++ b/tools/ci/check_c_api_usage.sh @@ -24,7 +24,7 @@ echo "Scanning $(echo "$ALL_FILES" | wc -l) C/C++ source files..." # Prepare a result file mkdir -p .tmp -OUTPUT=$(mktemp .tmp/c_api_usage_report.XXXXXX.txt) +OUTPUT=$(mktemp .tmp/c_api_usage_report.XXXXXX) echo -e "Running Suspicious C API usage report workflow...\n" > $OUTPUT FAIL=0 From bb198ee93eb15afd80b2706d1b9b483edaf4bf55 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Aug 2025 19:59:18 +0000 Subject: [PATCH 0301/1018] MAINT: Bump github/codeql-action from 3.29.8 to 3.29.9 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.8 to 3.29.9. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/76621b61decf072c1cee8dd1ce2d2a82d33c17ed...df559355d593797519d70b90fc8edd5db049e7a2) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.9 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index cbf79b02142b..61d9c6ec2248 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.8 + uses: github/codeql-action/init@df559355d593797519d70b90fc8edd5db049e7a2 # v3.29.9 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.8 + uses: github/codeql-action/autobuild@df559355d593797519d70b90fc8edd5db049e7a2 # v3.29.9 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.8 + uses: github/codeql-action/analyze@df559355d593797519d70b90fc8edd5db049e7a2 # v3.29.9 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index cf637aa947a0..50871457563e 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning"
-        uses: github/codeql-action/upload-sarif@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.8
+        uses: github/codeql-action/upload-sarif@df559355d593797519d70b90fc8edd5db049e7a2 # v3.29.9
       with:
         sarif_file: results.sarif

From 7683d859f6e81735d66289bd1a8a3d3b84186d67 Mon Sep 17 00:00:00 2001
From: Leonardo Paredes <49594379+phdparedes@users.noreply.github.com>
Date: Wed, 13 Aug 2025 07:25:51 -0700
Subject: [PATCH 0302/1018] BUG: allow `MaskedArray.fill_value` to be a string
 when `dtype=StringDType` (#29423)

* BUG: allow ma.MaskedArray.fill_value to be a string when dtype=StringDType

This fix adds the StringDType data type character code 'T' to the list of
accepted data types that can use a 'fill_value' as string. Previously only
None was accepted. See issue #29421.

* TST: add tests for StringDType MaskedArray use

* TST: add tests for fill_value in StringDType MaskedArray

* TST: fix format in the test functions added

* Add default fill value 'N/A' for StringDType

* TST: Add tests for StringDType masked array default fill and slicing

* DOC: add release note for StringDType fill_value support (gh-29423)

---------

Co-authored-by: Nathan Goldbaum
---
 .../upcoming_changes/29423.new_feature.rst    |  7 ++
 numpy/ma/core.py                              | 26 ++++---
 numpy/ma/tests/test_core.py                   | 77 +++++++++++++++++--
 3 files changed, 93 insertions(+), 17 deletions(-)
 create mode 100644 doc/release/upcoming_changes/29423.new_feature.rst

diff --git a/doc/release/upcoming_changes/29423.new_feature.rst b/doc/release/upcoming_changes/29423.new_feature.rst
new file mode 100644
index 000000000000..7e83604b0049
--- /dev/null
+++ b/doc/release/upcoming_changes/29423.new_feature.rst
@@ -0,0 +1,7 @@
+``StringDType`` fill_value support in `numpy.ma.MaskedArray`
+------------------------------------------------------------
+Masked arrays now accept and preserve a Python ``str`` as their ``fill_value`` when
+using the variable-width ``StringDType`` (kind ``'T'``), including through slicing
+and views. The default is ``'N/A'`` and may be overridden by any valid string.
+This fixes issue `gh-29421 <https://github.com/numpy/numpy/issues/29421>`__ and was
+implemented in pull request `gh-29423 <https://github.com/numpy/numpy/pull/29423>`__.
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 9b705460dd85..621dbd94640b 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -181,7 +181,8 @@ class MaskError(MAError):
     'S': b'N/A',
     'u': 999999,
     'V': b'???',
-    'U': 'N/A'
+    'U': 'N/A',
+    'T': 'N/A'
 }
 
 # Add datetime64 and timedelta64 types
@@ -264,16 +265,17 @@ def default_fill_value(obj):
     The default filling value depends on the datatype of the input
     array or the type of the input scalar:
 
-       ========  ========
-       datatype  default
-       ========  ========
-       bool      True
-       int       999999
-       float     1.e20
-       complex   1.e20+0j
-       object    '?'
-       string    'N/A'
-       ========  ========
+       ===========  ========
+       datatype     default
+       ===========  ========
+       bool         True
+       int          999999
+       float        1.e20
+       complex      1.e20+0j
+       object       '?'
+       string       'N/A'
+       StringDType  'N/A'
+       ===========  ========
 
     For structured types, a structured scalar is returned, with each field
     the default fill value for its type.
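A hedged illustration of the defaults documented in the table above, assuming
a NumPy build where ``np.dtypes.StringDType`` is available; the first output
follows the ``'T': 'N/A'`` mapping this patch adds:

    >>> import numpy as np
    >>> np.ma.default_fill_value(np.dtypes.StringDType())
    'N/A'
    >>> np.ma.default_fill_value(np.dtype(float))
    1e+20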
@@ -498,7 +500,7 @@ def _check_fill_value(fill_value, ndtype): fill_value = np.asarray(fill_value, dtype=object) fill_value = np.array(_recursive_set_fill_value(fill_value, ndtype), dtype=ndtype) - elif isinstance(fill_value, str) and (ndtype.char not in 'OSVU'): + elif isinstance(fill_value, str) and (ndtype.char not in 'OSTVU'): # Note this check doesn't work if fill_value is not a scalar err_msg = "Cannot set fill value of string with array of dtype %s" raise TypeError(err_msg % ndtype) diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 5327963999e0..bdef61dac3ba 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -1810,7 +1810,7 @@ def test_eq_ne_structured_extra(self): el_by_el = [m1[name] != m2[name] for name in dt.names] assert_equal(array(el_by_el, dtype=bool).any(), ne_expected) - @pytest.mark.parametrize('dt', ['S', 'U']) + @pytest.mark.parametrize('dt', ['S', 'U', 'T']) @pytest.mark.parametrize('fill', [None, 'A']) def test_eq_for_strings(self, dt, fill): # Test the equality of structured arrays @@ -1842,7 +1842,7 @@ def test_eq_for_strings(self, dt, fill): assert_equal(test.mask, [True, False]) assert_(test.fill_value == True) - @pytest.mark.parametrize('dt', ['S', 'U']) + @pytest.mark.parametrize('dt', ['S', 'U', 'T']) @pytest.mark.parametrize('fill', [None, 'A']) def test_ne_for_strings(self, dt, fill): # Test the equality of structured arrays @@ -1992,15 +1992,23 @@ def test_comparisons_for_numeric(self, op, dt1, dt2, fill): assert_equal(test.mask, [True, False]) assert_(test.fill_value == True) + @pytest.mark.parametrize('dt', ['S', 'U', 'T']) @pytest.mark.parametrize('op', [operator.le, operator.lt, operator.ge, operator.gt]) @pytest.mark.parametrize('fill', [None, "N/A"]) - def test_comparisons_strings(self, op, fill): + def test_comparisons_strings(self, dt, op, fill): # See gh-21770, mask propagation is broken for strings (and some other # cases) so we explicitly test strings here. # In principle only == and != may need special handling... 
- ma1 = masked_array(["a", "b", "cde"], mask=[0, 1, 0], fill_value=fill) - ma2 = masked_array(["cde", "b", "a"], mask=[0, 1, 0], fill_value=fill) + ma1 = masked_array(["a", "b", "cde"], mask=[0, 1, 0], fill_value=fill, dtype=dt) + ma2 = masked_array(["cde", "b", "a"], mask=[0, 1, 0], fill_value=fill, dtype=dt) + assert_equal(op(ma1, ma2)._data, op(ma1._data, ma2._data)) + + if isinstance(fill, str): + fill = np.array(fill, dtype=dt) + + ma1 = masked_array(["a", "b", "cde"], mask=[0, 1, 0], fill_value=fill, dtype=dt) + ma2 = masked_array(["cde", "b", "a"], mask=[0, 1, 0], fill_value=fill, dtype=dt) assert_equal(op(ma1, ma2)._data, op(ma1._data, ma2._data)) @pytest.mark.filterwarnings("ignore:.*Comparison to `None`.*:FutureWarning") @@ -5692,6 +5700,65 @@ def test_default_fill_value_complex(): assert_(default_fill_value(1 + 1j) == 1.e20 + 0.0j) +def test_string_dtype_fill_value_on_construction(): + # Regression test for gh-29421: allow string fill_value on StringDType masked arrays + dt = np.dtypes.StringDType() + data = np.array(["A", "test", "variable", ""], dtype=dt) + mask = [True, False, True, True] + # Prior to the fix, this would TypeError; now it should succeed + arr = np.ma.MaskedArray(data, mask=mask, fill_value="FILL", dtype=dt) + assert isinstance(arr.fill_value, str) + assert arr.fill_value == "FILL" + filled = arr.filled() + # Masked positions should be replaced by 'FILL' + assert filled.tolist() == ["FILL", "test", "FILL", "FILL"] + + +def test_string_dtype_default_fill_value(): + # Regression test for gh-29421: default fill_value for StringDType is 'N/A' + dt = np.dtypes.StringDType() + data = np.array(['x', 'y', 'z'], dtype=dt) + # no fill_value passed → uses default_fill_value internally + arr = np.ma.MaskedArray(data, mask=[True, False, True], dtype=dt) + # ensure it’s stored as a Python str and equals the expected default + assert isinstance(arr.fill_value, str) + assert arr.fill_value == 'N/A' + # masked slots should be replaced by that default + assert arr.filled().tolist() == ['N/A', 'y', 'N/A'] + + +def test_string_dtype_fill_value_persists_through_slice(): + # Regression test for gh-29421: .fill_value survives slicing/viewing + dt = np.dtypes.StringDType() + arr = np.ma.MaskedArray( + ['a', 'b', 'c'], + mask=[True, False, True], + dtype=dt + ) + arr.fill_value = 'Z' + # slice triggers __array_finalize__ + sub = arr[1:] + # the slice should carry the same fill_value and behavior + assert isinstance(sub.fill_value, str) + assert sub.fill_value == 'Z' + assert sub.filled().tolist() == ['b', 'Z'] + + +def test_setting_fill_value_attribute(): + # Regression test for gh-29421: setting .fill_value post-construction works too + dt = np.dtypes.StringDType() + arr = np.ma.MaskedArray( + ["x", "longstring", "mid"], mask=[False, True, False], dtype=dt + ) + # Setting the attribute should not raise + arr.fill_value = "Z" + assert arr.fill_value == "Z" + # And filled() should use the new fill_value + assert arr.filled()[0] == "x" + assert arr.filled()[1] == "Z" + assert arr.filled()[2] == "mid" + + def test_ufunc_with_output(): # check that giving an output argument always returns that output. # Regression test for gh-8416. 
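For context, the end-to-end behavior that patch 0302 pins down looks roughly like
the following doctest-style sketch. It is drawn directly from the tests above and
assumes a NumPy build that includes this patch; the values "FILL" and the
example data are illustrative only:

    >>> import numpy as np
    >>> dt = np.dtypes.StringDType()
    >>> data = np.array(["A", "test", "variable", ""], dtype=dt)
    >>> # A string fill_value is now accepted at construction time
    >>> # (previously this raised a TypeError for kind 'T')...
    >>> arr = np.ma.MaskedArray(data, mask=[True, False, True, True],
    ...                         fill_value="FILL", dtype=dt)
    >>> arr.filled().tolist()   # masked slots are replaced by the fill value
    ['FILL', 'test', 'FILL', 'FILL']
    >>> # ...and when no fill_value is given, the new 'T' entry in the
    >>> # default fill-value table supplies 'N/A'.
    >>> np.ma.MaskedArray(data, mask=[True, False, True, True], dtype=dt).fill_value
    'N/A'

The core of the change is simply adding 'T' to the character codes permitted in
`_check_fill_value` plus the 'N/A' default, which is why the tests focus on
construction, slicing, and attribute assignment rather than new machinery.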
From 87e208c900ef0ac46bb3ea861c811f94f478698f Mon Sep 17 00:00:00 2001 From: Maanas Arora Date: Thu, 14 Aug 2025 09:20:12 -0400 Subject: [PATCH 0303/1018] BUG: Fix metadata not roundtripping when pickling datetime (#29555) * BUG: fix metadata not roundtripping for datetime pickles * STYLE: Remove unnecessary newline * REF: Simplify metadata handling in arraydescr_setstate function * TST: add test to ensure datetime dtype metadata round-trips on pickling * REF: clearer operation order * TST: add new regression test (gh-29555) * BUG: don't steal reference to Py_None * REF: move metadata variable declarations below for clarity --- numpy/_core/src/multiarray/descriptor.c | 44 ++++++++++++------------- numpy/_core/tests/test_datetime.py | 9 +++++ 2 files changed, 31 insertions(+), 22 deletions(-) diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index 135deee83c97..852a2c768948 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -2671,8 +2671,10 @@ _get_pickleabletype_from_datetime_metadata(PyArray_Descr *dtype) if (dtype->metadata != NULL) { Py_INCREF(dtype->metadata); PyTuple_SET_ITEM(ret, 0, dtype->metadata); - } else { - PyTuple_SET_ITEM(ret, 0, PyDict_New()); + } + else { + PyTuple_SET_ITEM(ret, 0, Py_None); + Py_INCREF(Py_None); } /* Convert the datetime metadata into a tuple */ @@ -3197,16 +3199,8 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) self->flags = _descr_find_object((PyArray_Descr *)self); } - /* - * We have a borrowed reference to metadata so no need - * to alter reference count when throwing away Py_None. - */ - if (metadata == Py_None) { - metadata = NULL; - } - - if (PyDataType_ISDATETIME(self) && (metadata != NULL)) { - PyObject *old_metadata; + PyObject *old_metadata, *new_metadata; + if (PyDataType_ISDATETIME(self)) { PyArray_DatetimeMetaData temp_dt_data; if ((! PyTuple_Check(metadata)) || (PyTuple_Size(metadata) != 2)) { @@ -3223,20 +3217,26 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) return NULL; } - old_metadata = self->metadata; - self->metadata = PyTuple_GET_ITEM(metadata, 0); + new_metadata = PyTuple_GET_ITEM(metadata, 0); memcpy((char *) &((PyArray_DatetimeDTypeMetaData *)self->c_metadata)->meta, - (char *) &temp_dt_data, - sizeof(PyArray_DatetimeMetaData)); - Py_XINCREF(self->metadata); - Py_XDECREF(old_metadata); + (char *) &temp_dt_data, + sizeof(PyArray_DatetimeMetaData)); } else { - PyObject *old_metadata = self->metadata; - self->metadata = metadata; - Py_XINCREF(self->metadata); - Py_XDECREF(old_metadata); + new_metadata = metadata; + } + + old_metadata = self->metadata; + /* + * We have a borrowed reference to metadata so no need + * to alter reference count when throwing away Py_None. + */ + if (new_metadata == Py_None) { + new_metadata = NULL; } + self->metadata = new_metadata; + Py_XINCREF(new_metadata); + Py_XDECREF(old_metadata); Py_RETURN_NONE; } diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index 70b55c500f3d..21151ed11ba0 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -889,6 +889,15 @@ def test_pickle(self): b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb." 
assert_equal(pickle.loads(pkl), np.dtype('>M8[us]')) + def test_gh_29555(self): + # check that dtype metadata round-trips when none + dt = np.dtype('>M8[us]') + assert dt.metadata is None + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + res = pickle.loads(pickle.dumps(dt, protocol=proto)) + assert_equal(res, dt) + assert res.metadata is None + def test_setstate(self): "Verify that datetime dtype __setstate__ can handle bad arguments" dt = np.dtype('>M8[us]') From 8b89d0f127eb76c8f84c14b4814a18814f3bdec7 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 14 Aug 2025 23:07:19 +0100 Subject: [PATCH 0304/1018] maint: Use double quotes in .pyi files (#29548) --- numpy/__init__.pyi | 180 +++++++++--------- numpy/_array_api_info.pyi | 2 +- numpy/_core/numerictypes.pyi | 18 +- numpy/ctypeslib/_ctypeslib.pyi | 12 +- numpy/dtypes.pyi | 66 +++---- numpy/lib/_function_base_impl.pyi | 2 +- numpy/lib/_type_check_impl.pyi | 44 ++--- numpy/ma/core.pyi | 22 +-- .../tests/data/fail/array_constructors.pyi | 10 +- numpy/typing/tests/data/fail/flatiter.pyi | 2 +- numpy/typing/tests/data/fail/fromnumeric.pyi | 4 +- numpy/typing/tests/data/fail/ma.pyi | 50 ++--- numpy/typing/tests/data/fail/scalars.pyi | 10 +- .../tests/data/reveal/array_constructors.pyi | 18 +- numpy/typing/tests/data/reveal/arrayprint.pyi | 2 +- numpy/typing/tests/data/reveal/getlimits.pyi | 4 +- .../tests/data/reveal/lib_function_base.pyi | 4 +- numpy/typing/tests/data/reveal/ma.pyi | 28 +-- .../typing/tests/data/reveal/numerictypes.pyi | 2 +- .../tests/data/reveal/polynomial_polybase.pyi | 14 +- numpy/typing/tests/data/reveal/scalars.pyi | 2 +- numpy/typing/tests/data/reveal/testing.pyi | 2 +- numpy/version.pyi | 12 +- ruff.toml | 3 + 24 files changed, 258 insertions(+), 255 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 643327f29d1b..bf9ed8c9b802 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -4841,96 +4841,96 @@ class ufunc: ) -> tuple[dtype, ...]: ... 
# Parameters: `__name__`, `ntypes` and `identity` -absolute: _UFunc_Nin1_Nout1[L['absolute'], L[20], None] -add: _UFunc_Nin2_Nout1[L['add'], L[22], L[0]] -arccos: _UFunc_Nin1_Nout1[L['arccos'], L[8], None] -arccosh: _UFunc_Nin1_Nout1[L['arccosh'], L[8], None] -arcsin: _UFunc_Nin1_Nout1[L['arcsin'], L[8], None] -arcsinh: _UFunc_Nin1_Nout1[L['arcsinh'], L[8], None] -arctan2: _UFunc_Nin2_Nout1[L['arctan2'], L[5], None] -arctan: _UFunc_Nin1_Nout1[L['arctan'], L[8], None] -arctanh: _UFunc_Nin1_Nout1[L['arctanh'], L[8], None] -bitwise_and: _UFunc_Nin2_Nout1[L['bitwise_and'], L[12], L[-1]] -bitwise_count: _UFunc_Nin1_Nout1[L['bitwise_count'], L[11], None] -bitwise_or: _UFunc_Nin2_Nout1[L['bitwise_or'], L[12], L[0]] -bitwise_xor: _UFunc_Nin2_Nout1[L['bitwise_xor'], L[12], L[0]] -cbrt: _UFunc_Nin1_Nout1[L['cbrt'], L[5], None] -ceil: _UFunc_Nin1_Nout1[L['ceil'], L[7], None] -conjugate: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None] -copysign: _UFunc_Nin2_Nout1[L['copysign'], L[4], None] -cos: _UFunc_Nin1_Nout1[L['cos'], L[9], None] -cosh: _UFunc_Nin1_Nout1[L['cosh'], L[8], None] -deg2rad: _UFunc_Nin1_Nout1[L['deg2rad'], L[5], None] -degrees: _UFunc_Nin1_Nout1[L['degrees'], L[5], None] -divide: _UFunc_Nin2_Nout1[L['divide'], L[11], None] -divmod: _UFunc_Nin2_Nout2[L['divmod'], L[15], None] -equal: _UFunc_Nin2_Nout1[L['equal'], L[23], None] -exp2: _UFunc_Nin1_Nout1[L['exp2'], L[8], None] -exp: _UFunc_Nin1_Nout1[L['exp'], L[10], None] -expm1: _UFunc_Nin1_Nout1[L['expm1'], L[8], None] -fabs: _UFunc_Nin1_Nout1[L['fabs'], L[5], None] -float_power: _UFunc_Nin2_Nout1[L['float_power'], L[4], None] -floor: _UFunc_Nin1_Nout1[L['floor'], L[7], None] -floor_divide: _UFunc_Nin2_Nout1[L['floor_divide'], L[21], None] -fmax: _UFunc_Nin2_Nout1[L['fmax'], L[21], None] -fmin: _UFunc_Nin2_Nout1[L['fmin'], L[21], None] -fmod: _UFunc_Nin2_Nout1[L['fmod'], L[15], None] -frexp: _UFunc_Nin1_Nout2[L['frexp'], L[4], None] -gcd: _UFunc_Nin2_Nout1[L['gcd'], L[11], L[0]] -greater: _UFunc_Nin2_Nout1[L['greater'], L[23], None] -greater_equal: _UFunc_Nin2_Nout1[L['greater_equal'], L[23], None] -heaviside: _UFunc_Nin2_Nout1[L['heaviside'], L[4], None] -hypot: _UFunc_Nin2_Nout1[L['hypot'], L[5], L[0]] -invert: _UFunc_Nin1_Nout1[L['invert'], L[12], None] -isfinite: _UFunc_Nin1_Nout1[L['isfinite'], L[20], None] -isinf: _UFunc_Nin1_Nout1[L['isinf'], L[20], None] -isnan: _UFunc_Nin1_Nout1[L['isnan'], L[20], None] -isnat: _UFunc_Nin1_Nout1[L['isnat'], L[2], None] -lcm: _UFunc_Nin2_Nout1[L['lcm'], L[11], None] -ldexp: _UFunc_Nin2_Nout1[L['ldexp'], L[8], None] -left_shift: _UFunc_Nin2_Nout1[L['left_shift'], L[11], None] -less: _UFunc_Nin2_Nout1[L['less'], L[23], None] -less_equal: _UFunc_Nin2_Nout1[L['less_equal'], L[23], None] -log10: _UFunc_Nin1_Nout1[L['log10'], L[8], None] -log1p: _UFunc_Nin1_Nout1[L['log1p'], L[8], None] -log2: _UFunc_Nin1_Nout1[L['log2'], L[8], None] -log: _UFunc_Nin1_Nout1[L['log'], L[10], None] -logaddexp2: _UFunc_Nin2_Nout1[L['logaddexp2'], L[4], float] -logaddexp: _UFunc_Nin2_Nout1[L['logaddexp'], L[4], float] -logical_and: _UFunc_Nin2_Nout1[L['logical_and'], L[20], L[True]] -logical_not: _UFunc_Nin1_Nout1[L['logical_not'], L[20], None] -logical_or: _UFunc_Nin2_Nout1[L['logical_or'], L[20], L[False]] -logical_xor: _UFunc_Nin2_Nout1[L['logical_xor'], L[19], L[False]] -matmul: _GUFunc_Nin2_Nout1[L['matmul'], L[19], None, L["(n?,k),(k,m?)->(n?,m?)"]] -matvec: _GUFunc_Nin2_Nout1[L['matvec'], L[19], None, L["(m,n),(n)->(m)"]] -maximum: _UFunc_Nin2_Nout1[L['maximum'], L[21], None] -minimum: 
_UFunc_Nin2_Nout1[L['minimum'], L[21], None] -modf: _UFunc_Nin1_Nout2[L['modf'], L[4], None] -multiply: _UFunc_Nin2_Nout1[L['multiply'], L[23], L[1]] -negative: _UFunc_Nin1_Nout1[L['negative'], L[19], None] -nextafter: _UFunc_Nin2_Nout1[L['nextafter'], L[4], None] -not_equal: _UFunc_Nin2_Nout1[L['not_equal'], L[23], None] -positive: _UFunc_Nin1_Nout1[L['positive'], L[19], None] -power: _UFunc_Nin2_Nout1[L['power'], L[18], None] -rad2deg: _UFunc_Nin1_Nout1[L['rad2deg'], L[5], None] -radians: _UFunc_Nin1_Nout1[L['radians'], L[5], None] -reciprocal: _UFunc_Nin1_Nout1[L['reciprocal'], L[18], None] -remainder: _UFunc_Nin2_Nout1[L['remainder'], L[16], None] -right_shift: _UFunc_Nin2_Nout1[L['right_shift'], L[11], None] -rint: _UFunc_Nin1_Nout1[L['rint'], L[10], None] -sign: _UFunc_Nin1_Nout1[L['sign'], L[19], None] -signbit: _UFunc_Nin1_Nout1[L['signbit'], L[4], None] -sin: _UFunc_Nin1_Nout1[L['sin'], L[9], None] -sinh: _UFunc_Nin1_Nout1[L['sinh'], L[8], None] -spacing: _UFunc_Nin1_Nout1[L['spacing'], L[4], None] -sqrt: _UFunc_Nin1_Nout1[L['sqrt'], L[10], None] -square: _UFunc_Nin1_Nout1[L['square'], L[18], None] -subtract: _UFunc_Nin2_Nout1[L['subtract'], L[21], None] -tan: _UFunc_Nin1_Nout1[L['tan'], L[8], None] -tanh: _UFunc_Nin1_Nout1[L['tanh'], L[8], None] -trunc: _UFunc_Nin1_Nout1[L['trunc'], L[7], None] -vecdot: _GUFunc_Nin2_Nout1[L['vecdot'], L[19], None, L["(n),(n)->()"]] -vecmat: _GUFunc_Nin2_Nout1[L['vecmat'], L[19], None, L["(n),(n,m)->(m)"]] +absolute: _UFunc_Nin1_Nout1[L["absolute"], L[20], None] +add: _UFunc_Nin2_Nout1[L["add"], L[22], L[0]] +arccos: _UFunc_Nin1_Nout1[L["arccos"], L[8], None] +arccosh: _UFunc_Nin1_Nout1[L["arccosh"], L[8], None] +arcsin: _UFunc_Nin1_Nout1[L["arcsin"], L[8], None] +arcsinh: _UFunc_Nin1_Nout1[L["arcsinh"], L[8], None] +arctan2: _UFunc_Nin2_Nout1[L["arctan2"], L[5], None] +arctan: _UFunc_Nin1_Nout1[L["arctan"], L[8], None] +arctanh: _UFunc_Nin1_Nout1[L["arctanh"], L[8], None] +bitwise_and: _UFunc_Nin2_Nout1[L["bitwise_and"], L[12], L[-1]] +bitwise_count: _UFunc_Nin1_Nout1[L["bitwise_count"], L[11], None] +bitwise_or: _UFunc_Nin2_Nout1[L["bitwise_or"], L[12], L[0]] +bitwise_xor: _UFunc_Nin2_Nout1[L["bitwise_xor"], L[12], L[0]] +cbrt: _UFunc_Nin1_Nout1[L["cbrt"], L[5], None] +ceil: _UFunc_Nin1_Nout1[L["ceil"], L[7], None] +conjugate: _UFunc_Nin1_Nout1[L["conjugate"], L[18], None] +copysign: _UFunc_Nin2_Nout1[L["copysign"], L[4], None] +cos: _UFunc_Nin1_Nout1[L["cos"], L[9], None] +cosh: _UFunc_Nin1_Nout1[L["cosh"], L[8], None] +deg2rad: _UFunc_Nin1_Nout1[L["deg2rad"], L[5], None] +degrees: _UFunc_Nin1_Nout1[L["degrees"], L[5], None] +divide: _UFunc_Nin2_Nout1[L["divide"], L[11], None] +divmod: _UFunc_Nin2_Nout2[L["divmod"], L[15], None] +equal: _UFunc_Nin2_Nout1[L["equal"], L[23], None] +exp2: _UFunc_Nin1_Nout1[L["exp2"], L[8], None] +exp: _UFunc_Nin1_Nout1[L["exp"], L[10], None] +expm1: _UFunc_Nin1_Nout1[L["expm1"], L[8], None] +fabs: _UFunc_Nin1_Nout1[L["fabs"], L[5], None] +float_power: _UFunc_Nin2_Nout1[L["float_power"], L[4], None] +floor: _UFunc_Nin1_Nout1[L["floor"], L[7], None] +floor_divide: _UFunc_Nin2_Nout1[L["floor_divide"], L[21], None] +fmax: _UFunc_Nin2_Nout1[L["fmax"], L[21], None] +fmin: _UFunc_Nin2_Nout1[L["fmin"], L[21], None] +fmod: _UFunc_Nin2_Nout1[L["fmod"], L[15], None] +frexp: _UFunc_Nin1_Nout2[L["frexp"], L[4], None] +gcd: _UFunc_Nin2_Nout1[L["gcd"], L[11], L[0]] +greater: _UFunc_Nin2_Nout1[L["greater"], L[23], None] +greater_equal: _UFunc_Nin2_Nout1[L["greater_equal"], L[23], None] +heaviside: 
_UFunc_Nin2_Nout1[L["heaviside"], L[4], None] +hypot: _UFunc_Nin2_Nout1[L["hypot"], L[5], L[0]] +invert: _UFunc_Nin1_Nout1[L["invert"], L[12], None] +isfinite: _UFunc_Nin1_Nout1[L["isfinite"], L[20], None] +isinf: _UFunc_Nin1_Nout1[L["isinf"], L[20], None] +isnan: _UFunc_Nin1_Nout1[L["isnan"], L[20], None] +isnat: _UFunc_Nin1_Nout1[L["isnat"], L[2], None] +lcm: _UFunc_Nin2_Nout1[L["lcm"], L[11], None] +ldexp: _UFunc_Nin2_Nout1[L["ldexp"], L[8], None] +left_shift: _UFunc_Nin2_Nout1[L["left_shift"], L[11], None] +less: _UFunc_Nin2_Nout1[L["less"], L[23], None] +less_equal: _UFunc_Nin2_Nout1[L["less_equal"], L[23], None] +log10: _UFunc_Nin1_Nout1[L["log10"], L[8], None] +log1p: _UFunc_Nin1_Nout1[L["log1p"], L[8], None] +log2: _UFunc_Nin1_Nout1[L["log2"], L[8], None] +log: _UFunc_Nin1_Nout1[L["log"], L[10], None] +logaddexp2: _UFunc_Nin2_Nout1[L["logaddexp2"], L[4], float] +logaddexp: _UFunc_Nin2_Nout1[L["logaddexp"], L[4], float] +logical_and: _UFunc_Nin2_Nout1[L["logical_and"], L[20], L[True]] +logical_not: _UFunc_Nin1_Nout1[L["logical_not"], L[20], None] +logical_or: _UFunc_Nin2_Nout1[L["logical_or"], L[20], L[False]] +logical_xor: _UFunc_Nin2_Nout1[L["logical_xor"], L[19], L[False]] +matmul: _GUFunc_Nin2_Nout1[L["matmul"], L[19], None, L["(n?,k),(k,m?)->(n?,m?)"]] +matvec: _GUFunc_Nin2_Nout1[L["matvec"], L[19], None, L["(m,n),(n)->(m)"]] +maximum: _UFunc_Nin2_Nout1[L["maximum"], L[21], None] +minimum: _UFunc_Nin2_Nout1[L["minimum"], L[21], None] +modf: _UFunc_Nin1_Nout2[L["modf"], L[4], None] +multiply: _UFunc_Nin2_Nout1[L["multiply"], L[23], L[1]] +negative: _UFunc_Nin1_Nout1[L["negative"], L[19], None] +nextafter: _UFunc_Nin2_Nout1[L["nextafter"], L[4], None] +not_equal: _UFunc_Nin2_Nout1[L["not_equal"], L[23], None] +positive: _UFunc_Nin1_Nout1[L["positive"], L[19], None] +power: _UFunc_Nin2_Nout1[L["power"], L[18], None] +rad2deg: _UFunc_Nin1_Nout1[L["rad2deg"], L[5], None] +radians: _UFunc_Nin1_Nout1[L["radians"], L[5], None] +reciprocal: _UFunc_Nin1_Nout1[L["reciprocal"], L[18], None] +remainder: _UFunc_Nin2_Nout1[L["remainder"], L[16], None] +right_shift: _UFunc_Nin2_Nout1[L["right_shift"], L[11], None] +rint: _UFunc_Nin1_Nout1[L["rint"], L[10], None] +sign: _UFunc_Nin1_Nout1[L["sign"], L[19], None] +signbit: _UFunc_Nin1_Nout1[L["signbit"], L[4], None] +sin: _UFunc_Nin1_Nout1[L["sin"], L[9], None] +sinh: _UFunc_Nin1_Nout1[L["sinh"], L[8], None] +spacing: _UFunc_Nin1_Nout1[L["spacing"], L[4], None] +sqrt: _UFunc_Nin1_Nout1[L["sqrt"], L[10], None] +square: _UFunc_Nin1_Nout1[L["square"], L[18], None] +subtract: _UFunc_Nin2_Nout1[L["subtract"], L[21], None] +tan: _UFunc_Nin1_Nout1[L["tan"], L[8], None] +tanh: _UFunc_Nin1_Nout1[L["tanh"], L[8], None] +trunc: _UFunc_Nin1_Nout1[L["trunc"], L[7], None] +vecdot: _GUFunc_Nin2_Nout1[L["vecdot"], L[19], None, L["(n),(n)->()"]] +vecmat: _GUFunc_Nin2_Nout1[L["vecmat"], L[19], None, L["(n),(n,m)->(m)"]] abs = absolute acos = arccos diff --git a/numpy/_array_api_info.pyi b/numpy/_array_api_info.pyi index ee9f8a5660c3..b4592ba2c2ee 100644 --- a/numpy/_array_api_info.pyi +++ b/numpy/_array_api_info.pyi @@ -118,7 +118,7 @@ _EmptyDict: TypeAlias = dict[Never, Never] @final class __array_namespace_info__: - __module__: ClassVar[Literal['numpy']] + __module__: ClassVar[Literal["numpy"]] def capabilities(self) -> _Capabilities: ... def default_device(self) -> _Device: ... 
diff --git a/numpy/_core/numerictypes.pyi b/numpy/_core/numerictypes.pyi index 4f810da6904b..26ed4b9d9524 100644 --- a/numpy/_core/numerictypes.pyi +++ b/numpy/_core/numerictypes.pyi @@ -144,15 +144,15 @@ __all__ = [ @type_check_only class _TypeCodes(TypedDict): - Character: L['c'] - Integer: L['bhilqnp'] - UnsignedInteger: L['BHILQNP'] - Float: L['efdg'] - Complex: L['FDG'] - AllInteger: L['bBhHiIlLqQnNpP'] - AllFloat: L['efdgFDG'] - Datetime: L['Mm'] - All: L['?bhilqnpBHILQNPefdgFDGSUVOMm'] + Character: L["c"] + Integer: L["bhilqnp"] + UnsignedInteger: L["BHILQNP"] + Float: L["efdg"] + Complex: L["FDG"] + AllInteger: L["bBhHiIlLqQnNpP"] + AllFloat: L["efdgFDG"] + Datetime: L["Mm"] + All: L["?bhilqnpBHILQNPefdgFDGSUVOMm"] def isdtype(dtype: dtype | type, kind: DTypeLike | tuple[DTypeLike, ...]) -> py_bool: ... def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> py_bool: ... diff --git a/numpy/ctypeslib/_ctypeslib.pyi b/numpy/ctypeslib/_ctypeslib.pyi index 996b2c0be388..e10e9fb51ab7 100644 --- a/numpy/ctypeslib/_ctypeslib.pyi +++ b/numpy/ctypeslib/_ctypeslib.pyi @@ -60,12 +60,12 @@ _DTypeOptionalT = TypeVar("_DTypeOptionalT", bound=dtype | None) _ScalarT = TypeVar("_ScalarT", bound=generic) _FlagsKind: TypeAlias = L[ - 'C_CONTIGUOUS', 'CONTIGUOUS', 'C', - 'F_CONTIGUOUS', 'FORTRAN', 'F', - 'ALIGNED', 'A', - 'WRITEABLE', 'W', - 'OWNDATA', 'O', - 'WRITEBACKIFCOPY', 'X', + "C_CONTIGUOUS", "CONTIGUOUS", "C", + "F_CONTIGUOUS", "FORTRAN", "F", + "ALIGNED", "A", + "WRITEABLE", "W", + "OWNDATA", "O", + "WRITEBACKIFCOPY", "X", ] # TODO: Add a shape typevar once we have variadic typevars (PEP 646) diff --git a/numpy/dtypes.pyi b/numpy/dtypes.pyi index 36844a90d31f..3e34113edd4f 100644 --- a/numpy/dtypes.pyi +++ b/numpy/dtypes.pyi @@ -17,39 +17,39 @@ from typing_extensions import TypeVar import numpy as np __all__ = [ # noqa: RUF022 - 'BoolDType', - 'Int8DType', - 'ByteDType', - 'UInt8DType', - 'UByteDType', - 'Int16DType', - 'ShortDType', - 'UInt16DType', - 'UShortDType', - 'Int32DType', - 'IntDType', - 'UInt32DType', - 'UIntDType', - 'Int64DType', - 'LongDType', - 'UInt64DType', - 'ULongDType', - 'LongLongDType', - 'ULongLongDType', - 'Float16DType', - 'Float32DType', - 'Float64DType', - 'LongDoubleDType', - 'Complex64DType', - 'Complex128DType', - 'CLongDoubleDType', - 'ObjectDType', - 'BytesDType', - 'StrDType', - 'VoidDType', - 'DateTime64DType', - 'TimeDelta64DType', - 'StringDType', + "BoolDType", + "Int8DType", + "ByteDType", + "UInt8DType", + "UByteDType", + "Int16DType", + "ShortDType", + "UInt16DType", + "UShortDType", + "Int32DType", + "IntDType", + "UInt32DType", + "UIntDType", + "Int64DType", + "LongDType", + "UInt64DType", + "ULongDType", + "LongLongDType", + "ULongLongDType", + "Float16DType", + "Float32DType", + "Float64DType", + "LongDoubleDType", + "Complex64DType", + "Complex128DType", + "CLongDoubleDType", + "ObjectDType", + "BytesDType", + "StrDType", + "VoidDType", + "DateTime64DType", + "TimeDelta64DType", + "StringDType", ] # Helper base classes (typing-only) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 14e48f1cc3fd..41d6204cd684 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -107,7 +107,7 @@ _ScalarT2 = TypeVar("_ScalarT2", bound=generic) _ArrayT = TypeVar("_ArrayT", bound=np.ndarray) _2Tuple: TypeAlias = tuple[_T, _T] -_MeshgridIdx: TypeAlias = L['ij', 'xy'] +_MeshgridIdx: TypeAlias = L["ij", "xy"] @type_check_only class _TrimZerosSequence(Protocol[_T_co]): diff --git 
a/numpy/lib/_type_check_impl.pyi b/numpy/lib/_type_check_impl.pyi index 5e98ad22ca8b..8b665cd9a400 100644 --- a/numpy/lib/_type_check_impl.pyi +++ b/numpy/lib/_type_check_impl.pyi @@ -154,49 +154,49 @@ def real_if_close(a: ArrayLike, tol: float = 100) -> NDArray[Any]: ... # @overload -def typename(char: L['S1']) -> L['character']: ... +def typename(char: L["S1"]) -> L["character"]: ... @overload -def typename(char: L['?']) -> L['bool']: ... +def typename(char: L["?"]) -> L["bool"]: ... @overload -def typename(char: L['b']) -> L['signed char']: ... +def typename(char: L["b"]) -> L["signed char"]: ... @overload -def typename(char: L['B']) -> L['unsigned char']: ... +def typename(char: L["B"]) -> L["unsigned char"]: ... @overload -def typename(char: L['h']) -> L['short']: ... +def typename(char: L["h"]) -> L["short"]: ... @overload -def typename(char: L['H']) -> L['unsigned short']: ... +def typename(char: L["H"]) -> L["unsigned short"]: ... @overload -def typename(char: L['i']) -> L['integer']: ... +def typename(char: L["i"]) -> L["integer"]: ... @overload -def typename(char: L['I']) -> L['unsigned integer']: ... +def typename(char: L["I"]) -> L["unsigned integer"]: ... @overload -def typename(char: L['l']) -> L['long integer']: ... +def typename(char: L["l"]) -> L["long integer"]: ... @overload -def typename(char: L['L']) -> L['unsigned long integer']: ... +def typename(char: L["L"]) -> L["unsigned long integer"]: ... @overload -def typename(char: L['q']) -> L['long long integer']: ... +def typename(char: L["q"]) -> L["long long integer"]: ... @overload -def typename(char: L['Q']) -> L['unsigned long long integer']: ... +def typename(char: L["Q"]) -> L["unsigned long long integer"]: ... @overload -def typename(char: L['f']) -> L['single precision']: ... +def typename(char: L["f"]) -> L["single precision"]: ... @overload -def typename(char: L['d']) -> L['double precision']: ... +def typename(char: L["d"]) -> L["double precision"]: ... @overload -def typename(char: L['g']) -> L['long precision']: ... +def typename(char: L["g"]) -> L["long precision"]: ... @overload -def typename(char: L['F']) -> L['complex single precision']: ... +def typename(char: L["F"]) -> L["complex single precision"]: ... @overload -def typename(char: L['D']) -> L['complex double precision']: ... +def typename(char: L["D"]) -> L["complex double precision"]: ... @overload -def typename(char: L['G']) -> L['complex long double precision']: ... +def typename(char: L["G"]) -> L["complex long double precision"]: ... @overload -def typename(char: L['S']) -> L['string']: ... +def typename(char: L["S"]) -> L["string"]: ... @overload -def typename(char: L['U']) -> L['unicode']: ... +def typename(char: L["U"]) -> L["unicode"]: ... @overload -def typename(char: L['V']) -> L['void']: ... +def typename(char: L["V"]) -> L["void"]: ... @overload -def typename(char: L['O']) -> L['object']: ... +def typename(char: L["O"]) -> L["object"]: ... # NOTE: The [overload-overlap] mypy errors are false positives @overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 55e5867247bb..a033e735d3f5 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1958,7 +1958,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): indices: _IntLike_co, axis: None = None, out: None = None, - mode: _ModeKind = 'raise' + mode: _ModeKind = "raise" ) -> _ScalarT: ... 
@overload def take( @@ -1966,7 +1966,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None, - mode: _ModeKind = 'raise', + mode: _ModeKind = "raise", ) -> _MaskedArray[_ScalarT]: ... @overload def take( @@ -1974,7 +1974,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): indices: _ArrayLikeInt_co, axis: SupportsIndex | None, out: _ArrayT, - mode: _ModeKind = 'raise', + mode: _ModeKind = "raise", ) -> _ArrayT: ... @overload def take( @@ -1983,7 +1983,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): axis: SupportsIndex | None = None, *, out: _ArrayT, - mode: _ModeKind = 'raise', + mode: _ModeKind = "raise", ) -> _ArrayT: ... copy: Any @@ -2326,7 +2326,7 @@ def take( indices: _IntLike_co, axis: None = None, out: None = None, - mode: _ModeKind = 'raise' + mode: _ModeKind = "raise" ) -> _ScalarT: ... @overload def take( @@ -2334,7 +2334,7 @@ def take( indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None, - mode: _ModeKind = 'raise', + mode: _ModeKind = "raise", ) -> _MaskedArray[_ScalarT]: ... @overload def take( @@ -2342,7 +2342,7 @@ def take( indices: _IntLike_co, axis: SupportsIndex | None = None, out: None = None, - mode: _ModeKind = 'raise', + mode: _ModeKind = "raise", ) -> Any: ... @overload def take( @@ -2350,7 +2350,7 @@ def take( indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None, - mode: _ModeKind = 'raise', + mode: _ModeKind = "raise", ) -> _MaskedArray[Any]: ... @overload def take( @@ -2358,7 +2358,7 @@ def take( indices: _ArrayLikeInt_co, axis: SupportsIndex | None, out: _ArrayT, - mode: _ModeKind = 'raise', + mode: _ModeKind = "raise", ) -> _ArrayT: ... @overload def take( @@ -2367,7 +2367,7 @@ def take( axis: SupportsIndex | None = None, *, out: _ArrayT, - mode: _ModeKind = 'raise', + mode: _ModeKind = "raise", ) -> _ArrayT: ... def power(a, b, third=...): ... @@ -2402,7 +2402,7 @@ def concatenate(arrays, axis=...): ... def diag(v, k=...): ... def left_shift(a, n): ... def right_shift(a, n): ... -def put(a: NDArray[Any], indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = 'raise') -> None: ... +def put(a: NDArray[Any], indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... def putmask(a: NDArray[Any], mask: _ArrayLikeBool_co, values: ArrayLike) -> None: ... def transpose(a, axes=...): ... def reshape(a, new_shape, order=...): ... 
diff --git a/numpy/typing/tests/data/fail/array_constructors.pyi b/numpy/typing/tests/data/fail/array_constructors.pyi index cadc2ae595e7..6ed619958c1c 100644 --- a/numpy/typing/tests/data/fail/array_constructors.pyi +++ b/numpy/typing/tests/data/fail/array_constructors.pyi @@ -15,17 +15,17 @@ np.ones() # type: ignore[call-overload] np.array(0, float, True) # type: ignore[call-overload] -np.linspace(None, 'bob') # type: ignore[call-overload] +np.linspace(None, "bob") # type: ignore[call-overload] np.linspace(0, 2, num=10.0) # type: ignore[call-overload] -np.linspace(0, 2, endpoint='True') # type: ignore[call-overload] -np.linspace(0, 2, retstep=b'False') # type: ignore[call-overload] +np.linspace(0, 2, endpoint="True") # type: ignore[call-overload] +np.linspace(0, 2, retstep=b"False") # type: ignore[call-overload] np.linspace(0, 2, dtype=0) # type: ignore[call-overload] np.linspace(0, 2, axis=None) # type: ignore[call-overload] -np.logspace(None, 'bob') # type: ignore[call-overload] +np.logspace(None, "bob") # type: ignore[call-overload] np.logspace(0, 2, base=None) # type: ignore[call-overload] -np.geomspace(None, 'bob') # type: ignore[call-overload] +np.geomspace(None, "bob") # type: ignore[call-overload] np.stack(generator) # type: ignore[call-overload] np.hstack({1, 2}) # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/fail/flatiter.pyi b/numpy/typing/tests/data/fail/flatiter.pyi index cdf2a8b78b7b..be63a082535d 100644 --- a/numpy/typing/tests/data/fail/flatiter.pyi +++ b/numpy/typing/tests/data/fail/flatiter.pyi @@ -10,7 +10,7 @@ supports_array: npt._SupportsArray[np.dtype[np.float64]] a.base = object() # type: ignore[assignment, misc] a.coords = object() # type: ignore[assignment, misc] a.index = object() # type: ignore[assignment, misc] -a.copy(order='C') # type: ignore[call-arg] +a.copy(order="C") # type: ignore[call-arg] # NOTE: Contrary to `ndarray.__getitem__` its counterpart in `flatiter` # does not accept objects with the `__array__` or `__index__` protocols; diff --git a/numpy/typing/tests/data/fail/fromnumeric.pyi b/numpy/typing/tests/data/fail/fromnumeric.pyi index d0a81f0bfbfe..92b0cb366207 100644 --- a/numpy/typing/tests/data/fail/fromnumeric.pyi +++ b/numpy/typing/tests/data/fail/fromnumeric.pyi @@ -137,12 +137,12 @@ np.mean(AR_M) # type: ignore[arg-type] np.std(a, axis=1.0) # type: ignore[call-overload] np.std(a, out=False) # type: ignore[call-overload] -np.std(a, ddof='test') # type: ignore[call-overload] +np.std(a, ddof="test") # type: ignore[call-overload] np.std(a, keepdims=1.0) # type: ignore[call-overload] np.std(AR_U) # type: ignore[arg-type] np.var(a, axis=1.0) # type: ignore[call-overload] np.var(a, out=False) # type: ignore[call-overload] -np.var(a, ddof='test') # type: ignore[call-overload] +np.var(a, ddof="test") # type: ignore[call-overload] np.var(a, keepdims=1.0) # type: ignore[call-overload] np.var(AR_U) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/ma.pyi b/numpy/typing/tests/data/fail/ma.pyi index b5921660d617..084ae971bdd0 100644 --- a/numpy/typing/tests/data/fail/ma.pyi +++ b/numpy/typing/tests/data/fail/ma.pyi @@ -84,11 +84,11 @@ MAR_1d_f8.any(out=1.0) # type: ignore[call-overload] MAR_1d_f8.sort(axis=(0, 1)) # type: ignore[arg-type] MAR_1d_f8.sort(axis=None) # type: ignore[arg-type] -MAR_1d_f8.sort(kind='cabbage') # type: ignore[arg-type] -MAR_1d_f8.sort(order=lambda: 'cabbage') # type: ignore[arg-type] -MAR_1d_f8.sort(endwith='cabbage') # type: ignore[arg-type] -MAR_1d_f8.sort(fill_value=lambda: 'cabbage') # 
type: ignore[arg-type] -MAR_1d_f8.sort(stable='cabbage') # type: ignore[arg-type] +MAR_1d_f8.sort(kind="cabbage") # type: ignore[arg-type] +MAR_1d_f8.sort(order=lambda: "cabbage") # type: ignore[arg-type] +MAR_1d_f8.sort(endwith="cabbage") # type: ignore[arg-type] +MAR_1d_f8.sort(fill_value=lambda: "cabbage") # type: ignore[arg-type] +MAR_1d_f8.sort(stable="cabbage") # type: ignore[arg-type] MAR_1d_f8.sort(stable=True) # type: ignore[arg-type] MAR_1d_f8.take(axis=1.0) # type: ignore[call-overload] @@ -100,46 +100,46 @@ np.ma.take(axis=1.0) # type: ignore[call-overload] np.ma.take(out=1) # type: ignore[call-overload] np.ma.take(mode="bob") # type: ignore[call-overload] -MAR_1d_f8.partition(['cabbage']) # type: ignore[arg-type] +MAR_1d_f8.partition(["cabbage"]) # type: ignore[arg-type] MAR_1d_f8.partition(axis=(0, 1)) # type: ignore[arg-type, call-arg] -MAR_1d_f8.partition(kind='cabbage') # type: ignore[arg-type, call-arg] -MAR_1d_f8.partition(order=lambda: 'cabbage') # type: ignore[arg-type, call-arg] +MAR_1d_f8.partition(kind="cabbage") # type: ignore[arg-type, call-arg] +MAR_1d_f8.partition(order=lambda: "cabbage") # type: ignore[arg-type, call-arg] MAR_1d_f8.partition(AR_b) # type: ignore[arg-type] -MAR_1d_f8.argpartition(['cabbage']) # type: ignore[arg-type] +MAR_1d_f8.argpartition(["cabbage"]) # type: ignore[arg-type] MAR_1d_f8.argpartition(axis=(0, 1)) # type: ignore[arg-type, call-arg] -MAR_1d_f8.argpartition(kind='cabbage') # type: ignore[arg-type, call-arg] -MAR_1d_f8.argpartition(order=lambda: 'cabbage') # type: ignore[arg-type, call-arg] +MAR_1d_f8.argpartition(kind="cabbage") # type: ignore[arg-type, call-arg] +MAR_1d_f8.argpartition(order=lambda: "cabbage") # type: ignore[arg-type, call-arg] MAR_1d_f8.argpartition(AR_b) # type: ignore[arg-type] -np.ma.ndim(lambda: 'lambda') # type: ignore[arg-type] +np.ma.ndim(lambda: "lambda") # type: ignore[arg-type] -np.ma.size(AR_b, axis='0') # type: ignore[arg-type] +np.ma.size(AR_b, axis="0") # type: ignore[arg-type] -MAR_1d_f8 >= (lambda x: 'mango') # type: ignore[operator] -MAR_1d_f8 > (lambda x: 'mango') # type: ignore[operator] -MAR_1d_f8 <= (lambda x: 'mango') # type: ignore[operator] -MAR_1d_f8 < (lambda x: 'mango') # type: ignore[operator] +MAR_1d_f8 >= (lambda x: "mango") # type: ignore[operator] +MAR_1d_f8 > (lambda x: "mango") # type: ignore[operator] +MAR_1d_f8 <= (lambda x: "mango") # type: ignore[operator] +MAR_1d_f8 < (lambda x: "mango") # type: ignore[operator] MAR_1d_f8.count(axis=0.) # type: ignore[call-overload] np.ma.count(MAR_1d_f8, axis=0.) 
# type: ignore[call-overload] -MAR_1d_f8.put(4, 999, mode='flip') # type: ignore[arg-type] +MAR_1d_f8.put(4, 999, mode="flip") # type: ignore[arg-type] -np.ma.put(MAR_1d_f8, 4, 999, mode='flip') # type: ignore[arg-type] +np.ma.put(MAR_1d_f8, 4, 999, mode="flip") # type: ignore[arg-type] np.ma.put([1, 1, 3], 0, 999) # type: ignore[arg-type] -np.ma.compressed(lambda: 'compress me') # type: ignore[call-overload] +np.ma.compressed(lambda: "compress me") # type: ignore[call-overload] np.ma.allequal(MAR_1d_f8, [1, 2, 3], fill_value=1.5) # type: ignore[arg-type] np.ma.allclose(MAR_1d_f8, [1, 2, 3], masked_equal=4.5) # type: ignore[arg-type] -np.ma.allclose(MAR_1d_f8, [1, 2, 3], rtol='.4') # type: ignore[arg-type] -np.ma.allclose(MAR_1d_f8, [1, 2, 3], atol='.5') # type: ignore[arg-type] +np.ma.allclose(MAR_1d_f8, [1, 2, 3], rtol=".4") # type: ignore[arg-type] +np.ma.allclose(MAR_1d_f8, [1, 2, 3], atol=".5") # type: ignore[arg-type] -MAR_1d_f8.__setmask__('mask') # type: ignore[arg-type] +MAR_1d_f8.__setmask__("mask") # type: ignore[arg-type] MAR_b *= 2 # type: ignore[arg-type] MAR_c //= 2 # type: ignore[misc] @@ -149,7 +149,7 @@ MAR_1d_f8.swapaxes(axis1=1, axis2=0) # type: ignore[call-arg] MAR_1d_f8.argsort(axis=(1, 0)) # type: ignore[arg-type] -np.ma.MaskedArray(np.array([1, 2, 3]), keep_mask='yes') # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1, 2, 3]), keep_mask="yes") # type: ignore[call-overload] np.ma.MaskedArray(np.array([1, 2, 3]), subok=None) # type: ignore[call-overload] np.ma.MaskedArray(np.array([1, 2, 3]), ndim=None) # type: ignore[call-overload] -np.ma.MaskedArray(np.array([1, 2, 3]), order='Corinthian') # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1, 2, 3]), order="Corinthian") # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/fail/scalars.pyi b/numpy/typing/tests/data/fail/scalars.pyi index 0560a855a80f..018a88e652ae 100644 --- a/numpy/typing/tests/data/fail/scalars.pyi +++ b/numpy/typing/tests/data/fail/scalars.pyi @@ -57,15 +57,15 @@ np.float64(value=0.0) # type: ignore[call-arg] np.int64(value=0) # type: ignore[call-arg] np.uint64(value=0) # type: ignore[call-arg] np.complex128(value=0.0j) # type: ignore[call-overload] -np.str_(value='bob') # type: ignore[call-overload] -np.bytes_(value=b'test') # type: ignore[call-overload] -np.void(value=b'test') # type: ignore[call-overload] +np.str_(value="bob") # type: ignore[call-overload] +np.bytes_(value=b"test") # type: ignore[call-overload] +np.void(value=b"test") # type: ignore[call-overload] np.bool(value=True) # type: ignore[call-overload] np.datetime64(value="2019") # type: ignore[call-overload] np.timedelta64(value=0) # type: ignore[call-overload] -np.bytes_(b"hello", encoding='utf-8') # type: ignore[call-overload] -np.str_("hello", encoding='utf-8') # type: ignore[call-overload] +np.bytes_(b"hello", encoding="utf-8") # type: ignore[call-overload] +np.str_("hello", encoding="utf-8") # type: ignore[call-overload] f8.item(1) # type: ignore[call-overload] f8.item((0, 1)) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 99073ac4543a..e95e85093f6a 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -29,14 +29,14 @@ assert_type(np.empty_like(A), npt.NDArray[np.float64]) assert_type(np.empty_like(B), SubClass[np.float64]) assert_type(np.empty_like([1, 1.0]), npt.NDArray[Any]) assert_type(np.empty_like(A, 
dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.empty_like(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.empty_like(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.array(A), npt.NDArray[np.float64]) assert_type(np.array(B), npt.NDArray[np.float64]) assert_type(np.array([1, 1.0]), npt.NDArray[Any]) assert_type(np.array(deque([1, 2, 3])), npt.NDArray[Any]) assert_type(np.array(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.array(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.array(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.array(A, like=A), npt.NDArray[np.float64]) assert_type(np.array(A, subok=True), npt.NDArray[np.float64]) assert_type(np.array(B, subok=True), SubClass[np.float64]) @@ -49,12 +49,12 @@ assert_type(np.array([], dtype=np.bool), npt.NDArray[np.bool]) assert_type(np.zeros([1, 5, 6]), npt.NDArray[np.float64]) assert_type(np.zeros([1, 5, 6], dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.zeros([1, 5, 6], dtype='c16'), npt.NDArray[Any]) +assert_type(np.zeros([1, 5, 6], dtype="c16"), npt.NDArray[Any]) assert_type(np.zeros(mixed_shape), npt.NDArray[np.float64]) assert_type(np.empty([1, 5, 6]), npt.NDArray[np.float64]) assert_type(np.empty([1, 5, 6], dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.empty([1, 5, 6], dtype='c16'), npt.NDArray[Any]) +assert_type(np.empty([1, 5, 6], dtype="c16"), npt.NDArray[Any]) assert_type(np.empty(mixed_shape), npt.NDArray[np.float64]) assert_type(np.concatenate(A), npt.NDArray[np.float64]) @@ -65,32 +65,32 @@ assert_type(np.concatenate((A, A)), npt.NDArray[np.float64]) assert_type(np.concatenate(([1], [1])), npt.NDArray[Any]) assert_type(np.concatenate([1, 1.0]), npt.NDArray[Any]) assert_type(np.concatenate(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.concatenate(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.concatenate(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.concatenate([1, 1.0], out=A), npt.NDArray[np.float64]) assert_type(np.asarray(A), npt.NDArray[np.float64]) assert_type(np.asarray(B), npt.NDArray[np.float64]) assert_type(np.asarray([1, 1.0]), npt.NDArray[Any]) assert_type(np.asarray(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.asarray(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.asarray(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.asanyarray(A), npt.NDArray[np.float64]) assert_type(np.asanyarray(B), SubClass[np.float64]) assert_type(np.asanyarray([1, 1.0]), npt.NDArray[Any]) assert_type(np.asanyarray(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.asanyarray(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.asanyarray(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.ascontiguousarray(A), npt.NDArray[np.float64]) assert_type(np.ascontiguousarray(B), npt.NDArray[np.float64]) assert_type(np.ascontiguousarray([1, 1.0]), npt.NDArray[Any]) assert_type(np.ascontiguousarray(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.ascontiguousarray(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.ascontiguousarray(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.asfortranarray(A), npt.NDArray[np.float64]) assert_type(np.asfortranarray(B), npt.NDArray[np.float64]) assert_type(np.asfortranarray([1, 1.0]), npt.NDArray[Any]) assert_type(np.asfortranarray(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.asfortranarray(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.asfortranarray(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.fromstring("1 1 1", sep=" "), npt.NDArray[np.float64]) assert_type(np.fromstring(b"1 1 1", 
sep=" "), npt.NDArray[np.float64]) diff --git a/numpy/typing/tests/data/reveal/arrayprint.pyi b/numpy/typing/tests/data/reveal/arrayprint.pyi index de4077654bea..17e175edc2b7 100644 --- a/numpy/typing/tests/data/reveal/arrayprint.pyi +++ b/numpy/typing/tests/data/reveal/arrayprint.pyi @@ -12,7 +12,7 @@ func_int: Callable[[np.integer], str] assert_type(np.get_printoptions(), _FormatOptions) assert_type( - np.array2string(AR, formatter={'float_kind': func_float, 'int_kind': func_int}), + np.array2string(AR, formatter={"float_kind": func_float, "int_kind": func_int}), str, ) assert_type(np.format_float_scientific(1.0), str) diff --git a/numpy/typing/tests/data/reveal/getlimits.pyi b/numpy/typing/tests/data/reveal/getlimits.pyi index 825daba43064..3a1157121750 100644 --- a/numpy/typing/tests/data/reveal/getlimits.pyi +++ b/numpy/typing/tests/data/reveal/getlimits.pyi @@ -17,7 +17,7 @@ iinfo_i8: np.iinfo[np.int64] assert_type(np.finfo(f), np.finfo[np.float64]) assert_type(np.finfo(f8), np.finfo[np.floating[_64Bit]]) assert_type(np.finfo(c8), np.finfo[np.float32]) -assert_type(np.finfo('f2'), np.finfo[np.floating]) +assert_type(np.finfo("f2"), np.finfo[np.floating]) assert_type(finfo_f8.dtype, np.dtype[np.float64]) assert_type(finfo_f8.bits, int) @@ -41,7 +41,7 @@ assert_type(finfo_f8.smallest_subnormal, np.float64) assert_type(np.iinfo(i), np.iinfo[np.int_]) assert_type(np.iinfo(i8), np.iinfo[np.int64]) assert_type(np.iinfo(u4), np.iinfo[np.uint32]) -assert_type(np.iinfo('i2'), np.iinfo[Any]) +assert_type(np.iinfo("i2"), np.iinfo[Any]) assert_type(iinfo_i8.dtype, np.dtype[np.int64]) assert_type(iinfo_i8.kind, LiteralString) diff --git a/numpy/typing/tests/data/reveal/lib_function_base.pyi b/numpy/typing/tests/data/reveal/lib_function_base.pyi index 3ce8d375201b..06096f5a7749 100644 --- a/numpy/typing/tests/data/reveal/lib_function_base.pyi +++ b/numpy/typing/tests/data/reveal/lib_function_base.pyi @@ -73,8 +73,8 @@ assert_type(np.asarray_chkfinite(AR_f8, dtype=float), npt.NDArray[Any]) assert_type(np.piecewise(AR_f8, AR_b, [func]), npt.NDArray[np.float64]) assert_type(np.piecewise(AR_f8, AR_b_list, [func]), npt.NDArray[np.float64]) -assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, -1, kwarg=''), npt.NDArray[np.float64]) -assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, arg=-1, kwarg=''), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, -1, kwarg=""), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, arg=-1, kwarg=""), npt.NDArray[np.float64]) assert_type(np.piecewise(AR_LIKE_f8, AR_b_list, [func]), npt.NDArray[Any]) assert_type(np.select([AR_f8], [AR_f8]), npt.NDArray[Any]) diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index bd4e4e98373a..a6857ef0a3dd 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -188,7 +188,7 @@ assert_type(MAR_f4.any(out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_f4.any(None, out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_f4.sort(), None) -assert_type(MAR_f4.sort(axis=0, kind='quicksort', order='K', endwith=False, fill_value=42., stable=False), None) +assert_type(MAR_f4.sort(axis=0, kind="quicksort", order="K", endwith=False, fill_value=42., stable=False), None) assert_type(np.ma.sort(MAR_f4), MaskedArray[np.float32]) assert_type(np.ma.sort(MAR_subclass), MaskedArraySubclassC) @@ -214,10 +214,10 @@ assert_type(np.ma.take([1], [0]), MaskedArray[Any]) 
assert_type(np.ma.take(np.eye(2), 1, axis=0), MaskedArray[np.float64]) assert_type(MAR_f4.partition(1), None) -assert_type(MAR_V.partition(1, axis=0, kind='introselect', order='K'), None) +assert_type(MAR_V.partition(1, axis=0, kind="introselect", order="K"), None) assert_type(MAR_f4.argpartition(1), MaskedArray[np.intp]) -assert_type(MAR_1d.argpartition(1, axis=0, kind='introselect', order='K'), MaskedArray[np.intp]) +assert_type(MAR_1d.argpartition(1, axis=0, kind="introselect", order="K"), MaskedArray[np.intp]) assert_type(np.ma.ndim(f4), int) assert_type(np.ma.ndim(MAR_b), int) @@ -311,22 +311,22 @@ assert_type(np.ma.compressed([[1, 2, 3]]), np.ndarray[tuple[int], np.dtype]) assert_type(MAR_f4.put([0, 4, 8], [10, 20, 30]), None) assert_type(MAR_f4.put(4, 999), None) -assert_type(MAR_f4.put(4, 999, mode='clip'), None) +assert_type(MAR_f4.put(4, 999, mode="clip"), None) assert_type(MAR_c8.__array_wrap__(AR_b), MaskedArray[np.bool]) assert_type(np.ma.put(MAR_f4, [0, 4, 8], [10, 20, 30]), None) assert_type(np.ma.put(MAR_f4, 4, 999), None) -assert_type(np.ma.put(MAR_f4, 4, 999, mode='clip'), None) +assert_type(np.ma.put(MAR_f4, 4, 999, mode="clip"), None) assert_type(np.ma.putmask(MAR_f4, [True, False], [0, 1]), None) assert_type(np.ma.putmask(MAR_f4, np.False_, [0, 1]), None) -assert_type(MAR_f4.filled(float('nan')), NDArray[np.float32]) +assert_type(MAR_f4.filled(float("nan")), NDArray[np.float32]) assert_type(MAR_i8.filled(), NDArray[np.int64]) assert_type(MAR_1d.filled(), np.ndarray[tuple[int], np.dtype]) -assert_type(np.ma.filled(MAR_f4, float('nan')), NDArray[np.float32]) +assert_type(np.ma.filled(MAR_f4, float("nan")), NDArray[np.float32]) assert_type(np.ma.filled([[1, 2, 3]]), NDArray[Any]) # PyRight detects this one correctly, but mypy doesn't. 
# https://github.com/numpy/numpy/pull/28742#discussion_r2048968375 @@ -345,7 +345,7 @@ assert_type(np.ma.allclose(AR_f4, MAR_f4, masked_equal=False), bool) assert_type(np.ma.allclose(AR_f4, MAR_f4, rtol=.4, atol=.3), bool) assert_type(MAR_2d_f4.ravel(), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) -assert_type(MAR_1d.ravel(order='A'), np.ma.MaskedArray[tuple[int], np.dtype[Any]]) +assert_type(MAR_1d.ravel(order="A"), np.ma.MaskedArray[tuple[int], np.dtype[Any]]) assert_type(np.ma.getmask(MAR_f4), NDArray[np.bool] | np.bool) # PyRight detects this one correctly, but mypy doesn't: @@ -381,8 +381,8 @@ assert_type(MAR_2d_f4[:], MaskedArray[np.float32]) assert_type(MAR_2d_f4[0, 0], Any) assert_type(MAR_2d_f4[:, np.newaxis], MaskedArray[np.float32]) assert_type(MAR_2d_f4[..., -1], MaskedArray[np.float32]) -assert_type(MAR_2d_V['field_0'], np.ma.MaskedArray[tuple[int, int], np.dtype]) -assert_type(MAR_2d_V[['field_0', 'field_1']], np.ma.MaskedArray[tuple[int, int], np.dtype[np.void]]) +assert_type(MAR_2d_V["field_0"], np.ma.MaskedArray[tuple[int, int], np.dtype]) +assert_type(MAR_2d_V[["field_0", "field_1"]], np.ma.MaskedArray[tuple[int, int], np.dtype[np.void]]) assert_type(np.ma.nomask, np.bool[Literal[False]]) assert_type(np.ma.MaskType, type[np.bool]) @@ -406,7 +406,7 @@ assert_type(MAR_2d_f4.recordmask, np.ma.MaskType | np.ndarray[tuple[int, int], n assert_type(MAR_2d_f4.anom(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) assert_type(MAR_2d_f4.anom(axis=0, dtype=np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype]) assert_type(MAR_2d_f4.anom(0, np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype]) -assert_type(MAR_2d_f4.anom(0, 'float16'), np.ma.MaskedArray[tuple[int, int], np.dtype]) +assert_type(MAR_2d_f4.anom(0, "float16"), np.ma.MaskedArray[tuple[int, int], np.dtype]) assert_type(MAR_i8.fill_value, np.int64) @@ -451,15 +451,15 @@ assert_type(MAR_f8.view(dtype=np.float32, fill_value=0), MaskedArray[np.float32] assert_type(MAR_f8.view(type=np.ndarray), np.ndarray[Any, Any]) assert_type(MAR_f8.view(None, np.ndarray), np.ndarray[Any, Any]) assert_type(MAR_f8.view(dtype=np.ndarray), np.ndarray[Any, Any]) -assert_type(MAR_f8.view(dtype='float32'), MaskedArray[Any]) -assert_type(MAR_f8.view(dtype='float32', type=np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_f8.view(dtype="float32"), MaskedArray[Any]) +assert_type(MAR_f8.view(dtype="float32", type=np.ndarray), np.ndarray[Any, Any]) assert_type(MAR_2d_f4.view(dtype=np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float16]]) assert_type(MAR_2d_f4.view(dtype=np.dtype(np.float16)), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float16]]) assert_type(MAR_f8.__deepcopy__(), MaskedArray[np.float64]) assert_type(MAR_f8.argsort(), MaskedArray[np.intp]) -assert_type(MAR_f8.argsort(axis=0, kind='heap', order=('x', 'y')), MaskedArray[np.intp]) +assert_type(MAR_f8.argsort(axis=0, kind="heap", order=("x", "y")), MaskedArray[np.intp]) assert_type(MAR_f8.argsort(endwith=True, fill_value=1.5, stable=False), MaskedArray[np.intp]) assert_type(MAR_2d_f4.flat, np.ma.core.MaskedIterator[tuple[int, int], np.dtype[np.float32]]) diff --git a/numpy/typing/tests/data/reveal/numerictypes.pyi b/numpy/typing/tests/data/reveal/numerictypes.pyi index 4a3e02c9afa6..784167467532 100644 --- a/numpy/typing/tests/data/reveal/numerictypes.pyi +++ b/numpy/typing/tests/data/reveal/numerictypes.pyi @@ -48,4 +48,4 @@ assert_type(np.typecodes["Character"], Literal["c"]) assert_type(np.typecodes["Complex"], Literal["FDG"]) 
assert_type(np.typecodes["All"], Literal["?bhilqnpBHILQNPefdgFDGSUVOMm"]) -assert_type(np.sctypeDict['uint8'], type[np.generic]) +assert_type(np.sctypeDict["uint8"], type[np.generic]) diff --git a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi index 11265b92ff67..2870d50310a2 100644 --- a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi @@ -66,11 +66,11 @@ PS_all: ( # static- and classmethods assert_type(type(PS_poly).basis_name, None) -assert_type(type(PS_cheb).basis_name, L['T']) -assert_type(type(PS_herm).basis_name, L['H']) -assert_type(type(PS_herme).basis_name, L['He']) -assert_type(type(PS_lag).basis_name, L['L']) -assert_type(type(PS_leg).basis_name, L['P']) +assert_type(type(PS_cheb).basis_name, L["T"]) +assert_type(type(PS_herm).basis_name, L["H"]) +assert_type(type(PS_herme).basis_name, L["He"]) +assert_type(type(PS_lag).basis_name, L["L"]) +assert_type(type(PS_leg).basis_name, L["P"]) assert_type(type(PS_all).__hash__, None) assert_type(type(PS_all).__array_ufunc__, None) @@ -90,10 +90,10 @@ assert_type(type(PS_leg).fromroots(SQ_O), npp.Legendre) assert_type(type(PS_leg).fromroots(AR_O_co), npp.Legendre) assert_type(type(PS_poly).identity(), npp.Polynomial) -assert_type(type(PS_cheb).identity(symbol='z'), npp.Chebyshev) +assert_type(type(PS_cheb).identity(symbol="z"), npp.Chebyshev) assert_type(type(PS_lag).basis(SC_i), npp.Laguerre) -assert_type(type(PS_leg).basis(32, symbol='u'), npp.Legendre) +assert_type(type(PS_leg).basis(32, symbol="u"), npp.Legendre) assert_type(type(PS_herm).cast(PS_poly), npp.Hermite) assert_type(type(PS_herme).cast(PS_leg), npp.HermiteE) diff --git a/numpy/typing/tests/data/reveal/scalars.pyi b/numpy/typing/tests/data/reveal/scalars.pyi index d7b277735c7c..67444e33dfc3 100644 --- a/numpy/typing/tests/data/reveal/scalars.pyi +++ b/numpy/typing/tests/data/reveal/scalars.pyi @@ -42,7 +42,7 @@ assert_type(c8.dtype, np.dtype[np.complex64]) assert_type(c8.real, np.float32) assert_type(c16.imag, np.float64) -assert_type(np.str_('foo'), np.str_) +assert_type(np.str_("foo"), np.str_) assert_type(V[0], Any) assert_type(V["field1"], Any) diff --git a/numpy/typing/tests/data/reveal/testing.pyi b/numpy/typing/tests/data/reveal/testing.pyi index d70bc971c15f..52d9ef6b3a5d 100644 --- a/numpy/typing/tests/data/reveal/testing.pyi +++ b/numpy/typing/tests/data/reveal/testing.pyi @@ -90,7 +90,7 @@ assert_type(np.testing.assert_equal({1}, {1}), None) assert_type(np.testing.assert_equal([1, 2, 3], [1, 2, 3], err_msg="fail"), None) assert_type(np.testing.assert_equal(1, 1.0, verbose=True), None) -assert_type(np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1]), None) +assert_type(np.testing.print_assert_equal("Test XYZ of func xyz", [0, 1], [0, 1]), None) assert_type(np.testing.assert_almost_equal(1.0, 1.1), None) assert_type(np.testing.assert_almost_equal([1, 2, 3], [1, 2, 3], err_msg="fail"), None) diff --git a/numpy/version.pyi b/numpy/version.pyi index 113cde3f5621..b3284d7608b0 100644 --- a/numpy/version.pyi +++ b/numpy/version.pyi @@ -1,12 +1,12 @@ from typing import Final, LiteralString __all__ = ( - '__version__', - 'full_version', - 'git_revision', - 'release', - 'short_version', - 'version', + "__version__", + "full_version", + "git_revision", + "release", + "short_version", + "version", ) version: Final[LiteralString] diff --git a/ruff.toml b/ruff.toml index b3cfea15b190..124f343bc061 100644 --- a/ruff.toml +++ 
b/ruff.toml @@ -37,6 +37,7 @@ extend-select = [ "PGH", "PLE", "UP", + "Q", ] ignore = [ "B006", # Do not use mutable data structures for argument defaults @@ -97,6 +98,8 @@ ignore = [ "numpy*pyi" = ["E501"] # "useless assignments" aren't so useless when you're testing that they don't make type checkers scream "numpy/typing/tests/data/*" = ["B015", "B018", "E501"] +# too disruptive to enable all at once +"**/*.py" = ["Q"] "__init__.py" = ["F401", "F403", "F405"] "__init__.pyi" = ["F401"] From 7545bc7fdf90548a296900b89de2b14e2d19250c Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 15 Aug 2025 01:03:51 +0200 Subject: [PATCH 0305/1018] STY: ruff rule name comments --- ruff.toml | 43 ++++++++++++++++++++++++++----------------- 1 file changed, 26 insertions(+), 17 deletions(-) diff --git a/ruff.toml b/ruff.toml index 124f343bc061..e0f0dd98872e 100644 --- a/ruff.toml +++ b/ruff.toml @@ -21,25 +21,26 @@ line-ending = "lf" [lint] preview = true extend-select = [ - "B", - "C4", - "ISC", - "LOG", - "G", - "PIE", - "TID", - "FLY", - "I", - "PD", - "PERF", - "E", - "W", - "PGH", - "PLE", - "UP", - "Q", + "B", # flake8-bugbear + "C4", # flake8-comprehensions + "ISC", # flake8-implicit-str-concat + "LOG", # flake8-logging + "G", # flake8-logging-format + "PIE", # flake8-pie + "Q", # flake8-quotes + "TID", # flake8-tidy-imports + "FLY", # flynt + "I", # isort + "PD", # pandas-vet + "PERF", # perflint + "E", # pycodestyle/error + "W", # pycodestyle/warning + "PGH", # pygrep-hooks + "PLE", # pylint/error + "UP", # pyupgrade ] ignore = [ + # flake8-bugbear "B006", # Do not use mutable data structures for argument defaults "B007", # Loop control variable not used within loop body "B011", # Do not `assert False` (`python -O` removes these calls), raise `AssertionError()` @@ -47,11 +48,17 @@ ignore = [ "B028", # No explicit `stacklevel` keyword argument found "B904", # Within an `except` clause distinguish raised exceptions from errors in exception handling "B905", #`zip()` without an explicit `strict=` parameter + # flake8-comprehensions "C408", # Unnecessary `dict()` call (rewrite as a literal) + # flake8-implicit-str-concat "ISC002", # Implicitly concatenated string literals over multiple lines + # flake8-pie "PIE790", # Unnecessary `pass` statement + # pandas-vet "PD901", # Avoid using the generic variable name `df` for DataFrames + # perflint "PERF401", # Use a list comprehension to create a transformed list + # pycodestyle/error "E241", # Multiple spaces after comma "E265", # Block comment should start with `# ` "E266", # Too many leading `#` before block comment @@ -61,10 +68,12 @@ ignore = [ "E721", # TODO: Use `is` and `is not` for type comparisons, or `isinstance()` for isinstance check "E731", # Do not assign a `lambda` expression, use a `def` "E741", # Ambiguous variable name + # pyflakes "F403", # `from ... 
import *` used; unable to detect undefined names "F405", # may be undefined, or defined from star imports "F821", # Undefined name "F841", # Local variable is assigned to but never used + # pyupgrade "UP015" , # Unnecessary mode argument "UP031", # TODO: Use format specifiers instead of percent format ] From 4e00e4df15528323f6d76bbd40157b582861af86 Mon Sep 17 00:00:00 2001 From: kibitzing Date: Sat, 16 Aug 2025 21:20:52 +0900 Subject: [PATCH 0306/1018] ENH: Add ndmax parameter to np.array to control recursion depth (#29569) * ENH: add ndmax parameter to np.array * ENH: validate ndmax argument is positive * TST: add ndmax tests for array creation * ENH: allow np.array with ndmax=0 to create 0-D array * MNT: simplify ndmax validation and error message * MNT: improve consistency in error message formatting * DOC: add comment explaining legacy behavior of max_depth=0 * TST: update tests to reflect new ndmax validation and error message * DOC: add documentation and examples for np.array ndmax parameter * DOC: add release note for numpy.array ndmax parameter --- .../upcoming_changes/29569.new_feature.rst | 27 +++++++ numpy/_core/_add_newdocs.py | 26 ++++++- numpy/_core/src/multiarray/array_converter.c | 2 +- numpy/_core/src/multiarray/ctors.c | 19 ++++- numpy/_core/src/multiarray/multiarraymodule.c | 28 ++++--- numpy/_core/src/multiarray/scalartypes.c.src | 2 +- numpy/_core/tests/test_multiarray.py | 73 +++++++++++++++++++ 7 files changed, 162 insertions(+), 15 deletions(-) create mode 100644 doc/release/upcoming_changes/29569.new_feature.rst diff --git a/doc/release/upcoming_changes/29569.new_feature.rst b/doc/release/upcoming_changes/29569.new_feature.rst new file mode 100644 index 000000000000..ac014c07c7a0 --- /dev/null +++ b/doc/release/upcoming_changes/29569.new_feature.rst @@ -0,0 +1,27 @@ +``ndmax`` option for `numpy.array` +---------------------------------------------------- +The ``ndmax`` option is now available for `numpy.array`. +It explicitly limits the maximum number of dimensions created from nested sequences. + +This is particularly useful when creating arrays of list-like objects with ``dtype=object``. +By default, NumPy recurses through all nesting levels to create the highest possible +dimensional array, but this behavior may not be desired when the intent is to preserve +nested structures as objects. The ``ndmax`` parameter provides explicit control over +this recursion depth. + +.. code-block:: python + + # Default behavior: Creates a 2D array + >>> a = np.array([[1, 2], [3, 4]], dtype=object) + >>> a + array([[1, 2], + [3, 4]], dtype=object) + >>> a.shape + (2, 2) + + # With ndmax=1: Creates a 1D array + >>> b = np.array([[1, 2], [3, 4]], dtype=object, ndmax=1) + >>> b + array([list([1, 2]), list([3, 4])], dtype=object) + >>> b.shape + (2,) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index ed8cf50ee360..e3009a490bd3 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -806,7 +806,7 @@ add_newdoc('numpy._core.multiarray', 'array', """ array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0, - like=None) + ndmax=None, like=None) Create an array. @@ -855,6 +855,15 @@ Specifies the minimum number of dimensions that the resulting array should have. Ones will be prepended to the shape as needed to meet this requirement. + ndmax : int, optional + Specifies the maximum number of dimensions to create when inferring + shape from nested sequences. 
By default, NumPy recurses through all + nesting levels (up to the compile-time constant ``NPY_MAXDIMS``). + Setting ``ndmax`` stops recursion at the specified depth, preserving + deeper nested structures as objects instead of promoting them to + higher-dimensional arrays. In this case, ``dtype=object`` is required. + + .. versionadded:: 2.4.0 ${ARRAY_FUNCTION_LIKE} .. versionadded:: 1.20.0 @@ -926,6 +935,21 @@ matrix([[1, 2], [3, 4]]) + Limiting the maximum dimensions with ``ndmax``: + + >>> a = np.array([[1, 2], [3, 4]], dtype=object, ndmax=2) + >>> a + array([[1, 2], + [3, 4]], dtype=object) + >>> a.shape + (2, 2) + + >>> b = np.array([[1, 2], [3, 4]], dtype=object, ndmax=1) + >>> b + array([list([1, 2]), list([3, 4])], dtype=object) + >>> b.shape + (2,) + """) add_newdoc('numpy._core.multiarray', 'asarray', diff --git a/numpy/_core/src/multiarray/array_converter.c b/numpy/_core/src/multiarray/array_converter.c index 496173038954..10dc83ac657f 100644 --- a/numpy/_core/src/multiarray/array_converter.c +++ b/numpy/_core/src/multiarray/array_converter.c @@ -83,7 +83,7 @@ array_converter_new( } else { item->array = (PyArrayObject *)PyArray_FromAny_int( - item->object, NULL, NULL, 0, 0, 0, NULL, + item->object, NULL, NULL, 0, NPY_MAXDIMS, 0, NULL, &item->scalar_input); if (item->array == NULL) { goto fail; diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 498fa78118b3..38da6f314848 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -1508,6 +1508,16 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, return NULL; } + /* + * The internal implementation treats 0 as actually wanting a zero-dimensional + * array, but the API for this function has typically treated it as + * "anything is fine", so convert here. + * TODO: should we use another value as a placeholder instead? 
+ */ + if (max_depth == 0 || max_depth > NPY_MAXDIMS) { + max_depth = NPY_MAXDIMS; + } + int was_scalar; PyObject* ret = PyArray_FromAny_int( op, dt_info.descr, dt_info.dtype, @@ -1563,7 +1573,7 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, Py_BEGIN_CRITICAL_SECTION(op); ndim = PyArray_DiscoverDTypeAndShape( - op, NPY_MAXDIMS, dims, &cache, in_DType, in_descr, &dtype, + op, max_depth, dims, &cache, in_DType, in_descr, &dtype, copy, &was_copied_by__array__); if (ndim < 0) { @@ -1583,7 +1593,7 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, npy_free_coercion_cache(cache); goto cleanup; } - if (max_depth != 0 && ndim > max_depth) { + if (ndim > max_depth && (in_DType == NULL || in_DType->type_num != NPY_OBJECT)) { PyErr_SetString(PyExc_ValueError, "object too deep for desired array"); npy_free_coercion_cache(cache); @@ -1798,6 +1808,11 @@ PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, return NULL; } + /* See comment in PyArray_FromAny for rationale */ + if (max_depth == 0 || max_depth > NPY_MAXDIMS) { + max_depth = NPY_MAXDIMS; + } + PyObject* ret = PyArray_CheckFromAny_int( op, dt_info.descr, dt_info.dtype, min_depth, max_depth, requires, context); diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 3d82e6c7f448..a7fdf3efba17 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -1560,7 +1560,7 @@ _prepend_ones(PyArrayObject *arr, int nd, int ndmin, NPY_ORDER order) static inline PyObject * _array_fromobject_generic( PyObject *op, PyArray_Descr *in_descr, PyArray_DTypeMeta *in_DType, - NPY_COPYMODE copy, NPY_ORDER order, npy_bool subok, int ndmin) + NPY_COPYMODE copy, NPY_ORDER order, npy_bool subok, int ndmin, int ndmax) { PyArrayObject *oparr = NULL, *ret = NULL; PyArray_Descr *oldtype = NULL; @@ -1570,10 +1570,9 @@ _array_fromobject_generic( Py_XINCREF(in_descr); PyArray_Descr *dtype = in_descr; - if (ndmin > NPY_MAXDIMS) { + if (ndmin > ndmax) { PyErr_Format(PyExc_ValueError, - "ndmin bigger than allowable number of dimensions " - "NPY_MAXDIMS (=%d)", NPY_MAXDIMS); + "ndmin must be <= ndmax (%d)", ndmax); goto finish; } /* fast exit if simple call */ @@ -1682,7 +1681,7 @@ _array_fromobject_generic( flags |= NPY_ARRAY_FORCECAST; ret = (PyArrayObject *)PyArray_CheckFromAny_int( - op, dtype, in_DType, 0, 0, flags, NULL); + op, dtype, in_DType, 0, ndmax, flags, NULL); finish: Py_XDECREF(dtype); @@ -1713,6 +1712,7 @@ array_array(PyObject *NPY_UNUSED(ignored), npy_bool subok = NPY_FALSE; NPY_COPYMODE copy = NPY_COPY_ALWAYS; int ndmin = 0; + int ndmax = NPY_MAXDIMS; npy_dtype_info dt_info = {NULL, NULL}; NPY_ORDER order = NPY_KEEPORDER; PyObject *like = Py_None; @@ -1726,6 +1726,7 @@ array_array(PyObject *NPY_UNUSED(ignored), "$order", &PyArray_OrderConverter, &order, "$subok", &PyArray_BoolConverter, &subok, "$ndmin", &PyArray_PythonPyIntFromInt, &ndmin, + "$ndmax", &PyArray_PythonPyIntFromInt, &ndmax, "$like", NULL, &like, NULL, NULL, NULL) < 0) { Py_XDECREF(dt_info.descr); @@ -1747,8 +1748,15 @@ array_array(PyObject *NPY_UNUSED(ignored), op = args[0]; } + if (ndmax > NPY_MAXDIMS || ndmax < 0) { + PyErr_Format(PyExc_ValueError, "ndmax must be in the range [0, NPY_MAXDIMS (%d)] ", NPY_MAXDIMS); + Py_XDECREF(dt_info.descr); + Py_XDECREF(dt_info.dtype); + return NULL; + } + PyObject *res = _array_fromobject_generic( - op, dt_info.descr, dt_info.dtype, copy, order, subok, ndmin); + op, dt_info.descr, dt_info.dtype, copy, 
order, subok, ndmin, ndmax); Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return res; @@ -1794,7 +1802,7 @@ array_asarray(PyObject *NPY_UNUSED(ignored), } PyObject *res = _array_fromobject_generic( - op, dt_info.descr, dt_info.dtype, copy, order, NPY_FALSE, 0); + op, dt_info.descr, dt_info.dtype, copy, order, NPY_FALSE, 0, NPY_MAXDIMS); Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return res; @@ -1840,7 +1848,7 @@ array_asanyarray(PyObject *NPY_UNUSED(ignored), } PyObject *res = _array_fromobject_generic( - op, dt_info.descr, dt_info.dtype, copy, order, NPY_TRUE, 0); + op, dt_info.descr, dt_info.dtype, copy, order, NPY_TRUE, 0, NPY_MAXDIMS); Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return res; @@ -1882,7 +1890,7 @@ array_ascontiguousarray(PyObject *NPY_UNUSED(ignored), PyObject *res = _array_fromobject_generic( op, dt_info.descr, dt_info.dtype, NPY_COPY_IF_NEEDED, NPY_CORDER, NPY_FALSE, - 1); + 1, NPY_MAXDIMS); Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return res; @@ -1924,7 +1932,7 @@ array_asfortranarray(PyObject *NPY_UNUSED(ignored), PyObject *res = _array_fromobject_generic( op, dt_info.descr, dt_info.dtype, NPY_COPY_IF_NEEDED, NPY_FORTRANORDER, - NPY_FALSE, 1); + NPY_FALSE, 1, NPY_MAXDIMS); Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return res; diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 5e3a3ba71d3e..a6170936a5f3 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -226,7 +226,7 @@ find_binary_operation_path( */ int was_scalar; PyArrayObject *arr = (PyArrayObject *)PyArray_FromAny_int( - other, NULL, NULL, 0, 0, 0, NULL, &was_scalar); + other, NULL, NULL, 0, NPY_MAXDIMS, 0, NULL, &was_scalar); if (arr == NULL) { return -1; } diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index cf2f899b7991..da4eeb91cfc2 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -1279,6 +1279,79 @@ def test_creation_from_dtypemeta(self, func): assert_array_equal(arr1, arr2) assert arr2.dtype == dtype + def test_ndmax_less_than_actual_dims_dtype_object(self): + data = [[1, 2, 3], [4, 5, 6]] + arr = np.array(data, ndmax=1, dtype=object) + assert arr.ndim == 1 + assert arr.shape == (2,) + assert arr.dtype == object + + data = [[1, 2, 3], [4, 5]] + arr = np.array(data, ndmax=1, dtype=object) + assert arr.ndim == 1 + assert arr.shape == (2,) + assert arr.dtype == object + + data = [[[1], [2]], [[3], [4]]] + arr = np.array(data, ndmax=2, dtype=object) + assert arr.ndim == 2 + assert arr.shape == (2, 2) + assert arr.dtype == object + + def test_ndmax_equal_to_actual_dims(self): + data = [[1, 2], [3, 4]] + arr = np.array(data, ndmax=2) + assert arr.ndim == 2 + assert_array_equal(arr, np.array(data)) + + def test_ndmax_greater_than_actual_dims(self): + data = [[1, 2], [3, 4]] + arr = np.array(data, ndmax=3) + assert arr.ndim == 2 + assert_array_equal(arr, np.array(data)) + + def test_ndmax_less_than_actual_dims(self): + data = [[[1], [2]], [[3], [4]]] + with pytest.raises(ValueError, + match="setting an array element with a sequence. 
" + "The requested array would exceed the maximum number of dimension of 2."): + np.array(data, ndmax=2) + + def test_ndmax_is_zero(self): + data = [1, 2, 3] + arr = np.array(data, ndmax=0, dtype=object) + assert arr.ndim == 0 + assert arr.shape == () + assert arr.dtype == object + + data = [[1, 2, 3], [4, 5, 6]] + arr = np.array(data, ndmax=0, dtype=object) + assert arr.ndim == 0 + assert arr.shape == () + assert arr.dtype == object + + data = [[1, 2, 3], [4, 5]] + arr = np.array(data, ndmax=0, dtype=object) + assert arr.ndim == 0 + assert arr.shape == () + assert arr.dtype == object + + def test_ndmax_less_than_ndmin(self): + data = [[[1], [2]], [[3], [4]]] + with pytest.raises(ValueError, match="ndmin must be <= ndmax"): + np.array(data, ndmax=1, ndmin=2) + + def test_ndmax_is_negative(self): + data = [1, 2, 3] + with pytest.raises(ValueError, match="ndmax must be in the range"): + np.array(data, ndmax=-1) + + def test_ndmax_greather_than_NPY_MAXDIMS(self): + data = [1, 2, 3] + # current NPY_MAXDIMS is 64 + with pytest.raises(ValueError, match="ndmax must be in the range"): + np.array(data, ndmax=65) + class TestStructured: def test_subarray_field_access(self): From b9dcaa3ab616920be14ad931a228a0ee0378057e Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 16 Aug 2025 20:51:10 +0200 Subject: [PATCH 0307/1018] TYP: add ``ndmax`` parameter to ``np..array`` This follows #29569, and also fills in the missing parameter defaults, towards #28428. --- numpy/_core/multiarray.pyi | 59 +++++++++++++++++++++----------------- 1 file changed, 32 insertions(+), 27 deletions(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 560822d68466..5a3fa46bee20 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -441,57 +441,62 @@ def empty_like( @overload def array( object: _ArrayT, - dtype: None = ..., + dtype: None = None, *, - copy: bool | _CopyMode | None = ..., - order: _OrderKACF = ..., + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", subok: L[True], - ndmin: int = ..., - like: _SupportsArrayFunc | None = ..., + ndmin: int = 0, + ndmax: int | None = None, + like: _SupportsArrayFunc | None = None, ) -> _ArrayT: ... @overload def array( object: _SupportsArray[_ArrayT], - dtype: None = ..., + dtype: None = None, *, - copy: bool | _CopyMode | None = ..., - order: _OrderKACF = ..., + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", subok: L[True], - ndmin: L[0] = ..., - like: _SupportsArrayFunc | None = ..., + ndmin: L[0] = 0, + ndmax: int | None = None, + like: _SupportsArrayFunc | None = None, ) -> _ArrayT: ... @overload def array( object: _ArrayLike[_ScalarT], - dtype: None = ..., + dtype: None = None, *, - copy: bool | _CopyMode | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - ndmin: int = ..., - like: _SupportsArrayFunc | None = ..., + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", + subok: bool = False, + ndmin: int = 0, + ndmax: int | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[_ScalarT]: ... @overload def array( object: Any, dtype: _DTypeLike[_ScalarT], *, - copy: bool | _CopyMode | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - ndmin: int = ..., - like: _SupportsArrayFunc | None = ..., + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", + subok: bool = False, + ndmin: int = 0, + ndmax: int | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[_ScalarT]: ... 
@overload def array( object: Any, - dtype: DTypeLike | None = ..., + dtype: DTypeLike | None = None, *, - copy: bool | _CopyMode | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - ndmin: int = ..., - like: _SupportsArrayFunc | None = ..., + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", + subok: bool = False, + ndmin: int = 0, + ndmax: int | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... # From 0d5bec96a0a826064b3fc3099ac9d3903f7af659 Mon Sep 17 00:00:00 2001 From: Riku Sakamoto Date: Sun, 13 Jul 2025 12:47:45 -0700 Subject: [PATCH 0308/1018] ENH: Allow subscript access for `np.bool` by adding `__class_getitem__` Fixes #29247. Implements `booleantype_class_getitem_abc` and adds a `__class_getitem__` method to `np.bool` to enable subscript access (e.g., `np.bool[int]`). --- numpy/_core/src/multiarray/scalartypes.c.src | 28 ++++++++++++++++++++ numpy/_core/tests/test_scalar_methods.py | 8 ++++-- 2 files changed, 34 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 5e3a3ba71d3e..80b9f8e337c4 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -2447,6 +2447,23 @@ numbertype_class_getitem_abc(PyObject *cls, PyObject *args) return Py_GenericAlias(cls, args); } + +static PyObject * +booleantype_class_getitem_abc(PyObject *cls, PyObject *args) +{ + const Py_ssize_t args_len = PyTuple_Check(args) ? PyTuple_Size(args) : 1; + int args_len_expected = 1; + + if ((args_len > args_len_expected) || (args_len == 0)) { + return PyErr_Format(PyExc_TypeError, + "Too %s arguments for %s", + args_len > args_len_expected ? "many" : "few", + ((PyTypeObject *)cls)->tp_name); + } + return Py_GenericAlias(cls, args); +} + + /* * Use for concrete np.number subclasses, making them act as if they * were subtyped from e.g. 
np.signedinteger[object], thus lacking any @@ -2853,6 +2870,16 @@ static PyMethodDef numbertype_methods[] = { {NULL, NULL, 0, NULL} /* sentinel */ }; + +static PyMethodDef booleantype_methods[] = { + /* for typing */ + {"__class_getitem__", + (PyCFunction)booleantype_class_getitem_abc, + METH_CLASS | METH_O, NULL}, + {NULL, NULL, 0, NULL} /* sentinel */ +}; + + /**begin repeat * #name = cfloat,clongdouble# */ @@ -4571,6 +4598,7 @@ initialize_numeric_types(void) PyBoolArrType_Type.tp_str = genbool_type_str; PyBoolArrType_Type.tp_repr = genbool_type_repr; + PyBoolArrType_Type.tp_methods = booleantype_methods; /**begin repeat diff --git a/numpy/_core/tests/test_scalar_methods.py b/numpy/_core/tests/test_scalar_methods.py index 2d508a08bb4d..e2943be4f660 100644 --- a/numpy/_core/tests/test_scalar_methods.py +++ b/numpy/_core/tests/test_scalar_methods.py @@ -171,8 +171,12 @@ def test_abc_non_numeric(self, cls: type[np.generic]) -> None: @pytest.mark.parametrize("code", np.typecodes["All"]) def test_concrete(self, code: str) -> None: cls = np.dtype(code).type - with pytest.raises(TypeError): - cls[Any] + if cls == np.bool: + # np.bool allows subscript + assert cls[Any] + else: + with pytest.raises(TypeError): + cls[Any] @pytest.mark.parametrize("arg_len", range(4)) def test_subscript_tuple(self, arg_len: int) -> None: From 003281b94b9c906bc11ceb516e9c1ed1a9cd722f Mon Sep 17 00:00:00 2001 From: Riku Sakamoto Date: Sun, 17 Aug 2025 14:42:04 +0900 Subject: [PATCH 0309/1018] ENH: Delegate checking of type argument count for `np.bool` to static type checkers --- numpy/_core/src/multiarray/scalartypes.c.src | 27 +++----------------- numpy/_core/tests/test_scalar_methods.py | 6 ++++- 2 files changed, 8 insertions(+), 25 deletions(-) diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 80b9f8e337c4..dc8d047917ae 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -2447,23 +2447,6 @@ numbertype_class_getitem_abc(PyObject *cls, PyObject *args) return Py_GenericAlias(cls, args); } - -static PyObject * -booleantype_class_getitem_abc(PyObject *cls, PyObject *args) -{ - const Py_ssize_t args_len = PyTuple_Check(args) ? PyTuple_Size(args) : 1; - int args_len_expected = 1; - - if ((args_len > args_len_expected) || (args_len == 0)) { - return PyErr_Format(PyExc_TypeError, - "Too %s arguments for %s", - args_len > args_len_expected ? "many" : "few", - ((PyTypeObject *)cls)->tp_name); - } - return Py_GenericAlias(cls, args); -} - - /* * Use for concrete np.number subclasses, making them act as if they * were subtyped from e.g. 
np.signedinteger[object], thus lacking any @@ -2870,16 +2853,12 @@ static PyMethodDef numbertype_methods[] = { {NULL, NULL, 0, NULL} /* sentinel */ }; - static PyMethodDef booleantype_methods[] = { - /* for typing */ - {"__class_getitem__", - (PyCFunction)booleantype_class_getitem_abc, - METH_CLASS | METH_O, NULL}, - {NULL, NULL, 0, NULL} /* sentinel */ + /* for typing */ + {"__class_getitem__", Py_GenericAlias, METH_CLASS | METH_O, NULL}, + {NULL, NULL, 0, NULL} /* sentinel */ }; - /**begin repeat * #name = cfloat,clongdouble# */ diff --git a/numpy/_core/tests/test_scalar_methods.py b/numpy/_core/tests/test_scalar_methods.py index e2943be4f660..26dad71794e3 100644 --- a/numpy/_core/tests/test_scalar_methods.py +++ b/numpy/_core/tests/test_scalar_methods.py @@ -4,7 +4,7 @@ import fractions import platform import types -from typing import Any +from typing import Any, Literal import pytest @@ -190,6 +190,10 @@ def test_subscript_tuple(self, arg_len: int) -> None: def test_subscript_scalar(self) -> None: assert np.number[Any] + @pytest.mark.parametrize("subscript", [Literal[True], Literal[False]]) + def test_subscript_bool(self, subscript: Literal[True, False]) -> None: + assert isinstance(np.bool[subscript], types.GenericAlias) + class TestBitCount: # derived in part from the cpython test "test_bit_count" From 4136d1c20d73e8e1bdc7f1a6a24a138cb44cd0dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Melissa=20Weber=20Mendon=C3=A7a?= Date: Sun, 17 Aug 2025 16:07:11 -0300 Subject: [PATCH 0310/1018] DOC: Add link to homepage in doc landing page [skip azp][skip cirrus][skip actions] --- doc/source/index.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/index.rst b/doc/source/index.rst index 02f3a8dc12b0..00d1bb62e6b3 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -21,6 +21,7 @@ NumPy documentation `Historical versions of documentation `_ **Useful links**: +`Home `_ | `Installation `_ | `Source Repository `_ | `Issue Tracker `_ | From e7d3a29e1497bb65f8a6aad93689df0ae2cd0b6a Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Mon, 18 Aug 2025 12:33:37 +0100 Subject: [PATCH 0311/1018] TYP: Type default values in stubs in `numpy/ma` (#29531) --- numpy/ma/core.pyi | 114 +++++++++++++++++++++--------------------- numpy/ma/extras.pyi | 38 +++++++------- numpy/ma/mrecords.pyi | 48 +++++++++--------- 3 files changed, 100 insertions(+), 100 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index a033e735d3f5..9adab779776a 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -328,10 +328,10 @@ def filled(a: ndarray[_ShapeT_co, _DTypeT_co], fill_value: _ScalarLike_co | None def filled(a: _ArrayLike[_ScalarT_co], fill_value: _ScalarLike_co | None = None) -> NDArray[_ScalarT_co]: ... @overload def filled(a: ArrayLike, fill_value: _ScalarLike_co | None = None) -> NDArray[Any]: ... -def getdata(a, subok=...): ... +def getdata(a, subok=True): ... get_data = getdata -def fix_invalid(a, mask=..., copy=..., fill_value=...): ... +def fix_invalid(a, mask=..., copy=True, fill_value=None): ... class _MaskedUFunc: f: Any @@ -350,9 +350,9 @@ class _MaskedBinaryOperation(_MaskedUFunc): filly: Any def __init__(self, mbfunc, fillx=..., filly=...): ... def __call__(self, a, b, *args, **kwargs): ... - def reduce(self, target, axis=..., dtype=...): ... + def reduce(self, target, axis=0, dtype=None): ... def outer(self, a, b): ... - def accumulate(self, target, axis=...): ... + def accumulate(self, target, axis=0): ... 
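# A small sketch of what the now-explicit defaults mean at runtime (assuming
# the stubs above; `np.ma.add` is one such `_MaskedBinaryOperation`):
# `np.ma.add.reduce(x)` behaves like `np.ma.add.reduce(x, axis=0, dtype=None)`,
# reducing a 2-D masked array over its first axis.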
class _DomainedBinaryOperation(_MaskedUFunc): domain: Any @@ -433,29 +433,29 @@ def getmaskarray(arr): ... # which isn't necessarily a ndarray. Please open an issue if this causes issues. def is_mask(m: object) -> TypeIs[NDArray[bool_]]: ... -def make_mask(m, copy=..., shrink=..., dtype=...): ... -def make_mask_none(newshape, dtype=...): ... -def mask_or(m1, m2, copy=..., shrink=...): ... +def make_mask(m, copy=False, shrink=True, dtype=...): ... +def make_mask_none(newshape, dtype=None): ... +def mask_or(m1, m2, copy=False, shrink=True): ... def flatten_mask(mask): ... -def masked_where(condition, a, copy=...): ... -def masked_greater(x, value, copy=...): ... -def masked_greater_equal(x, value, copy=...): ... -def masked_less(x, value, copy=...): ... -def masked_less_equal(x, value, copy=...): ... -def masked_not_equal(x, value, copy=...): ... -def masked_equal(x, value, copy=...): ... -def masked_inside(x, v1, v2, copy=...): ... -def masked_outside(x, v1, v2, copy=...): ... -def masked_object(x, value, copy=..., shrink=...): ... -def masked_values(x, value, rtol=..., atol=..., copy=..., shrink=...): ... -def masked_invalid(a, copy=...): ... +def masked_where(condition, a, copy=True): ... +def masked_greater(x, value, copy=True): ... +def masked_greater_equal(x, value, copy=True): ... +def masked_less(x, value, copy=True): ... +def masked_less_equal(x, value, copy=True): ... +def masked_not_equal(x, value, copy=True): ... +def masked_equal(x, value, copy=True): ... +def masked_inside(x, v1, v2, copy=True): ... +def masked_outside(x, v1, v2, copy=True): ... +def masked_object(x, value, copy=True, shrink=True): ... +def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True): ... +def masked_invalid(a, copy=True): ... class _MaskedPrintOption: def __init__(self, display): ... def display(self): ... def set_display(self, s): ... def enabled(self): ... - def enable(self, shrink=...): ... + def enable(self, shrink=1): ... masked_print_option: _MaskedPrintOption @@ -1302,7 +1302,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def count(self, axis: _ShapeLike, keepdims: bool | _NoValueType = ...) -> NDArray[int_]: ... @overload - def count(self, axis: _ShapeLike | None = ..., *, keepdims: Literal[True]) -> NDArray[int_]: ... + def count(self, axis: _ShapeLike | None = None, *, keepdims: Literal[True]) -> NDArray[int_]: ... @overload def count(self, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... @@ -1492,19 +1492,19 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def trace( self, # >= 2D MaskedArray - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., - dtype: DTypeLike | None = ..., - out: None = ..., + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + out: None = None, ) -> Any: ... @overload def trace( self, # >= 2D MaskedArray - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., - dtype: DTypeLike | None = ..., + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, *, out: _ArrayT, ) -> _ArrayT: ... @@ -1520,9 +1520,9 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # This differs from `ndarray.dot`, in that 1D dot 1D returns a 0D array. @overload - def dot(self, b: ArrayLike, out: None = ..., strict: bool = ...) -> _MaskedArray[Any]: ... 
+ def dot(self, b: ArrayLike, out: None = None, strict: bool = False) -> _MaskedArray[Any]: ... @overload - def dot(self, b: ArrayLike, out: _ArrayT, strict: bool = ...) -> _ArrayT: ... + def dot(self, b: ArrayLike, out: _ArrayT, strict: bool = False) -> _ArrayT: ... @overload def sum( @@ -2043,7 +2043,7 @@ class mvoid(MaskedArray[_ShapeT_co, _DTypeT_co]): def __setitem__(self, indx, value): ... def __iter__(self): ... def __len__(self): ... - def filled(self, fill_value=...): ... + def filled(self, fill_value=None): ... def tolist(self): ... def isMaskedArray(x): ... @@ -2076,16 +2076,16 @@ masked_array = MaskedArray def array( data, - dtype=..., - copy=..., - order=..., + dtype=None, + copy=False, + order=None, mask=..., - fill_value=..., - keep_mask=..., - hard_mask=..., - shrink=..., - subok=..., - ndmin=..., + fill_value=None, + keep_mask=True, + hard_mask=False, + shrink=True, + subok=True, + ndmin=0, ): ... def is_masked(x: object) -> bool: ... @@ -2370,8 +2370,8 @@ def take( mode: _ModeKind = "raise", ) -> _ArrayT: ... -def power(a, b, third=...): ... -def argsort(a, axis=..., kind=..., order=..., endwith=..., fill_value=..., *, stable=...): ... +def power(a, b, third=None): ... +def argsort(a, axis=..., kind=None, order=None, endwith=True, fill_value=None, *, stable=None): ... @overload def sort( a: _ArrayT, @@ -2398,22 +2398,22 @@ def sort( def compressed(x: _ArrayLike[_ScalarT_co]) -> _Array1D[_ScalarT_co]: ... @overload def compressed(x: ArrayLike) -> _Array1D[Any]: ... -def concatenate(arrays, axis=...): ... -def diag(v, k=...): ... +def concatenate(arrays, axis=0): ... +def diag(v, k=0): ... def left_shift(a, n): ... def right_shift(a, n): ... def put(a: NDArray[Any], indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... def putmask(a: NDArray[Any], mask: _ArrayLikeBool_co, values: ArrayLike) -> None: ... -def transpose(a, axes=...): ... -def reshape(a, new_shape, order=...): ... +def transpose(a, axes=None): ... +def reshape(a, new_shape, order="C"): ... def resize(x, new_shape): ... def ndim(obj: ArrayLike) -> int: ... def shape(obj): ... def size(obj: ArrayLike, axis: SupportsIndex | None = None) -> int: ... -def diff(a, /, n=..., axis=..., prepend=..., append=...): ... +def diff(a, /, n=1, axis=-1, prepend=..., append=...): ... def where(condition, x=..., y=...): ... -def choose(indices, choices, out=..., mode=...): ... -def round_(a, decimals=..., out=...): ... +def choose(indices, choices, out=None, mode="raise"): ... +def round_(a, decimals=0, out=None): ... round = round_ def inner(a, b): ... @@ -2422,15 +2422,15 @@ innerproduct = inner def outer(a, b): ... outerproduct = outer -def correlate(a, v, mode=..., propagate_mask=...): ... -def convolve(a, v, mode=..., propagate_mask=...): ... +def correlate(a, v, mode="valid", propagate_mask=True): ... +def convolve(a, v, mode="full", propagate_mask=True): ... def allequal(a: ArrayLike, b: ArrayLike, fill_value: bool = True) -> bool: ... def allclose(a: ArrayLike, b: ArrayLike, masked_equal: bool = True, rtol: float = 1e-5, atol: float = 1e-8) -> bool: ... -def asarray(a, dtype=..., order=...): ... -def asanyarray(a, dtype=...): ... +def asarray(a, dtype=None, order=None): ... +def asanyarray(a, dtype=None): ... def fromflex(fxarray): ... class _convert2ma: @@ -2452,6 +2452,6 @@ squeeze: _convert2ma zeros: _convert2ma zeros_like: _convert2ma -def append(a, b, axis=...): ... -def dot(a, b, strict=..., out=...): ... +def append(a, b, axis=None): ... +def dot(a, b, strict=False, out=None): ... 
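# Usage sketch for the `strict` default spelled out above (illustrative):
# `np.ma.dot(a, b)` fills masked entries with 0 before taking the product,
# while `np.ma.dot(a, b, strict=True)` propagates the mask into the result.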
def mask_rowcols(a, axis=...): ... diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index c3f9fcde4a0a..5a7b2b399ea8 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -55,8 +55,8 @@ __all__ = [ "vstack", ] -def count_masked(arr, axis=...): ... -def masked_all(shape, dtype=...): ... +def count_masked(arr, axis=None): ... +def masked_all(shape, dtype=float): ... # noqa: PYI014 def masked_all_like(arr): ... class _fromnxfunction: @@ -91,23 +91,23 @@ diagflat: _fromnxfunction_single def apply_along_axis(func1d, axis, arr, *args, **kwargs): ... def apply_over_axes(func, a, axes): ... -def median(a, axis=..., out=..., overwrite_input=..., keepdims=...): ... -def compress_nd(x, axis=...): ... -def compress_rowcols(x, axis=...): ... +def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): ... +def compress_nd(x, axis=None): ... +def compress_rowcols(x, axis=None): ... def compress_rows(a): ... def compress_cols(a): ... def mask_rows(a, axis=...): ... def mask_cols(a, axis=...): ... -def ediff1d(arr, to_end=..., to_begin=...): ... -def unique(ar1, return_index=..., return_inverse=...): ... -def intersect1d(ar1, ar2, assume_unique=...): ... -def setxor1d(ar1, ar2, assume_unique=...): ... -def in1d(ar1, ar2, assume_unique=..., invert=...): ... -def isin(element, test_elements, assume_unique=..., invert=...): ... +def ediff1d(arr, to_end=None, to_begin=None): ... +def unique(ar1, return_index=False, return_inverse=False): ... +def intersect1d(ar1, ar2, assume_unique=False): ... +def setxor1d(ar1, ar2, assume_unique=False): ... +def in1d(ar1, ar2, assume_unique=False, invert=False): ... +def isin(element, test_elements, assume_unique=False, invert=False): ... def union1d(ar1, ar2): ... -def setdiff1d(ar1, ar2, assume_unique=...): ... -def cov(x, y=..., rowvar=..., bias=..., allow_masked=..., ddof=...): ... -def corrcoef(x, y=..., rowvar=..., bias=..., allow_masked=..., ddof=...): ... +def setdiff1d(ar1, ar2, assume_unique=False): ... +def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): ... +def corrcoef(x, y=None, rowvar=True, bias=..., allow_masked=True, ddof=...): ... class MAxisConcatenator(AxisConcatenator): @staticmethod @@ -120,15 +120,15 @@ class mr_class(MAxisConcatenator): mr_: mr_class -def ndenumerate(a, compressed=...): ... +def ndenumerate(a, compressed=True): ... def flatnotmasked_edges(a): ... -def notmasked_edges(a, axis=...): ... +def notmasked_edges(a, axis=None): ... def flatnotmasked_contiguous(a): ... -def notmasked_contiguous(a, axis=...): ... +def notmasked_contiguous(a, axis=None): ... def clump_unmasked(a): ... def clump_masked(a): ... -def vander(x, n=...): ... -def polyfit(x, y, deg, rcond=..., full=..., w=..., cov=...): ... +def vander(x, n=None): ... +def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): ... # def mask_rowcols(a: Incomplete, axis: Incomplete | None = None) -> MaskedArray[Incomplete, np.dtype[Incomplete]]: ... diff --git a/numpy/ma/mrecords.pyi b/numpy/ma/mrecords.pyi index cae687aa7d1a..c1f3592b0dd6 100644 --- a/numpy/ma/mrecords.pyi +++ b/numpy/ma/mrecords.pyi @@ -48,49 +48,49 @@ class MaskedRecords(MaskedArray[_ShapeT_co, _DTypeT_co]): def __setattr__(self, attr, val): ... def __getitem__(self, indx): ... def __setitem__(self, indx, value): ... - def view(self, dtype=..., type=...): ... + def view(self, dtype=None, type=None): ... def harden_mask(self): ... def soften_mask(self): ... def copy(self): ... - def tolist(self, fill_value=...): ... + def tolist(self, fill_value=None): ... 
def __reduce__(self): ... mrecarray = MaskedRecords def fromarrays( arraylist, - dtype=..., - shape=..., - formats=..., - names=..., - titles=..., - aligned=..., - byteorder=..., - fill_value=..., + dtype=None, + shape=None, + formats=None, + names=None, + titles=None, + aligned=False, + byteorder=None, + fill_value=None, ): ... def fromrecords( reclist, - dtype=..., - shape=..., - formats=..., - names=..., - titles=..., - aligned=..., - byteorder=..., - fill_value=..., + dtype=None, + shape=None, + formats=None, + names=None, + titles=None, + aligned=False, + byteorder=None, + fill_value=None, mask=..., ): ... def fromtextfile( fname, - delimiter=..., - commentchar=..., - missingchar=..., - varnames=..., - vartypes=..., + delimiter=None, + commentchar="#", + missingchar="", + varnames=None, + vartypes=None, # NOTE: deprecated: NumPy 1.22.0, 2021-09-23 # delimitor=..., ): ... -def addfield(mrecord, newfield, newfieldname=...): ... +def addfield(mrecord, newfield, newfieldname=None): ... From 71a1b5ca6cfc573acbbda1ebe67c28a9f0eddf4d Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 18 Aug 2025 09:41:38 -0600 Subject: [PATCH 0312/1018] DEP: Deprecate NumPy warning control utilities (#29550) * DEP: Deprecate NumPy warning control utilities * TST: fix mypy tests * DEP: un-deprecate assert_no_warnings, which is thread-safe * DEP: un-deprecate clear_and_catch_warnings * TYP: update type stubs * STY: appease ruff * TST: update doctest deprecation filters * TST: fix warning test * DOC: add release note * Update numpy/tests/test_warnings.py * MNT: use stacklevel instead * TST: use pytest.deprecated_call() * MNT: fix deprecated version * Apply suggestions from code review Co-authored-by: Sebastian Berg --------- Co-authored-by: Sebastian Berg --- .../upcoming_changes/29550.deprecation.rst | 6 ++++ numpy/_core/tests/test_deprecations.py | 19 +++++++++++ numpy/conftest.py | 4 ++- numpy/testing/_private/utils.py | 25 ++++++++++++-- numpy/testing/_private/utils.pyi | 5 ++- numpy/testing/tests/test_utils.py | 34 +++++++++++++++---- numpy/typing/tests/data/pass/ndarray_misc.py | 10 +++--- numpy/typing/tests/data/reveal/testing.pyi | 10 +++--- 8 files changed, 94 insertions(+), 19 deletions(-) create mode 100644 doc/release/upcoming_changes/29550.deprecation.rst diff --git a/doc/release/upcoming_changes/29550.deprecation.rst b/doc/release/upcoming_changes/29550.deprecation.rst new file mode 100644 index 000000000000..ce35477c5010 --- /dev/null +++ b/doc/release/upcoming_changes/29550.deprecation.rst @@ -0,0 +1,6 @@ +Assertion and warning control utilities are deprecated +------------------------------------------------------ + +`np.testing.assert_warns` and `np.testing.suppress_warnings` are deprecated. +Use `warnings.catch_warnings`, `warnings.filterwarnings`, ``pytest.warns``, or +``pytest.filterwarnings`` instead. diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 75f092a51808..9be253f7c95c 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -516,3 +516,22 @@ def assign_to_index(): arr.flat[[1.]] = 10 self.assert_deprecated(assign_to_index) + + +class TestWarningUtilityDeprecations(_DeprecationTestCase): + # Deprecation in NumPy 2.4, 2025-08 + message = r"NumPy warning suppression and assertion utilities are deprecated." 
+ + def test_assert_warns_deprecated(self): + def use_assert_warns(): + with np.testing.assert_warns(RuntimeWarning): + warnings.warn("foo", RuntimeWarning, stacklevel=1) + + self.assert_deprecated(use_assert_warns) + + def test_suppress_warnings_deprecated(self): + def use_suppress_warnings(): + with np.testing.suppress_warnings() as sup: + sup.filter(RuntimeWarning, 'invalid value encountered in divide') + + self.assert_deprecated(use_suppress_warnings) diff --git a/numpy/conftest.py b/numpy/conftest.py index fde4defc926d..1454b4af59de 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -170,7 +170,9 @@ def warnings_errors_and_rng(test=None): "This function is deprecated.", # random_integers "Data type alias 'a'", # numpy.rec.fromfile "Arrays of 2-dimensional vectors", # matlib.cross - "`in1d` is deprecated", ] + "`in1d` is deprecated", + "NumPy warning suppression and assertion utilities are deprecated." + ] msg = "|".join(msgs) msgs_r = [ diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index e4a74e69afb0..9be98f9d2fbe 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -1985,7 +1985,7 @@ def integer_repr(x): @contextlib.contextmanager def _assert_warns_context(warning_class, name=None): __tracebackhide__ = True # Hide traceback for py.test - with suppress_warnings() as sup: + with suppress_warnings(_warn=False) as sup: l = sup.record(warning_class) yield if not len(l) > 0: @@ -2009,6 +2009,11 @@ def assert_warns(warning_class, *args, **kwargs): The ability to be used as a context manager is new in NumPy v1.11.0. + .. deprecated:: 2.4 + + This is deprecated. Use `warnings.catch_warnings` or + ``pytest.warns`` instead. + Parameters ---------- warning_class : class @@ -2036,6 +2041,11 @@ def assert_warns(warning_class, *args, **kwargs): >>> ret = np.testing.assert_warns(DeprecationWarning, deprecated_func, 4) >>> assert ret == 16 """ + warnings.warn( + "NumPy warning suppression and assertion utilities are deprecated. " + "Use warnings.catch_warnings, warnings.filterwarnings, pytest.warns, " + "or pytest.filterwarnings instead. (Deprecated NumPy 2.4)", + DeprecationWarning, stacklevel=2) if not args and not kwargs: return _assert_warns_context(warning_class) elif len(args) < 1: @@ -2288,6 +2298,11 @@ class suppress_warnings: tests might need to see the warning. Additionally it allows easier specificity for testing warnings and can be nested. + .. deprecated:: 2.4 + + This is deprecated. Use `warnings.filterwarnings` or + ``pytest.filterwarnings`` instead. + Parameters ---------- forwarding_rule : str, optional @@ -2348,7 +2363,13 @@ def some_function(): # do something which causes a warning in np.ma.core pass """ - def __init__(self, forwarding_rule="always"): + def __init__(self, forwarding_rule="always", _warn=True): + if _warn: + warnings.warn( + "NumPy warning suppression and assertion utilities are deprecated. " + "Use warnings.catch_warnings, warnings.filterwarnings, pytest.warns, " + "or pytest.filterwarnings instead. 
(Deprecated NumPy 2.4)", + DeprecationWarning, stacklevel=2) self._entered = False # Suppressions are either instance or defined inside one with block: diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index d5584e511796..76455df87eb0 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -24,7 +24,7 @@ from typing import ( overload, type_check_only, ) -from typing_extensions import TypeVar +from typing_extensions import TypeVar, deprecated from unittest.case import SkipTest import numpy as np @@ -148,6 +148,7 @@ class clear_and_catch_warnings(warnings.catch_warnings[_W_co], Generic[_W_co]): @overload # record; bool def __init__(self, /, record: bool, modules: _ToModules = ()) -> None: ... +@deprecated("Please use warnings.filterwarnings or pytest.mark.filterwarnings instead") class suppress_warnings: log: Final[_WarnLog] def __init__(self, /, forwarding_rule: L["always", "module", "once", "location"] = "always") -> None: ... @@ -358,8 +359,10 @@ def assert_array_max_ulp( ) -> NDArray[Any]: ... # +@deprecated("Please use warnings.catch_warnings or pytest.warns instead") @overload def assert_warns(warning_class: _WarningSpec) -> _GeneratorContextManager[None]: ... +@deprecated("Please use warnings.catch_warnings or pytest.warns instead") @overload def assert_warns(warning_class: _WarningSpec, func: Callable[_Tss, _T], *args: _Tss.args, **kwargs: _Tss.kwargs) -> _T: ... diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index 07f2c9da3005..5e1c625955bb 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -1140,7 +1140,9 @@ def test_strict(self): with pytest.raises(AssertionError): self._assert_func(x, y.astype(np.float32), strict=True) - +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") class TestWarns: def test_warn(self): @@ -1787,6 +1789,9 @@ def test_clear_and_catch_warnings(): assert_warn_len_equal(my_mod, 0) +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") def test_suppress_warnings_module(): # Initial state of module, no warnings my_mod = _get_fresh_mod() @@ -1833,6 +1838,9 @@ def warn(arr): assert_warn_len_equal(my_mod, 0) +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") def test_suppress_warnings_type(): # Initial state of module, no warnings my_mod = _get_fresh_mod() @@ -1861,6 +1869,9 @@ def test_suppress_warnings_type(): assert_warn_len_equal(my_mod, 0) +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") def test_suppress_warnings_decorate_no_record(): sup = suppress_warnings() sup.filter(UserWarning) @@ -1876,6 +1887,9 @@ def warn(category): assert_equal(len(w), 1) +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") def test_suppress_warnings_record(): sup = suppress_warnings() log1 = sup.record() @@ -1913,9 +1927,13 @@ def test_suppress_warnings_record(): warnings.warn('Some warning') warnings.warn('Some other warning') assert_equal(len(sup2.log), 1) - assert_equal(len(sup.log), 1) + # includes a DeprecationWarning for suppress_warnings + assert_equal(len(sup.log), 2) +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning 
suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") def test_suppress_warnings_forwarding(): def warn_other_module(): # Apply along axis is implemented in python; stacklevel=2 means @@ -1931,7 +1949,8 @@ def warn(arr): for i in range(2): warnings.warn("Some warning") - assert_equal(len(sup.log), 2) + # includes a DeprecationWarning for suppress_warnings + assert_equal(len(sup.log), 3) with suppress_warnings() as sup: sup.record() @@ -1940,7 +1959,8 @@ def warn(arr): warnings.warn("Some warning") warnings.warn("Some warning") - assert_equal(len(sup.log), 2) + # includes a DeprecationWarning for suppress_warnings + assert_equal(len(sup.log), 3) with suppress_warnings() as sup: sup.record() @@ -1950,7 +1970,8 @@ def warn(arr): warnings.warn("Some warning") warn_other_module() - assert_equal(len(sup.log), 2) + # includes a DeprecationWarning for suppress_warnings + assert_equal(len(sup.log), 3) with suppress_warnings() as sup: sup.record() @@ -1960,7 +1981,8 @@ def warn(arr): warnings.warn("Some other warning") warn_other_module() - assert_equal(len(sup.log), 2) + # includes a DeprecationWarning for suppress_warnings + assert_equal(len(sup.log), 3) def test_tempdir(): diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py index 30ad270edd6e..4d4e1f872763 100644 --- a/numpy/typing/tests/data/pass/ndarray_misc.py +++ b/numpy/typing/tests/data/pass/ndarray_misc.py @@ -11,6 +11,8 @@ import operator from typing import Any, cast +import pytest + import numpy as np import numpy.typing as npt @@ -190,11 +192,11 @@ class IntSubClass(npt.NDArray[np.intp]): ... # deprecated -with np.testing.assert_warns(DeprecationWarning): +with pytest.deprecated_call(): ctypes_obj.get_data() # type: ignore[deprecated] # pyright: ignore[reportDeprecated] -with np.testing.assert_warns(DeprecationWarning): +with pytest.deprecated_call(): ctypes_obj.get_shape() # type: ignore[deprecated] # pyright: ignore[reportDeprecated] -with np.testing.assert_warns(DeprecationWarning): +with pytest.deprecated_call(): ctypes_obj.get_strides() # type: ignore[deprecated] # pyright: ignore[reportDeprecated] -with np.testing.assert_warns(DeprecationWarning): +with pytest.deprecated_call(): ctypes_obj.get_as_parameter() # type: ignore[deprecated] # pyright: ignore[reportDeprecated] diff --git a/numpy/typing/tests/data/reveal/testing.pyi b/numpy/typing/tests/data/reveal/testing.pyi index 52d9ef6b3a5d..1533cd27955b 100644 --- a/numpy/typing/tests/data/reveal/testing.pyi +++ b/numpy/typing/tests/data/reveal/testing.pyi @@ -15,7 +15,7 @@ AR_f8: npt.NDArray[np.float64] AR_i8: npt.NDArray[np.int64] bool_obj: bool -suppress_obj: np.testing.suppress_warnings +suppress_obj: np.testing.suppress_warnings # type: ignore[deprecated] # pyright: ignore[reportDeprecated] FT = TypeVar("FT", bound=Callable[..., Any]) def func() -> int: ... 
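# A migration sketch away from the deprecated helpers exercised below
# (assuming pytest is available; the warning message is illustrative):
#
#     import warnings
#     import pytest
#
#     with pytest.warns(RuntimeWarning):   # replaces np.testing.assert_warns
#         warnings.warn("boom", RuntimeWarning)
#
#     with warnings.catch_warnings():      # replaces np.testing.suppress_warnings
#         warnings.simplefilter("ignore", RuntimeWarning)
#         ...  # code whose RuntimeWarnings should be suppressed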
@@ -58,12 +58,12 @@ with np.testing.clear_and_catch_warnings(True) as c1: with np.testing.clear_and_catch_warnings() as c2: assert_type(c2, None) -assert_type(np.testing.suppress_warnings("once"), np.testing.suppress_warnings) -assert_type(np.testing.suppress_warnings()(func), Callable[[], int]) +assert_type(np.testing.suppress_warnings("once"), np.testing.suppress_warnings) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +assert_type(np.testing.suppress_warnings()(func), Callable[[], int]) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] assert_type(suppress_obj.filter(RuntimeWarning), None) assert_type(suppress_obj.record(RuntimeWarning), list[warnings.WarningMessage]) with suppress_obj as c3: - assert_type(c3, np.testing.suppress_warnings) + assert_type(c3, np.testing.suppress_warnings) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] assert_type(np.testing.verbose, int) assert_type(np.testing.IS_PYPY, bool) @@ -172,7 +172,7 @@ assert_type(np.testing.assert_array_almost_equal_nulp(AR_i8, AR_f8, nulp=2), Non assert_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, maxulp=2), npt.NDArray[Any]) assert_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, dtype=np.float32), npt.NDArray[Any]) -assert_type(np.testing.assert_warns(RuntimeWarning), contextlib._GeneratorContextManager[None]) +assert_type(np.testing.assert_warns(RuntimeWarning), contextlib._GeneratorContextManager[None]) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] assert_type(np.testing.assert_warns(RuntimeWarning, func3, 5), bool) def func4(a: int, b: str) -> bool: ... From 4b0a702a02f5090701980b7e0ac78c37e10b06a6 Mon Sep 17 00:00:00 2001 From: mattip Date: Mon, 18 Aug 2025 21:00:51 +0300 Subject: [PATCH 0313/1018] TST: update link and version for Intel SDE download --- .github/workflows/linux_simd.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index a9f065e25cc0..9f12215eb30e 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -202,7 +202,7 @@ jobs: - name: Install Intel SDE run: | - curl -o /tmp/sde.tar.xz https://downloadmirror.intel.com/788820/sde-external-9.27.0-2023-09-13-lin.tar.xz + curl -o /tmp/sde.tar.xz https://downloadmirror.intel.com/859732/sde-external-9.58.0-2025-06-16-lin.tar.xz mkdir /tmp/sde && tar -xvf /tmp/sde.tar.xz -C /tmp/sde/ sudo mv /tmp/sde/* /opt/sde && sudo ln -s /opt/sde/sde64 /usr/bin/sde @@ -253,7 +253,7 @@ jobs: - name: Install Intel SDE run: | - curl -o /tmp/sde.tar.xz https://downloadmirror.intel.com/788820/sde-external-9.27.0-2023-09-13-lin.tar.xz + curl -o /tmp/sde.tar.xz https://downloadmirror.intel.com/859732/sde-external-9.58.0-2025-06-16-lin.tar.xz mkdir /tmp/sde && tar -xvf /tmp/sde.tar.xz -C /tmp/sde/ sudo mv /tmp/sde/* /opt/sde && sudo ln -s /opt/sde/sde64 /usr/bin/sde From 111fe76ebf9e3dc528a2d808626808bd9601938d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20G=C3=B3rny?= Date: Mon, 18 Aug 2025 20:55:31 +0200 Subject: [PATCH 0314/1018] BLD: wire up `ASIMDDP` feature to `ARM_FEATURES` MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add the `ASIMDDP` feature to the `ARM_FEATURES` dictionary, to fix an error when it is explicitly listed in `cpu-baseline`. 
Fixes #29570 Signed-off-by: Michał Górny --- meson_cpu/arm/meson.build | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meson_cpu/arm/meson.build b/meson_cpu/arm/meson.build index 7ffa3ef58ed0..5478e52cdcea 100644 --- a/meson_cpu/arm/meson.build +++ b/meson_cpu/arm/meson.build @@ -60,6 +60,6 @@ SVE = mod_features.new( # TODO: Add support for MSVC ARM_FEATURES = { 'NEON': NEON, 'NEON_FP16': NEON_FP16, 'NEON_VFPV4': NEON_VFPV4, - 'ASIMD': ASIMD, 'ASIMDHP': ASIMDHP, 'ASIMDFHM': ASIMDFHM, + 'ASIMD': ASIMD, 'ASIMDHP': ASIMDHP, 'ASIMDDP': ASIMDDP, 'ASIMDFHM': ASIMDFHM, 'SVE': SVE } From e868634f6110aaa65bc6568e4d1967cb0757c1b4 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 18 Aug 2025 23:47:32 +0200 Subject: [PATCH 0315/1018] TYP: add ``sorted`` kwarg to ``unique`` --- numpy/lib/_arraysetops_impl.pyi | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/numpy/lib/_arraysetops_impl.pyi b/numpy/lib/_arraysetops_impl.pyi index c0291680a8ec..6ca3ed8282b6 100644 --- a/numpy/lib/_arraysetops_impl.pyi +++ b/numpy/lib/_arraysetops_impl.pyi @@ -108,6 +108,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> NDArray[_ScalarT]: ... @overload # unknown scalar-type, FFF def unique( @@ -118,6 +119,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> _AnyArray: ... @overload # known scalar-type, TFF def unique( @@ -128,6 +130,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[NDArray[_ScalarT], _IntArray]: ... @overload # unknown scalar-type, TFF def unique( @@ -138,6 +141,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[_AnyArray, _IntArray]: ... @overload # known scalar-type, FTF (positional) def unique( @@ -148,6 +152,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[NDArray[_ScalarT], _IntArray]: ... @overload # known scalar-type, FTF (keyword) def unique( @@ -158,6 +163,7 @@ def unique( return_counts: L[False] = False, axis: SupportsIndex | None = None, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[NDArray[_ScalarT], _IntArray]: ... @overload # unknown scalar-type, FTF (positional) def unique( @@ -168,6 +174,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[_AnyArray, _IntArray]: ... @overload # unknown scalar-type, FTF (keyword) def unique( @@ -178,6 +185,7 @@ def unique( return_counts: L[False] = False, axis: SupportsIndex | None = None, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[_AnyArray, _IntArray]: ... @overload # known scalar-type, FFT (positional) def unique( @@ -188,6 +196,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[NDArray[_ScalarT], _IntArray]: ... @overload # known scalar-type, FFT (keyword) def unique( @@ -198,6 +207,7 @@ def unique( return_counts: L[True], axis: SupportsIndex | None = None, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[NDArray[_ScalarT], _IntArray]: ... @overload # unknown scalar-type, FFT (positional) def unique( @@ -208,6 +218,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[_AnyArray, _IntArray]: ... 
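# Usage sketch (illustrative): `sorted` only affects the order of the
# returned uniques, not the types, so with these stubs
# `values, counts = np.unique(a, return_counts=True, sorted=False)`
# still reveals `tuple[NDArray[Any], NDArray[np.intp]]` for an untyped `a`.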
@overload # unknown scalar-type, FFT (keyword) def unique( @@ -218,6 +229,7 @@ def unique( return_counts: L[True], axis: SupportsIndex | None = None, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[_AnyArray, _IntArray]: ... @overload # known scalar-type, TTF def unique( @@ -228,6 +240,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... @overload # unknown scalar-type, TTF def unique( @@ -238,6 +251,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[_AnyArray, _IntArray, _IntArray]: ... @overload # known scalar-type, TFT (positional) def unique( @@ -248,6 +262,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... @overload # known scalar-type, TFT (keyword) def unique( @@ -258,6 +273,7 @@ def unique( return_counts: L[True], axis: SupportsIndex | None = None, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... @overload # unknown scalar-type, TFT (positional) def unique( @@ -268,6 +284,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[_AnyArray, _IntArray, _IntArray]: ... @overload # unknown scalar-type, TFT (keyword) def unique( @@ -278,6 +295,7 @@ def unique( return_counts: L[True], axis: SupportsIndex | None = None, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[_AnyArray, _IntArray, _IntArray]: ... @overload # known scalar-type, FTT (positional) def unique( @@ -288,6 +306,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... @overload # known scalar-type, FTT (keyword) def unique( @@ -298,6 +317,7 @@ def unique( return_counts: L[True], axis: SupportsIndex | None = None, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... @overload # unknown scalar-type, FTT (positional) def unique( @@ -308,6 +328,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[_AnyArray, _IntArray, _IntArray]: ... @overload # unknown scalar-type, FTT (keyword) def unique( @@ -318,6 +339,7 @@ def unique( return_counts: L[True], axis: SupportsIndex | None = None, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[_AnyArray, _IntArray, _IntArray]: ... @overload # known scalar-type, TTT def unique( @@ -328,6 +350,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray, _IntArray]: ... @overload # unknown scalar-type, TTT def unique( @@ -338,6 +361,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[_AnyArray, _IntArray, _IntArray, _IntArray]: ... # From 720d35b53f92d4975821d58e391f019524b4d721 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Aug 2025 03:09:29 +0000 Subject: [PATCH 0316/1018] MAINT: Bump actions/dependency-review-action from 4.7.1 to 4.7.2 Bumps [actions/dependency-review-action](https://github.com/actions/dependency-review-action) from 4.7.1 to 4.7.2. 
From 720d35b53f92d4975821d58e391f019524b4d721 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 19 Aug 2025 03:09:29 +0000
Subject: [PATCH 0316/1018] MAINT: Bump actions/dependency-review-action from
 4.7.1 to 4.7.2

Bumps [actions/dependency-review-action](https://github.com/actions/dependency-review-action) from 4.7.1 to 4.7.2.
- [Release notes](https://github.com/actions/dependency-review-action/releases)
- [Commits](https://github.com/actions/dependency-review-action/compare/da24556b548a50705dd671f47852072ea4c105d9...bc41886e18ea39df68b1b1245f4184881938e050)

---
updated-dependencies:
- dependency-name: actions/dependency-review-action
  dependency-version: 4.7.2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
---
 .github/workflows/dependency-review.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml
index 158a826825f1..248b69a0d939 100644
--- a/.github/workflows/dependency-review.yml
+++ b/.github/workflows/dependency-review.yml
@@ -19,6 +19,6 @@ jobs:
         with:
           persist-credentials: false
       - name: 'Dependency Review'
-        uses: actions/dependency-review-action@da24556b548a50705dd671f47852072ea4c105d9 # v4.7.1
+        uses: actions/dependency-review-action@bc41886e18ea39df68b1b1245f4184881938e050 # v4.7.2
        with:
          allow-ghsas: GHSA-cx63-2mw6-8hw5

From 5015557f7da13fea17e7cbbad321937cb56e1078 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 19 Aug 2025 03:29:30 +0000
Subject: [PATCH 0317/1018] MAINT: Bump github/codeql-action from 3.29.9 to
 3.29.10

Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.9 to 3.29.10.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](https://github.com/github/codeql-action/compare/df559355d593797519d70b90fc8edd5db049e7a2...96f518a34f7a870018057716cc4d7a5c014bd61c)

---
updated-dependencies:
- dependency-name: github/codeql-action
  dependency-version: 3.29.10
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
---
 .github/workflows/codeql.yml     | 6 +++---
 .github/workflows/scorecards.yml | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index 062bc56f5e3a..dce656f39b37 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -47,7 +47,7 @@ jobs:

     # Initializes the CodeQL tools for scanning.
     - name: Initialize CodeQL
-      uses: github/codeql-action/init@df559355d593797519d70b90fc8edd5db049e7a2 # v3.29.9
+      uses: github/codeql-action/init@96f518a34f7a870018057716cc4d7a5c014bd61c # v3.29.10
       with:
         languages: ${{ matrix.language }}
         # If you wish to specify custom queries, you can do so here or in a config file.
@@ -57,7 +57,7 @@ jobs:

     # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
     # If this step fails, then you should remove it and run the build manually (see below)
     - name: Autobuild
-      uses: github/codeql-action/autobuild@df559355d593797519d70b90fc8edd5db049e7a2 # v3.29.9
+      uses: github/codeql-action/autobuild@96f518a34f7a870018057716cc4d7a5c014bd61c # v3.29.10

     # ℹ️ Command-line programs to run using the OS shell.
     # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
@@ -70,6 +70,6 @@ jobs:
     #    ./location_of_script_within_repo/buildscript.sh

     - name: Perform CodeQL Analysis
-      uses: github/codeql-action/analyze@df559355d593797519d70b90fc8edd5db049e7a2 # v3.29.9
+      uses: github/codeql-action/analyze@96f518a34f7a870018057716cc4d7a5c014bd61c # v3.29.10
       with:
         category: "/language:${{matrix.language}}"

diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index f52a41787c77..cecb8d3eef4a 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -50,6 +50,6 @@ jobs:

       # Upload the results to GitHub's code scanning dashboard.
       - name: "Upload to code-scanning"
-        uses: github/codeql-action/upload-sarif@df559355d593797519d70b90fc8edd5db049e7a2 # v2.1.27
+        uses: github/codeql-action/upload-sarif@96f518a34f7a870018057716cc4d7a5c014bd61c # v3.29.10
        with:
          sarif_file: results.sarif

From 5ba8c96e5853fe538bbe5d1a15d64a24e20b54a9 Mon Sep 17 00:00:00 2001
From: lvllvl <24905907+lvllvl@users.noreply.github.com>
Date: Wed, 20 Aug 2025 04:38:15 -0400
Subject: [PATCH 0318/1018] DEP: show warning when np.maximum receives more
 than 2 inputs (#29052)

Deprecate passing the output array positionally (i.e. without `out=`) to
np.minimum and np.maximum, since it is a common source of confusion.
---
 .../upcoming_changes/29052.deprecation.rst | 10 ++++++++++
 numpy/_core/src/umath/ufunc_object.c       | 16 ++++++++++++++++
 numpy/_core/tests/test_deprecations.py     |  9 +++++++++
 3 files changed, 35 insertions(+)
 create mode 100644 doc/release/upcoming_changes/29052.deprecation.rst

diff --git a/doc/release/upcoming_changes/29052.deprecation.rst b/doc/release/upcoming_changes/29052.deprecation.rst
new file mode 100644
index 000000000000..e302907abfba
--- /dev/null
+++ b/doc/release/upcoming_changes/29052.deprecation.rst
@@ -0,0 +1,10 @@
+Positional ``out`` argument to `np.maximum`, `np.minimum` is deprecated
+-----------------------------------------------------------------------
+Passing the output array ``out`` positionally to `numpy.maximum` and
+`numpy.minimum` is deprecated. For example, ``np.maximum(a, b, c)`` will
+emit a deprecation warning, since ``c`` is treated as the output buffer
+rather than a third input.
+
+Always pass the output with the keyword form, e.g.
+``np.maximum(a, b, out=c)``. This makes intent clear and simplifies
+type annotations.
diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c
index 1d2c3edbd3b9..6b8cc1789f94 100644
--- a/numpy/_core/src/umath/ufunc_object.c
+++ b/numpy/_core/src/umath/ufunc_object.c
@@ -65,6 +65,7 @@
 #include "mapping.h"
 #include "npy_static_data.h"
 #include "multiarraymodule.h"
+#include "number.h"

 /********** PRINTF DEBUG TRACING **************/
 #define NPY_UF_DBG_TRACING 0
@@ -4368,6 +4369,21 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc,
             Py_INCREF(tmp);
             PyTuple_SET_ITEM(full_args.out, i-nin, tmp);
         }
+
+        /* Extra positional args but no keywords */
+        /* DEPRECATED NumPy 2.4, 2025-08 */
+        if ((PyObject *)ufunc == n_ops.maximum || (PyObject *)ufunc == n_ops.minimum) {
+
+            if (DEPRECATE(
+                    "Passing more than 2 positional arguments to np.maximum and np.minimum "
+                    "is deprecated. If you meant to use the third argument as an output, "
+                    "use the `out` keyword argument instead. If you meant to operate on "
+                    "more than 2 inputs, combine them into a single array and take the "
+                    "extrema along the relevant axis.") < 0) {
+                return NULL;
+            }
+        }
+
         if (all_none) {
             Py_SETREF(full_args.out, NULL);
         }
diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py
index 9be253f7c95c..252189ef321a 100644
--- a/numpy/_core/tests/test_deprecations.py
+++ b/numpy/_core/tests/test_deprecations.py
@@ -535,3 +535,12 @@ def use_suppress_warnings():
             sup.filter(RuntimeWarning, 'invalid value encountered in divide')

         self.assert_deprecated(use_suppress_warnings)
+
+
+class TestTooManyArgsExtremum(_DeprecationTestCase):
+    # Deprecated in NumPy 2.4, 2025-08, gh-27639
+    message = "Passing more than 2 positional arguments to np.maximum and np.minimum "
+
+    @pytest.mark.parametrize("ufunc", [np.minimum, np.maximum])
+    def test_extremum_3_args(self, ufunc):
+        self.assert_deprecated(ufunc, args=(np.ones(1), np.zeros(1), np.empty(1)))

From a1d1dadcddfa635912ae446a1a8ffd0fd99d2da3 Mon Sep 17 00:00:00 2001
From: Shirong_Wang
Date: Wed, 20 Aug 2025 23:13:53 +0800
Subject: [PATCH 0319/1018] DOC: fix for f2py migrating-to-meson (#29601)

---
 doc/source/f2py/buildtools/distutils-to-meson.rst | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/doc/source/f2py/buildtools/distutils-to-meson.rst b/doc/source/f2py/buildtools/distutils-to-meson.rst
index b24638e62239..32f2b599f6e5 100644
--- a/doc/source/f2py/buildtools/distutils-to-meson.rst
+++ b/doc/source/f2py/buildtools/distutils-to-meson.rst
@@ -117,14 +117,12 @@ sample is included below.
    +------------------------------------+-------------------------------+
    | LDFLAGS                            | Linker options                |
    +------------------------------------+-------------------------------+
-   | LD\_LIBRARY\_PATH                  | Library file locations (Unix) |
+   | LD_LIBRARY_PATH                    | Library file locations (Unix) |
    +------------------------------------+-------------------------------+
    | LIBS                               | Libraries to link against     |
    +------------------------------------+-------------------------------+
    | PATH                               | Search path for executables   |
    +------------------------------------+-------------------------------+
-   | LDFLAGS                            | Linker flags                  |
-   +------------------------------------+-------------------------------+
    | CXX                                | C++ compiler                  |
    +------------------------------------+-------------------------------+
    | CXXFLAGS                           | C++ compiler options          |
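[Note: to make the environment-variable table above concrete, a hypothetical
invocation from Python that honors the compiler variables — module and file
names here are made up:

    import os
    import subprocess

    # f2py (meson backend) picks the compilers and flags up from the environment
    env = {**os.environ, "CC": "gcc", "CFLAGS": "-O2", "LDFLAGS": "-L/opt/lib"}
    subprocess.run(["f2py", "-c", "fib.f90", "-m", "fib"], env=env, check=True)
]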
From 85ccc9741e10322c7cf1092d56f812ea2cb32191 Mon Sep 17 00:00:00 2001
From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com>
Date: Wed, 20 Aug 2025 17:50:26 +0100
Subject: [PATCH 0320/1018] TYP: Add defaults to ``numpy/core`` and
 ``numpy/__init__.py`` (#29594)

Co-authored-by: Joren Hammudoglu
---
 numpy/__init__.pyi                           |   6 +-
 numpy/_core/_asarray.pyi                     |  18 +-
 numpy/_core/_ufunc_config.pyi                |  10 +-
 numpy/_core/arrayprint.pyi                   |  54 +-
 numpy/_core/defchararray.pyi                 | 384 ++++++------
 numpy/_core/einsumfunc.pyi                   |  40 +-
 numpy/_core/fromnumeric.pyi                  | 610 +++++++++----------
 numpy/_core/numeric.pyi                      | 186 +++---
 numpy/_core/shape_base.pyi                   |  46 +-
 numpy/_core/strings.pyi                      | 100 +--
 numpy/typing/tests/data/fail/char.pyi        |   2 -
 numpy/typing/tests/data/fail/chararray.pyi   |   2 -
 numpy/typing/tests/data/reveal/char.pyi      |   1 +
 numpy/typing/tests/data/reveal/chararray.pyi |   1 +
 14 files changed, 729 insertions(+), 731 deletions(-)

diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index bf9ed8c9b802..1f47f2aae0d7 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -5259,11 +5259,11 @@ class poly1d:
     def __getitem__(self, val: int, /) -> Any: ...
     def __setitem__(self, key: int, val: Any, /) -> None: ...
     def __iter__(self) -> Iterator[Any]: ...
-    def deriv(self, m: SupportsInt | SupportsIndex = ...) -> poly1d: ...
+    def deriv(self, m: SupportsInt | SupportsIndex = 1) -> poly1d: ...
     def integ(
         self,
-        m: SupportsInt | SupportsIndex = ...,
-        k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = ...,
+        m: SupportsInt | SupportsIndex = 1,
+        k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = 0,
     ) -> poly1d: ...

 class matrix(ndarray[_2DShapeT_co, _DTypeT_co]):
diff --git a/numpy/_core/_asarray.pyi b/numpy/_core/_asarray.pyi
index a4bee00489fb..da5884d49aba 100644
--- a/numpy/_core/_asarray.pyi
+++ b/numpy/_core/_asarray.pyi
@@ -18,24 +18,24 @@ _RequirementsWithE: TypeAlias = _Requirements | _E
 @overload
 def require(
     a: _ArrayT,
-    dtype: None = ...,
-    requirements: _Requirements | Iterable[_Requirements] | None = ...,
+    dtype: None = None,
+    requirements: _Requirements | Iterable[_Requirements] | None = None,
     *,
-    like: _SupportsArrayFunc = ...
+    like: _SupportsArrayFunc | None = None
 ) -> _ArrayT: ...
 @overload
 def require(
     a: object,
-    dtype: DTypeLike = ...,
-    requirements: _E | Iterable[_RequirementsWithE] = ...,
+    dtype: DTypeLike = None,
+    requirements: _E | Iterable[_RequirementsWithE] | None = None,
     *,
-    like: _SupportsArrayFunc = ...
+    like: _SupportsArrayFunc | None = None
 ) -> NDArray[Any]: ...
 @overload
 def require(
     a: object,
-    dtype: DTypeLike = ...,
-    requirements: _Requirements | Iterable[_Requirements] | None = ...,
+    dtype: DTypeLike = None,
+    requirements: _Requirements | Iterable[_Requirements] | None = None,
     *,
-    like: _SupportsArrayFunc = ...
+    like: _SupportsArrayFunc | None = None
 ) -> NDArray[Any]: ...
diff --git a/numpy/_core/_ufunc_config.pyi b/numpy/_core/_ufunc_config.pyi
index 86df9827d652..1cc3595d5ba0 100644
--- a/numpy/_core/_ufunc_config.pyi
+++ b/numpy/_core/_ufunc_config.pyi
@@ -16,11 +16,11 @@ class _ErrDict(TypedDict):
     invalid: _ErrKind

 def seterr(
-    all: _ErrKind | None = ...,
-    divide: _ErrKind | None = ...,
-    over: _ErrKind | None = ...,
-    under: _ErrKind | None = ...,
-    invalid: _ErrKind | None = ...,
+    all: _ErrKind | None = None,
+    divide: _ErrKind | None = None,
+    over: _ErrKind | None = None,
+    under: _ErrKind | None = None,
+    invalid: _ErrKind | None = None,
 ) -> _ErrDict: ...
 def geterr() -> _ErrDict: ...
 def setbufsize(size: int) -> int: ...
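[Note: the stub defaults above mirror the documented runtime behavior — a rough
sketch:

    >>> import numpy as np
    >>> old = np.seterr()    # no arguments: nothing changes, current policies returned
    >>> np.seterr(**old) == old
    True
    >>> np.require([1, 2, 3]).flags['C_CONTIGUOUS']   # requirements=None adds none
    True
]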
diff --git a/numpy/_core/arrayprint.pyi b/numpy/_core/arrayprint.pyi
index 967cc09e6a25..57e2e1248c5e 100644
--- a/numpy/_core/arrayprint.pyi
+++ b/numpy/_core/arrayprint.pyi
@@ -77,14 +77,14 @@ class _FormatOptions(TypedDict):
 __docformat__: Final = "restructuredtext"  # undocumented

 def set_printoptions(
-    precision: SupportsIndex | None = ...,
-    threshold: int | None = ...,
-    edgeitems: int | None = ...,
-    linewidth: int | None = ...,
-    suppress: bool | None = ...,
-    nanstr: str | None = ...,
-    infstr: str | None = ...,
-    formatter: _FormatDict | None = ...,
+    precision: SupportsIndex | None = None,
+    threshold: int | None = None,
+    edgeitems: int | None = None,
+    linewidth: int | None = None,
+    suppress: bool | None = None,
+    nanstr: str | None = None,
+    infstr: str | None = None,
+    formatter: _FormatDict | None = None,
     sign: _Sign | None = None,
     floatmode: _FloatMode | None = None,
     *,
@@ -189,36 +189,36 @@ def array2string(

 def format_float_scientific(
     x: _FloatLike_co,
-    precision: int | None = ...,
-    unique: bool = ...,
+    precision: int | None = None,
+    unique: bool = True,
     trim: _Trim = "k",
-    sign: bool = ...,
-    pad_left: int | None = ...,
-    exp_digits: int | None = ...,
-    min_digits: int | None = ...,
+    sign: bool = False,
+    pad_left: int | None = None,
+    exp_digits: int | None = None,
+    min_digits: int | None = None,
 ) -> str: ...
 def format_float_positional(
     x: _FloatLike_co,
-    precision: int | None = ...,
-    unique: bool = ...,
-    fractional: bool = ...,
+    precision: int | None = None,
+    unique: bool = True,
+    fractional: bool = True,
     trim: _Trim = "k",
-    sign: bool = ...,
-    pad_left: int | None = ...,
-    pad_right: int | None = ...,
-    min_digits: int | None = ...,
+    sign: bool = False,
+    pad_left: int | None = None,
+    pad_right: int | None = None,
+    min_digits: int | None = None,
 ) -> str: ...
 def array_repr(
     arr: NDArray[Any],
-    max_line_width: int | None = ...,
-    precision: SupportsIndex | None = ...,
-    suppress_small: bool | None = ...,
+    max_line_width: int | None = None,
+    precision: SupportsIndex | None = None,
+    suppress_small: bool | None = None,
 ) -> str: ...
 def array_str(
     a: NDArray[Any],
-    max_line_width: int | None = ...,
-    precision: SupportsIndex | None = ...,
-    suppress_small: bool | None = ...,
+    max_line_width: int | None = None,
+    precision: SupportsIndex | None = None,
+    suppress_small: bool | None = None,
 ) -> str: ...
 def printoptions(
     precision: SupportsIndex | None = ...,
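[Note: a quick reminder of what the two float formatters do with these defaults
(examples as in the NumPy docstrings):

    >>> import numpy as np
    >>> np.format_float_scientific(np.float32(np.pi))   # unique=True, trim="k"
    '3.1415927e+00'
    >>> np.format_float_positional(np.float16(np.pi))
    '3.14'
]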
diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi
index d3e01c2c820a..9b86c2a33e35 100644
--- a/numpy/_core/defchararray.pyi
+++ b/numpy/_core/defchararray.pyi
@@ -234,90 +234,90 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]):
     def center(
         self: _CharArray[str_],
         width: i_co,
-        fillchar: U_co = ...,
+        fillchar: U_co = " ",
     ) -> _CharArray[str_]: ...
     @overload
     def center(
         self: _CharArray[bytes_],
         width: i_co,
-        fillchar: S_co = ...,
+        fillchar: str | S_co = " ",
     ) -> _CharArray[bytes_]: ...
     @overload
     def count(
         self: _CharArray[str_],
         sub: U_co,
-        start: i_co = ...,
-        end: i_co | None = ...,
+        start: i_co = 0,
+        end: i_co | None = None,
     ) -> NDArray[int_]: ...
     @overload
     def count(
         self: _CharArray[bytes_],
         sub: S_co,
-        start: i_co = ...,
-        end: i_co | None = ...,
+        start: i_co = 0,
+        end: i_co | None = None,
     ) -> NDArray[int_]: ...

     def decode(
         self: _CharArray[bytes_],
-        encoding: str | None = ...,
-        errors: str | None = ...,
+        encoding: str | None = None,
+        errors: str | None = None,
     ) -> _CharArray[str_]: ...
     def encode(
         self: _CharArray[str_],
-        encoding: str | None = ...,
-        errors: str | None = ...,
+        encoding: str | None = None,
+        errors: str | None = None,
     ) -> _CharArray[bytes_]: ...
     @overload
     def endswith(
         self: _CharArray[str_],
         suffix: U_co,
-        start: i_co = ...,
-        end: i_co | None = ...,
+        start: i_co = 0,
+        end: i_co | None = None,
     ) -> NDArray[np.bool]: ...
     @overload
     def endswith(
         self: _CharArray[bytes_],
         suffix: S_co,
-        start: i_co = ...,
-        end: i_co | None = ...,
+        start: i_co = 0,
+        end: i_co | None = None,
     ) -> NDArray[np.bool]: ...
     def expandtabs(
         self,
-        tabsize: i_co = ...,
+        tabsize: i_co = 8,
     ) -> Self: ...
     @overload
     def find(
         self: _CharArray[str_],
         sub: U_co,
-        start: i_co = ...,
-        end: i_co | None = ...,
+        start: i_co = 0,
+        end: i_co | None = None,
     ) -> NDArray[int_]: ...
     @overload
     def find(
         self: _CharArray[bytes_],
         sub: S_co,
-        start: i_co = ...,
-        end: i_co | None = ...,
+        start: i_co = 0,
+        end: i_co | None = None,
     ) -> NDArray[int_]: ...
     @overload
     def index(
         self: _CharArray[str_],
         sub: U_co,
-        start: i_co = ...,
-        end: i_co | None = ...,
+        start: i_co = 0,
+        end: i_co | None = None,
     ) -> NDArray[int_]: ...
     @overload
     def index(
         self: _CharArray[bytes_],
         sub: S_co,
-        start: i_co = ...,
-        end: i_co | None = ...,
+        start: i_co = 0,
+        end: i_co | None = None,
     ) -> NDArray[int_]: ...
     @overload
@@ -335,24 +335,24 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]):
     def ljust(
         self: _CharArray[str_],
         width: i_co,
-        fillchar: U_co = ...,
+        fillchar: U_co = " ",
     ) -> _CharArray[str_]: ...
     @overload
     def ljust(
         self: _CharArray[bytes_],
         width: i_co,
-        fillchar: S_co = ...,
+        fillchar: str | S_co = " ",
     ) -> _CharArray[bytes_]: ...
     @overload
     def lstrip(
         self: _CharArray[str_],
-        chars: U_co | None = ...,
+        chars: U_co | None = None,
     ) -> _CharArray[str_]: ...
     @overload
     def lstrip(
         self: _CharArray[bytes_],
-        chars: S_co | None = ...,
+        chars: S_co | None = None,
     ) -> _CharArray[bytes_]: ...
     @overload
@@ -371,57 +371,57 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]):
     def replace(
         self: _CharArray[str_],
         old: U_co,
         new: U_co,
-        count: i_co | None = ...,
+        count: i_co | None = None,
     ) -> _CharArray[str_]: ...
     @overload
     def replace(
         self: _CharArray[bytes_],
         old: S_co,
         new: S_co,
-        count: i_co | None = ...,
+        count: i_co | None = None,
     ) -> _CharArray[bytes_]: ...
     @overload
     def rfind(
         self: _CharArray[str_],
         sub: U_co,
-        start: i_co = ...,
-        end: i_co | None = ...,
+        start: i_co = 0,
+        end: i_co | None = None,
     ) -> NDArray[int_]: ...
     @overload
     def rfind(
         self: _CharArray[bytes_],
         sub: S_co,
-        start: i_co = ...,
-        end: i_co | None = ...,
+        start: i_co = 0,
+        end: i_co | None = None,
     ) -> NDArray[int_]: ...
     @overload
     def rindex(
         self: _CharArray[str_],
         sub: U_co,
-        start: i_co = ...,
-        end: i_co | None = ...,
+        start: i_co = 0,
+        end: i_co | None = None,
     ) -> NDArray[int_]: ...
     @overload
     def rindex(
         self: _CharArray[bytes_],
         sub: S_co,
-        start: i_co = ...,
-        end: i_co | None = ...,
+        start: i_co = 0,
+        end: i_co | None = None,
     ) -> NDArray[int_]: ...
     @overload
     def rjust(
         self: _CharArray[str_],
         width: i_co,
-        fillchar: U_co = ...,
+        fillchar: U_co = " ",
     ) -> _CharArray[str_]: ...
     @overload
     def rjust(
         self: _CharArray[bytes_],
         width: i_co,
-        fillchar: S_co = ...,
+        fillchar: str | S_co = " ",
     ) -> _CharArray[bytes_]: ...
     @overload
@@ -438,79 +438,79 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]):
     @overload
     def rsplit(
         self: _CharArray[str_],
-        sep: U_co | None = ...,
-        maxsplit: i_co | None = ...,
+        sep: U_co | None = None,
+        maxsplit: i_co | None = None,
     ) -> NDArray[object_]: ...
     @overload
     def rsplit(
         self: _CharArray[bytes_],
-        sep: S_co | None = ...,
-        maxsplit: i_co | None = ...,
+        sep: S_co | None = None,
+        maxsplit: i_co | None = None,
     ) -> NDArray[object_]: ...
     @overload
     def rstrip(
         self: _CharArray[str_],
-        chars: U_co | None = ...,
+        chars: U_co | None = None,
     ) -> _CharArray[str_]: ...
     @overload
     def rstrip(
         self: _CharArray[bytes_],
-        chars: S_co | None = ...,
+        chars: S_co | None = None,
     ) -> _CharArray[bytes_]: ...
     @overload
     def split(
         self: _CharArray[str_],
-        sep: U_co | None = ...,
-        maxsplit: i_co | None = ...,
+        sep: U_co | None = None,
+        maxsplit: i_co | None = None,
     ) -> NDArray[object_]: ...
     @overload
     def split(
         self: _CharArray[bytes_],
-        sep: S_co | None = ...,
-        maxsplit: i_co | None = ...,
+        sep: S_co | None = None,
+        maxsplit: i_co | None = None,
     ) -> NDArray[object_]: ...
-    def splitlines(self, keepends: b_co | None = ...) -> NDArray[object_]: ...
+    def splitlines(self, keepends: b_co | None = None) -> NDArray[object_]: ...
     @overload
     def startswith(
         self: _CharArray[str_],
         prefix: U_co,
-        start: i_co = ...,
-        end: i_co | None = ...,
+        start: i_co = 0,
+        end: i_co | None = None,
     ) -> NDArray[np.bool]: ...
     @overload
     def startswith(
         self: _CharArray[bytes_],
         prefix: S_co,
-        start: i_co = ...,
-        end: i_co | None = ...,
+        start: i_co = 0,
+        end: i_co | None = None,
     ) -> NDArray[np.bool]: ...
     @overload
     def strip(
         self: _CharArray[str_],
-        chars: U_co | None = ...,
+        chars: U_co | None = None,
     ) -> _CharArray[str_]: ...
     @overload
     def strip(
         self: _CharArray[bytes_],
-        chars: S_co | None = ...,
+        chars: S_co | None = None,
     ) -> _CharArray[bytes_]: ...
     @overload
     def translate(
         self: _CharArray[str_],
         table: U_co,
-        deletechars: U_co | None = ...,
+        deletechars: U_co | None = None,
     ) -> _CharArray[str_]: ...
     @overload
     def translate(
         self: _CharArray[bytes_],
         table: S_co,
-        deletechars: S_co | None = ...,
+        deletechars: S_co | None = None,
     ) -> _CharArray[bytes_]: ...
     def zfill(self, width: i_co) -> Self: ...
@@ -609,33 +609,33 @@ def capitalize(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
 def capitalize(a: T_co) -> _StringDTypeOrUnicodeArray: ...

 @overload
-def center(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ...
+def center(a: U_co, width: i_co, fillchar: U_co = " ") -> NDArray[str_]: ...
 @overload
-def center(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ...
+def center(a: S_co, width: i_co, fillchar: str | S_co = " ") -> NDArray[bytes_]: ...
 @overload
-def center(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ...
+def center(a: _StringDTypeSupportsArray, width: i_co, fillchar: str | _StringDTypeSupportsArray = " ") -> _StringDTypeArray: ...
 @overload
-def center(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ...
+def center(a: T_co, width: i_co, fillchar: T_co = " ") -> _StringDTypeOrUnicodeArray: ...

 def decode(
     a: S_co,
-    encoding: str | None = ...,
-    errors: str | None = ...,
+    encoding: str | None = None,
+    errors: str | None = None,
 ) -> NDArray[str_]: ...
 def encode(
     a: U_co | T_co,
-    encoding: str | None = ...,
-    errors: str | None = ...,
+    encoding: str | None = None,
+    errors: str | None = None,
 ) -> NDArray[bytes_]: ...
 @overload
-def expandtabs(a: U_co, tabsize: i_co = ...) -> NDArray[str_]: ...
+def expandtabs(a: U_co, tabsize: i_co = 8) -> NDArray[str_]: ...
 @overload
-def expandtabs(a: S_co, tabsize: i_co = ...) -> NDArray[bytes_]: ...
+def expandtabs(a: S_co, tabsize: i_co = 8) -> NDArray[bytes_]: ...
 @overload
-def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = ...) -> _StringDTypeArray: ...
+def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = 8) -> _StringDTypeArray: ...
 @overload
-def expandtabs(a: T_co, tabsize: i_co = ...) -> _StringDTypeOrUnicodeArray: ...
+def expandtabs(a: T_co, tabsize: i_co = 8) -> _StringDTypeOrUnicodeArray: ...

 @overload
 def join(sep: U_co, seq: U_co) -> NDArray[str_]: ...
@@ -647,13 +647,13 @@ def join(sep: _StringDTypeSupportsArray, seq: _StringDTypeSupportsArray) -> _Str
 def join(sep: T_co, seq: T_co) -> _StringDTypeOrUnicodeArray: ...

 @overload
-def ljust(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ...
+def ljust(a: U_co, width: i_co, fillchar: U_co = " ") -> NDArray[str_]: ...
 @overload
-def ljust(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ...
+def ljust(a: S_co, width: i_co, fillchar: str | S_co = " ") -> NDArray[bytes_]: ...
 @overload
-def ljust(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ...
+def ljust(a: _StringDTypeSupportsArray, width: i_co, fillchar: str | _StringDTypeSupportsArray = " ") -> _StringDTypeArray: ...
 @overload
-def ljust(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ...
+def ljust(a: T_co, width: i_co, fillchar: T_co = " ") -> _StringDTypeOrUnicodeArray: ...

 @overload
 def lower(a: U_co) -> NDArray[str_]: ...
@@ -665,13 +665,13 @@ def lower(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
 def lower(a: T_co) -> _StringDTypeOrUnicodeArray: ...

 @overload
-def lstrip(a: U_co, chars: U_co | None = ...) -> NDArray[str_]: ...
+def lstrip(a: U_co, chars: U_co | None = None) -> NDArray[str_]: ...
 @overload
-def lstrip(a: S_co, chars: S_co | None = ...) -> NDArray[bytes_]: ...
+def lstrip(a: S_co, chars: S_co | None = None) -> NDArray[bytes_]: ...
 @overload
-def lstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = ...) -> _StringDTypeArray: ...
+def lstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = None) -> _StringDTypeArray: ...
 @overload
-def lstrip(a: T_co, chars: T_co | None = ...) -> _StringDTypeOrUnicodeArray: ...
+def lstrip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ...

 @overload
 def partition(a: U_co, sep: U_co) -> NDArray[str_]: ...
@@ -687,53 +687,53 @@ def partition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ...
 def replace(
     a: U_co,
     old: U_co,
     new: U_co,
-    count: i_co | None = ...,
+    count: i_co | None = -1,
 ) -> NDArray[str_]: ...
 @overload
 def replace(
     a: S_co,
     old: S_co,
     new: S_co,
-    count: i_co | None = ...,
+    count: i_co | None = -1,
 ) -> NDArray[bytes_]: ...
 @overload
 def replace(
     a: _StringDTypeSupportsArray,
     old: _StringDTypeSupportsArray,
     new: _StringDTypeSupportsArray,
-    count: i_co = ...,
+    count: i_co = -1,
 ) -> _StringDTypeArray: ...
 @overload
 def replace(
     a: T_co,
     old: T_co,
     new: T_co,
-    count: i_co = ...,
+    count: i_co = -1,
 ) -> _StringDTypeOrUnicodeArray: ...

 @overload
 def rjust(
     a: U_co,
     width: i_co,
-    fillchar: U_co = ...,
+    fillchar: U_co = " ",
 ) -> NDArray[str_]: ...
 @overload
 def rjust(
     a: S_co,
     width: i_co,
-    fillchar: S_co = ...,
+    fillchar: str | S_co = " ",
 ) -> NDArray[bytes_]: ...
 @overload
 def rjust(
     a: _StringDTypeSupportsArray,
     width: i_co,
-    fillchar: _StringDTypeSupportsArray = ...,
+    fillchar: str | _StringDTypeSupportsArray = " ",
 ) -> _StringDTypeArray: ...
 @overload
 def rjust(
     a: T_co,
     width: i_co,
-    fillchar: T_co = ...,
+    fillchar: T_co = " ",
 ) -> _StringDTypeOrUnicodeArray: ...

 @overload
@@ -748,72 +748,72 @@ def rpartition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ...
 @overload
 def rsplit(
     a: U_co,
-    sep: U_co | None = ...,
-    maxsplit: i_co | None = ...,
+    sep: U_co | None = None,
+    maxsplit: i_co | None = None,
 ) -> NDArray[object_]: ...
 @overload
 def rsplit(
     a: S_co,
-    sep: S_co | None = ...,
-    maxsplit: i_co | None = ...,
+    sep: S_co | None = None,
+    maxsplit: i_co | None = None,
 ) -> NDArray[object_]: ...
 @overload
 def rsplit(
     a: _StringDTypeSupportsArray,
-    sep: _StringDTypeSupportsArray | None = ...,
-    maxsplit: i_co | None = ...,
+    sep: _StringDTypeSupportsArray | None = None,
+    maxsplit: i_co | None = None,
 ) -> NDArray[object_]: ...
 @overload
 def rsplit(
     a: T_co,
-    sep: T_co | None = ...,
-    maxsplit: i_co | None = ...,
+    sep: T_co | None = None,
+    maxsplit: i_co | None = None,
 ) -> NDArray[object_]: ...

 @overload
-def rstrip(a: U_co, chars: U_co | None = ...) -> NDArray[str_]: ...
+def rstrip(a: U_co, chars: U_co | None = None) -> NDArray[str_]: ...
 @overload
-def rstrip(a: S_co, chars: S_co | None = ...) -> NDArray[bytes_]: ...
+def rstrip(a: S_co, chars: S_co | None = None) -> NDArray[bytes_]: ...
 @overload
-def rstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = ...) -> _StringDTypeArray: ...
+def rstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = None) -> _StringDTypeArray: ...
 @overload
-def rstrip(a: T_co, chars: T_co | None = ...) -> _StringDTypeOrUnicodeArray: ...
+def rstrip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ...

 @overload
 def split(
     a: U_co,
-    sep: U_co | None = ...,
-    maxsplit: i_co | None = ...,
+    sep: U_co | None = None,
+    maxsplit: i_co | None = None,
 ) -> NDArray[object_]: ...
 @overload
 def split(
     a: S_co,
-    sep: S_co | None = ...,
-    maxsplit: i_co | None = ...,
+    sep: S_co | None = None,
+    maxsplit: i_co | None = None,
 ) -> NDArray[object_]: ...
 @overload
 def split(
     a: _StringDTypeSupportsArray,
-    sep: _StringDTypeSupportsArray | None = ...,
-    maxsplit: i_co | None = ...,
+    sep: _StringDTypeSupportsArray | None = None,
+    maxsplit: i_co | None = None,
 ) -> NDArray[object_]: ...
 @overload
 def split(
     a: T_co,
-    sep: T_co | None = ...,
-    maxsplit: i_co | None = ...,
+    sep: T_co | None = None,
+    maxsplit: i_co | None = None,
 ) -> NDArray[object_]: ...

-def splitlines(a: UST_co, keepends: b_co | None = ...) -> NDArray[np.object_]: ...
+def splitlines(a: UST_co, keepends: b_co | None = None) -> NDArray[np.object_]: ...

 @overload
-def strip(a: U_co, chars: U_co | None = ...) -> NDArray[str_]: ...
+def strip(a: U_co, chars: U_co | None = None) -> NDArray[str_]: ...
 @overload
-def strip(a: S_co, chars: S_co | None = ...) -> NDArray[bytes_]: ...
+def strip(a: S_co, chars: S_co | None = None) -> NDArray[bytes_]: ...
 @overload
-def strip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = ...) -> _StringDTypeArray: ...
+def strip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = None) -> _StringDTypeArray: ...
 @overload
-def strip(a: T_co, chars: T_co | None = ...) -> _StringDTypeOrUnicodeArray: ...
+def strip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ...
 @overload
 def swapcase(a: U_co) -> NDArray[str_]: ...
@@ -837,25 +837,25 @@ def title(a: T_co) -> _StringDTypeOrUnicodeArray: ...
 def translate(
     a: U_co,
     table: str,
-    deletechars: str | None = ...,
+    deletechars: str | None = None,
 ) -> NDArray[str_]: ...
 @overload
 def translate(
     a: S_co,
     table: str,
-    deletechars: str | None = ...,
+    deletechars: str | None = None,
 ) -> NDArray[bytes_]: ...
 @overload
 def translate(
     a: _StringDTypeSupportsArray,
     table: str,
-    deletechars: str | None = ...,
+    deletechars: str | None = None,
 ) -> _StringDTypeArray: ...
 @overload
 def translate(
     a: T_co,
     table: str,
-    deletechars: str | None = ...,
+    deletechars: str | None = None,
 ) -> _StringDTypeOrUnicodeArray: ...

 @overload
 def zfill(a: T_co, width: i_co) -> _StringDTypeOrUnicodeArray: ...
@@ -881,88 +881,88 @@ def zfill(a: T_co, width: i_co) -> _StringDTypeOrUnicodeArray: ...
 def count(
     a: U_co,
     sub: U_co,
-    start: i_co = ...,
-    end: i_co | None = ...,
+    start: i_co = 0,
+    end: i_co | None = None,
 ) -> NDArray[int_]: ...
 @overload
 def count(
     a: S_co,
     sub: S_co,
-    start: i_co = ...,
-    end: i_co | None = ...,
+    start: i_co = 0,
+    end: i_co | None = None,
 ) -> NDArray[int_]: ...
 @overload
 def count(
     a: T_co,
     sub: T_co,
-    start: i_co = ...,
-    end: i_co | None = ...,
+    start: i_co = 0,
+    end: i_co | None = None,
 ) -> NDArray[np.int_]: ...

 @overload
 def endswith(
     a: U_co,
     suffix: U_co,
-    start: i_co = ...,
-    end: i_co | None = ...,
+    start: i_co = 0,
+    end: i_co | None = None,
 ) -> NDArray[np.bool]: ...
 @overload
 def endswith(
     a: S_co,
     suffix: S_co,
-    start: i_co = ...,
-    end: i_co | None = ...,
+    start: i_co = 0,
+    end: i_co | None = None,
 ) -> NDArray[np.bool]: ...
 @overload
 def endswith(
     a: T_co,
     suffix: T_co,
-    start: i_co = ...,
-    end: i_co | None = ...,
+    start: i_co = 0,
+    end: i_co | None = None,
 ) -> NDArray[np.bool]: ...

 @overload
 def find(
     a: U_co,
     sub: U_co,
-    start: i_co = ...,
-    end: i_co | None = ...,
+    start: i_co = 0,
+    end: i_co | None = None,
 ) -> NDArray[int_]: ...
 @overload
 def find(
     a: S_co,
     sub: S_co,
-    start: i_co = ...,
-    end: i_co | None = ...,
+    start: i_co = 0,
+    end: i_co | None = None,
 ) -> NDArray[int_]: ...
 @overload
 def find(
     a: T_co,
     sub: T_co,
-    start: i_co = ...,
-    end: i_co | None = ...,
+    start: i_co = 0,
+    end: i_co | None = None,
 ) -> NDArray[np.int_]: ...

 @overload
 def index(
     a: U_co,
     sub: U_co,
-    start: i_co = ...,
-    end: i_co | None = ...,
+    start: i_co = 0,
+    end: i_co | None = None,
 ) -> NDArray[int_]: ...
 @overload
 def index(
     a: S_co,
     sub: S_co,
-    start: i_co = ...,
-    end: i_co | None = ...,
+    start: i_co = 0,
+    end: i_co | None = None,
 ) -> NDArray[int_]: ...
 @overload
 def index(
     a: T_co,
     sub: T_co,
-    start: i_co = ...,
-    end: i_co | None = ...,
+    start: i_co = 0,
+    end: i_co | None = None,
 ) -> NDArray[np.int_]: ...

 def isalpha(a: UST_co) -> NDArray[np.bool]: ...
@@ -979,66 +979,66 @@ def isupper(a: UST_co) -> NDArray[np.bool]: ...
 def rfind(
     a: U_co,
     sub: U_co,
-    start: i_co = ...,
-    end: i_co | None = ...,
+    start: i_co = 0,
+    end: i_co | None = None,
 ) -> NDArray[int_]: ...
 @overload
 def rfind(
     a: S_co,
     sub: S_co,
-    start: i_co = ...,
-    end: i_co | None = ...,
+    start: i_co = 0,
+    end: i_co | None = None,
 ) -> NDArray[int_]: ...
 @overload
 def rfind(
     a: T_co,
     sub: T_co,
-    start: i_co = ...,
-    end: i_co | None = ...,
+    start: i_co = 0,
+    end: i_co | None = None,
 ) -> NDArray[np.int_]: ...

 @overload
 def rindex(
     a: U_co,
     sub: U_co,
-    start: i_co = ...,
-    end: i_co | None = ...,
+    start: i_co = 0,
+    end: i_co | None = None,
 ) -> NDArray[int_]: ...
 @overload
 def rindex(
     a: S_co,
     sub: S_co,
-    start: i_co = ...,
-    end: i_co | None = ...,
+    start: i_co = 0,
+    end: i_co | None = None,
 ) -> NDArray[int_]: ...
 @overload
 def rindex(
     a: T_co,
     sub: T_co,
-    start: i_co = ...,
-    end: i_co | None = ...,
+    start: i_co = 0,
+    end: i_co | None = None,
 ) -> NDArray[np.int_]: ...

 @overload
 def startswith(
     a: U_co,
     prefix: U_co,
-    start: i_co = ...,
-    end: i_co | None = ...,
+    start: i_co = 0,
+    end: i_co | None = None,
 ) -> NDArray[np.bool]: ...
 @overload
 def startswith(
     a: S_co,
     prefix: S_co,
-    start: i_co = ...,
-    end: i_co | None = ...,
+    start: i_co = 0,
+    end: i_co | None = None,
 ) -> NDArray[np.bool]: ...
 @overload
 def startswith(
     a: T_co,
     suffix: T_co,
-    start: i_co = ...,
-    end: i_co | None = ...,
+    start: i_co = 0,
+    end: i_co | None = None,
 ) -> NDArray[np.bool]: ...

 def str_len(A: UST_co) -> NDArray[int_]: ...
@@ -1050,18 +1050,18 @@ def str_len(A: UST_co) -> NDArray[int_]: ...
 @overload
 def array(
     obj: U_co,
-    itemsize: int | None = ...,
-    copy: bool = ...,
-    unicode: L[True] | None = ...,
-    order: _OrderKACF = ...,
+    itemsize: int | None = None,
+    copy: bool = True,
+    unicode: L[True] | None = None,
+    order: _OrderKACF = None,
 ) -> _CharArray[str_]: ...
 @overload
 def array(
     obj: S_co,
-    itemsize: int | None = ...,
-    copy: bool = ...,
-    unicode: L[False] | None = ...,
-    order: _OrderKACF = ...,
+    itemsize: int | None = None,
+    copy: bool = True,
+    unicode: L[False] | None = None,
+    order: _OrderKACF = None,
 ) -> _CharArray[bytes_]: ...
 @overload
 def array(
@@ -1069,16 +1069,16 @@ def array(
     itemsize: int | None,
     copy: bool,
     unicode: L[False],
-    order: _OrderKACF = ...,
+    order: _OrderKACF = None,
 ) -> _CharArray[bytes_]: ...
 @overload
 def array(
     obj: object,
-    itemsize: int | None = ...,
-    copy: bool = ...,
+    itemsize: int | None = None,
+    copy: bool = True,
     *,
     unicode: L[False],
-    order: _OrderKACF = ...,
+    order: _OrderKACF = None,
 ) -> _CharArray[bytes_]: ...
 @overload
 def array(
@@ -1086,74 +1086,74 @@ def array(
     itemsize: int | None,
     copy: bool,
     unicode: L[True],
-    order: _OrderKACF = ...,
+    order: _OrderKACF = None,
 ) -> _CharArray[str_]: ...
 @overload
 def array(
     obj: object,
-    itemsize: int | None = ...,
-    copy: bool = ...,
+    itemsize: int | None = None,
+    copy: bool = True,
     *,
     unicode: L[True],
-    order: _OrderKACF = ...,
+    order: _OrderKACF = None,
 ) -> _CharArray[str_]: ...
 @overload
 def array(
     obj: object,
-    itemsize: int | None = ...,
-    copy: bool = ...,
-    unicode: bool | None = ...,
-    order: _OrderKACF = ...,
+    itemsize: int | None = None,
+    copy: bool = True,
+    unicode: bool | None = None,
+    order: _OrderKACF = None,
 ) -> _CharArray[str_] | _CharArray[bytes_]: ...

 @overload
 def asarray(
     obj: U_co,
-    itemsize: int | None = ...,
-    unicode: L[True] | None = ...,
-    order: _OrderKACF = ...,
+    itemsize: int | None = None,
+    unicode: L[True] | None = None,
+    order: _OrderKACF = None,
 ) -> _CharArray[str_]: ...
 @overload
 def asarray(
     obj: S_co,
-    itemsize: int | None = ...,
-    unicode: L[False] | None = ...,
-    order: _OrderKACF = ...,
+    itemsize: int | None = None,
+    unicode: L[False] | None = None,
+    order: _OrderKACF = None,
 ) -> _CharArray[bytes_]: ...
 @overload
 def asarray(
     obj: object,
     itemsize: int | None,
     unicode: L[False],
-    order: _OrderKACF = ...,
+    order: _OrderKACF = None,
 ) -> _CharArray[bytes_]: ...
 @overload
 def asarray(
     obj: object,
-    itemsize: int | None = ...,
+    itemsize: int | None = None,
     *,
     unicode: L[False],
-    order: _OrderKACF = ...,
+    order: _OrderKACF = None,
 ) -> _CharArray[bytes_]: ...
 @overload
 def asarray(
     obj: object,
     itemsize: int | None,
     unicode: L[True],
-    order: _OrderKACF = ...,
+    order: _OrderKACF = None,
 ) -> _CharArray[str_]: ...
 @overload
 def asarray(
     obj: object,
-    itemsize: int | None = ...,
+    itemsize: int | None = None,
     *,
     unicode: L[True],
-    order: _OrderKACF = ...,
+    order: _OrderKACF = None,
 ) -> _CharArray[str_]: ...
 @overload
 def asarray(
     obj: object,
-    itemsize: int | None = ...,
-    unicode: bool | None = ...,
-    order: _OrderKACF = ...,
+    itemsize: int | None = None,
+    unicode: bool | None = None,
+    order: _OrderKACF = None,
 ) -> _CharArray[str_] | _CharArray[bytes_]: ...
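[Note: a behavior sketch for the string routines whose defaults were spelled out
above (outputs per the np.char docs):

    >>> import numpy as np
    >>> np.char.replace(np.array(['aaa']), 'a', 'b')   # count=-1: replace all
    array(['bbb'], dtype='<U3')
    >>> np.char.strip(np.array([' hi ']))              # chars=None: whitespace
    array(['hi'], dtype='<U4')
]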
diff --git a/numpy/_core/einsumfunc.pyi b/numpy/_core/einsumfunc.pyi
index 9653a26dcd78..6d34883e6625 100644
--- a/numpy/_core/einsumfunc.pyi
+++ b/numpy/_core/einsumfunc.pyi
@@ -42,55 +42,55 @@ def einsum(
     subscripts: str | _ArrayLikeInt_co,
     /,
     *operands: _ArrayLikeBool_co,
-    out: None = ...,
+    out: None = None,
     dtype: _DTypeLikeBool | None = ...,
     order: _OrderKACF = ...,
     casting: _CastingSafe = ...,
-    optimize: _OptimizeKind = ...,
+    optimize: _OptimizeKind = False,
 ) -> Any: ...
 @overload
 def einsum(
     subscripts: str | _ArrayLikeInt_co,
     /,
     *operands: _ArrayLikeUInt_co,
-    out: None = ...,
+    out: None = None,
     dtype: _DTypeLikeUInt | None = ...,
     order: _OrderKACF = ...,
     casting: _CastingSafe = ...,
-    optimize: _OptimizeKind = ...,
+    optimize: _OptimizeKind = False,
 ) -> Any: ...
 @overload
 def einsum(
     subscripts: str | _ArrayLikeInt_co,
     /,
     *operands: _ArrayLikeInt_co,
-    out: None = ...,
+    out: None = None,
     dtype: _DTypeLikeInt | None = ...,
     order: _OrderKACF = ...,
     casting: _CastingSafe = ...,
-    optimize: _OptimizeKind = ...,
+    optimize: _OptimizeKind = False,
 ) -> Any: ...
 @overload
 def einsum(
     subscripts: str | _ArrayLikeInt_co,
     /,
     *operands: _ArrayLikeFloat_co,
-    out: None = ...,
+    out: None = None,
     dtype: _DTypeLikeFloat | None = ...,
     order: _OrderKACF = ...,
     casting: _CastingSafe = ...,
-    optimize: _OptimizeKind = ...,
+    optimize: _OptimizeKind = False,
 ) -> Any: ...
 @overload
 def einsum(
     subscripts: str | _ArrayLikeInt_co,
     /,
     *operands: _ArrayLikeComplex_co,
-    out: None = ...,
+    out: None = None,
     dtype: _DTypeLikeComplex | None = ...,
     order: _OrderKACF = ...,
     casting: _CastingSafe = ...,
-    optimize: _OptimizeKind = ...,
+    optimize: _OptimizeKind = False,
 ) -> Any: ...
 @overload
 def einsum(
@@ -99,9 +99,9 @@ def einsum(
     *operands: Any,
     casting: _CastingUnsafe,
     dtype: _DTypeLikeComplex_co | None = ...,
-    out: None = ...,
+    out: None = None,
     order: _OrderKACF = ...,
-    optimize: _OptimizeKind = ...,
+    optimize: _OptimizeKind = False,
 ) -> Any: ...
 @overload
 def einsum(
@@ -112,7 +112,7 @@ def einsum(
     dtype: _DTypeLikeComplex_co | None = ...,
     order: _OrderKACF = ...,
     casting: _CastingSafe = ...,
-    optimize: _OptimizeKind = ...,
+    optimize: _OptimizeKind = False,
 ) -> _ArrayT: ...
 @overload
 def einsum(
@@ -123,7 +123,7 @@ def einsum(
     casting: _CastingUnsafe,
     dtype: _DTypeLikeComplex_co | None = ...,
     order: _OrderKACF = ...,
-    optimize: _OptimizeKind = ...,
+    optimize: _OptimizeKind = False,
 ) -> _ArrayT: ...

 @overload
@@ -131,11 +131,11 @@ def einsum(
     subscripts: str | _ArrayLikeInt_co,
     /,
     *operands: _ArrayLikeObject_co,
-    out: None = ...,
+    out: None = None,
     dtype: _DTypeLikeObject | None = ...,
     order: _OrderKACF = ...,
     casting: _CastingSafe = ...,
-    optimize: _OptimizeKind = ...,
+    optimize: _OptimizeKind = False,
 ) -> Any: ...
 @overload
 def einsum(
@@ -144,9 +144,9 @@ def einsum(
     *operands: Any,
     casting: _CastingUnsafe,
     dtype: _DTypeLikeObject | None = ...,
-    out: None = ...,
+    out: None = None,
     order: _OrderKACF = ...,
-    optimize: _OptimizeKind = ...,
+    optimize: _OptimizeKind = False,
 ) -> Any: ...
 @overload
 def einsum(
@@ -157,7 +157,7 @@ def einsum(
     dtype: _DTypeLikeObject | None = ...,
     order: _OrderKACF = ...,
     casting: _CastingSafe = ...,
-    optimize: _OptimizeKind = ...,
+    optimize: _OptimizeKind = False,
 ) -> _ArrayT: ...
 @overload
 def einsum(
@@ -168,7 +168,7 @@ def einsum(
     casting: _CastingUnsafe,
     dtype: _DTypeLikeObject | None = ...,
     order: _OrderKACF = ...,
-    optimize: _OptimizeKind = ...,
+    optimize: _OptimizeKind = False,
 ) -> _ArrayT: ...

 # NOTE: `einsum_call` is a hidden kwarg unavailable for public use.
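[Note: the ``optimize=False`` default made explicit above means the contraction
is evaluated exactly as written; ``optimize=True`` only changes the evaluation
plan, not the result:

    >>> import numpy as np
    >>> a = np.arange(6).reshape(2, 3)
    >>> np.einsum('ij->i', a)                   # optimize=False by default
    array([ 3, 12])
    >>> np.einsum('ij->i', a, optimize=True)
    array([ 3, 12])
]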
diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi
index 95fe7f7d8484..0c96ac8c0aa4 100644
--- a/numpy/_core/fromnumeric.pyi
+++ b/numpy/_core/fromnumeric.pyi
@@ -129,33 +129,33 @@ _PyScalar: TypeAlias = complex | bytes | str
 def take(
     a: _ArrayLike[_ScalarT],
     indices: _IntLike_co,
-    axis: None = ...,
-    out: None = ...,
-    mode: _ModeKind = ...,
+    axis: None = None,
+    out: None = None,
+    mode: _ModeKind = "raise",
 ) -> _ScalarT: ...
 @overload
 def take(
     a: ArrayLike,
     indices: _IntLike_co,
-    axis: SupportsIndex | None = ...,
-    out: None = ...,
-    mode: _ModeKind = ...,
+    axis: SupportsIndex | None = None,
+    out: None = None,
+    mode: _ModeKind = "raise",
 ) -> Any: ...
 @overload
 def take(
     a: _ArrayLike[_ScalarT],
     indices: _ArrayLikeInt_co,
-    axis: SupportsIndex | None = ...,
-    out: None = ...,
-    mode: _ModeKind = ...,
+    axis: SupportsIndex | None = None,
+    out: None = None,
+    mode: _ModeKind = "raise",
 ) -> NDArray[_ScalarT]: ...
 @overload
 def take(
     a: ArrayLike,
     indices: _ArrayLikeInt_co,
-    axis: SupportsIndex | None = ...,
-    out: None = ...,
-    mode: _ModeKind = ...,
+    axis: SupportsIndex | None = None,
+    out: None = None,
+    mode: _ModeKind = "raise",
 ) -> NDArray[Any]: ...
 @overload
 def take(
@@ -163,16 +163,16 @@ def take(
     indices: _ArrayLikeInt_co,
     axis: SupportsIndex | None,
     out: _ArrayT,
-    mode: _ModeKind = ...,
+    mode: _ModeKind = "raise",
 ) -> _ArrayT: ...
 @overload
 def take(
     a: ArrayLike,
     indices: _ArrayLikeInt_co,
-    axis: SupportsIndex | None = ...,
+    axis: SupportsIndex | None = None,
     *,
     out: _ArrayT,
-    mode: _ModeKind = ...,
+    mode: _ModeKind = "raise",
 ) -> _ArrayT: ...

 @overload
@@ -249,29 +249,29 @@ def reshape(
 def choose(
     a: _IntLike_co,
     choices: ArrayLike,
-    out: None = ...,
-    mode: _ModeKind = ...,
+    out: None = None,
+    mode: _ModeKind = "raise",
 ) -> Any: ...
 @overload
 def choose(
     a: _ArrayLikeInt_co,
     choices: _ArrayLike[_ScalarT],
-    out: None = ...,
-    mode: _ModeKind = ...,
+    out: None = None,
+    mode: _ModeKind = "raise",
 ) -> NDArray[_ScalarT]: ...
 @overload
 def choose(
     a: _ArrayLikeInt_co,
     choices: ArrayLike,
-    out: None = ...,
-    mode: _ModeKind = ...,
+    out: None = None,
+    mode: _ModeKind = "raise",
 ) -> NDArray[Any]: ...
 @overload
 def choose(
     a: _ArrayLikeInt_co,
     choices: ArrayLike,
     out: _ArrayT,
-    mode: _ModeKind = ...,
+    mode: _ModeKind = "raise",
 ) -> _ArrayT: ...

 @overload
@@ -303,7 +303,7 @@ def put(
     a: NDArray[Any],
     ind: _ArrayLikeInt_co,
     v: ArrayLike,
-    mode: _ModeKind = ...,
+    mode: _ModeKind = "raise",
 ) -> None: ...

 @overload
@@ -322,12 +322,12 @@ def swapaxes(
 @overload
 def transpose(
     a: _ArrayLike[_ScalarT],
-    axes: _ShapeLike | None = ...
+    axes: _ShapeLike | None = None,
 ) -> NDArray[_ScalarT]: ...
 @overload
 def transpose(
     a: ArrayLike,
-    axes: _ShapeLike | None = ...
+    axes: _ShapeLike | None = None,
 ) -> NDArray[Any]: ...

 @overload
@@ -374,44 +374,44 @@ def argpartition(
 @overload
 def sort(
     a: _ArrayLike[_ScalarT],
-    axis: SupportsIndex | None = ...,
-    kind: _SortKind | None = ...,
-    order: str | Sequence[str] | None = ...,
+    axis: SupportsIndex | None = -1,
+    kind: _SortKind | None = None,
+    order: str | Sequence[str] | None = None,
     *,
-    stable: bool | None = ...,
+    stable: bool | None = None,
 ) -> NDArray[_ScalarT]: ...
 @overload
 def sort(
     a: ArrayLike,
-    axis: SupportsIndex | None = ...,
-    kind: _SortKind | None = ...,
-    order: str | Sequence[str] | None = ...,
+    axis: SupportsIndex | None = -1,
+    kind: _SortKind | None = None,
+    order: str | Sequence[str] | None = None,
     *,
-    stable: bool | None = ...,
+    stable: bool | None = None,
 ) -> NDArray[Any]: ...

 def argsort(
     a: ArrayLike,
-    axis: SupportsIndex | None = ...,
-    kind: _SortKind | None = ...,
-    order: str | Sequence[str] | None = ...,
+    axis: SupportsIndex | None = -1,
+    kind: _SortKind | None = None,
+    order: str | Sequence[str] | None = None,
     *,
-    stable: bool | None = ...,
+    stable: bool | None = None,
 ) -> NDArray[intp]: ...

 @overload
 def argmax(
     a: ArrayLike,
-    axis: None = ...,
-    out: None = ...,
+    axis: None = None,
+    out: None = None,
     *,
     keepdims: Literal[False] = ...,
 ) -> intp: ...
 @overload
 def argmax(
     a: ArrayLike,
-    axis: SupportsIndex | None = ...,
-    out: None = ...,
+    axis: SupportsIndex | None = None,
+    out: None = None,
     *,
     keepdims: bool = ...,
 ) -> Any: ...
@@ -426,7 +426,7 @@ def argmax(
 @overload
 def argmax(
     a: ArrayLike,
-    axis: SupportsIndex | None = ...,
+    axis: SupportsIndex | None = None,
     *,
     out: _BoolOrIntArrayT,
     keepdims: bool = ...,
@@ -435,16 +435,16 @@ def argmax(
 @overload
 def argmin(
     a: ArrayLike,
-    axis: None = ...,
-    out: None = ...,
+    axis: None = None,
+    out: None = None,
     *,
     keepdims: Literal[False] = ...,
 ) -> intp: ...
 @overload
 def argmin(
     a: ArrayLike,
-    axis: SupportsIndex | None = ...,
-    out: None = ...,
+    axis: SupportsIndex | None = None,
+    out: None = None,
     *,
     keepdims: bool = ...,
 ) -> Any: ...
@@ -459,7 +459,7 @@ def argmin(
 @overload
 def argmin(
     a: ArrayLike,
-    axis: SupportsIndex | None = ...,
+    axis: SupportsIndex | None = None,
     *,
     out: _BoolOrIntArrayT,
     keepdims: bool = ...,
@@ -469,15 +469,15 @@ def argmin(
 def searchsorted(
     a: ArrayLike,
     v: _ScalarLike_co,
-    side: _SortSide = ...,
-    sorter: _ArrayLikeInt_co | None = ...,  # 1D int array
+    side: _SortSide = "left",
+    sorter: _ArrayLikeInt_co | None = None,  # 1D int array
 ) -> intp: ...
 @overload
 def searchsorted(
     a: ArrayLike,
     v: ArrayLike,
-    side: _SortSide = ...,
-    sorter: _ArrayLikeInt_co | None = ...,  # 1D int array
+    side: _SortSide = "left",
+    sorter: _ArrayLikeInt_co | None = None,  # 1D int array
 ) -> NDArray[intp]: ...

 #
@@ -497,42 +497,42 @@ def resize(a: ArrayLike, new_shape: _ShapeLike) -> NDArray[Any]: ...
 @overload
 def squeeze(
     a: _ScalarT,
-    axis: _ShapeLike | None = ...,
+    axis: _ShapeLike | None = None,
 ) -> _ScalarT: ...
 @overload
 def squeeze(
     a: _ArrayLike[_ScalarT],
-    axis: _ShapeLike | None = ...,
+    axis: _ShapeLike | None = None,
 ) -> NDArray[_ScalarT]: ...
 @overload
 def squeeze(
     a: ArrayLike,
-    axis: _ShapeLike | None = ...,
+    axis: _ShapeLike | None = None,
 ) -> NDArray[Any]: ...
 @overload
 def diagonal(
     a: _ArrayLike[_ScalarT],
-    offset: SupportsIndex = ...,
-    axis1: SupportsIndex = ...,
-    axis2: SupportsIndex = ...,  # >= 2D array
+    offset: SupportsIndex = 0,
+    axis1: SupportsIndex = 0,
+    axis2: SupportsIndex = 1,  # >= 2D array
 ) -> NDArray[_ScalarT]: ...
 @overload
 def diagonal(
     a: ArrayLike,
-    offset: SupportsIndex = ...,
-    axis1: SupportsIndex = ...,
-    axis2: SupportsIndex = ...,  # >= 2D array
+    offset: SupportsIndex = 0,
+    axis1: SupportsIndex = 0,
+    axis2: SupportsIndex = 1,  # >= 2D array
 ) -> NDArray[Any]: ...

 @overload
 def trace(
     a: ArrayLike,  # >= 2D array
-    offset: SupportsIndex = ...,
-    axis1: SupportsIndex = ...,
-    axis2: SupportsIndex = ...,
-    dtype: DTypeLike | None = ...,
-    out: None = ...,
+    offset: SupportsIndex = 0,
+    axis1: SupportsIndex = 0,
+    axis2: SupportsIndex = 1,
+    dtype: DTypeLike | None = None,
+    out: None = None,
 ) -> Any: ...
 @overload
 def trace(
@@ -546,10 +546,10 @@ def trace(
 @overload
 def trace(
     a: ArrayLike,  # >= 2D array
-    offset: SupportsIndex = ...,
-    axis1: SupportsIndex = ...,
-    axis2: SupportsIndex = ...,
-    dtype: DTypeLike | None = ...,
+    offset: SupportsIndex = 0,
+    axis1: SupportsIndex = 0,
+    axis2: SupportsIndex = 1,
+    dtype: DTypeLike | None = None,
     *,
     out: _ArrayT,
 ) -> _ArrayT: ...
@@ -601,15 +601,15 @@ def shape(a: ArrayLike) -> _AnyShape: ...
 def compress(
     condition: _ArrayLikeBool_co,  # 1D bool array
     a: _ArrayLike[_ScalarT],
-    axis: SupportsIndex | None = ...,
-    out: None = ...,
+    axis: SupportsIndex | None = None,
+    out: None = None,
 ) -> NDArray[_ScalarT]: ...
 @overload
 def compress(
     condition: _ArrayLikeBool_co,  # 1D bool array
     a: ArrayLike,
-    axis: SupportsIndex | None = ...,
-    out: None = ...,
+    axis: SupportsIndex | None = None,
+    out: None = None,
 ) -> NDArray[Any]: ...
 @overload
 def compress(
@@ -622,7 +622,7 @@ def compress(
 def compress(
     condition: _ArrayLikeBool_co,  # 1D bool array
     a: ArrayLike,
-    axis: SupportsIndex | None = ...,
+    axis: SupportsIndex | None = None,
     *,
     out: _ArrayT,
 ) -> _ArrayT: ...
@@ -632,7 +632,7 @@ def clip(
     a: _ScalarT,
     a_min: ArrayLike | None,
     a_max: ArrayLike | None,
-    out: None = ...,
+    out: None = None,
     *,
     min: ArrayLike | None = ...,
     max: ArrayLike | None = ...,
@@ -648,7 +648,7 @@ def clip(
     a: _ScalarLike_co,
     a_min: ArrayLike | None,
     a_max: ArrayLike | None,
-    out: None = ...,
+    out: None = None,
     *,
     min: ArrayLike | None = ...,
     max: ArrayLike | None = ...,
@@ -664,7 +664,7 @@ def clip(
     a: _ArrayLike[_ScalarT],
     a_min: ArrayLike | None,
     a_max: ArrayLike | None,
-    out: None = ...,
+    out: None = None,
     *,
     min: ArrayLike | None = ...,
     max: ArrayLike | None = ...,
@@ -680,7 +680,7 @@ def clip(
     a: ArrayLike,
     a_min: ArrayLike | None,
     a_max: ArrayLike | None,
-    out: None = ...,
+    out: None = None,
     *,
     min: ArrayLike | None = ...,
     max: ArrayLike | None = ...,
@@ -712,7 +712,7 @@ def clip(
     a: ArrayLike,
     a_min: ArrayLike | None,
     a_max: ArrayLike | None,
-    out: ArrayLike = ...,
+    out: ArrayLike = None,
     *,
     min: ArrayLike | None = ...,
     max: ArrayLike | None = ...,
@@ -727,9 +727,9 @@ def clip(
 @overload
 def sum(
     a: _ArrayLike[_ScalarT],
-    axis: None = ...,
-    dtype: None = ...,
-    out: None = ...,
+    axis: None = None,
+    dtype: None = None,
+    out: None = None,
     keepdims: Literal[False] = ...,
     initial: _NumberLike_co = ...,
     where: _ArrayLikeBool_co = ...,
@@ -737,9 +737,9 @@ def sum(
 @overload
 def sum(
     a: _ArrayLike[_ScalarT],
-    axis: None = ...,
-    dtype: None = ...,
-    out: None = ...,
+    axis: None = None,
+    dtype: None = None,
+    out: None = None,
     keepdims: bool = ...,
     initial: _NumberLike_co = ...,
     where: _ArrayLikeBool_co = ...,
@@ -749,7 +749,7 @@ def sum(
     a: ArrayLike,
     axis: None,
     dtype: _DTypeLike[_ScalarT],
-    out: None = ...,
+    out: None = None,
     keepdims: Literal[False] = ...,
     initial: _NumberLike_co = ...,
     where: _ArrayLikeBool_co = ...,
@@ -757,10 +757,10 @@ def sum(
 @overload
 def sum(
     a: ArrayLike,
-    axis: None = ...,
+    axis: None = None,
     *,
     dtype: _DTypeLike[_ScalarT],
-    out: None = ...,
+    out: None = None,
     keepdims: Literal[False] = ...,
     initial: _NumberLike_co = ...,
     where: _ArrayLikeBool_co = ...,
@@ -770,7 +770,7 @@ def sum(
     a: ArrayLike,
     axis: _ShapeLike | None,
     dtype: _DTypeLike[_ScalarT],
-    out: None = ...,
+    out: None = None,
     keepdims: bool = ...,
     initial: _NumberLike_co = ...,
     where: _ArrayLikeBool_co = ...,
@@ -778,10 +778,10 @@ def sum(
 @overload
 def sum(
     a: ArrayLike,
-    axis: _ShapeLike | None = ...,
+    axis: _ShapeLike | None = None,
     *,
     dtype: _DTypeLike[_ScalarT],
-    out: None = ...,
+    out: None = None,
     keepdims: bool = ...,
     initial: _NumberLike_co = ...,
     where: _ArrayLikeBool_co = ...,
@@ -789,9 +789,9 @@ def sum(
 @overload
 def sum(
     a: ArrayLike,
-    axis: _ShapeLike | None = ...,
-    dtype: DTypeLike = ...,
-    out: None = ...,
+    axis: _ShapeLike | None = None,
+    dtype: DTypeLike = None,
+    out: None = None,
     keepdims: bool = ...,
     initial: _NumberLike_co = ...,
     where: _ArrayLikeBool_co = ...,
@@ -809,8 +809,8 @@ def sum(
 @overload
 def sum(
     a: ArrayLike,
-    axis: _ShapeLike | None = ...,
-    dtype: DTypeLike = ...,
+    axis: _ShapeLike | None = None,
+    dtype: DTypeLike = None,
     *,
     out: _ArrayT,
     keepdims: bool = ...,
@@ -898,38 +898,38 @@ def any(
 @overload
 def cumsum(
     a: _ArrayLike[_ScalarT],
-    axis: SupportsIndex | None = ...,
-    dtype: None = ...,
-    out: None = ...,
+    axis: SupportsIndex | None = None,
+    dtype: None = None,
+    out: None = None,
 ) -> NDArray[_ScalarT]: ...
 @overload
 def cumsum(
     a: ArrayLike,
-    axis: SupportsIndex | None = ...,
-    dtype: None = ...,
-    out: None = ...,
+    axis: SupportsIndex | None = None,
+    dtype: None = None,
+    out: None = None,
 ) -> NDArray[Any]: ...
 @overload
 def cumsum(
     a: ArrayLike,
     axis: SupportsIndex | None,
     dtype: _DTypeLike[_ScalarT],
-    out: None = ...,
+    out: None = None,
 ) -> NDArray[_ScalarT]: ...
 @overload
 def cumsum(
     a: ArrayLike,
-    axis: SupportsIndex | None = ...,
+    axis: SupportsIndex | None = None,
     *,
     dtype: _DTypeLike[_ScalarT],
-    out: None = ...,
+    out: None = None,
 ) -> NDArray[_ScalarT]: ...
 @overload
 def cumsum(
     a: ArrayLike,
-    axis: SupportsIndex | None = ...,
-    dtype: DTypeLike = ...,
-    out: None = ...,
+    axis: SupportsIndex | None = None,
+    dtype: DTypeLike = None,
+    out: None = None,
 ) -> NDArray[Any]: ...
 @overload
 def cumsum(
@@ -941,8 +941,8 @@ def cumsum(
 @overload
 def cumsum(
     a: ArrayLike,
-    axis: SupportsIndex | None = ...,
-    dtype: DTypeLike = ...,
+    axis: SupportsIndex | None = None,
+    dtype: DTypeLike = None,
     *,
     out: _ArrayT,
 ) -> _ArrayT: ...
@@ -952,64 +952,64 @@ def cumulative_sum(
     x: _ArrayLike[_ScalarT],
     /,
     *,
-    axis: SupportsIndex | None = ...,
-    dtype: None = ...,
-    out: None = ...,
-    include_initial: bool = ...,
+    axis: SupportsIndex | None = None,
+    dtype: None = None,
+    out: None = None,
+    include_initial: bool = False,
 ) -> NDArray[_ScalarT]: ...
 @overload
 def cumulative_sum(
     x: ArrayLike,
     /,
     *,
-    axis: SupportsIndex | None = ...,
-    dtype: None = ...,
-    out: None = ...,
-    include_initial: bool = ...,
+    axis: SupportsIndex | None = None,
+    dtype: None = None,
+    out: None = None,
+    include_initial: bool = False,
 ) -> NDArray[Any]: ...
 @overload
 def cumulative_sum(
     x: ArrayLike,
     /,
     *,
-    axis: SupportsIndex | None = ...,
+    axis: SupportsIndex | None = None,
     dtype: _DTypeLike[_ScalarT],
-    out: None = ...,
-    include_initial: bool = ...,
+    out: None = None,
+    include_initial: bool = False,
 ) -> NDArray[_ScalarT]: ...
 @overload
 def cumulative_sum(
     x: ArrayLike,
     /,
     *,
-    axis: SupportsIndex | None = ...,
-    dtype: DTypeLike = ...,
-    out: None = ...,
-    include_initial: bool = ...,
+    axis: SupportsIndex | None = None,
+    dtype: DTypeLike = None,
+    out: None = None,
+    include_initial: bool = False,
 ) -> NDArray[Any]: ...
 @overload
 def cumulative_sum(
     x: ArrayLike,
     /,
     *,
-    axis: SupportsIndex | None = ...,
-    dtype: DTypeLike = ...,
+    axis: SupportsIndex | None = None,
+    dtype: DTypeLike = None,
     out: _ArrayT,
-    include_initial: bool = ...,
+    include_initial: bool = False,
 ) -> _ArrayT: ...

 @overload
 def ptp(
     a: _ArrayLike[_ScalarT],
-    axis: None = ...,
-    out: None = ...,
+    axis: None = None,
+    out: None = None,
     keepdims: Literal[False] = ...,
 ) -> _ScalarT: ...
 @overload
 def ptp(
     a: ArrayLike,
-    axis: _ShapeLike | None = ...,
-    out: None = ...,
+    axis: _ShapeLike | None = None,
+    out: None = None,
     keepdims: bool = ...,
 ) -> Any: ...
 @overload
@@ -1022,7 +1022,7 @@ def ptp(
 @overload
 def ptp(
     a: ArrayLike,
-    axis: _ShapeLike | None = ...,
+    axis: _ShapeLike | None = None,
     *,
     out: _ArrayT,
     keepdims: bool = ...,
@@ -1031,8 +1031,8 @@ def ptp(
 @overload
 def amax(
     a: _ArrayLike[_ScalarT],
-    axis: None = ...,
-    out: None = ...,
+    axis: None = None,
+    out: None = None,
     keepdims: Literal[False] = ...,
     initial: _NumberLike_co = ...,
     where: _ArrayLikeBool_co = ...,
@@ -1040,8 +1040,8 @@ def amax(
 @overload
 def amax(
     a: ArrayLike,
-    axis: _ShapeLike | None = ...,
-    out: None = ...,
+    axis: _ShapeLike | None = None,
+    out: None = None,
     keepdims: bool = ...,
     initial: _NumberLike_co = ...,
     where: _ArrayLikeBool_co = ...,
@@ -1058,7 +1058,7 @@ def amax(
 @overload
 def amax(
     a: ArrayLike,
-    axis: _ShapeLike | None = ...,
+    axis: _ShapeLike | None = None,
     *,
     out: _ArrayT,
     keepdims: bool = ...,
@@ -1069,8 +1069,8 @@ def amax(
 @overload
 def amin(
     a: _ArrayLike[_ScalarT],
-    axis: None = ...,
-    out: None = ...,
+    axis: None = None,
+    out: None = None,
     keepdims: Literal[False] = ...,
     initial: _NumberLike_co = ...,
     where: _ArrayLikeBool_co = ...,
@@ -1078,8 +1078,8 @@ def amin(
 @overload
 def amin(
     a: ArrayLike,
-    axis: _ShapeLike | None = ...,
-    out: None = ...,
+    axis: _ShapeLike | None = None,
+    out: None = None,
     keepdims: bool = ...,
     initial: _NumberLike_co = ...,
     where: _ArrayLikeBool_co = ...,
@@ -1096,7 +1096,7 @@ def amin(
 @overload
 def amin(
     a: ArrayLike,
-    axis: _ShapeLike | None = ...,
+    axis: _ShapeLike | None = None,
     *,
     out: _ArrayT,
     keepdims: bool = ...,
@@ -1114,9 +1114,9 @@ def amin(
 @overload
 def prod(
     a: _ArrayLikeBool_co,
-    axis: None = ...,
-    dtype: None = ...,
-    out: None = ...,
+    axis: None = None,
+    dtype: None = None,
+    out: None = None,
     keepdims: Literal[False] = ...,
     initial: _NumberLike_co = ...,
     where: _ArrayLikeBool_co = ...,
@@ -1124,9 +1124,9 @@ def prod(
 @overload
 def prod(
     a: _ArrayLikeUInt_co,
-    axis: None = ...,
-    dtype: None = ...,
-    out: None = ...,
+    axis: None = None,
+    dtype: None = None,
+    out: None = None,
     keepdims: Literal[False] = ...,
     initial: _NumberLike_co = ...,
     where: _ArrayLikeBool_co = ...,
@@ -1134,9 +1134,9 @@ def prod(
 @overload
 def prod(
     a: _ArrayLikeInt_co,
-    axis: None = ...,
-    dtype: None = ...,
-    out: None = ...,
+    axis: None = None,
+    dtype: None = None,
+    out: None = None,
     keepdims: Literal[False] = ...,
     initial: _NumberLike_co = ...,
     where: _ArrayLikeBool_co = ...,
@@ -1144,9 +1144,9 @@ def prod(
 @overload
 def prod(
     a: _ArrayLikeFloat_co,
-    axis: None = ...,
-    dtype: None = ...,
-    out: None = ...,
+    axis: None = None,
+    dtype: None = None,
+    out: None = None,
     keepdims: Literal[False] = ...,
     initial: _NumberLike_co = ...,
     where: _ArrayLikeBool_co = ...,
@@ -1154,9 +1154,9 @@ def prod(
 @overload
 def prod(
     a: _ArrayLikeComplex_co,
-    axis: None = ...,
-    dtype: None = ...,
-    out: None = ...,
+    axis: None = None,
+    dtype: None = None,
+    out: None = None,
     keepdims: Literal[False] = ...,
     initial: _NumberLike_co = ...,
     where: _ArrayLikeBool_co = ...,
@@ -1164,9 +1164,9 @@ def prod(
 @overload
 def prod(
     a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
-    axis: _ShapeLike | None = ...,
-    dtype: None = ...,
-    out: None = ...,
+    axis: _ShapeLike | None = None,
+    dtype: None = None,
+    out: None = None,
     keepdims: bool = ...,
     initial: _NumberLike_co = ...,
     where: _ArrayLikeBool_co = ...,
@@ -1176,7 +1176,7 @@ def prod(
     a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
     axis: None,
     dtype: _DTypeLike[_ScalarT],
-    out: None = ...,
+    out: None = None,
     keepdims: Literal[False] = ...,
     initial: _NumberLike_co = ...,
..., where: _ArrayLikeBool_co = ..., @@ -1184,10 +1184,10 @@ def prod( @overload def prod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = ..., + axis: None = None, *, dtype: _DTypeLike[_ScalarT], - out: None = ..., + out: None = None, keepdims: Literal[False] = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -1195,9 +1195,9 @@ def prod( @overload def prod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike | None = ..., - out: None = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -1215,8 +1215,8 @@ def prod( @overload def prod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike | None = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, *, out: _ArrayT, keepdims: bool = ..., @@ -1227,66 +1227,66 @@ def prod( @overload def cumprod( a: _ArrayLikeBool_co, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[int_]: ... @overload def cumprod( a: _ArrayLikeUInt_co, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[uint64]: ... @overload def cumprod( a: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[int64]: ... @overload def cumprod( a: _ArrayLikeFloat_co, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[floating]: ... @overload def cumprod( a: _ArrayLikeComplex_co, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[complexfloating]: ... @overload def cumprod( a: _ArrayLikeObject_co, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[object_]: ... @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: SupportsIndex | None, dtype: _DTypeLike[_ScalarT], - out: None = ..., + out: None = None, ) -> NDArray[_ScalarT]: ... @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, *, dtype: _DTypeLike[_ScalarT], - out: None = ..., + out: None = None, ) -> NDArray[_ScalarT]: ... @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: SupportsIndex | None = ..., - dtype: DTypeLike = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: DTypeLike = None, + out: None = None, ) -> NDArray[Any]: ... @overload def cumprod( @@ -1298,8 +1298,8 @@ def cumprod( @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: SupportsIndex | None = ..., - dtype: DTypeLike = ..., + axis: SupportsIndex | None = None, + dtype: DTypeLike = None, *, out: _ArrayT, ) -> _ArrayT: ... 
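(Illustrative aside, not part of the patch: a minimal runnable check of what the concrete defaults above mean at runtime; the input values are arbitrary.)

    import numpy as np

    # axis=None operates on the flattened input; dtype=None is inferred
    # from the data, matching the stubbed defaults.
    out = np.cumprod([1, 2, 3, 4])
    print(out)        # [ 1  2  6 24]
    print(out.dtype)  # platform default integer, e.g. int64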
@@ -1309,131 +1309,131 @@ def cumulative_prod( x: _ArrayLikeBool_co, /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[int_]: ... @overload def cumulative_prod( x: _ArrayLikeUInt_co, /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[uint64]: ... @overload def cumulative_prod( x: _ArrayLikeInt_co, /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[int64]: ... @overload def cumulative_prod( x: _ArrayLikeFloat_co, /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[floating]: ... @overload def cumulative_prod( x: _ArrayLikeComplex_co, /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[complexfloating]: ... @overload def cumulative_prod( x: _ArrayLikeObject_co, /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[object_]: ... @overload def cumulative_prod( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, dtype: _DTypeLike[_ScalarT], - out: None = ..., - include_initial: bool = ..., + out: None = None, + include_initial: bool = False, ) -> NDArray[_ScalarT]: ... @overload def cumulative_prod( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: SupportsIndex | None = ..., - dtype: DTypeLike = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: DTypeLike = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[Any]: ... @overload def cumulative_prod( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: SupportsIndex | None = ..., - dtype: DTypeLike = ..., + axis: SupportsIndex | None = None, + dtype: DTypeLike = None, out: _ArrayT, - include_initial: bool = ..., + include_initial: bool = False, ) -> _ArrayT: ... def ndim(a: ArrayLike) -> int: ... -def size(a: ArrayLike, axis: int | tuple[int, ...] | None = ...) -> int: ... +def size(a: ArrayLike, axis: int | tuple[int, ...] | None = None) -> int: ... @overload def around( a: _BoolLike_co, - decimals: SupportsIndex = ..., - out: None = ..., + decimals: SupportsIndex = 0, + out: None = None, ) -> float16: ... @overload def around( a: _NumberOrObjectT, - decimals: SupportsIndex = ..., - out: None = ..., + decimals: SupportsIndex = 0, + out: None = None, ) -> _NumberOrObjectT: ... @overload def around( a: _ComplexLike_co | object_, - decimals: SupportsIndex = ..., - out: None = ..., + decimals: SupportsIndex = 0, + out: None = None, ) -> Any: ... 
@overload def around( a: _ArrayLikeBool_co, - decimals: SupportsIndex = ..., - out: None = ..., + decimals: SupportsIndex = 0, + out: None = None, ) -> NDArray[float16]: ... @overload def around( a: _ArrayLike[_NumberOrObjectT], - decimals: SupportsIndex = ..., - out: None = ..., + decimals: SupportsIndex = 0, + out: None = None, ) -> NDArray[_NumberOrObjectT]: ... @overload def around( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - decimals: SupportsIndex = ..., - out: None = ..., + decimals: SupportsIndex = 0, + out: None = None, ) -> NDArray[Any]: ... @overload def around( @@ -1444,7 +1444,7 @@ def around( @overload def around( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - decimals: SupportsIndex = ..., + decimals: SupportsIndex = 0, *, out: _ArrayT, ) -> _ArrayT: ... @@ -1452,9 +1452,9 @@ def around( @overload def mean( a: _ArrayLikeFloat_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., + axis: None = None, + dtype: None = None, + out: None = None, keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1462,9 +1462,9 @@ def mean( @overload def mean( a: _ArrayLikeComplex_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., + axis: None = None, + dtype: None = None, + out: None = None, keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1472,9 +1472,9 @@ def mean( @overload def mean( a: _ArrayLike[np.timedelta64], - axis: None = ..., - dtype: None = ..., - out: None = ..., + axis: None = None, + dtype: None = None, + out: None = None, keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1492,8 +1492,8 @@ def mean( @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike | None = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, *, out: _ArrayT, keepdims: bool | _NoValueType = ..., @@ -1504,7 +1504,7 @@ def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None, dtype: _DTypeLike[_ScalarT], - out: None = ..., + out: None = None, keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1512,10 +1512,10 @@ def mean( @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = ..., + axis: None = None, *, dtype: _DTypeLike[_ScalarT], - out: None = ..., + out: None = None, keepdims: Literal[False] | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ScalarT: ... @@ -1534,7 +1534,7 @@ def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, dtype: _DTypeLike[_ScalarT], - out: None = ..., + out: None = None, *, keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1542,19 +1542,19 @@ def mean( @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, *, dtype: _DTypeLike[_ScalarT], - out: None = ..., + out: None = None, keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ScalarT | NDArray[_ScalarT]: ... 
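(Illustrative aside, not part of the patch: the decimals=0 default on around, and the float return that the mean overloads encode for integer input, shown concretely on arbitrary values.)

    import numpy as np

    print(np.around(3.456))    # 3.0 -- decimals=0 rounds to whole numbers
    print(np.mean([1, 2, 3]))  # 2.0 -- integer input still yields float64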
@overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike | None = ..., - out: None = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1563,10 +1563,10 @@ def mean( @overload def std( a: _ArrayLikeComplex_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., - ddof: float = ..., + axis: None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, keepdims: Literal[False] = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1576,10 +1576,10 @@ def std( @overload def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: None = ..., - out: None = ..., - ddof: float = ..., + axis: _ShapeLike | None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, keepdims: bool = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1591,8 +1591,8 @@ def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None, dtype: _DTypeLike[_ScalarT], - out: None = ..., - ddof: float = ..., + out: None = None, + ddof: float = 0, keepdims: Literal[False] = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1602,11 +1602,11 @@ def std( @overload def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = ..., + axis: None = None, *, dtype: _DTypeLike[_ScalarT], - out: None = ..., - ddof: float = ..., + out: None = None, + ddof: float = 0, keepdims: Literal[False] = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., @@ -1615,10 +1615,10 @@ def std( @overload def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., - out: None = ..., - ddof: float = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike = None, + out: None = None, + ddof: float = 0, keepdims: bool = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1631,7 +1631,7 @@ def std( axis: _ShapeLike | None, dtype: DTypeLike, out: _ArrayT, - ddof: float = ..., + ddof: float = 0, keepdims: bool = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1641,11 +1641,11 @@ def std( @overload def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike = None, *, out: _ArrayT, - ddof: float = ..., + ddof: float = 0, keepdims: bool = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., @@ -1655,10 +1655,10 @@ def std( @overload def var( a: _ArrayLikeComplex_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., - ddof: float = ..., + axis: None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, keepdims: Literal[False] = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1668,10 +1668,10 @@ def var( @overload def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: None = ..., - out: None = ..., - ddof: float = ..., + axis: _ShapeLike | None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, keepdims: bool = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1683,8 +1683,8 @@ def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None, dtype: _DTypeLike[_ScalarT], - out: None = ..., - ddof: float = ..., + out: None = None, + ddof: float = 
0, keepdims: Literal[False] = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1694,11 +1694,11 @@ def var( @overload def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = ..., + axis: None = None, *, dtype: _DTypeLike[_ScalarT], - out: None = ..., - ddof: float = ..., + out: None = None, + ddof: float = 0, keepdims: Literal[False] = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., @@ -1707,10 +1707,10 @@ def var( @overload def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., - out: None = ..., - ddof: float = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike = None, + out: None = None, + ddof: float = 0, keepdims: bool = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1723,7 +1723,7 @@ def var( axis: _ShapeLike | None, dtype: DTypeLike, out: _ArrayT, - ddof: float = ..., + ddof: float = 0, keepdims: bool = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1733,11 +1733,11 @@ def var( @overload def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike = None, *, out: _ArrayT, - ddof: float = ..., + ddof: float = 0, keepdims: bool = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index c73a2694df3c..d08eb540743d 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -737,8 +737,8 @@ def ones_like( def full( shape: SupportsIndex, fill_value: _ScalarT, - dtype: None = ..., - order: _OrderCF = ..., + dtype: None = None, + order: _OrderCF = "C", **kwargs: Unpack[_KwargsEmpty], ) -> _Array[tuple[int], _ScalarT]: ... @overload @@ -746,7 +746,7 @@ def full( shape: SupportsIndex, fill_value: Any, dtype: _DTypeT | _SupportsDType[_DTypeT], - order: _OrderCF = ..., + order: _OrderCF = "C", **kwargs: Unpack[_KwargsEmpty], ) -> np.ndarray[tuple[int], _DTypeT]: ... @overload @@ -754,15 +754,15 @@ def full( shape: SupportsIndex, fill_value: Any, dtype: type[_ScalarT], - order: _OrderCF = ..., + order: _OrderCF = "C", **kwargs: Unpack[_KwargsEmpty], ) -> _Array[tuple[int], _ScalarT]: ... @overload def full( shape: SupportsIndex, fill_value: Any, - dtype: DTypeLike | None = ..., - order: _OrderCF = ..., + dtype: DTypeLike | None = None, + order: _OrderCF = "C", **kwargs: Unpack[_KwargsEmpty], ) -> _Array[tuple[int], Any]: ... # known shape @@ -770,8 +770,8 @@ def full( def full( shape: _AnyShapeT, fill_value: _ScalarT, - dtype: None = ..., - order: _OrderCF = ..., + dtype: None = None, + order: _OrderCF = "C", **kwargs: Unpack[_KwargsEmpty], ) -> _Array[_AnyShapeT, _ScalarT]: ... @overload @@ -779,7 +779,7 @@ def full( shape: _AnyShapeT, fill_value: Any, dtype: _DTypeT | _SupportsDType[_DTypeT], - order: _OrderCF = ..., + order: _OrderCF = "C", **kwargs: Unpack[_KwargsEmpty], ) -> np.ndarray[_AnyShapeT, _DTypeT]: ... @overload @@ -787,15 +787,15 @@ def full( shape: _AnyShapeT, fill_value: Any, dtype: type[_ScalarT], - order: _OrderCF = ..., + order: _OrderCF = "C", **kwargs: Unpack[_KwargsEmpty], ) -> _Array[_AnyShapeT, _ScalarT]: ... 
@overload def full( shape: _AnyShapeT, fill_value: Any, - dtype: DTypeLike | None = ..., - order: _OrderCF = ..., + dtype: DTypeLike | None = None, + order: _OrderCF = "C", **kwargs: Unpack[_KwargsEmpty], ) -> _Array[_AnyShapeT, Any]: ... # unknown shape @@ -803,8 +803,8 @@ def full( def full( shape: _ShapeLike, fill_value: _ScalarT, - dtype: None = ..., - order: _OrderCF = ..., + dtype: None = None, + order: _OrderCF = "C", **kwargs: Unpack[_KwargsEmpty], ) -> NDArray[_ScalarT]: ... @overload @@ -812,7 +812,7 @@ def full( shape: _ShapeLike, fill_value: Any, dtype: _DTypeT | _SupportsDType[_DTypeT], - order: _OrderCF = ..., + order: _OrderCF = "C", **kwargs: Unpack[_KwargsEmpty], ) -> np.ndarray[Any, _DTypeT]: ... @overload @@ -820,15 +820,15 @@ def full( shape: _ShapeLike, fill_value: Any, dtype: type[_ScalarT], - order: _OrderCF = ..., + order: _OrderCF = "C", **kwargs: Unpack[_KwargsEmpty], ) -> NDArray[_ScalarT]: ... @overload def full( shape: _ShapeLike, fill_value: Any, - dtype: DTypeLike | None = ..., - order: _OrderCF = ..., + dtype: DTypeLike | None = None, + order: _OrderCF = "C", **kwargs: Unpack[_KwargsEmpty], ) -> NDArray[Any]: ... @@ -900,147 +900,147 @@ def flatnonzero(a: ArrayLike) -> NDArray[intp]: ... def correlate( a: _ArrayLike[Never], v: _ArrayLike[Never], - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "valid", ) -> NDArray[Any]: ... @overload def correlate( a: _ArrayLikeBool_co, v: _ArrayLikeBool_co, - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "valid", ) -> NDArray[np.bool]: ... @overload def correlate( a: _ArrayLikeUInt_co, v: _ArrayLikeUInt_co, - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "valid", ) -> NDArray[unsignedinteger]: ... @overload def correlate( a: _ArrayLikeInt_co, v: _ArrayLikeInt_co, - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "valid", ) -> NDArray[signedinteger]: ... @overload def correlate( a: _ArrayLikeFloat_co, v: _ArrayLikeFloat_co, - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "valid", ) -> NDArray[floating]: ... @overload def correlate( a: _ArrayLikeComplex_co, v: _ArrayLikeComplex_co, - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "valid", ) -> NDArray[complexfloating]: ... @overload def correlate( a: _ArrayLikeTD64_co, v: _ArrayLikeTD64_co, - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "valid", ) -> NDArray[timedelta64]: ... @overload def correlate( a: _ArrayLikeObject_co, v: _ArrayLikeObject_co, - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "valid", ) -> NDArray[object_]: ... @overload def convolve( a: _ArrayLike[Never], v: _ArrayLike[Never], - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "full", ) -> NDArray[Any]: ... @overload def convolve( a: _ArrayLikeBool_co, v: _ArrayLikeBool_co, - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "full", ) -> NDArray[np.bool]: ... @overload def convolve( a: _ArrayLikeUInt_co, v: _ArrayLikeUInt_co, - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "full", ) -> NDArray[unsignedinteger]: ... @overload def convolve( a: _ArrayLikeInt_co, v: _ArrayLikeInt_co, - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "full", ) -> NDArray[signedinteger]: ... @overload def convolve( a: _ArrayLikeFloat_co, v: _ArrayLikeFloat_co, - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "full", ) -> NDArray[floating]: ... @overload def convolve( a: _ArrayLikeComplex_co, v: _ArrayLikeComplex_co, - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "full", ) -> NDArray[complexfloating]: ... 
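(Illustrative aside, not part of the patch: the differing mode defaults now spelled out in these overloads -- "valid" for correlate, "full" for convolve -- reproduced at runtime on small arbitrary arrays.)

    import numpy as np

    a, v = np.array([1., 2., 3.]), np.array([0., 1., 0.5])
    print(np.correlate(a, v))  # [3.5]                 -- mode="valid"
    print(np.convolve(a, v))   # [0.  1.  2.5 4.  1.5] -- mode="full"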
@overload def convolve( a: _ArrayLikeTD64_co, v: _ArrayLikeTD64_co, - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "full", ) -> NDArray[timedelta64]: ... @overload def convolve( a: _ArrayLikeObject_co, v: _ArrayLikeObject_co, - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "full", ) -> NDArray[object_]: ... @overload def outer( a: _ArrayLike[Never], b: _ArrayLike[Never], - out: None = ..., + out: None = None, ) -> NDArray[Any]: ... @overload def outer( a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, - out: None = ..., + out: None = None, ) -> NDArray[np.bool]: ... @overload def outer( a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, - out: None = ..., + out: None = None, ) -> NDArray[unsignedinteger]: ... @overload def outer( a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, - out: None = ..., + out: None = None, ) -> NDArray[signedinteger]: ... @overload def outer( a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, - out: None = ..., + out: None = None, ) -> NDArray[floating]: ... @overload def outer( a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, - out: None = ..., + out: None = None, ) -> NDArray[complexfloating]: ... @overload def outer( a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, - out: None = ..., + out: None = None, ) -> NDArray[timedelta64]: ... @overload def outer( a: _ArrayLikeObject_co, b: _ArrayLikeObject_co, - out: None = ..., + out: None = None, ) -> NDArray[object_]: ... @overload def outer( @@ -1053,68 +1053,68 @@ def outer( def tensordot( a: _ArrayLike[Never], b: _ArrayLike[Never], - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, ) -> NDArray[Any]: ... @overload def tensordot( a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, ) -> NDArray[np.bool]: ... @overload def tensordot( a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, ) -> NDArray[unsignedinteger]: ... @overload def tensordot( a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, ) -> NDArray[signedinteger]: ... @overload def tensordot( a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, ) -> NDArray[floating]: ... @overload def tensordot( a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, ) -> NDArray[complexfloating]: ... @overload def tensordot( a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, ) -> NDArray[timedelta64]: ... @overload def tensordot( a: _ArrayLikeObject_co, b: _ArrayLikeObject_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, ) -> NDArray[object_]: ... @overload def roll( a: _ArrayLike[_ScalarT], shift: _ShapeLike, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, ) -> NDArray[_ScalarT]: ... @overload def roll( a: ArrayLike, shift: _ShapeLike, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, ) -> NDArray[Any]: ... def rollaxis( a: NDArray[_ScalarT], axis: int, - start: int = ..., + start: int = 0, ) -> NDArray[_ScalarT]: ... 
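(Illustrative aside, not part of the patch: a short check of the roll and rollaxis defaults made explicit above; the array is arbitrary.)

    import numpy as np

    a = np.arange(6).reshape(2, 3)
    # axis=None rolls the flattened array, then restores the shape.
    print(np.roll(a, 1))            # [[5 0 1]
                                    #  [2 3 4]]
    # start=0 moves the given axis to the front of the shape.
    print(np.rollaxis(a, 1).shape)  # (3, 2)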
def moveaxis( @@ -1127,71 +1127,71 @@ def moveaxis( def cross( a: _ArrayLike[Never], b: _ArrayLike[Never], - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: int | None = ..., + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, ) -> NDArray[Any]: ... @overload def cross( a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: int | None = ..., + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, ) -> NoReturn: ... @overload def cross( a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: int | None = ..., + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, ) -> NDArray[unsignedinteger]: ... @overload def cross( a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: int | None = ..., + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, ) -> NDArray[signedinteger]: ... @overload def cross( a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: int | None = ..., + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, ) -> NDArray[floating]: ... @overload def cross( a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: int | None = ..., + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, ) -> NDArray[complexfloating]: ... @overload def cross( a: _ArrayLikeObject_co, b: _ArrayLikeObject_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: int | None = ..., + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, ) -> NDArray[object_]: ... @overload def indices( dimensions: Sequence[int], dtype: type[int] = ..., - sparse: L[False] = ..., + sparse: L[False] = False, ) -> NDArray[int_]: ... @overload def indices( @@ -1210,7 +1210,7 @@ def indices( def indices( dimensions: Sequence[int], dtype: _DTypeLike[_ScalarT], - sparse: L[False] = ..., + sparse: L[False] = False, ) -> NDArray[_ScalarT]: ... @overload def indices( @@ -1222,7 +1222,7 @@ def indices( def indices( dimensions: Sequence[int], dtype: DTypeLike = ..., - sparse: L[False] = ..., + sparse: L[False] = False, ) -> NDArray[Any]: ... @overload def indices( @@ -1243,40 +1243,40 @@ def fromfunction( shape: Sequence[int], *, dtype: DTypeLike = ..., - like: _SupportsArrayFunc | None = ..., + like: _SupportsArrayFunc | None = None, **kwargs: Any, ) -> _T: ... def isscalar(element: object) -> TypeGuard[generic | complex | str | bytes | memoryview]: ... -def binary_repr(num: SupportsIndex, width: int | None = ...) -> str: ... +def binary_repr(num: SupportsIndex, width: int | None = None) -> str: ... def base_repr( number: SupportsAbs[float], - base: float = ..., - padding: SupportsIndex | None = ..., + base: float = 2, + padding: SupportsIndex | None = 0, ) -> str: ... @overload def identity( n: int, - dtype: None = ..., + dtype: None = None, *, - like: _SupportsArrayFunc | None = ..., + like: _SupportsArrayFunc | None = None, ) -> NDArray[float64]: ... @overload def identity( n: int, dtype: _DTypeLike[_ScalarT], *, - like: _SupportsArrayFunc | None = ..., + like: _SupportsArrayFunc | None = None, ) -> NDArray[_ScalarT]: ... 
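(Illustrative aside, not part of the patch: the binary_repr and base_repr defaults above -- width=None, and base=2 with padding=0 -- on arbitrary inputs.)

    import numpy as np

    print(np.binary_repr(10))         # '1010' -- width=None gives minimal width
    print(np.base_repr(10, base=16))  # 'A'    -- padding=0 adds no leading zeros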
@overload def identity( n: int, - dtype: DTypeLike | None = ..., + dtype: DTypeLike | None = None, *, - like: _SupportsArrayFunc | None = ..., + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... def allclose( diff --git a/numpy/_core/shape_base.pyi b/numpy/_core/shape_base.pyi index c2c9c961e55b..9a5f8c3b9d60 100644 --- a/numpy/_core/shape_base.pyi +++ b/numpy/_core/shape_base.pyi @@ -68,72 +68,72 @@ def atleast_3d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray def vstack( tup: Sequence[_ArrayLike[_ScalarT]], *, - dtype: None = ..., - casting: _CastingKind = ... + dtype: None = None, + casting: _CastingKind = "same_kind" ) -> NDArray[_ScalarT]: ... @overload def vstack( tup: Sequence[ArrayLike], *, dtype: _DTypeLike[_ScalarT], - casting: _CastingKind = ... + casting: _CastingKind = "same_kind" ) -> NDArray[_ScalarT]: ... @overload def vstack( tup: Sequence[ArrayLike], *, - dtype: DTypeLike = ..., - casting: _CastingKind = ... + dtype: DTypeLike = None, + casting: _CastingKind = "same_kind" ) -> NDArray[Any]: ... @overload def hstack( tup: Sequence[_ArrayLike[_ScalarT]], *, - dtype: None = ..., - casting: _CastingKind = ... + dtype: None = None, + casting: _CastingKind = "same_kind" ) -> NDArray[_ScalarT]: ... @overload def hstack( tup: Sequence[ArrayLike], *, dtype: _DTypeLike[_ScalarT], - casting: _CastingKind = ... + casting: _CastingKind = "same_kind" ) -> NDArray[_ScalarT]: ... @overload def hstack( tup: Sequence[ArrayLike], *, - dtype: DTypeLike = ..., - casting: _CastingKind = ... + dtype: DTypeLike = None, + casting: _CastingKind = "same_kind" ) -> NDArray[Any]: ... @overload def stack( arrays: Sequence[_ArrayLike[_ScalarT]], - axis: SupportsIndex = ..., - out: None = ..., + axis: SupportsIndex = 0, + out: None = None, *, - dtype: None = ..., - casting: _CastingKind = ... + dtype: None = None, + casting: _CastingKind = "same_kind" ) -> NDArray[_ScalarT]: ... @overload def stack( arrays: Sequence[ArrayLike], - axis: SupportsIndex = ..., - out: None = ..., + axis: SupportsIndex = 0, + out: None = None, *, dtype: _DTypeLike[_ScalarT], - casting: _CastingKind = ... + casting: _CastingKind = "same_kind" ) -> NDArray[_ScalarT]: ... @overload def stack( arrays: Sequence[ArrayLike], - axis: SupportsIndex = ..., - out: None = ..., + axis: SupportsIndex = 0, + out: None = None, *, - dtype: DTypeLike = ..., - casting: _CastingKind = ... + dtype: DTypeLike = None, + casting: _CastingKind = "same_kind" ) -> NDArray[Any]: ... @overload def stack( @@ -159,14 +159,14 @@ def unstack( array: _ArrayLike[_ScalarT], /, *, - axis: int = ..., + axis: int = 0, ) -> tuple[NDArray[_ScalarT], ...]: ... @overload def unstack( array: ArrayLike, /, *, - axis: int = ..., + axis: int = 0, ) -> tuple[NDArray[Any], ...]: ... @overload diff --git a/numpy/_core/strings.pyi b/numpy/_core/strings.pyi index 1b8bfd84cc3d..2f630ce8556a 100644 --- a/numpy/_core/strings.pyi +++ b/numpy/_core/strings.pyi @@ -150,154 +150,154 @@ def str_len(x: UST_co) -> NDArray[np.int_]: ... def find( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def find( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def find( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... 
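(Illustrative aside, not part of the patch: with start=0 and end=None, find searches each element in full; this assumes NumPy >= 2.0, where the np.strings namespace is available.)

    import numpy as np

    a = np.array(["hello", "world"])
    print(np.strings.find(a, "l"))     # [2 3] -- whole strings searched
    print(np.strings.find(a, "l", 3))  # [3 3] -- search starting at index 3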
@overload def rfind( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def rfind( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def rfind( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def index( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def index( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def index( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def rindex( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def rindex( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def rindex( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def count( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def count( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def count( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def startswith( a: U_co, prefix: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def startswith( a: S_co, prefix: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def startswith( a: T_co, prefix: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def endswith( a: U_co, suffix: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def endswith( a: S_co, suffix: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def endswith( a: T_co, suffix: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... def decode( @@ -312,13 +312,13 @@ def encode( ) -> NDArray[np.bytes_]: ... @overload -def expandtabs(a: U_co, tabsize: i_co = ...) -> NDArray[np.str_]: ... +def expandtabs(a: U_co, tabsize: i_co = 8) -> NDArray[np.str_]: ... @overload -def expandtabs(a: S_co, tabsize: i_co = ...) -> NDArray[np.bytes_]: ... +def expandtabs(a: S_co, tabsize: i_co = 8) -> NDArray[np.bytes_]: ... @overload -def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = ...) -> _StringDTypeArray: ... +def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = 8) -> _StringDTypeArray: ... 
@overload -def expandtabs(a: T_co, tabsize: i_co = ...) -> _StringDTypeOrUnicodeArray: ... +def expandtabs(a: T_co, tabsize: i_co = 8) -> _StringDTypeOrUnicodeArray: ... @overload def center(a: U_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.str_]: ... @@ -433,28 +433,28 @@ def replace( a: U_co, old: U_co, new: U_co, - count: i_co = ..., + count: i_co = -1, ) -> NDArray[np.str_]: ... @overload def replace( a: S_co, old: S_co, new: S_co, - count: i_co = ..., + count: i_co = -1, ) -> NDArray[np.bytes_]: ... @overload def replace( a: _StringDTypeSupportsArray, old: _StringDTypeSupportsArray, new: _StringDTypeSupportsArray, - count: i_co = ..., + count: i_co = -1, ) -> _StringDTypeArray: ... @overload def replace( a: T_co, old: T_co, new: T_co, - count: i_co = ..., + count: i_co = -1, ) -> _StringDTypeOrUnicodeArray: ... @overload diff --git a/numpy/typing/tests/data/fail/char.pyi b/numpy/typing/tests/data/fail/char.pyi index 62c4475c29be..3dbe5eda296e 100644 --- a/numpy/typing/tests/data/fail/char.pyi +++ b/numpy/typing/tests/data/fail/char.pyi @@ -19,9 +19,7 @@ np.char.join(AR_U, b"_") # type: ignore[arg-type] np.char.join(AR_S, "_") # type: ignore[arg-type] np.char.ljust(AR_U, 5, fillchar=b"a") # type: ignore[arg-type] -np.char.ljust(AR_S, 5, fillchar="a") # type: ignore[arg-type] np.char.rjust(AR_U, 5, fillchar=b"a") # type: ignore[arg-type] -np.char.rjust(AR_S, 5, fillchar="a") # type: ignore[arg-type] np.char.lstrip(AR_U, chars=b"a") # type: ignore[arg-type] np.char.lstrip(AR_S, chars="a") # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/chararray.pyi b/numpy/typing/tests/data/fail/chararray.pyi index f1b7009439d1..589895510227 100644 --- a/numpy/typing/tests/data/fail/chararray.pyi +++ b/numpy/typing/tests/data/fail/chararray.pyi @@ -12,9 +12,7 @@ AR_U.join(b"_") # type: ignore[arg-type] AR_S.join("_") # type: ignore[arg-type] AR_U.ljust(5, fillchar=b"a") # type: ignore[arg-type] -AR_S.ljust(5, fillchar="a") # type: ignore[arg-type] AR_U.rjust(5, fillchar=b"a") # type: ignore[arg-type] -AR_S.rjust(5, fillchar="a") # type: ignore[arg-type] AR_U.lstrip(chars=b"a") # type: ignore[arg-type] AR_S.lstrip(chars="a") # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/reveal/char.pyi b/numpy/typing/tests/data/reveal/char.pyi index 5c6af73888d0..2fba2feae385 100644 --- a/numpy/typing/tests/data/reveal/char.pyi +++ b/numpy/typing/tests/data/reveal/char.pyi @@ -65,6 +65,7 @@ assert_type(np.char.join(AR_T, "_"), AR_TU_alias) assert_type(np.char.ljust(AR_U, 5), npt.NDArray[np.str_]) assert_type(np.char.ljust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.char.ljust(AR_S, [4, 3, 1], fillchar="a"), npt.NDArray[np.bytes_]) assert_type(np.char.ljust(AR_T, 5), AR_T_alias) assert_type(np.char.ljust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_TU_alias) diff --git a/numpy/typing/tests/data/reveal/chararray.pyi b/numpy/typing/tests/data/reveal/chararray.pyi index b5f4392b75c8..5c3dc85038db 100644 --- a/numpy/typing/tests/data/reveal/chararray.pyi +++ b/numpy/typing/tests/data/reveal/chararray.pyi @@ -50,6 +50,7 @@ assert_type(AR_S.join([b"_", b""]), _BytesCharArray) assert_type(AR_U.ljust(5), _StrCharArray) assert_type(AR_S.ljust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), _BytesCharArray) +assert_type(AR_S.ljust([4, 3, 1], fillchar="a"), _BytesCharArray) assert_type(AR_U.rjust(5), _StrCharArray) assert_type(AR_S.rjust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), _BytesCharArray) From 9927e365c1f5d0d3a138a94ab2d1215cdcf51c57 Mon Sep 17 
00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Aug 2025 17:32:04 +0000 Subject: [PATCH 0321/1018] MAINT: Bump pypa/cibuildwheel from 3.1.3 to 3.1.4 Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 3.1.3 to 3.1.4. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/352e01339f0a173aa2a3eb57f01492e341e83865...c923d83ad9c1bc00211c5041d0c3f73294ff88f6) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-version: 3.1.4 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index a1d5b399988e..e7503871e3f5 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -49,7 +49,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@352e01339f0a173aa2a3eb57f01492e341e83865 # v3.1.3 + - uses: pypa/cibuildwheel@c923d83ad9c1bc00211c5041d0c3f73294ff88f6 # v3.1.4 env: CIBW_PLATFORM: pyodide diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index e87af415d3c5..6d041831c22d 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -174,7 +174,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@352e01339f0a173aa2a3eb57f01492e341e83865 # v3.1.3 + uses: pypa/cibuildwheel@c923d83ad9c1bc00211c5041d0c3f73294ff88f6 # v3.1.4 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From a409870243fee1683b057d52dc40b70d0364b185 Mon Sep 17 00:00:00 2001 From: Richard Smythe <82367391+richardsmythe@users.noreply.github.com> Date: Wed, 20 Aug 2025 20:44:49 +0100 Subject: [PATCH 0322/1018] DOC: Fix typo in tril_indices and triu_indices docstrings - (#29604) correspdonding -> corresponding --- numpy/lib/_twodim_base_impl.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_twodim_base_impl.py b/numpy/lib/_twodim_base_impl.py index dc6a55886fdb..bad797a970c9 100644 --- a/numpy/lib/_twodim_base_impl.py +++ b/numpy/lib/_twodim_base_impl.py @@ -926,7 +926,7 @@ def tril_indices(n, k=0, m=None): ------- inds : tuple of arrays The row and column indices, respectively. The row indices are sorted - in non-decreasing order, and the correspdonding column indices are + in non-decreasing order, and the corresponding column indices are strictly increasing for each row. See also @@ -1073,7 +1073,7 @@ def triu_indices(n, k=0, m=None): ------- inds : tuple, shape(2) of ndarrays, shape(`n`) The row and column indices, respectively. The row indices are sorted - in non-decreasing order, and the correspdonding column indices are + in non-decreasing order, and the corresponding column indices are strictly increasing for each row. 
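(Illustrative aside, not part of the patch: a concrete instance of the index ordering the docstring describes.)

    >>> import numpy as np
    >>> rows, cols = np.tril_indices(3)
    >>> rows  # non-decreasing
    array([0, 1, 1, 2, 2, 2])
    >>> cols  # strictly increasing within each row
    array([0, 0, 1, 0, 1, 2])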
See also From 3545479aad0388fea7bde3c143c56b6134231654 Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Wed, 20 Aug 2025 16:19:11 -0400 Subject: [PATCH 0323/1018] TST: Replace xunit setup with methods (#29596) --- numpy/_core/tests/test_defchararray.py | 414 +++++++++++++------------ 1 file changed, 221 insertions(+), 193 deletions(-) diff --git a/numpy/_core/tests/test_defchararray.py b/numpy/_core/tests/test_defchararray.py index 2607953a940a..04518bdbd671 100644 --- a/numpy/_core/tests/test_defchararray.py +++ b/numpy/_core/tests/test_defchararray.py @@ -134,63 +134,67 @@ def fail(): assert_raises(ValueError, fail) - class TestWhitespace: - def setup_method(self): - self.A = np.array([['abc ', '123 '], - ['789 ', 'xyz ']]).view(np.char.chararray) - self.B = np.array([['abc', '123'], - ['789', 'xyz']]).view(np.char.chararray) - def test1(self): - assert_(np.all(self.A == self.B)) - assert_(np.all(self.A >= self.B)) - assert_(np.all(self.A <= self.B)) - assert_(not np.any(self.A > self.B)) - assert_(not np.any(self.A < self.B)) - assert_(not np.any(self.A != self.B)) + A = np.array([['abc ', '123 '], + ['789 ', 'xyz ']]).view(np.char.chararray) + B = np.array([['abc', '123'], + ['789', 'xyz']]).view(np.char.chararray) + assert_(np.all(A == B)) + assert_(np.all(A >= B)) + assert_(np.all(A <= B)) + assert_(not np.any(A > B)) + assert_(not np.any(A < B)) + assert_(not np.any(A != B)) class TestChar: - def setup_method(self): - self.A = np.array('abc1', dtype='c').view(np.char.chararray) - def test_it(self): - assert_equal(self.A.shape, (4,)) - assert_equal(self.A.upper()[:2].tobytes(), b'AB') + A = np.array('abc1', dtype='c').view(np.char.chararray) + assert_equal(A.shape, (4,)) + assert_equal(A.upper()[:2].tobytes(), b'AB') class TestComparisons: - def setup_method(self): - self.A = np.array([['abc', 'abcc', '123'], - ['789', 'abc', 'xyz']]).view(np.char.chararray) - self.B = np.array([['efg', 'efg', '123 '], - ['051', 'efgg', 'tuv']]).view(np.char.chararray) + def A(self): + return np.array([['abc', 'abcc', '123'], + ['789', 'abc', 'xyz']]).view(np.char.chararray) + + def B(self): + return np.array([['efg', 'efg', '123 '], + ['051', 'efgg', 'tuv']]).view(np.char.chararray) def test_not_equal(self): - assert_array_equal((self.A != self.B), + A, B = self.A(), self.B() + assert_array_equal((A != B), [[True, True, False], [True, True, True]]) def test_equal(self): - assert_array_equal((self.A == self.B), + A, B = self.A(), self.B() + assert_array_equal((A == B), [[False, False, True], [False, False, False]]) def test_greater_equal(self): - assert_array_equal((self.A >= self.B), + A, B = self.A(), self.B() + assert_array_equal((A >= B), [[False, False, True], [True, False, True]]) def test_less_equal(self): - assert_array_equal((self.A <= self.B), + A, B = self.A(), self.B() + assert_array_equal((A <= B), [[True, True, True], [False, True, False]]) def test_greater(self): - assert_array_equal((self.A > self.B), + A, B = self.A(), self.B() + assert_array_equal((A > B), [[False, False, False], [True, False, True]]) def test_less(self): - assert_array_equal((self.A < self.B), + A, B = self.A(), self.B() + assert_array_equal((A < B), [[True, True, False], [False, True, False]]) def test_type(self): - out1 = np.char.equal(self.A, self.B) + A, B = self.A(), self.B() + out1 = np.char.equal(A, B) out2 = np.char.equal('a', 'a') assert_(isinstance(out1, np.ndarray)) assert_(isinstance(out2, np.ndarray)) @@ -198,59 +202,56 @@ def test_type(self): class 
TestComparisonsMixed1(TestComparisons): """Ticket #1276""" - def setup_method(self): - TestComparisons.setup_method(self) - self.B = np.array( + def B(self): + return np.array( [['efg', 'efg', '123 '], ['051', 'efgg', 'tuv']], np.str_).view(np.char.chararray) class TestComparisonsMixed2(TestComparisons): """Ticket #1276""" - def setup_method(self): - TestComparisons.setup_method(self) - self.A = np.array( + def A(self): + return np.array( [['abc', 'abcc', '123'], ['789', 'abc', 'xyz']], np.str_).view(np.char.chararray) class TestInformation: - def setup_method(self): - self.A = np.array([[' abc ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']]) \ - .view(np.char.chararray) - self.B = np.array([[' \u03a3 ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']]) \ - .view(np.char.chararray) - # Array with longer strings, > MEMCHR_CUT_OFF in code. - self.C = (np.array(['ABCDEFGHIJKLMNOPQRSTUVWXYZ', - '01234567890123456789012345']) - .view(np.char.chararray)) + def A(self): + return np.array([[' abc ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]) \ + .view(np.char.chararray) + + def B(self): + return np.array([[' \u03a3 ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]) \ + .view(np.char.chararray) def test_len(self): - assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer)) - assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]]) - assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]]) + A, B = self.A(), self.B() + assert_(issubclass(np.char.str_len(A).dtype.type, np.integer)) + assert_array_equal(np.char.str_len(A), [[5, 0], [5, 9], [12, 5]]) + assert_array_equal(np.char.str_len(B), [[3, 0], [5, 9], [12, 5]]) def test_count(self): - assert_(issubclass(self.A.count('').dtype.type, np.integer)) - assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]]) - assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]]) + A, B = self.A(), self.B() + assert_(issubclass(A.count('').dtype.type, np.integer)) + assert_array_equal(A.count('a'), [[1, 0], [0, 1], [0, 0]]) + assert_array_equal(A.count('123'), [[0, 0], [1, 0], [1, 0]]) # Python doesn't seem to like counting NULL characters - # assert_array_equal(self.A.count('\0'), [[0, 0], [0, 0], [1, 0]]) - assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]]) - assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]]) - assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]]) - # assert_array_equal(self.B.count('\0'), [[0, 0], [0, 0], [1, 0]]) + assert_array_equal(A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]]) + assert_array_equal(B.count('a'), [[0, 0], [0, 1], [0, 0]]) + assert_array_equal(B.count('123'), [[0, 0], [1, 0], [1, 0]]) def test_endswith(self): - assert_(issubclass(self.A.endswith('').dtype.type, np.bool)) - assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]]) - assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]]) + A = self.A() + assert_(issubclass(A.endswith('').dtype.type, np.bool)) + assert_array_equal(A.endswith(' '), [[1, 0], [0, 0], [1, 0]]) + assert_array_equal(A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]]) def fail(): - self.A.endswith('3', 'fdjk') + A.endswith('3', 'fdjk') assert_raises(TypeError, fail) @@ -260,7 +261,7 @@ def fail(): ("S", lambda x: x.encode('ascii')), ]) def test_find(self, dtype, encode): - A = self.A.astype(dtype) + A = self.A().astype(dtype) assert_(issubclass(A.find(encode('a')).dtype.type, np.integer)) 
assert_array_equal(A.find(encode('a')), [[1, -1], [-1, 6], [-1, -1]]) @@ -270,103 +271,119 @@ def test_find(self, dtype, encode): [[1, -1], [-1, -1], [-1, -1]]) assert_array_equal(A.find([encode('1'), encode('P')]), [[-1, -1], [0, -1], [0, 1]]) - C = self.C.astype(dtype) + C = (np.array(['ABCDEFGHIJKLMNOPQRSTUVWXYZ', + '01234567890123456789012345']) + .view(np.char.chararray)).astype(dtype) assert_array_equal(C.find(encode('M')), [12, -1]) def test_index(self): + A = self.A() def fail(): - self.A.index('a') + A.index('a') assert_raises(ValueError, fail) assert_(np.char.index('abcba', 'b') == 1) assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer)) def test_isalnum(self): - assert_(issubclass(self.A.isalnum().dtype.type, np.bool)) - assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, True]]) + A = self.A() + assert_(issubclass(A.isalnum().dtype.type, np.bool)) + assert_array_equal(A.isalnum(), [[False, False], [True, True], [False, True]]) def test_isalpha(self): - assert_(issubclass(self.A.isalpha().dtype.type, np.bool)) - assert_array_equal(self.A.isalpha(), [[False, False], [False, True], [False, True]]) + A = self.A() + assert_(issubclass(A.isalpha().dtype.type, np.bool)) + assert_array_equal(A.isalpha(), [[False, False], [False, True], [False, True]]) def test_isdigit(self): - assert_(issubclass(self.A.isdigit().dtype.type, np.bool)) - assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]]) + A = self.A() + assert_(issubclass(A.isdigit().dtype.type, np.bool)) + assert_array_equal(A.isdigit(), [[False, False], [True, False], [False, False]]) def test_islower(self): - assert_(issubclass(self.A.islower().dtype.type, np.bool)) - assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]]) + A = self.A() + assert_(issubclass(A.islower().dtype.type, np.bool)) + assert_array_equal(A.islower(), [[True, False], [False, False], [False, False]]) def test_isspace(self): - assert_(issubclass(self.A.isspace().dtype.type, np.bool)) - assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, False]]) + A = self.A() + assert_(issubclass(A.isspace().dtype.type, np.bool)) + assert_array_equal(A.isspace(), [[False, False], [False, False], [False, False]]) def test_istitle(self): - assert_(issubclass(self.A.istitle().dtype.type, np.bool)) - assert_array_equal(self.A.istitle(), [[False, False], [False, False], [False, False]]) + A = self.A() + assert_(issubclass(A.istitle().dtype.type, np.bool)) + assert_array_equal(A.istitle(), [[False, False], [False, False], [False, False]]) def test_isupper(self): - assert_(issubclass(self.A.isupper().dtype.type, np.bool)) - assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]]) + A = self.A() + assert_(issubclass(A.isupper().dtype.type, np.bool)) + assert_array_equal(A.isupper(), [[False, False], [False, False], [False, True]]) def test_rfind(self): - assert_(issubclass(self.A.rfind('a').dtype.type, np.integer)) - assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]]) - assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]]) - assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) - assert_array_equal(self.A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]]) + A = self.A() + assert_(issubclass(A.rfind('a').dtype.type, np.integer)) + assert_array_equal(A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]]) + assert_array_equal(A.rfind('3'), [[-1, -1], [2, -1], [6, -1]]) + 
assert_array_equal(A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) + assert_array_equal(A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]]) def test_rindex(self): + A = self.A() def fail(): - self.A.rindex('a') + A.rindex('a') assert_raises(ValueError, fail) assert_(np.char.rindex('abcba', 'b') == 3) assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer)) def test_startswith(self): - assert_(issubclass(self.A.startswith('').dtype.type, np.bool)) - assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]]) - assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]]) + A = self.A() + assert_(issubclass(A.startswith('').dtype.type, np.bool)) + assert_array_equal(A.startswith(' '), [[1, 0], [0, 0], [0, 0]]) + assert_array_equal(A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]]) def fail(): - self.A.startswith('3', 'fdjk') + A.startswith('3', 'fdjk') assert_raises(TypeError, fail) - class TestMethods: - def setup_method(self): - self.A = np.array([[' abc ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']], - dtype='S').view(np.char.chararray) - self.B = np.array([[' \u03a3 ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']]).view( - np.char.chararray) + def A(self): + return np.array([[' abc ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']], + dtype='S').view(np.char.chararray) + + def B(self): + return np.array([[' \u03a3 ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]) \ + .view(np.char.chararray) def test_capitalize(self): + A, B = self.A(), self.B() tgt = [[b' abc ', b''], [b'12345', b'Mixedcase'], [b'123 \t 345 \0 ', b'Upper']] - assert_(issubclass(self.A.capitalize().dtype.type, np.bytes_)) - assert_array_equal(self.A.capitalize(), tgt) + assert_(issubclass(A.capitalize().dtype.type, np.bytes_)) + assert_array_equal(A.capitalize(), tgt) tgt = [[' \u03c3 ', ''], ['12345', 'Mixedcase'], ['123 \t 345 \0 ', 'Upper']] - assert_(issubclass(self.B.capitalize().dtype.type, np.str_)) - assert_array_equal(self.B.capitalize(), tgt) + assert_(issubclass(B.capitalize().dtype.type, np.str_)) + assert_array_equal(B.capitalize(), tgt) def test_center(self): - assert_(issubclass(self.A.center(10).dtype.type, np.bytes_)) - C = self.A.center([10, 20]) + A = self.A() + assert_(issubclass(A.center(10).dtype.type, np.bytes_)) + C = A.center([10, 20]) assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) - C = self.A.center(20, b'#') + C = A.center(20, b'#') assert_(np.all(C.startswith(b'#'))) assert_(np.all(C.endswith(b'#'))) @@ -381,17 +398,17 @@ def test_decode(self): assert_(A.decode('unicode-escape')[0] == '\u03a3') def test_encode(self): - B = self.B.encode('unicode_escape') + B = self.B().encode('unicode_escape') assert_(B[0][0] == ' \\u03a3 '.encode('latin1')) def test_expandtabs(self): - T = self.A.expandtabs() + T = self.A().expandtabs() assert_(T[2, 0] == b'123 345 \0') def test_join(self): # NOTE: list(b'123') == [49, 50, 51] # so that b','.join(b'123') results to an error on Py3 - A0 = self.A.decode('ascii') + A0 = self.A().decode('ascii') A = np.char.join([',', '#'], A0) assert_(issubclass(A.dtype.type, np.str_)) @@ -401,12 +418,13 @@ def test_join(self): assert_array_equal(np.char.join([',', '#'], A0), tgt) def test_ljust(self): - assert_(issubclass(self.A.ljust(10).dtype.type, np.bytes_)) + A = self.A() + assert_(issubclass(A.ljust(10).dtype.type, np.bytes_)) - C = self.A.ljust([10, 20]) + C = A.ljust([10, 20]) assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 
20]]) - C = self.A.ljust(20, b'#') + C = A.ljust(20, b'#') assert_array_equal(C.startswith(b'#'), [ [False, True], [False, False], [False, False]]) assert_(np.all(C.endswith(b'#'))) @@ -418,38 +436,41 @@ def test_ljust(self): assert_array_equal(C, tgt) def test_lower(self): + A, B = self.A(), self.B() tgt = [[b' abc ', b''], [b'12345', b'mixedcase'], [b'123 \t 345 \0 ', b'upper']] - assert_(issubclass(self.A.lower().dtype.type, np.bytes_)) - assert_array_equal(self.A.lower(), tgt) + assert_(issubclass(A.lower().dtype.type, np.bytes_)) + assert_array_equal(A.lower(), tgt) tgt = [[' \u03c3 ', ''], ['12345', 'mixedcase'], ['123 \t 345 \0 ', 'upper']] - assert_(issubclass(self.B.lower().dtype.type, np.str_)) - assert_array_equal(self.B.lower(), tgt) + assert_(issubclass(B.lower().dtype.type, np.str_)) + assert_array_equal(B.lower(), tgt) def test_lstrip(self): + A, B = self.A(), self.B() tgt = [[b'abc ', b''], [b'12345', b'MixedCase'], [b'123 \t 345 \0 ', b'UPPER']] - assert_(issubclass(self.A.lstrip().dtype.type, np.bytes_)) - assert_array_equal(self.A.lstrip(), tgt) + assert_(issubclass(A.lstrip().dtype.type, np.bytes_)) + assert_array_equal(A.lstrip(), tgt) tgt = [[b' abc', b''], [b'2345', b'ixedCase'], [b'23 \t 345 \x00', b'UPPER']] - assert_array_equal(self.A.lstrip([b'1', b'M']), tgt) + assert_array_equal(A.lstrip([b'1', b'M']), tgt) tgt = [['\u03a3 ', ''], ['12345', 'MixedCase'], ['123 \t 345 \0 ', 'UPPER']] - assert_(issubclass(self.B.lstrip().dtype.type, np.str_)) - assert_array_equal(self.B.lstrip(), tgt) + assert_(issubclass(B.lstrip().dtype.type, np.str_)) + assert_array_equal(B.lstrip(), tgt) def test_partition(self): - P = self.A.partition([b'3', b'M']) + A = self.A() + P = A.partition([b'3', b'M']) tgt = [[(b' abc ', b'', b''), (b'', b'', b'')], [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')], [(b'12', b'3', b' \t 345 \0 '), (b'UPPER', b'', b'')]] @@ -457,7 +478,8 @@ def test_partition(self): assert_array_equal(P, tgt) def test_replace(self): - R = self.A.replace([b'3', b'a'], + A = self.A() + R = A.replace([b'3', b'a'], [b'##########', b'@']) tgt = [[b' abc ', b''], [b'12##########45', b'MixedC@se'], @@ -466,14 +488,14 @@ def test_replace(self): assert_array_equal(R, tgt) # Test special cases that should just return the input array, # since replacements are not possible or do nothing. 
- S1 = self.A.replace(b'A very long byte string, longer than A', b'') - assert_array_equal(S1, self.A) - S2 = self.A.replace(b'', b'') - assert_array_equal(S2, self.A) - S3 = self.A.replace(b'3', b'3') - assert_array_equal(S3, self.A) - S4 = self.A.replace(b'3', b'', count=0) - assert_array_equal(S4, self.A) + S1 = A.replace(b'A very long byte string, longer than A', b'') + assert_array_equal(S1, A) + S2 = A.replace(b'', b'') + assert_array_equal(S2, A) + S3 = A.replace(b'3', b'3') + assert_array_equal(S3, A) + S4 = A.replace(b'3', b'', count=0) + assert_array_equal(S4, A) def test_replace_count_and_size(self): a = np.array(['0123456789' * i for i in range(4)] @@ -510,12 +532,13 @@ def test_replace_broadcasting(self): assert_array_equal(r3, np.array(['X,X,X', 'X,0', 'X'])) def test_rjust(self): - assert_(issubclass(self.A.rjust(10).dtype.type, np.bytes_)) + A = self.A() + assert_(issubclass(A.rjust(10).dtype.type, np.bytes_)) - C = self.A.rjust([10, 20]) + C = A.rjust([10, 20]) assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) - C = self.A.rjust(20, b'#') + C = A.rjust(20, b'#') assert_(np.all(C.startswith(b'#'))) assert_array_equal(C.endswith(b'#'), [[False, True], [False, False], [False, False]]) @@ -527,7 +550,8 @@ def test_rjust(self): assert_array_equal(C, tgt) def test_rpartition(self): - P = self.A.rpartition([b'3', b'M']) + A = self.A() + P = A.rpartition([b'3', b'M']) tgt = [[(b'', b'', b' abc '), (b'', b'', b'')], [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')], [(b'123 \t ', b'3', b'45 \0 '), (b'', b'', b'UPPER')]] @@ -535,7 +559,7 @@ def test_rpartition(self): assert_array_equal(P, tgt) def test_rsplit(self): - A = self.A.rsplit(b'3') + A = self.A().rsplit(b'3') tgt = [[[b' abc '], [b'']], [[b'12', b'45'], [b'MixedCase']], [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]] @@ -543,45 +567,47 @@ def test_rsplit(self): assert_equal(A.tolist(), tgt) def test_rstrip(self): - assert_(issubclass(self.A.rstrip().dtype.type, np.bytes_)) + A, B = self.A(), self.B() + assert_(issubclass(A.rstrip().dtype.type, np.bytes_)) tgt = [[b' abc', b''], [b'12345', b'MixedCase'], [b'123 \t 345', b'UPPER']] - assert_array_equal(self.A.rstrip(), tgt) + assert_array_equal(A.rstrip(), tgt) tgt = [[b' abc ', b''], [b'1234', b'MixedCase'], [b'123 \t 345 \x00', b'UPP'] ] - assert_array_equal(self.A.rstrip([b'5', b'ER']), tgt) + assert_array_equal(A.rstrip([b'5', b'ER']), tgt) tgt = [[' \u03a3', ''], ['12345', 'MixedCase'], ['123 \t 345', 'UPPER']] - assert_(issubclass(self.B.rstrip().dtype.type, np.str_)) - assert_array_equal(self.B.rstrip(), tgt) + assert_(issubclass(B.rstrip().dtype.type, np.str_)) + assert_array_equal(B.rstrip(), tgt) def test_strip(self): + A, B = self.A(), self.B() tgt = [[b'abc', b''], [b'12345', b'MixedCase'], [b'123 \t 345', b'UPPER']] - assert_(issubclass(self.A.strip().dtype.type, np.bytes_)) - assert_array_equal(self.A.strip(), tgt) + assert_(issubclass(A.strip().dtype.type, np.bytes_)) + assert_array_equal(A.strip(), tgt) tgt = [[b' abc ', b''], [b'234', b'ixedCas'], [b'23 \t 345 \x00', b'UPP']] - assert_array_equal(self.A.strip([b'15', b'EReM']), tgt) + assert_array_equal(A.strip([b'15', b'EReM']), tgt) tgt = [['\u03a3', ''], ['12345', 'MixedCase'], ['123 \t 345', 'UPPER']] - assert_(issubclass(self.B.strip().dtype.type, np.str_)) - assert_array_equal(self.B.strip(), tgt) + assert_(issubclass(B.strip().dtype.type, np.str_)) + assert_array_equal(B.strip(), tgt) def test_split(self): - A = self.A.split(b'3') + A = self.A().split(b'3') tgt = [ [[b' abc '], 
[b'']], [[b'12', b'45'], [b'MixedCase']], @@ -596,90 +622,98 @@ def test_splitlines(self): assert_(len(A[0]) == 3) def test_swapcase(self): + A, B = self.A(), self.B() tgt = [[b' ABC ', b''], [b'12345', b'mIXEDcASE'], [b'123 \t 345 \0 ', b'upper']] - assert_(issubclass(self.A.swapcase().dtype.type, np.bytes_)) - assert_array_equal(self.A.swapcase(), tgt) + assert_(issubclass(A.swapcase().dtype.type, np.bytes_)) + assert_array_equal(A.swapcase(), tgt) tgt = [[' \u03c3 ', ''], ['12345', 'mIXEDcASE'], ['123 \t 345 \0 ', 'upper']] - assert_(issubclass(self.B.swapcase().dtype.type, np.str_)) - assert_array_equal(self.B.swapcase(), tgt) + assert_(issubclass(B.swapcase().dtype.type, np.str_)) + assert_array_equal(B.swapcase(), tgt) def test_title(self): + A, B = self.A(), self.B() tgt = [[b' Abc ', b''], [b'12345', b'Mixedcase'], [b'123 \t 345 \0 ', b'Upper']] - assert_(issubclass(self.A.title().dtype.type, np.bytes_)) - assert_array_equal(self.A.title(), tgt) + assert_(issubclass(A.title().dtype.type, np.bytes_)) + assert_array_equal(A.title(), tgt) tgt = [[' \u03a3 ', ''], ['12345', 'Mixedcase'], ['123 \t 345 \0 ', 'Upper']] - assert_(issubclass(self.B.title().dtype.type, np.str_)) - assert_array_equal(self.B.title(), tgt) + assert_(issubclass(B.title().dtype.type, np.str_)) + assert_array_equal(B.title(), tgt) def test_upper(self): + A, B = self.A(), self.B() tgt = [[b' ABC ', b''], [b'12345', b'MIXEDCASE'], [b'123 \t 345 \0 ', b'UPPER']] - assert_(issubclass(self.A.upper().dtype.type, np.bytes_)) - assert_array_equal(self.A.upper(), tgt) + assert_(issubclass(A.upper().dtype.type, np.bytes_)) + assert_array_equal(A.upper(), tgt) tgt = [[' \u03a3 ', ''], ['12345', 'MIXEDCASE'], ['123 \t 345 \0 ', 'UPPER']] - assert_(issubclass(self.B.upper().dtype.type, np.str_)) - assert_array_equal(self.B.upper(), tgt) + assert_(issubclass(B.upper().dtype.type, np.str_)) + assert_array_equal(B.upper(), tgt) def test_isnumeric(self): + A, B = self.A(), self.B() def fail(): - self.A.isnumeric() + A.isnumeric() assert_raises(TypeError, fail) - assert_(issubclass(self.B.isnumeric().dtype.type, np.bool)) - assert_array_equal(self.B.isnumeric(), [ + assert_(issubclass(B.isnumeric().dtype.type, np.bool)) + assert_array_equal(B.isnumeric(), [ [False, False], [True, False], [False, False]]) def test_isdecimal(self): + A, B = self.A(), self.B() def fail(): - self.A.isdecimal() + A.isdecimal() assert_raises(TypeError, fail) - assert_(issubclass(self.B.isdecimal().dtype.type, np.bool)) - assert_array_equal(self.B.isdecimal(), [ + assert_(issubclass(B.isdecimal().dtype.type, np.bool)) + assert_array_equal(B.isdecimal(), [ [False, False], [True, False], [False, False]]) - class TestOperations: - def setup_method(self): - self.A = np.array([['abc', '123'], - ['789', 'xyz']]).view(np.char.chararray) - self.B = np.array([['efg', '456'], - ['051', 'tuv']]).view(np.char.chararray) + def A(self): + return np.array([['abc', '123'], + ['789', 'xyz']]).view(np.char.chararray) + + def B(self): + return np.array([['efg', '456'], + ['051', 'tuv']]).view(np.char.chararray) def test_add(self): + A, B = self.A(), self.B() AB = np.array([['abcefg', '123456'], ['789051', 'xyztuv']]).view(np.char.chararray) - assert_array_equal(AB, (self.A + self.B)) - assert_(len((self.A + self.B)[0][0]) == 6) + assert_array_equal(AB, (A + B)) + assert_(len((A + B)[0][0]) == 6) def test_radd(self): + A = self.A() QA = np.array([['qabc', 'q123'], ['q789', 'qxyz']]).view(np.char.chararray) - assert_array_equal(QA, ('q' + self.A)) + assert_array_equal(QA, 
('q' + A)) def test_mul(self): - A = self.A + A = self.A() for r in (2, 3, 5, 7, 197): Ar = np.array([[A[0, 0] * r, A[0, 1] * r], [A[1, 0] * r, A[1, 1] * r]]).view(np.char.chararray) - assert_array_equal(Ar, (self.A * r)) + assert_array_equal(Ar, (A * r)) for ob in [object(), 'qrs']: with assert_raises_regex(ValueError, @@ -687,11 +721,11 @@ def test_mul(self): A * ob def test_rmul(self): - A = self.A + A = self.A() for r in (2, 3, 5, 7, 197): Ar = np.array([[A[0, 0] * r, A[0, 1] * r], [A[1, 0] * r, A[1, 1] * r]]).view(np.char.chararray) - assert_array_equal(Ar, (r * self.A)) + assert_array_equal(Ar, (r * A)) for ob in [object(), 'qrs']: with assert_raises_regex(ValueError, @@ -716,13 +750,14 @@ def test_mod(self): assert_array_equal(A2, (A % [[1, 2], [3, 4]])) def test_rmod(self): - assert_(f"{self.A}" == str(self.A)) - assert_(f"{self.A!r}" == repr(self.A)) + A = self.A() + assert_(f"{A}" == str(A)) + assert_(f"{A!r}" == repr(A)) for ob in [42, object()]: with assert_raises_regex( TypeError, "unsupported operand type.* and 'chararray'"): - ob % self.A + ob % A def test_slice(self): """Regression test for https://github.com/numpy/numpy/issues/5982""" @@ -751,27 +786,21 @@ def test_getitem_length_zero_item(self, data): # or does not have length 0. assert_equal(a[1], a.dtype.type()) - class TestMethodsEmptyArray: - def setup_method(self): - self.U = np.array([], dtype='U') - self.S = np.array([], dtype='S') - def test_encode(self): - res = np.char.encode(self.U) + res = np.char.encode(np.array([], dtype='U')) assert_array_equal(res, []) assert_(res.dtype.char == 'S') def test_decode(self): - res = np.char.decode(self.S) + res = np.char.decode(np.array([], dtype='S')) assert_array_equal(res, []) assert_(res.dtype.char == 'U') def test_decode_with_reshape(self): - res = np.char.decode(self.S.reshape((1, 0, 1))) + res = np.char.decode(np.array([], dtype='S').reshape((1, 0, 1))) assert_(res.shape == (1, 0, 1)) - class TestMethodsScalarValues: def test_mod(self): A = np.array([[' abc ', ''], @@ -816,7 +845,6 @@ def test_replace(self): assert_equal(np.char.replace('Python is good', 'good', 'great'), 'Python is great') - def test_empty_indexing(): """Regression test for ticket 1948.""" # Check that indexing a chararray with an empty list/array returns an From f9102115ee4e97a07d8421f7344c688a4ca676c4 Mon Sep 17 00:00:00 2001 From: ixgbe <1113177880@qq.com> Date: Thu, 21 Aug 2025 09:47:03 +0800 Subject: [PATCH 0324/1018] ENH: Enable unit tests for RISC-V CPU dispatcher utilities Signed-off-by: Wang Yang --- numpy/_core/meson.build | 2 +- numpy/_core/tests/test_cpu_dispatcher.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index b4c769810ad8..61a9d53a42d0 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -777,7 +777,7 @@ _umath_tests_mtargets = mod_features.multi_targets( AVX2, SSE41, SSE2, ASIMDHP, ASIMD, NEON, VSX3, VSX2, VSX, - VXE, VX, + VXE, VX, RVV, ], baseline: CPU_BASELINE, prefix: 'NPY_', diff --git a/numpy/_core/tests/test_cpu_dispatcher.py b/numpy/_core/tests/test_cpu_dispatcher.py index fc9d5e3147e0..f665fa6464ef 100644 --- a/numpy/_core/tests/test_cpu_dispatcher.py +++ b/numpy/_core/tests/test_cpu_dispatcher.py @@ -15,7 +15,7 @@ def test_dispatcher(): "SSE2", "SSE41", "AVX2", "VSX", "VSX2", "VSX3", "NEON", "ASIMD", "ASIMDHP", - "VX", "VXE", "LSX" + "VX", "VXE", "LSX", "RVV" ) highest_sfx = "" # no suffix for the baseline all_sfx = [] From 8efb7b76aad6179c9dc052038cb24024be280fa3 Mon Sep 17 
00:00:00 2001 From: Hunter Hogan Date: Wed, 20 Aug 2025 22:01:05 -0500 Subject: [PATCH 0325/1018] TYP: ndarray.fill() takes no keyword arguments ```python (.venv) C:\apps\mapFolding>py Python 3.13.7 (tags/v3.13.7:bcee1c3, Aug 14 2025, 14:15:11) [MSC v.1944 64 bit (AMD64)] on win32 Type "help", "copyright", "credits" or "license" for more information. >>> import numpy >>> aa = numpy.zeros((4,3)) >>> aa array([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]) >>> aa.fill(value=9) Traceback (most recent call last): File "", line 1, in aa.fill(value=9) ~~~~~~~^^^^^^^^^ TypeError: ndarray.fill() takes no keyword arguments >>> aa array([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]) >>> aa.fill(9) >>> aa array([[9., 9., 9.], [9., 9., 9.], [9., 9., 9.], [9., 9., 9.]]) >>> ``` --- numpy/__init__.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 1f47f2aae0d7..0982a502f018 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2177,7 +2177,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @strides.setter def strides(self, value: _ShapeLike) -> None: ... def byteswap(self, inplace: builtins.bool = ...) -> Self: ... - def fill(self, value: Any) -> None: ... + def fill(self, value: Any, /) -> None: ... @property def flat(self) -> flatiter[Self]: ... From cd86711619fa2bf6aa44d9f3bd053c3134e8bc25 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 21 Aug 2025 19:57:58 +0200 Subject: [PATCH 0326/1018] CI: more specific mypy_primer ``on:`` paths --- .github/workflows/mypy_primer.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml index 544c568a522d..2bfc4197e9b8 100644 --- a/.github/workflows/mypy_primer.yml +++ b/.github/workflows/mypy_primer.yml @@ -4,9 +4,13 @@ on: # Only run on PR, since we diff against main pull_request: paths: - - "**/*.pyi" - ".github/workflows/mypy_primer.yml" - ".github/workflows/mypy_primer_comment.yml" + - "numpy/**/*.pyi" + - "numpy/_typing/*.py" + - "numpy/typing/*.py" + - "!numpy/typing/tests/**" + - "numpy/py.typed" concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} From f2e65e7c9c69f8aa21094d2b897d680c208fb221 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 21 Aug 2025 20:07:55 +0200 Subject: [PATCH 0327/1018] CI: replace comment-hider acction in mypy_primer workflow --- .github/workflows/mypy_primer_comment.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/mypy_primer_comment.yml b/.github/workflows/mypy_primer_comment.yml index be0dda7f7dec..338a53da6a13 100644 --- a/.github/workflows/mypy_primer_comment.yml +++ b/.github/workflows/mypy_primer_comment.yml @@ -49,10 +49,10 @@ jobs: return parseInt(fs.readFileSync("pr_number.txt", { encoding: "utf8" })) - name: Hide old comments - uses: kanga333/comment-hider@c12bb20b48aeb8fc098e35967de8d4f8018fffdf # v0.4.0 + uses: int128/hide-comment-action@a30d551065e4231e6d7a671bb5ce884f9ee6417b # v1.43.0 with: - github_token: ${{ secrets.GITHUB_TOKEN }} - issue_number: ${{ steps.get-pr-number.outputs.result }} + token: ${{ secrets.GITHUB_TOKEN }} + issue-number: ${{ steps.get-pr-number.outputs.result }} - run: cat diff_*.txt | tee fulldiff.txt From 6e7641976458d2310b151ad8e6ba2be5ee55a53b Mon Sep 17 00:00:00 2001 From: Mohit Deoli Date: Wed, 13 Aug 2025 22:34:10 +0530 Subject: [PATCH 0328/1018] DOC: document datetime/timedelta to PyObject conversion --- 
doc/source/reference/arrays.datetime.rst | 18 ++++++++++++++++++ numpy/_core/src/multiarray/_datetime.h | 1 + 2 files changed, 19 insertions(+) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index 2095dc3aea49..9c53d1bbdb1e 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -309,6 +309,24 @@ us / μs microsecond +/- 2.9e5 years [290301 BC, 294241 AD] as attosecond +/- 9.2 seconds [ 1969 AD, 1970 AD] ======== ================ ======================= ========================== +Converting datetime and timedelta to Python Object +================================================== + +NumPy follows a strict protocol when converting datetime and timedelta to Python Objects (e.g., tuple, list, datetime.datetime). + +For conversion of datetime to a Python Object: + +- For days or coarser, returns a datetime.date. +- For days or coarser, returns a datetime.date. +- For microseconds or coarser, returns a datetime.datetime. +- For units finer than microseconds, returns an integer. + +For conversion of timedelta to Python Object + +- Not-a-time is returned as the string "NaT". +- For microseconds or coarser, returns a datetime.timedelta. +- For units finer than microseconds, returns an integer. + Business day functionality ========================== diff --git a/numpy/_core/src/multiarray/_datetime.h b/numpy/_core/src/multiarray/_datetime.h index 112c57433094..64284f550af4 100644 --- a/numpy/_core/src/multiarray/_datetime.h +++ b/numpy/_core/src/multiarray/_datetime.h @@ -243,6 +243,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, * Converts a datetime into a PyObject *. * * For days or coarser, returns a datetime.date. + * For days or coarser, returns a datetime.date. * For microseconds or coarser, returns a datetime.datetime. * For units finer than microseconds, returns an integer. */ From 23de624336554b7783175185699714f0ba3faf04 Mon Sep 17 00:00:00 2001 From: Mohit Deoli Date: Wed, 13 Aug 2025 22:51:35 +0530 Subject: [PATCH 0329/1018] DOC: document datetime/timedelta to PyObject conversion --- doc/source/reference/arrays.datetime.rst | 2 +- numpy/_core/src/multiarray/_datetime.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index 9c53d1bbdb1e..d14d99c39b44 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -316,7 +316,7 @@ NumPy follows a strict protocol when converting datetime and timedelta to Python For conversion of datetime to a Python Object: -- For days or coarser, returns a datetime.date. +- Not-a-time is returned as the string "NaT". - For days or coarser, returns a datetime.date. - For microseconds or coarser, returns a datetime.datetime. - For units finer than microseconds, returns an integer. diff --git a/numpy/_core/src/multiarray/_datetime.h b/numpy/_core/src/multiarray/_datetime.h index 64284f550af4..49745197dfab 100644 --- a/numpy/_core/src/multiarray/_datetime.h +++ b/numpy/_core/src/multiarray/_datetime.h @@ -242,7 +242,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, /* * Converts a datetime into a PyObject *. * - * For days or coarser, returns a datetime.date. + * Not-a-time is returned as the string "NaT". * For days or coarser, returns a datetime.date. * For microseconds or coarser, returns a datetime.datetime. 
* For units finer than microseconds, returns an integer. From 19119dc08ea99f038c9490238b185df4b75d6bd5 Mon Sep 17 00:00:00 2001 From: Mohit Deoli Date: Thu, 14 Aug 2025 00:53:50 +0530 Subject: [PATCH 0330/1018] DOC: document datetime/timedelta to PyObject conversion --- doc/source/reference/arrays.datetime.rst | 8 +++++--- numpy/_core/src/multiarray/_datetime.h | 6 +++--- numpy/_core/src/multiarray/datetime.c | 6 +++--- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index d14d99c39b44..d2d68a26be3c 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -309,6 +309,7 @@ us / μs microsecond +/- 2.9e5 years [290301 BC, 294241 AD] as attosecond +/- 9.2 seconds [ 1969 AD, 1970 AD] ======== ================ ======================= ========================== + Converting datetime and timedelta to Python Object ================================================== @@ -316,16 +317,17 @@ NumPy follows a strict protocol when converting datetime and timedelta to Python For conversion of datetime to a Python Object: -- Not-a-time is returned as the string "NaT". +- Not-a-time is returned as None. - For days or coarser, returns a datetime.date. - For microseconds or coarser, returns a datetime.datetime. - For units finer than microseconds, returns an integer. For conversion of timedelta to Python Object -- Not-a-time is returned as the string "NaT". +- Not-a-time is returned as None. - For microseconds or coarser, returns a datetime.timedelta. -- For units finer than microseconds, returns an integer. +- For Y/M/B (nonlinear units), generic units and units finer than microseconds, returns an integer. + Business day functionality ========================== diff --git a/numpy/_core/src/multiarray/_datetime.h b/numpy/_core/src/multiarray/_datetime.h index 49745197dfab..7ef2c39023a1 100644 --- a/numpy/_core/src/multiarray/_datetime.h +++ b/numpy/_core/src/multiarray/_datetime.h @@ -242,7 +242,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, /* * Converts a datetime into a PyObject *. * - * Not-a-time is returned as the string "NaT". + * Not-a-time is returned as None. * For days or coarser, returns a datetime.date. * For microseconds or coarser, returns a datetime.datetime. * For units finer than microseconds, returns an integer. @@ -253,9 +253,9 @@ convert_datetime_to_pyobject(npy_datetime dt, PyArray_DatetimeMetaData *meta); /* * Converts a timedelta into a PyObject *. * - * Not-a-time is returned as the string "NaT". + * Not-a-time is returned as None. * For microseconds or coarser, returns a datetime.timedelta. - * For units finer than microseconds, returns an integer. + * For Y/M/B (nonlinear units), generic units and units finer than microseconds, returns an integer. */ NPY_NO_EXPORT PyObject * convert_timedelta_to_pyobject(npy_timedelta td, PyArray_DatetimeMetaData *meta); diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index 0dac36a0903b..f2f9e6405f9a 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -2761,7 +2761,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, /* * Converts a datetime into a PyObject *. * - * Not-a-time is returned as the string "NaT". + * Not-a-time is returned as None. * For days or coarser, returns a datetime.date. * For microseconds or coarser, returns a datetime.datetime. 
* For units finer than microseconds, returns an integer. @@ -2945,9 +2945,9 @@ convert_timedelta_to_timedeltastruct(PyArray_DatetimeMetaData *meta, /* * Converts a timedelta into a PyObject *. * - * Not-a-time is returned as the string "NaT". + * Not-a-time is returned as None. * For microseconds or coarser, returns a datetime.timedelta. - * For units finer than microseconds, returns an integer. + * For Y/M/B (nonlinear units), generic units and units finer than microseconds, returns an integer. */ NPY_NO_EXPORT PyObject * convert_timedelta_to_pyobject(npy_timedelta td, PyArray_DatetimeMetaData *meta) From 3cfb1d21c81425c8d2e0a8e7ec27878f1c8f5b09 Mon Sep 17 00:00:00 2001 From: Mohit Deoli Date: Thu, 21 Aug 2025 12:07:03 +0530 Subject: [PATCH 0331/1018] DOC: Update under title 'Converting datetime and timedelta to Python Object' --- doc/source/reference/arrays.datetime.rst | 40 ++++++++++++++++++------ 1 file changed, 30 insertions(+), 10 deletions(-) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index d2d68a26be3c..43ff7bf650e3 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -313,20 +313,40 @@ us / μs microsecond +/- 2.9e5 years [290301 BC, 294241 AD] Converting datetime and timedelta to Python Object ================================================== -NumPy follows a strict protocol when converting datetime and timedelta to Python Objects (e.g., tuple, list, datetime.datetime). +NumPy follows a strict protocol when converting datetime64 and/or timedelta64 to Python Objects (e.g., tuple, list, datetime.datetime). -For conversion of datetime to a Python Object: +The protocol is described in the following table: -- Not-a-time is returned as None. -- For days or coarser, returns a datetime.date. -- For microseconds or coarser, returns a datetime.datetime. -- For units finer than microseconds, returns an integer. +================================ ================================= ================================== + Input Type for datetime64 for timedelta64 +================================ ================================= ================================== + NaT None None + Finer than Microseconds int int + Microseconds or Coarser datetime.datetime datetime.timedelta + Days or Coarser datetime.date datetime.timedelta +Non-linear(Y/M) and genric units datetime.date int +================================ ================================= ================================== -For conversion of timedelta to Python Object +.. admonition:: Example + + .. try_examples:: + + >>> import numpy as np + + >>> type(np.datetime64('NaT').astype(object)) + + + >>> type(np.timedelta64('NaT').astype(object)) + + + >>> type(np.timedelta64(123, 'ns').astype(object)) + + + >>> type(np.datetime64('2025-01-01T12:00:00.123456').astype(object)) + -- Not-a-time is returned as None. -- For microseconds or coarser, returns a datetime.timedelta. -- For Y/M/B (nonlinear units), generic units and units finer than microseconds, returns an integer. 
+ >>> type(np.timedelta64(10, 'D').astype(object)) + Business day functionality From eac897a2aac3ca0f3ad64b2936a70ccef70beb69 Mon Sep 17 00:00:00 2001 From: Mohit Deoli Date: Thu, 21 Aug 2025 12:11:16 +0530 Subject: [PATCH 0332/1018] DOC: Fix ambiguous comments under datetime/timedelta to PyObject conversion --- numpy/_core/src/multiarray/_datetime.h | 6 +++--- numpy/_core/src/multiarray/datetime.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/multiarray/_datetime.h b/numpy/_core/src/multiarray/_datetime.h index 7ef2c39023a1..829cb7972065 100644 --- a/numpy/_core/src/multiarray/_datetime.h +++ b/numpy/_core/src/multiarray/_datetime.h @@ -242,7 +242,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, /* * Converts a datetime into a PyObject *. * - * Not-a-time is returned as None. + * NaT (Not-a-time) is returned as None. * For days or coarser, returns a datetime.date. * For microseconds or coarser, returns a datetime.datetime. * For units finer than microseconds, returns an integer. @@ -253,9 +253,9 @@ convert_datetime_to_pyobject(npy_datetime dt, PyArray_DatetimeMetaData *meta); /* * Converts a timedelta into a PyObject *. * - * Not-a-time is returned as None. + * NaT (Not-a-time) is returned as None. * For microseconds or coarser, returns a datetime.timedelta. - * For Y/M/B (nonlinear units), generic units and units finer than microseconds, returns an integer. + * For Y/M (nonlinear units), generic units and units finer than microseconds, returns an integer. */ NPY_NO_EXPORT PyObject * convert_timedelta_to_pyobject(npy_timedelta td, PyArray_DatetimeMetaData *meta); diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index f2f9e6405f9a..cd70559dd90a 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -2945,9 +2945,9 @@ convert_timedelta_to_timedeltastruct(PyArray_DatetimeMetaData *meta, /* * Converts a timedelta into a PyObject *. * - * Not-a-time is returned as None. + * NaT (Not-a-time) is returned as None. * For microseconds or coarser, returns a datetime.timedelta. - * For Y/M/B (nonlinear units), generic units and units finer than microseconds, returns an integer. + * For Y/M (nonlinear units), generic units and units finer than microseconds, returns an integer. 
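+ * (Editor's illustration, hedged: np.timedelta64(1, 'Y').item() yields the
+ *  plain int 1, because a year has no fixed duration, while
+ *  np.timedelta64(1, 'h').item() yields datetime.timedelta(seconds=3600).)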
*/ NPY_NO_EXPORT PyObject * convert_timedelta_to_pyobject(npy_timedelta td, PyArray_DatetimeMetaData *meta) @@ -2963,7 +2963,7 @@ convert_timedelta_to_pyobject(npy_timedelta td, PyArray_DatetimeMetaData *meta) /* * If the type's precision is greater than microseconds, is - * Y/M/B (nonlinear units), or is generic units, return an int + * Y/M (nonlinear units), or is generic units, return an int */ if (meta->base > NPY_FR_us || meta->base == NPY_FR_Y || From ebaab572235781fad5fc012068ebeea55efeb6e7 Mon Sep 17 00:00:00 2001 From: Mohit Deoli Date: Thu, 21 Aug 2025 22:35:19 +0530 Subject: [PATCH 0333/1018] DOC: Update doc/source/reference/arrays.datetime.rst --- doc/source/reference/arrays.datetime.rst | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index 43ff7bf650e3..93ec2d00f5b8 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -313,18 +313,18 @@ us / μs microsecond +/- 2.9e5 years [290301 BC, 294241 AD] Converting datetime and timedelta to Python Object ================================================== -NumPy follows a strict protocol when converting datetime64 and/or timedelta64 to Python Objects (e.g., tuple, list, datetime.datetime). +NumPy follows a strict protocol when converting ``np.datetime64`` and/or ``np.timedelta64`` to Python Objects (e.g., ``tuple``, ``list``, ``datetime.datetime``). The protocol is described in the following table: ================================ ================================= ================================== - Input Type for datetime64 for timedelta64 + Input Type for ``datetime64`` for ``timedelta64`` ================================ ================================= ================================== - NaT None None - Finer than Microseconds int int - Microseconds or Coarser datetime.datetime datetime.timedelta - Days or Coarser datetime.date datetime.timedelta -Non-linear(Y/M) and genric units datetime.date int + ``NaT`` ``None`` ``None`` + Finer than Microseconds ``int`` ``int`` + Microseconds or Coarser ``datetime.datetime`` ``datetime.timedelta`` + Days or Coarser ``datetime.date`` ``datetime.timedelta`` +Non-linear(Y/M) and genric units ``datetime.date`` ``int`` ================================ ================================= ================================== .. admonition:: Example From 62667abf4f8964b1388aba5beb56a9a7310e17a7 Mon Sep 17 00:00:00 2001 From: Mohit Deoli Date: Fri, 22 Aug 2025 01:11:00 +0530 Subject: [PATCH 0334/1018] DOC: Update doc/source/reference/arrays.datetime.rst --- doc/source/reference/arrays.datetime.rst | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index 93ec2d00f5b8..9735ccbfa2a4 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -313,18 +313,18 @@ us / μs microsecond +/- 2.9e5 years [290301 BC, 294241 AD] Converting datetime and timedelta to Python Object ================================================== -NumPy follows a strict protocol when converting ``np.datetime64`` and/or ``np.timedelta64`` to Python Objects (e.g., ``tuple``, ``list``, ``datetime.datetime``). +NumPy follows a strict protocol when converting `datetime64` and/or `timedelta64` to Python Objects (e.g., `tuple`, `list`, `datetime.datetime`). 
The protocol is described in the following table: ================================ ================================= ================================== - Input Type for ``datetime64`` for ``timedelta64`` + Input Type for `datetime64` for `timedelta64` ================================ ================================= ================================== - ``NaT`` ``None`` ``None`` - Finer than Microseconds ``int`` ``int`` - Microseconds or Coarser ``datetime.datetime`` ``datetime.timedelta`` - Days or Coarser ``datetime.date`` ``datetime.timedelta`` -Non-linear(Y/M) and genric units ``datetime.date`` ``int`` + `NaT` `None` `None` + Finer than Microseconds `int` `int` + Microseconds or Coarser `datetime.datetime` `datetime.timedelta` + Days or Coarser `datetime.date` `datetime.timedelta` +Non-linear(Y/M) and genric units `datetime.date` `int` ================================ ================================= ================================== .. admonition:: Example From cb016cd1adcd6007943680f1b4165dfebf3d23dd Mon Sep 17 00:00:00 2001 From: Mohit Deoli Date: Fri, 22 Aug 2025 01:31:38 +0530 Subject: [PATCH 0335/1018] DOC: Update doc/source/reference/arrays.datetime.rst --- doc/source/reference/arrays.datetime.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index 9735ccbfa2a4..a62ec26e00c8 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -313,18 +313,18 @@ us / μs microsecond +/- 2.9e5 years [290301 BC, 294241 AD] Converting datetime and timedelta to Python Object ================================================== -NumPy follows a strict protocol when converting `datetime64` and/or `timedelta64` to Python Objects (e.g., `tuple`, `list`, `datetime.datetime`). +NumPy follows a strict protocol when converting `datetime64` and/or `timedelta64` to Python Objects (e.g., ``tuple``, ``list``, `datetime.datetime`). The protocol is described in the following table: ================================ ================================= ================================== Input Type for `datetime64` for `timedelta64` ================================ ================================= ================================== - `NaT` `None` `None` - Finer than Microseconds `int` `int` + ``NaT`` ``None`` ``None`` + Finer than Microseconds ``int`` ``int`` Microseconds or Coarser `datetime.datetime` `datetime.timedelta` Days or Coarser `datetime.date` `datetime.timedelta` -Non-linear(Y/M) and genric units `datetime.date` `int` +Non-linear(Y/M) and genric units `datetime.date` ``int`` ================================ ================================= ================================== .. 
admonition:: Example From 835e56c322ac5bcfdf1397f5905222d1b1d22484 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 14 Aug 2025 21:35:59 +0200 Subject: [PATCH 0336/1018] TYP: move a `# type: ignore` so that it actually ignores the error --- numpy/_typing/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_typing/__init__.py b/numpy/_typing/__init__.py index 0044749e3dce..0594ae5759b4 100644 --- a/numpy/_typing/__init__.py +++ b/numpy/_typing/__init__.py @@ -118,8 +118,8 @@ ) # -from ._nbit_base import ( - NBitBase as NBitBase, # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +from ._nbit_base import ( # type: ignore[deprecated] + NBitBase as NBitBase, # pyright: ignore[reportDeprecated] _8Bit as _8Bit, _16Bit as _16Bit, _32Bit as _32Bit, From 7216e70e17fe2dc60fb91be9aa8bc78a3e533b2a Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 14 Aug 2025 22:27:30 +0200 Subject: [PATCH 0337/1018] TYP: use ``TypeAliasType`` for ``ArrayLike`` and ``DTypeLike`` on py312+ --- numpy/_typing/__init__.py | 18 ++++++++++++++++-- numpy/typing/tests/test_runtime.py | 21 +++++++++++++++++---- 2 files changed, 33 insertions(+), 6 deletions(-) diff --git a/numpy/_typing/__init__.py b/numpy/_typing/__init__.py index 0594ae5759b4..11ed6e62f1be 100644 --- a/numpy/_typing/__init__.py +++ b/numpy/_typing/__init__.py @@ -1,7 +1,8 @@ """Private counterpart of ``numpy.typing``.""" +import sys + from ._array_like import ( - ArrayLike as ArrayLike, NDArray as NDArray, _ArrayLike as _ArrayLike, _ArrayLikeAnyString_co as _ArrayLikeAnyString_co, @@ -84,7 +85,6 @@ # from ._dtype_like import ( - DTypeLike as DTypeLike, _DTypeLike as _DTypeLike, _DTypeLikeBool as _DTypeLikeBool, _DTypeLikeBytes as _DTypeLikeBytes, @@ -156,3 +156,17 @@ _UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1, _UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2, ) + +# wrapping the public aliases in `TypeAliasType` helps with introspection readability +if sys.version_info >= (3, 12): + from typing import TypeAliasType + + from ._array_like import ArrayLike as _ArrayLikeAlias + from ._dtype_like import DTypeLike as _DTypeLikeAlias + + ArrayLike = TypeAliasType("ArrayLike", _ArrayLikeAlias) + DTypeLike = TypeAliasType("DTypeLike", _DTypeLikeAlias) + +else: + from ._array_like import ArrayLike as ArrayLike + from ._dtype_like import DTypeLike as DTypeLike diff --git a/numpy/typing/tests/test_runtime.py b/numpy/typing/tests/test_runtime.py index 236952101126..8e49fda8185c 100644 --- a/numpy/typing/tests/test_runtime.py +++ b/numpy/typing/tests/test_runtime.py @@ -22,12 +22,25 @@ class TypeTup(NamedTuple): origin: type | None +def _flatten_type_alias(t: Any) -> Any: + # "flattens" a TypeAliasType to its underlying type alias + return getattr(t, "__value__", t) + + NDArrayTup = TypeTup(npt.NDArray, npt.NDArray.__args__, np.ndarray) TYPES = { - "ArrayLike": TypeTup(npt.ArrayLike, npt.ArrayLike.__args__, Union), - "DTypeLike": TypeTup(npt.DTypeLike, npt.DTypeLike.__args__, Union), - "NBitBase": TypeTup(npt.NBitBase, (), None), + "ArrayLike": TypeTup( + _flatten_type_alias(npt.ArrayLike), + _flatten_type_alias(npt.ArrayLike).__args__, + Union, + ), + "DTypeLike": TypeTup( + _flatten_type_alias(npt.DTypeLike), + _flatten_type_alias(npt.DTypeLike).__args__, + Union, + ), + "NBitBase": TypeTup(npt.NBitBase, (), None), # type: ignore[deprecated] # pyright: ignore[reportDeprecated] "NDArray": NDArrayTup, } @@ -68,7 +81,7 @@ def test_get_type_hints_str(name: type, tup: TypeTup) -> None: def func(a: typ_str) -> None: pass out = 
get_type_hints(func) - ref = {"a": typ, "return": type(None)} + ref = {"a": getattr(npt, str(name)), "return": type(None)} assert out == ref From 6d77e46292b4c1bafebc76a6abbb96448a87ed4a Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Fri, 22 Aug 2025 13:17:16 +1000 Subject: [PATCH 0338/1018] CI: only test pyodide --- .github/workflows/emscripten.yml | 53 ++------------------------------ 1 file changed, 2 insertions(+), 51 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index e7503871e3f5..c5f770c9d785 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -5,25 +5,6 @@ on: branches: - main - maintenance/** - # Note: this workflow gets triggered on the same schedule as the - # wheels.yml workflow to upload WASM wheels to Anaconda.org. - schedule: - # ┌───────────── minute (0 - 59) - # │ ┌───────────── hour (0 - 23) - # │ │ ┌───────────── day of the month (1 - 31) - # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC) - # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT) - # │ │ │ │ │ - - cron: "42 2 * * SUN,WED" - workflow_dispatch: - inputs: - push_wheels: - # Can be 'true' or 'false'. Default is 'false'. - # Warning: this will overwrite existing wheels. - description: > - Push wheels to Anaconda.org if the build succeeds - required: false - default: 'false' env: FORCE_COLOR: 3 @@ -37,7 +18,7 @@ jobs: build-wasm-emscripten: permissions: contents: read # to fetch code (actions/checkout) - name: Build NumPy distribution for Pyodide + name: Pyodide test runs-on: ubuntu-22.04 # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' @@ -52,34 +33,4 @@ jobs: - uses: pypa/cibuildwheel@c923d83ad9c1bc00211c5041d0c3f73294ff88f6 # v3.1.4 env: CIBW_PLATFORM: pyodide - - - name: Upload wheel artifact(s) - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - with: - name: cp312-pyodide_wasm32 - path: ./wheelhouse/*.whl - if-no-files-found: error - - # Push to https://anaconda.org/scientific-python-nightly-wheels/numpy - # WARNING: this job will overwrite any existing WASM wheels. 
-  upload-wheels:
-    name: Upload NumPy WASM wheels to Anaconda.org
-    runs-on: ubuntu-22.04
-    permissions: {}
-    needs: [build-wasm-emscripten]
-    if: >-
-      (github.repository == 'numpy/numpy') &&
-      (github.event_name == 'workflow_dispatch' && github.event.inputs.push_wheels == 'true') ||
-      (github.event_name == 'schedule')
-    steps:
-      - name: Download wheel artifact(s)
-        uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
-        with:
-          path: wheelhouse/
-          merge-multiple: true
-
-      - name: Push to Anaconda PyPI index
-        uses: scientific-python/upload-nightly-action@b36e8c0c10dbcfd2e05bf95f17ef8c14fd708dbf # v0.6.2
-        with:
-          artifacts_path: wheelhouse/
-          anaconda_nightly_upload_token: ${{ secrets.NUMPY_NIGHTLY_UPLOAD_TOKEN }}
+      CIBW_BUILD: cp312-*

From 89dd7c593d6e21b66b70991fe5136fed292277d0 Mon Sep 17 00:00:00 2001
From: Mohit Deoli
Date: Fri, 22 Aug 2025 12:15:30 +0530
Subject: [PATCH 0339/1018] DOC: Update doc/source/reference/arrays.datetime.rst

---
 doc/source/reference/arrays.datetime.rst | 40 ++++++++++++++++++------
 1 file changed, 30 insertions(+), 10 deletions(-)

diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst
index a62ec26e00c8..59e11fa91f5a 100644
--- a/doc/source/reference/arrays.datetime.rst
+++ b/doc/source/reference/arrays.datetime.rst
@@ -320,11 +320,12 @@ The protocol is described in the following table:
 ================================ ================================= ==================================
 Input Type for `datetime64` for `timedelta64`
 ================================ ================================= ==================================
- ``NaT`` ``None`` ``None``
- Finer than Microseconds ``int`` ``int``
- Microseconds or Coarser `datetime.datetime` `datetime.timedelta`
- Days or Coarser `datetime.date` `datetime.timedelta`
-Non-linear(Y/M) and genric units `datetime.date` ``int``
+ ``NaT`` ``None`` ``None``
+ ns/ps/fs/as ``int`` ``int``
+ μs/ms/s/m/h `datetime.datetime` `datetime.timedelta`
+ D/W (Linear units) `datetime.date` `datetime.timedelta`
+ Y/M (Non-linear units) `datetime.date` ``int``
+ Generic units `datetime.date` ``int``
 ================================ ================================= ==================================
 
 .. admonition:: Example
@@ -333,22 +334,41 @@ Non-linear(Y/M) and genric units `datetime.date` ``int``
 
         >>> import numpy as np
 
-        >>> type(np.datetime64('NaT').astype(object))
+        >>> type(np.datetime64('NaT').item())
 
-        >>> type(np.timedelta64('NaT').astype(object))
+        >>> type(np.timedelta64('NaT').item())
 
-        >>> type(np.timedelta64(123, 'ns').astype(object))
+        >>> type(np.timedelta64(123, 'ns').item())
 
-        >>> type(np.datetime64('2025-01-01T12:00:00.123456').astype(object))
+        >>> type(np.datetime64('2025-01-01T12:00:00.123456').item())
 
-        >>> type(np.timedelta64(10, 'D').astype(object))
+        >>> type(np.timedelta64(10, 'D').item())
 
+In cases where a `datetime64` and/or `timedelta64` value is converted to the Python types ``int``, ``float``, or ``str``, the corresponding return types will be ``np.int64``, ``np.float64`` and ``np.str_``.
+
+
+.. admonition:: Example
+
+    ..
try_examples:: + + >>> import numpy as np + + >>> type(np.timedelta64(1, 'D').astype(int)) + + + >>> type(np.datetime64('2025-01-01T12:00:00.123456').astype(float)) + + + >>> type(np.timedelta64(123, 'ns').astype(str)) + + + Business day functionality ========================== From 0bb2bf80da0a32a4969b1b4300711fecb6adc65a Mon Sep 17 00:00:00 2001 From: Mohit Deoli Date: Fri, 22 Aug 2025 12:15:57 +0530 Subject: [PATCH 0340/1018] DOC: numpy/_core/src/multiarray/_datetime.h --- numpy/_core/src/multiarray/_datetime.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/numpy/_core/src/multiarray/_datetime.h b/numpy/_core/src/multiarray/_datetime.h index 829cb7972065..5261e8232a08 100644 --- a/numpy/_core/src/multiarray/_datetime.h +++ b/numpy/_core/src/multiarray/_datetime.h @@ -243,9 +243,9 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, * Converts a datetime into a PyObject *. * * NaT (Not-a-time) is returned as None. - * For days or coarser, returns a datetime.date. - * For microseconds or coarser, returns a datetime.datetime. - * For units finer than microseconds, returns an integer. + * For D/W/Y/M (days or coarser), returns a datetime.date. + * For μs/ms/s/m/h/D/W (microseconds or coarser), returns a datetime.datetime. + * For ns/ps/fs/as (units shorter than microseconds), returns an integer. */ NPY_NO_EXPORT PyObject * convert_datetime_to_pyobject(npy_datetime dt, PyArray_DatetimeMetaData *meta); @@ -254,8 +254,8 @@ convert_datetime_to_pyobject(npy_datetime dt, PyArray_DatetimeMetaData *meta); * Converts a timedelta into a PyObject *. * * NaT (Not-a-time) is returned as None. - * For microseconds or coarser, returns a datetime.timedelta. - * For Y/M (nonlinear units), generic units and units finer than microseconds, returns an integer. + * For μs/ms/s/m/h/D/W (microseconds or coarser), returns a datetime.timedelta. + * For Y/M (non-linear units), generic units and ns/ps/fs/as (units shorter than microseconds), returns an integer. */ NPY_NO_EXPORT PyObject * convert_timedelta_to_pyobject(npy_timedelta td, PyArray_DatetimeMetaData *meta); From da71accb4afcd80ed2eecf07839716764808c002 Mon Sep 17 00:00:00 2001 From: Mohit Deoli Date: Fri, 22 Aug 2025 12:16:21 +0530 Subject: [PATCH 0341/1018] DOC: numpy/_core/src/multiarray/datetime.c --- numpy/_core/src/multiarray/datetime.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index cd70559dd90a..3c163a34fbdf 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -2761,10 +2761,10 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, /* * Converts a datetime into a PyObject *. * - * Not-a-time is returned as None. - * For days or coarser, returns a datetime.date. - * For microseconds or coarser, returns a datetime.datetime. - * For units finer than microseconds, returns an integer. + * NaT (Not-a-time) is returned as None. + * For D/W/Y/M (days or coarser), returns a datetime.date. + * For μs/ms/s/m/h/D/W (microseconds or coarser), returns a datetime.datetime. + * For ns/ps/fs/as (units shorter than microseconds), returns an integer. */ NPY_NO_EXPORT PyObject * convert_datetime_to_pyobject(npy_datetime dt, PyArray_DatetimeMetaData *meta) @@ -2946,8 +2946,8 @@ convert_timedelta_to_timedeltastruct(PyArray_DatetimeMetaData *meta, * Converts a timedelta into a PyObject *. * * NaT (Not-a-time) is returned as None. 
- * For microseconds or coarser, returns a datetime.timedelta. - * For Y/M (nonlinear units), generic units and units finer than microseconds, returns an integer. + * For μs/ms/s/m/h/D/W (microseconds or coarser), returns a datetime.timedelta. + * For Y/M (non-linear units), generic units and ns/ps/fs/as (units shorter than microseconds), returns an integer. */ NPY_NO_EXPORT PyObject * convert_timedelta_to_pyobject(npy_timedelta td, PyArray_DatetimeMetaData *meta) From 398e38e6bea79df7276749228fead40f29f5a092 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Fri, 22 Aug 2025 12:50:02 +0100 Subject: [PATCH 0342/1018] Merge pull request #29603 from MarcoGorelli/typ-lib-defaults --- numpy/lib/_array_utils_impl.pyi | 6 +- numpy/lib/_arraypad_impl.pyi | 20 +- numpy/lib/_function_base_impl.pyi | 582 +++++++++++++++--------------- numpy/lib/_histograms_impl.pyi | 22 +- numpy/lib/_index_tricks_impl.pyi | 4 +- numpy/lib/_npyio_impl.pyi | 132 +++---- numpy/lib/_polynomial_impl.pyi | 76 ++-- numpy/lib/_shape_base_impl.pyi | 10 +- numpy/lib/_stride_tricks_impl.pyi | 34 +- numpy/lib/_twodim_base_impl.pyi | 198 +++++----- numpy/lib/_ufunclike_impl.pyi | 14 +- 11 files changed, 549 insertions(+), 549 deletions(-) diff --git a/numpy/lib/_array_utils_impl.pyi b/numpy/lib/_array_utils_impl.pyi index d3e0714773f2..db9ef852ba57 100644 --- a/numpy/lib/_array_utils_impl.pyi +++ b/numpy/lib/_array_utils_impl.pyi @@ -14,9 +14,9 @@ def byte_bounds(a: generic | NDArray[Any]) -> tuple[int, int]: ... def normalize_axis_tuple( axis: int | Iterable[int], - ndim: int = ..., - argname: str | None = ..., - allow_duplicate: bool | None = ..., + ndim: int, + argname: str | None = None, + allow_duplicate: bool | None = False, ) -> tuple[int, int]: ... def normalize_axis_index( diff --git a/numpy/lib/_arraypad_impl.pyi b/numpy/lib/_arraypad_impl.pyi index cfabdecf669e..e7aacea43254 100644 --- a/numpy/lib/_arraypad_impl.pyi +++ b/numpy/lib/_arraypad_impl.pyi @@ -54,23 +54,23 @@ _PadWidth: TypeAlias = ( def pad( array: _ArrayLike[_ScalarT], pad_width: _PadWidth, - mode: _ModeKind = ..., + mode: _ModeKind = "constant", *, - stat_length: _ArrayLikeInt | None = ..., - constant_values: ArrayLike = ..., - end_values: ArrayLike = ..., - reflect_type: L["odd", "even"] = ..., + stat_length: _ArrayLikeInt | None = None, + constant_values: ArrayLike = 0, + end_values: ArrayLike = 0, + reflect_type: L["odd", "even"] = "even", ) -> NDArray[_ScalarT]: ... @overload def pad( array: ArrayLike, pad_width: _PadWidth, - mode: _ModeKind = ..., + mode: _ModeKind = "constant", *, - stat_length: _ArrayLikeInt | None = ..., - constant_values: ArrayLike = ..., - end_values: ArrayLike = ..., - reflect_type: L["odd", "even"] = ..., + stat_length: _ArrayLikeInt | None = None, + constant_values: ArrayLike = 0, + end_values: ArrayLike = 0, + reflect_type: L["odd", "even"] = "even", ) -> NDArray[Any]: ... @overload def pad( diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 41d6204cd684..bee5da4c58ae 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -122,24 +122,24 @@ class _TrimZerosSequence(Protocol[_T_co]): @overload def rot90( m: _ArrayLike[_ScalarT], - k: int = ..., + k: int = 1, axes: tuple[int, int] = ..., ) -> NDArray[_ScalarT]: ... @overload def rot90( m: ArrayLike, - k: int = ..., + k: int = 1, axes: tuple[int, int] = ..., ) -> NDArray[Any]: ... @overload -def flip(m: _ScalarT, axis: None = ...) -> _ScalarT: ... 
+def flip(m: _ScalarT, axis: None = None) -> _ScalarT: ... @overload -def flip(m: _ScalarLike_co, axis: None = ...) -> Any: ... +def flip(m: _ScalarLike_co, axis: None = None) -> Any: ... @overload -def flip(m: _ArrayLike[_ScalarT], axis: _ShapeLike | None = ...) -> NDArray[_ScalarT]: ... +def flip(m: _ArrayLike[_ScalarT], axis: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... @overload -def flip(m: ArrayLike, axis: _ShapeLike | None = ...) -> NDArray[Any]: ... +def flip(m: ArrayLike, axis: _ShapeLike | None = None) -> NDArray[Any]: ... def iterable(y: object) -> TypeIs[Iterable[Any]]: ... @@ -201,26 +201,26 @@ def average( @overload def asarray_chkfinite( a: _ArrayLike[_ScalarT], - dtype: None = ..., - order: _OrderKACF = ..., + dtype: None = None, + order: _OrderKACF = None, ) -> NDArray[_ScalarT]: ... @overload def asarray_chkfinite( a: object, - dtype: None = ..., - order: _OrderKACF = ..., + dtype: None = None, + order: _OrderKACF = None, ) -> NDArray[Any]: ... @overload def asarray_chkfinite( a: Any, dtype: _DTypeLike[_ScalarT], - order: _OrderKACF = ..., + order: _OrderKACF = None, ) -> NDArray[_ScalarT]: ... @overload def asarray_chkfinite( a: Any, dtype: DTypeLike, - order: _OrderKACF = ..., + order: _OrderKACF = None, ) -> NDArray[Any]: ... @overload @@ -251,7 +251,7 @@ def piecewise( def select( condlist: Sequence[ArrayLike], choicelist: Sequence[ArrayLike], - default: ArrayLike = ..., + default: ArrayLike = 0, ) -> NDArray[Any]: ... @overload @@ -263,43 +263,43 @@ def copy( @overload def copy( a: _ArrayT, - order: _OrderKACF = ..., + order: _OrderKACF = "K", *, subok: L[True], ) -> _ArrayT: ... @overload def copy( a: _ArrayLike[_ScalarT], - order: _OrderKACF = ..., - subok: L[False] = ..., + order: _OrderKACF = "K", + subok: L[False] = False, ) -> NDArray[_ScalarT]: ... @overload def copy( a: ArrayLike, - order: _OrderKACF = ..., - subok: L[False] = ..., + order: _OrderKACF = "K", + subok: L[False] = False, ) -> NDArray[Any]: ... def gradient( f: ArrayLike, *varargs: ArrayLike, - axis: _ShapeLike | None = ..., - edge_order: L[1, 2] = ..., + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, ) -> Any: ... @overload def diff( a: _T, n: L[0], - axis: SupportsIndex = ..., + axis: SupportsIndex = -1, prepend: ArrayLike = ..., append: ArrayLike = ..., ) -> _T: ... @overload def diff( a: ArrayLike, - n: int = ..., - axis: SupportsIndex = ..., + n: int = 1, + axis: SupportsIndex = -1, prepend: ArrayLike = ..., append: ArrayLike = ..., ) -> NDArray[Any]: ... @@ -387,27 +387,27 @@ def interp( ) -> NDArray[complex128 | float64] | complex128 | float64: ... @overload -def angle(z: _ComplexLike_co, deg: bool = ...) -> floating: ... +def angle(z: _ComplexLike_co, deg: bool = False) -> floating: ... @overload -def angle(z: object_, deg: bool = ...) -> Any: ... +def angle(z: object_, deg: bool = False) -> Any: ... @overload -def angle(z: _ArrayLikeComplex_co, deg: bool = ...) -> NDArray[floating]: ... +def angle(z: _ArrayLikeComplex_co, deg: bool = False) -> NDArray[floating]: ... @overload -def angle(z: _ArrayLikeObject_co, deg: bool = ...) -> NDArray[object_]: ... +def angle(z: _ArrayLikeObject_co, deg: bool = False) -> NDArray[object_]: ... @overload def unwrap( p: _ArrayLikeFloat_co, - discont: float | None = ..., - axis: int = ..., + discont: float | None = None, + axis: int = -1, *, period: float = ..., ) -> NDArray[floating]: ... 
@overload def unwrap( p: _ArrayLikeObject_co, - discont: float | None = ..., - axis: int = ..., + discont: float | None = None, + axis: int = -1, *, period: float = ..., ) -> NDArray[object_]: ... @@ -416,7 +416,7 @@ def sort_complex(a: ArrayLike) -> NDArray[complexfloating]: ... def trim_zeros( filt: _TrimZerosSequence[_T], - trim: L["f", "b", "fb", "bf"] = ..., + trim: L["f", "b", "fb", "bf"] = "fb", ) -> _T: ... @overload @@ -429,48 +429,48 @@ def place(arr: NDArray[Any], mask: ArrayLike, vals: Any) -> None: ... @overload def cov( m: _ArrayLikeFloat_co, - y: _ArrayLikeFloat_co | None = ..., - rowvar: bool = ..., - bias: bool = ..., - ddof: SupportsIndex | SupportsInt | None = ..., - fweights: ArrayLike | None = ..., - aweights: ArrayLike | None = ..., + y: _ArrayLikeFloat_co | None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: ArrayLike | None = None, + aweights: ArrayLike | None = None, *, - dtype: None = ..., + dtype: None = None, ) -> NDArray[floating]: ... @overload def cov( m: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co | None = ..., - rowvar: bool = ..., - bias: bool = ..., - ddof: SupportsIndex | SupportsInt | None = ..., - fweights: ArrayLike | None = ..., - aweights: ArrayLike | None = ..., + y: _ArrayLikeComplex_co | None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: ArrayLike | None = None, + aweights: ArrayLike | None = None, *, - dtype: None = ..., + dtype: None = None, ) -> NDArray[complexfloating]: ... @overload def cov( m: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co | None = ..., - rowvar: bool = ..., - bias: bool = ..., - ddof: SupportsIndex | SupportsInt | None = ..., - fweights: ArrayLike | None = ..., - aweights: ArrayLike | None = ..., + y: _ArrayLikeComplex_co | None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: ArrayLike | None = None, + aweights: ArrayLike | None = None, *, dtype: _DTypeLike[_ScalarT], ) -> NDArray[_ScalarT]: ... @overload def cov( m: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co | None = ..., - rowvar: bool = ..., - bias: bool = ..., - ddof: SupportsIndex | SupportsInt | None = ..., - fweights: ArrayLike | None = ..., - aweights: ArrayLike | None = ..., + y: _ArrayLikeComplex_co | None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: ArrayLike | None = None, + aweights: ArrayLike | None = None, *, dtype: DTypeLike, ) -> NDArray[Any]: ... @@ -544,59 +544,59 @@ def sinc(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... @overload def median( a: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, ) -> floating: ... @overload def median( a: _ArrayLikeComplex_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, ) -> complexfloating: ... @overload def median( a: _ArrayLikeTD64_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, ) -> timedelta64: ... 
@overload def median( a: _ArrayLikeObject_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, ) -> Any: ... @overload def median( a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - out: None = ..., - overwrite_input: bool = ..., - keepdims: bool = ..., + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, ) -> Any: ... @overload def median( a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, axis: _ShapeLike | None, out: _ArrayT, - overwrite_input: bool = ..., - keepdims: bool = ..., + overwrite_input: bool = False, + keepdims: bool = False, ) -> _ArrayT: ... @overload def median( a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, *, out: _ArrayT, - overwrite_input: bool = ..., - keepdims: bool = ..., + overwrite_input: bool = False, + keepdims: bool = False, ) -> _ArrayT: ... _MethodKind = L[ @@ -620,133 +620,133 @@ _MethodKind = L[ def percentile( a: _ArrayLikeFloat_co, q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> floating: ... @overload def percentile( a: _ArrayLikeComplex_co, q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> complexfloating: ... @overload def percentile( a: _ArrayLikeTD64_co, q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> timedelta64: ... @overload def percentile( a: _ArrayLikeDT64_co, q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> datetime64: ... @overload def percentile( a: _ArrayLikeObject_co, q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> Any: ... 
@overload def percentile( a: _ArrayLikeFloat_co, q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[floating]: ... @overload def percentile( a: _ArrayLikeComplex_co, q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[complexfloating]: ... @overload def percentile( a: _ArrayLikeTD64_co, q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[timedelta64]: ... @overload def percentile( a: _ArrayLikeDT64_co, q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[datetime64]: ... @overload def percentile( a: _ArrayLikeObject_co, q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[object_]: ... @overload def percentile( a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, q: _ArrayLikeFloat_co, - axis: _ShapeLike | None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: bool = ..., + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: bool = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> Any: ... @overload def percentile( @@ -754,23 +754,23 @@ def percentile( q: _ArrayLikeFloat_co, axis: _ShapeLike | None, out: _ArrayT, - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: bool = ..., + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: bool = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> _ArrayT: ... 
@overload def percentile( a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, q: _ArrayLikeFloat_co, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, *, out: _ArrayT, - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: bool = ..., - weights: _ArrayLikeFloat_co | None = ..., + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: bool = False, + weights: _ArrayLikeFloat_co | None = None, ) -> _ArrayT: ... # NOTE: keep in sync with `percentile` @@ -778,133 +778,133 @@ def percentile( def quantile( a: _ArrayLikeFloat_co, q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> floating: ... @overload def quantile( a: _ArrayLikeComplex_co, q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> complexfloating: ... @overload def quantile( a: _ArrayLikeTD64_co, q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> timedelta64: ... @overload def quantile( a: _ArrayLikeDT64_co, q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> datetime64: ... @overload def quantile( a: _ArrayLikeObject_co, q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> Any: ... @overload def quantile( a: _ArrayLikeFloat_co, q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[floating]: ... 
@overload def quantile( a: _ArrayLikeComplex_co, q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[complexfloating]: ... @overload def quantile( a: _ArrayLikeTD64_co, q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[timedelta64]: ... @overload def quantile( a: _ArrayLikeDT64_co, q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[datetime64]: ... @overload def quantile( a: _ArrayLikeObject_co, q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[object_]: ... @overload def quantile( a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, q: _ArrayLikeFloat_co, - axis: _ShapeLike | None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: bool = ..., + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: bool = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> Any: ... @overload def quantile( @@ -912,23 +912,23 @@ def quantile( q: _ArrayLikeFloat_co, axis: _ShapeLike | None, out: _ArrayT, - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: bool = ..., + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: bool = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> _ArrayT: ... @overload def quantile( a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, q: _ArrayLikeFloat_co, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, *, out: _ArrayT, - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: bool = ..., - weights: _ArrayLikeFloat_co | None = ..., + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: bool = False, + weights: _ArrayLikeFloat_co | None = None, ) -> _ArrayT: ... 
_ScalarT_fm = TypeVar( @@ -942,51 +942,51 @@ class _SupportsRMulFloat(Protocol[_T_co]): @overload def trapezoid( # type: ignore[overload-overlap] y: Sequence[_FloatLike_co], - x: Sequence[_FloatLike_co] | None = ..., - dx: float = ..., - axis: SupportsIndex = ..., + x: Sequence[_FloatLike_co] | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, ) -> float64: ... @overload def trapezoid( y: Sequence[_ComplexLike_co], - x: Sequence[_ComplexLike_co] | None = ..., - dx: float = ..., - axis: SupportsIndex = ..., + x: Sequence[_ComplexLike_co] | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, ) -> complex128: ... @overload def trapezoid( y: _ArrayLike[bool_ | integer], - x: _ArrayLike[bool_ | integer] | None = ..., - dx: float = ..., - axis: SupportsIndex = ..., + x: _ArrayLike[bool_ | integer] | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, ) -> float64 | NDArray[float64]: ... @overload def trapezoid( # type: ignore[overload-overlap] y: _ArrayLikeObject_co, - x: _ArrayLikeFloat_co | _ArrayLikeObject_co | None = ..., - dx: float = ..., - axis: SupportsIndex = ..., + x: _ArrayLikeFloat_co | _ArrayLikeObject_co | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, ) -> float | NDArray[object_]: ... @overload def trapezoid( y: _ArrayLike[_ScalarT_fm], - x: _ArrayLike[_ScalarT_fm] | _ArrayLikeInt_co | None = ..., - dx: float = ..., - axis: SupportsIndex = ..., + x: _ArrayLike[_ScalarT_fm] | _ArrayLikeInt_co | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, ) -> _ScalarT_fm | NDArray[_ScalarT_fm]: ... @overload def trapezoid( y: Sequence[_SupportsRMulFloat[_T]], - x: Sequence[_SupportsRMulFloat[_T] | _T] | None = ..., - dx: float = ..., - axis: SupportsIndex = ..., + x: Sequence[_SupportsRMulFloat[_T] | _T] | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, ) -> _T: ... @overload def trapezoid( y: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - x: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co | None = ..., - dx: float = ..., - axis: SupportsIndex = ..., + x: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, ) -> ( floating | complexfloating | timedelta64 | NDArray[floating | complexfloating | timedelta64 | object_] @@ -998,27 +998,27 @@ def trapz(y: ArrayLike, x: ArrayLike | None = None, dx: float = 1.0, axis: int = @overload def meshgrid( *, - copy: bool = ..., - sparse: bool = ..., - indexing: _MeshgridIdx = ..., + copy: bool = True, + sparse: bool = False, + indexing: _MeshgridIdx = "xy", ) -> tuple[()]: ... @overload def meshgrid( x1: _ArrayLike[_ScalarT], /, *, - copy: bool = ..., - sparse: bool = ..., - indexing: _MeshgridIdx = ..., + copy: bool = True, + sparse: bool = False, + indexing: _MeshgridIdx = "xy", ) -> tuple[NDArray[_ScalarT]]: ... @overload def meshgrid( x1: ArrayLike, /, *, - copy: bool = ..., - sparse: bool = ..., - indexing: _MeshgridIdx = ..., + copy: bool = True, + sparse: bool = False, + indexing: _MeshgridIdx = "xy", ) -> tuple[NDArray[Any]]: ... @overload def meshgrid( @@ -1026,9 +1026,9 @@ def meshgrid( x2: _ArrayLike[_ScalarT2], /, *, - copy: bool = ..., - sparse: bool = ..., - indexing: _MeshgridIdx = ..., + copy: bool = True, + sparse: bool = False, + indexing: _MeshgridIdx = "xy", ) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... 
@overload def meshgrid( @@ -1036,9 +1036,9 @@ def meshgrid( x2: _ArrayLike[_ScalarT], /, *, - copy: bool = ..., - sparse: bool = ..., - indexing: _MeshgridIdx = ..., + copy: bool = True, + sparse: bool = False, + indexing: _MeshgridIdx = "xy", ) -> tuple[NDArray[Any], NDArray[_ScalarT]]: ... @overload def meshgrid( @@ -1046,9 +1046,9 @@ def meshgrid( x2: ArrayLike, /, *, - copy: bool = ..., - sparse: bool = ..., - indexing: _MeshgridIdx = ..., + copy: bool = True, + sparse: bool = False, + indexing: _MeshgridIdx = "xy", ) -> tuple[NDArray[_ScalarT], NDArray[Any]]: ... @overload def meshgrid( @@ -1056,9 +1056,9 @@ def meshgrid( x2: ArrayLike, /, *, - copy: bool = ..., - sparse: bool = ..., - indexing: _MeshgridIdx = ..., + copy: bool = True, + sparse: bool = False, + indexing: _MeshgridIdx = "xy", ) -> tuple[NDArray[Any], NDArray[Any]]: ... @overload def meshgrid( @@ -1067,9 +1067,9 @@ def meshgrid( x3: ArrayLike, /, *, - copy: bool = ..., - sparse: bool = ..., - indexing: _MeshgridIdx = ..., + copy: bool = True, + sparse: bool = False, + indexing: _MeshgridIdx = "xy", ) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any]]: ... @overload def meshgrid( @@ -1079,29 +1079,29 @@ def meshgrid( x4: ArrayLike, /, *, - copy: bool = ..., - sparse: bool = ..., - indexing: _MeshgridIdx = ..., + copy: bool = True, + sparse: bool = False, + indexing: _MeshgridIdx = "xy", ) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any], NDArray[Any]]: ... @overload def meshgrid( *xi: ArrayLike, - copy: bool = ..., - sparse: bool = ..., - indexing: _MeshgridIdx = ..., + copy: bool = True, + sparse: bool = False, + indexing: _MeshgridIdx = "xy", ) -> tuple[NDArray[Any], ...]: ... @overload def delete( arr: _ArrayLike[_ScalarT], obj: slice | _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, ) -> NDArray[_ScalarT]: ... @overload def delete( arr: ArrayLike, obj: slice | _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, ) -> NDArray[Any]: ... @overload @@ -1109,31 +1109,31 @@ def insert( arr: _ArrayLike[_ScalarT], obj: slice | _ArrayLikeInt_co, values: ArrayLike, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, ) -> NDArray[_ScalarT]: ... @overload def insert( arr: ArrayLike, obj: slice | _ArrayLikeInt_co, values: ArrayLike, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, ) -> NDArray[Any]: ... def append( arr: ArrayLike, values: ArrayLike, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, ) -> NDArray[Any]: ... @overload def digitize( x: _FloatLike_co, bins: _ArrayLikeFloat_co, - right: bool = ..., + right: bool = False, ) -> intp: ... @overload def digitize( x: _ArrayLikeFloat_co, bins: _ArrayLikeFloat_co, - right: bool = ..., + right: bool = False, ) -> NDArray[intp]: ... diff --git a/numpy/lib/_histograms_impl.pyi b/numpy/lib/_histograms_impl.pyi index 06987dd71e6b..7cb70ef21508 100644 --- a/numpy/lib/_histograms_impl.pyi +++ b/numpy/lib/_histograms_impl.pyi @@ -18,23 +18,23 @@ _BinKind: TypeAlias = L[ def histogram_bin_edges( a: ArrayLike, - bins: _BinKind | SupportsIndex | ArrayLike = ..., - range: tuple[float, float] | None = ..., - weights: ArrayLike | None = ..., + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: tuple[float, float] | None = None, + weights: ArrayLike | None = None, ) -> NDArray[Any]: ... 
 def histogram(
     a: ArrayLike,
-    bins: _BinKind | SupportsIndex | ArrayLike = ...,
-    range: tuple[float, float] | None = ...,
-    density: bool = ...,
-    weights: ArrayLike | None = ...,
+    bins: _BinKind | SupportsIndex | ArrayLike = 10,
+    range: tuple[float, float] | None = None,
+    density: bool = False,
+    weights: ArrayLike | None = None,
 ) -> tuple[NDArray[Any], NDArray[Any]]: ...
 
 def histogramdd(
     sample: ArrayLike,
-    bins: SupportsIndex | ArrayLike = ...,
-    range: Sequence[tuple[float, float]] = ...,
-    density: bool | None = ...,
-    weights: ArrayLike | None = ...,
+    bins: SupportsIndex | ArrayLike = 10,
+    range: Sequence[tuple[float, float]] | None = None,
+    density: bool | None = None,
+    weights: ArrayLike | None = None,
 ) -> tuple[NDArray[Any], tuple[NDArray[Any], ...]]: ...
diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi
index 208a8e868b48..c9ee8a5b0bb7 100644
--- a/numpy/lib/_index_tricks_impl.pyi
+++ b/numpy/lib/_index_tricks_impl.pyi
@@ -187,10 +187,10 @@ def ix_(*args: float | _NestedSequence[float]) -> tuple[NDArray[np.float64], ...
 def ix_(*args: complex | _NestedSequence[complex]) -> tuple[NDArray[np.complex128], ...]: ...
 
 #
-def fill_diagonal(a: NDArray[Any], val: object, wrap: bool = ...) -> None: ...
+def fill_diagonal(a: NDArray[Any], val: object, wrap: bool = False) -> None: ...
 
 #
-def diag_indices(n: int, ndim: int = ...) -> tuple[NDArray[np.intp], ...]: ...
+def diag_indices(n: int, ndim: int = 2) -> tuple[NDArray[np.intp], ...]: ...
 def diag_indices_from(arr: ArrayLike) -> tuple[NDArray[np.intp], ...]: ...
 
 #
diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi
index 253fcb0fdf9e..18fce91382ab 100644
--- a/numpy/lib/_npyio_impl.pyi
+++ b/numpy/lib/_npyio_impl.pyi
@@ -205,88 +205,88 @@ def fromregex(
 def genfromtxt(
     fname: _FName,
     dtype: None = None,
-    comments: str = ...,
-    delimiter: str | int | Iterable[int] | None = ...,
-    skip_header: int = ...,
-    skip_footer: int = ...,
-    converters: Mapping[int | str, Callable[[str], Any]] | None = ...,
-    missing_values: Any = ...,
-    filling_values: Any = ...,
-    usecols: Sequence[int] | None = ...,
-    names: L[True] | str | Collection[str] | None = ...,
-    excludelist: Sequence[str] | None = ...,
+    comments: str = "#",
+    delimiter: str | int | Iterable[int] | None = None,
+    skip_header: int = 0,
+    skip_footer: int = 0,
+    converters: Mapping[int | str, Callable[[str], Any]] | None = None,
+    missing_values: Any = None,
+    filling_values: Any = None,
+    usecols: Sequence[int] | None = None,
+    names: L[True] | str | Collection[str] | None = None,
+    excludelist: Sequence[str] | None = None,
     deletechars: str = ...,
-    replace_space: str = ...,
-    autostrip: bool = ...,
-    case_sensitive: bool | L["upper", "lower"] = ...,
-    defaultfmt: str = ...,
-    unpack: bool | None = ...,
-    usemask: bool = ...,
-    loose: bool = ...,
-    invalid_raise: bool = ...,
-    max_rows: int | None = ...,
-    encoding: str = ...,
+    replace_space: str = "_",
+    autostrip: bool = False,
+    case_sensitive: bool | L["upper", "lower"] = True,
+    defaultfmt: str = "f%i",
+    unpack: bool | None = None,
+    usemask: bool = False,
+    loose: bool = True,
+    invalid_raise: bool = True,
+    max_rows: int | None = None,
+    encoding: str | None = None,
     *,
-    ndmin: L[0, 1, 2] = ...,
-    like: _SupportsArrayFunc | None = ...,
+    ndmin: L[0, 1, 2] = 0,
+    like: _SupportsArrayFunc | None = None,
 ) -> NDArray[Any]: ...
@overload def genfromtxt( fname: _FName, dtype: _DTypeLike[_ScalarT], - comments: str = ..., - delimiter: str | int | Iterable[int] | None = ..., - skip_header: int = ..., - skip_footer: int = ..., - converters: Mapping[int | str, Callable[[str], Any]] | None = ..., - missing_values: Any = ..., - filling_values: Any = ..., - usecols: Sequence[int] | None = ..., - names: L[True] | str | Collection[str] | None = ..., - excludelist: Sequence[str] | None = ..., + comments: str = "#", + delimiter: str | int | Iterable[int] | None = None, + skip_header: int = 0, + skip_footer: int = 0, + converters: Mapping[int | str, Callable[[str], Any]] | None = None, + missing_values: Any = None, + filling_values: Any = None, + usecols: Sequence[int] | None = None, + names: L[True] | str | Collection[str] | None = None, + excludelist: Sequence[str] | None = None, deletechars: str = ..., - replace_space: str = ..., - autostrip: bool = ..., - case_sensitive: bool | L["upper", "lower"] = ..., - defaultfmt: str = ..., - unpack: bool | None = ..., - usemask: bool = ..., - loose: bool = ..., - invalid_raise: bool = ..., - max_rows: int | None = ..., - encoding: str = ..., + replace_space: str = "_", + autostrip: bool = False, + case_sensitive: bool | L["upper", "lower"] = True, + defaultfmt: str = "f%i", + unpack: bool | None = None, + usemask: bool = False, + loose: bool = True, + invalid_raise: bool = True, + max_rows: int | None = None, + encoding: str | None = None, *, - ndmin: L[0, 1, 2] = ..., - like: _SupportsArrayFunc | None = ..., + ndmin: L[0, 1, 2] = 0, + like: _SupportsArrayFunc | None = None, ) -> NDArray[_ScalarT]: ... @overload def genfromtxt( fname: _FName, dtype: DTypeLike, - comments: str = ..., - delimiter: str | int | Iterable[int] | None = ..., - skip_header: int = ..., - skip_footer: int = ..., - converters: Mapping[int | str, Callable[[str], Any]] | None = ..., - missing_values: Any = ..., - filling_values: Any = ..., - usecols: Sequence[int] | None = ..., - names: L[True] | str | Collection[str] | None = ..., - excludelist: Sequence[str] | None = ..., + comments: str = "#", + delimiter: str | int | Iterable[int] | None = None, + skip_header: int = 0, + skip_footer: int = 0, + converters: Mapping[int | str, Callable[[str], Any]] | None = None, + missing_values: Any = None, + filling_values: Any = None, + usecols: Sequence[int] | None = None, + names: L[True] | str | Collection[str] | None = None, + excludelist: Sequence[str] | None = None, deletechars: str = ..., - replace_space: str = ..., - autostrip: bool = ..., - case_sensitive: bool | L["upper", "lower"] = ..., - defaultfmt: str = ..., - unpack: bool | None = ..., - usemask: bool = ..., - loose: bool = ..., - invalid_raise: bool = ..., - max_rows: int | None = ..., - encoding: str = ..., + replace_space: str = "_", + autostrip: bool = False, + case_sensitive: bool | L["upper", "lower"] = True, + defaultfmt: str = "f%i", + unpack: bool | None = None, + usemask: bool = False, + loose: bool = True, + invalid_raise: bool = True, + max_rows: int | None = None, + encoding: str | None = None, *, - ndmin: L[0, 1, 2] = ..., - like: _SupportsArrayFunc | None = ..., + ndmin: L[0, 1, 2] = 0, + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... @overload diff --git a/numpy/lib/_polynomial_impl.pyi b/numpy/lib/_polynomial_impl.pyi index 3b0eade1399e..0cdab5f11b5c 100644 --- a/numpy/lib/_polynomial_impl.pyi +++ b/numpy/lib/_polynomial_impl.pyi @@ -67,47 +67,47 @@ def roots(p: ArrayLike) -> NDArray[complexfloating] | NDArray[floating]: ... 
 @overload
 def polyint(
     p: poly1d,
-    m: SupportsInt | SupportsIndex = ...,
-    k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = ...,
+    m: SupportsInt | SupportsIndex = 1,
+    k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = None,
 ) -> poly1d: ...
 @overload
 def polyint(
     p: _ArrayLikeFloat_co,
-    m: SupportsInt | SupportsIndex = ...,
-    k: _ArrayLikeFloat_co | None = ...,
+    m: SupportsInt | SupportsIndex = 1,
+    k: _ArrayLikeFloat_co | None = None,
 ) -> NDArray[floating]: ...
 @overload
 def polyint(
     p: _ArrayLikeComplex_co,
-    m: SupportsInt | SupportsIndex = ...,
-    k: _ArrayLikeComplex_co | None = ...,
+    m: SupportsInt | SupportsIndex = 1,
+    k: _ArrayLikeComplex_co | None = None,
 ) -> NDArray[complexfloating]: ...
 @overload
 def polyint(
     p: _ArrayLikeObject_co,
-    m: SupportsInt | SupportsIndex = ...,
-    k: _ArrayLikeObject_co | None = ...,
+    m: SupportsInt | SupportsIndex = 1,
+    k: _ArrayLikeObject_co | None = None,
 ) -> NDArray[object_]: ...
 
 @overload
 def polyder(
     p: poly1d,
-    m: SupportsInt | SupportsIndex = ...,
+    m: SupportsInt | SupportsIndex = 1,
 ) -> poly1d: ...
 @overload
 def polyder(
     p: _ArrayLikeFloat_co,
-    m: SupportsInt | SupportsIndex = ...,
+    m: SupportsInt | SupportsIndex = 1,
 ) -> NDArray[floating]: ...
 @overload
 def polyder(
     p: _ArrayLikeComplex_co,
-    m: SupportsInt | SupportsIndex = ...,
+    m: SupportsInt | SupportsIndex = 1,
 ) -> NDArray[complexfloating]: ...
 @overload
 def polyder(
     p: _ArrayLikeObject_co,
-    m: SupportsInt | SupportsIndex = ...,
+    m: SupportsInt | SupportsIndex = 1,
 ) -> NDArray[object_]: ...
 
 @overload
@@ -115,40 +115,40 @@ def polyfit(
     x: _ArrayLikeFloat_co,
     y: _ArrayLikeFloat_co,
     deg: SupportsIndex | SupportsInt,
-    rcond: float | None = ...,
-    full: L[False] = ...,
-    w: _ArrayLikeFloat_co | None = ...,
-    cov: L[False] = ...,
+    rcond: float | None = None,
+    full: L[False] = False,
+    w: _ArrayLikeFloat_co | None = None,
+    cov: L[False] = False,
 ) -> NDArray[float64]: ...
 @overload
 def polyfit(
     x: _ArrayLikeComplex_co,
     y: _ArrayLikeComplex_co,
     deg: SupportsIndex | SupportsInt,
-    rcond: float | None = ...,
-    full: L[False] = ...,
-    w: _ArrayLikeFloat_co | None = ...,
-    cov: L[False] = ...,
+    rcond: float | None = None,
+    full: L[False] = False,
+    w: _ArrayLikeFloat_co | None = None,
+    cov: L[False] = False,
 ) -> NDArray[complex128]: ...
 @overload
 def polyfit(
     x: _ArrayLikeFloat_co,
     y: _ArrayLikeFloat_co,
     deg: SupportsIndex | SupportsInt,
-    rcond: float | None = ...,
-    full: L[False] = ...,
-    w: _ArrayLikeFloat_co | None = ...,
+    rcond: float | None = None,
+    full: L[False] = False,
+    w: _ArrayLikeFloat_co | None = None,
     cov: L[True, "unscaled"] = ...,
 ) -> _2Tup[NDArray[float64]]: ...
 @overload
 def polyfit(
     x: _ArrayLikeComplex_co,
     y: _ArrayLikeComplex_co,
     deg: SupportsIndex | SupportsInt,
-    rcond: float | None = ...,
-    full: L[False] = ...,
-    w: _ArrayLikeFloat_co | None = ...,
+    rcond: float | None = None,
+    full: L[False] = False,
+    w: _ArrayLikeFloat_co | None = None,
     cov: L[True, "unscaled"] = ...,
 ) -> _2Tup[NDArray[complex128]]: ...
 @overload
 def polyfit(
@@ -157,19 +157,19 @@ def polyfit(
     deg: SupportsIndex | SupportsInt,
     rcond: float | None,
     full: L[True],
-    w: _ArrayLikeFloat_co | None = ...,
-    cov: bool | L["unscaled"] = ...,
+    w: _ArrayLikeFloat_co | None = None,
+    cov: bool | L["unscaled"] = False,
 ) -> _5Tup[NDArray[float64]]: ...
@overload def polyfit( x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, deg: SupportsIndex | SupportsInt, - rcond: float | None = ..., + rcond: float | None = None, *, full: L[True], - w: _ArrayLikeFloat_co | None = ..., - cov: bool | L["unscaled"] = ..., + w: _ArrayLikeFloat_co | None = None, + cov: bool | L["unscaled"] = False, ) -> _5Tup[NDArray[float64]]: ... @overload def polyfit( @@ -178,19 +178,19 @@ def polyfit( deg: SupportsIndex | SupportsInt, rcond: float | None, full: L[True], - w: _ArrayLikeFloat_co | None = ..., - cov: bool | L["unscaled"] = ..., + w: _ArrayLikeFloat_co | None = None, + cov: bool | L["unscaled"] = False, ) -> _5Tup[NDArray[complex128]]: ... @overload def polyfit( x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, deg: SupportsIndex | SupportsInt, - rcond: float | None = ..., + rcond: float | None = None, *, full: L[True], - w: _ArrayLikeFloat_co | None = ..., - cov: bool | L["unscaled"] = ..., + w: _ArrayLikeFloat_co | None = None, + cov: bool | L["unscaled"] = False, ) -> _5Tup[NDArray[complex128]]: ... @overload diff --git a/numpy/lib/_shape_base_impl.pyi b/numpy/lib/_shape_base_impl.pyi index 0206d95109fa..f8ba31d0f774 100644 --- a/numpy/lib/_shape_base_impl.pyi +++ b/numpy/lib/_shape_base_impl.pyi @@ -79,7 +79,7 @@ class _SupportsArrayWrap(Protocol): def take_along_axis( arr: _ScalarT | NDArray[_ScalarT], indices: NDArray[integer], - axis: int | None = ..., + axis: int | None = -1, ) -> NDArray[_ScalarT]: ... def put_along_axis( @@ -147,26 +147,26 @@ def dstack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ... def array_split( ary: _ArrayLike[_ScalarT], indices_or_sections: _ShapeLike, - axis: SupportsIndex = ..., + axis: SupportsIndex = 0, ) -> list[NDArray[_ScalarT]]: ... @overload def array_split( ary: ArrayLike, indices_or_sections: _ShapeLike, - axis: SupportsIndex = ..., + axis: SupportsIndex = 0, ) -> list[NDArray[Any]]: ... @overload def split( ary: _ArrayLike[_ScalarT], indices_or_sections: _ShapeLike, - axis: SupportsIndex = ..., + axis: SupportsIndex = 0, ) -> list[NDArray[_ScalarT]]: ... @overload def split( ary: ArrayLike, indices_or_sections: _ShapeLike, - axis: SupportsIndex = ..., + axis: SupportsIndex = 0, ) -> list[NDArray[Any]]: ... @overload diff --git a/numpy/lib/_stride_tricks_impl.pyi b/numpy/lib/_stride_tricks_impl.pyi index a7005d702d96..51d377e06a18 100644 --- a/numpy/lib/_stride_tricks_impl.pyi +++ b/numpy/lib/_stride_tricks_impl.pyi @@ -20,55 +20,55 @@ class DummyArray: @overload def as_strided( x: _ArrayLike[_ScalarT], - shape: Iterable[int] | None = ..., - strides: Iterable[int] | None = ..., - subok: bool = ..., - writeable: bool = ..., + shape: Iterable[int] | None = None, + strides: Iterable[int] | None = None, + subok: bool = False, + writeable: bool = True, ) -> NDArray[_ScalarT]: ... @overload def as_strided( x: ArrayLike, - shape: Iterable[int] | None = ..., - strides: Iterable[int] | None = ..., - subok: bool = ..., - writeable: bool = ..., + shape: Iterable[int] | None = None, + strides: Iterable[int] | None = None, + subok: bool = False, + writeable: bool = True, ) -> NDArray[Any]: ... @overload def sliding_window_view( x: _ArrayLike[_ScalarT], window_shape: int | Iterable[int], - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, *, - subok: bool = ..., - writeable: bool = ..., + subok: bool = False, + writeable: bool = False, ) -> NDArray[_ScalarT]: ... 
@overload def sliding_window_view( x: ArrayLike, window_shape: int | Iterable[int], - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, *, - subok: bool = ..., - writeable: bool = ..., + subok: bool = False, + writeable: bool = False, ) -> NDArray[Any]: ... @overload def broadcast_to( array: _ArrayLike[_ScalarT], shape: int | Iterable[int], - subok: bool = ..., + subok: bool = False, ) -> NDArray[_ScalarT]: ... @overload def broadcast_to( array: ArrayLike, shape: int | Iterable[int], - subok: bool = ..., + subok: bool = False, ) -> NDArray[Any]: ... def broadcast_shapes(*args: _ShapeLike) -> _AnyShape: ... def broadcast_arrays( *args: ArrayLike, - subok: bool = ..., + subok: bool = False, ) -> tuple[NDArray[Any], ...]: ... diff --git a/numpy/lib/_twodim_base_impl.pyi b/numpy/lib/_twodim_base_impl.pyi index 9e70d0a617f6..2c4de938444f 100644 --- a/numpy/lib/_twodim_base_impl.pyi +++ b/numpy/lib/_twodim_base_impl.pyi @@ -84,13 +84,13 @@ def flipud(m: ArrayLike) -> NDArray[Any]: ... @overload def eye( N: int, - M: int | None = ..., - k: int = ..., - dtype: None = ..., - order: _OrderCF = ..., + M: int | None = None, + k: int = 0, + dtype: None = ..., # = float + order: _OrderCF = "C", *, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[float64]: ... @overload def eye( @@ -98,52 +98,52 @@ def eye( M: int | None, k: int, dtype: _DTypeLike[_ScalarT], - order: _OrderCF = ..., + order: _OrderCF = "C", *, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[_ScalarT]: ... @overload def eye( N: int, - M: int | None = ..., - k: int = ..., + M: int | None = None, + k: int = 0, *, dtype: _DTypeLike[_ScalarT], - order: _OrderCF = ..., - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., + order: _OrderCF = "C", + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[_ScalarT]: ... @overload def eye( N: int, - M: int | None = ..., - k: int = ..., - dtype: DTypeLike = ..., - order: _OrderCF = ..., + M: int | None = None, + k: int = 0, + dtype: DTypeLike = ..., # = float + order: _OrderCF = "C", *, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... @overload -def diag(v: _ArrayLike[_ScalarT], k: int = ...) -> NDArray[_ScalarT]: ... +def diag(v: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... @overload -def diag(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... +def diag(v: ArrayLike, k: int = 0) -> NDArray[Any]: ... @overload -def diagflat(v: _ArrayLike[_ScalarT], k: int = ...) -> NDArray[_ScalarT]: ... +def diagflat(v: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... @overload -def diagflat(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... +def diagflat(v: ArrayLike, k: int = 0) -> NDArray[Any]: ... @overload def tri( N: int, - M: int | None = ..., - k: int = ..., - dtype: None = ..., + M: int | None = None, + k: int = 0, + dtype: None = ..., # = float *, - like: _SupportsArrayFunc | None = ... + like: _SupportsArrayFunc | None = None ) -> NDArray[float64]: ... @overload def tri( @@ -152,25 +152,25 @@ def tri( k: int, dtype: _DTypeLike[_ScalarT], *, - like: _SupportsArrayFunc | None = ... + like: _SupportsArrayFunc | None = None ) -> NDArray[_ScalarT]: ... 
@overload def tri( N: int, - M: int | None = ..., - k: int = ..., + M: int | None = None, + k: int = 0, *, dtype: _DTypeLike[_ScalarT], - like: _SupportsArrayFunc | None = ... + like: _SupportsArrayFunc | None = None ) -> NDArray[_ScalarT]: ... @overload def tri( N: int, - M: int | None = ..., - k: int = ..., - dtype: DTypeLike = ..., + M: int | None = None, + k: int = 0, + dtype: DTypeLike = ..., # = float *, - like: _SupportsArrayFunc | None = ... + like: _SupportsArrayFunc | None = None ) -> NDArray[Any]: ... @overload @@ -186,36 +186,36 @@ def triu(m: ArrayLike, k: int = 0) -> NDArray[Any]: ... @overload def vander( # type: ignore[misc] x: _ArrayLikeInt_co, - N: int | None = ..., - increasing: bool = ..., + N: int | None = None, + increasing: bool = False, ) -> NDArray[signedinteger]: ... @overload def vander( # type: ignore[misc] x: _ArrayLikeFloat_co, - N: int | None = ..., - increasing: bool = ..., + N: int | None = None, + increasing: bool = False, ) -> NDArray[floating]: ... @overload def vander( x: _ArrayLikeComplex_co, - N: int | None = ..., - increasing: bool = ..., + N: int | None = None, + increasing: bool = False, ) -> NDArray[complexfloating]: ... @overload def vander( x: _ArrayLikeObject_co, - N: int | None = ..., - increasing: bool = ..., + N: int | None = None, + increasing: bool = False, ) -> NDArray[object_]: ... @overload def histogram2d( x: _ArrayLike1D[_ComplexFloatingT], y: _ArrayLike1D[_ComplexFloatingT | _Float_co], - bins: int | Sequence[int] = ..., - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, ) -> tuple[ NDArray[float64], NDArray[_ComplexFloatingT], @@ -225,10 +225,10 @@ def histogram2d( def histogram2d( x: _ArrayLike1D[_ComplexFloatingT | _Float_co], y: _ArrayLike1D[_ComplexFloatingT], - bins: int | Sequence[int] = ..., - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, ) -> tuple[ NDArray[float64], NDArray[_ComplexFloatingT], @@ -238,10 +238,10 @@ def histogram2d( def histogram2d( x: _ArrayLike1D[_InexactT], y: _ArrayLike1D[_InexactT | _Int_co], - bins: int | Sequence[int] = ..., - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, ) -> tuple[ NDArray[float64], NDArray[_InexactT], @@ -251,10 +251,10 @@ def histogram2d( def histogram2d( x: _ArrayLike1D[_InexactT | _Int_co], y: _ArrayLike1D[_InexactT], - bins: int | Sequence[int] = ..., - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, ) -> tuple[ NDArray[float64], NDArray[_InexactT], @@ -264,10 +264,10 @@ def histogram2d( def histogram2d( x: _ArrayLike1DInt_co | Sequence[float], y: _ArrayLike1DInt_co | Sequence[float], - bins: int | Sequence[int] = ..., - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None 
= ..., - weights: _ArrayLike1DFloat_co | None = ..., + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, ) -> tuple[ NDArray[float64], NDArray[float64], @@ -277,10 +277,10 @@ def histogram2d( def histogram2d( x: Sequence[complex], y: Sequence[complex], - bins: int | Sequence[int] = ..., - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, ) -> tuple[ NDArray[float64], NDArray[complex128 | float64], @@ -291,9 +291,9 @@ def histogram2d( x: _ArrayLike1DNumber_co, y: _ArrayLike1DNumber_co, bins: _ArrayLike1D[_NumberCoT] | Sequence[_ArrayLike1D[_NumberCoT]], - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, ) -> tuple[ NDArray[float64], NDArray[_NumberCoT], @@ -304,9 +304,9 @@ def histogram2d( x: _ArrayLike1D[_InexactT], y: _ArrayLike1D[_InexactT], bins: Sequence[_ArrayLike1D[_NumberCoT] | int], - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, ) -> tuple[ NDArray[float64], NDArray[_NumberCoT | _InexactT], @@ -317,9 +317,9 @@ def histogram2d( x: _ArrayLike1DInt_co | Sequence[float], y: _ArrayLike1DInt_co | Sequence[float], bins: Sequence[_ArrayLike1D[_NumberCoT] | int], - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, ) -> tuple[ NDArray[float64], NDArray[_NumberCoT | float64], @@ -330,9 +330,9 @@ def histogram2d( x: Sequence[complex], y: Sequence[complex], bins: Sequence[_ArrayLike1D[_NumberCoT] | int], - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, ) -> tuple[ NDArray[float64], NDArray[_NumberCoT | complex128 | float64], @@ -343,9 +343,9 @@ def histogram2d( x: _ArrayLike1DNumber_co, y: _ArrayLike1DNumber_co, bins: Sequence[Sequence[bool]], - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, ) -> tuple[ NDArray[float64], NDArray[np.bool], @@ -356,9 +356,9 @@ def histogram2d( x: _ArrayLike1DNumber_co, y: _ArrayLike1DNumber_co, bins: Sequence[Sequence[int]], - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, ) -> tuple[ NDArray[float64], NDArray[np.int_ | np.bool], @@ -369,9 +369,9 @@ def histogram2d( x: _ArrayLike1DNumber_co, y: _ArrayLike1DNumber_co, bins: Sequence[Sequence[float]], - range: _ArrayLike2DFloat_co | None = ..., - 
density: bool | None = ...,
-    weights: _ArrayLike1DFloat_co | None = ...,
+    range: _ArrayLike2DFloat_co | None = None,
+    density: bool | None = None,
+    weights: _ArrayLike1DFloat_co | None = None,
 ) -> tuple[
     NDArray[float64],
     NDArray[np.float64 | np.int_ | np.bool],
@@ -382,9 +382,9 @@ def histogram2d(
     x: _ArrayLike1DNumber_co,
     y: _ArrayLike1DNumber_co,
     bins: Sequence[Sequence[complex]],
-    range: _ArrayLike2DFloat_co | None = ...,
-    density: bool | None = ...,
-    weights: _ArrayLike1DFloat_co | None = ...,
+    range: _ArrayLike2DFloat_co | None = None,
+    density: bool | None = None,
+    weights: _ArrayLike1DFloat_co | None = None,
 ) -> tuple[
     NDArray[float64],
     NDArray[np.complex128 | np.float64 | np.int_ | np.bool],
@@ -398,7 +398,7 @@ def histogram2d(
 def mask_indices(
     n: int,
     mask_func: _MaskFunc[int],
-    k: int = ...,
+    k: int = 0,
 ) -> tuple[NDArray[intp], NDArray[intp]]: ...
 @overload
 def mask_indices(
@@ -409,22 +409,22 @@
 def tril_indices(
     n: int,
-    k: int = ...,
-    m: int | None = ...,
+    k: int = 0,
+    m: int | None = None,
 ) -> tuple[NDArray[int_], NDArray[int_]]: ...
 
 def tril_indices_from(
     arr: NDArray[Any],
-    k: int = ...,
+    k: int = 0,
 ) -> tuple[NDArray[int_], NDArray[int_]]: ...
 
 def triu_indices(
     n: int,
-    k: int = ...,
-    m: int | None = ...,
+    k: int = 0,
+    m: int | None = None,
 ) -> tuple[NDArray[int_], NDArray[int_]]: ...
 
 def triu_indices_from(
     arr: NDArray[Any],
-    k: int = ...,
+    k: int = 0,
 ) -> tuple[NDArray[int_], NDArray[int_]]: ...
diff --git a/numpy/lib/_ufunclike_impl.pyi b/numpy/lib/_ufunclike_impl.pyi
index a673f05c010d..d5c3151fb892 100644
--- a/numpy/lib/_ufunclike_impl.pyi
+++ b/numpy/lib/_ufunclike_impl.pyi
@@ -16,17 +16,17 @@ _ArrayT = TypeVar("_ArrayT", bound=NDArray[Any])
 @overload
 def fix(  # type: ignore[misc]
     x: _FloatLike_co,
-    out: None = ...,
+    out: None = None,
 ) -> floating: ...
 @overload
 def fix(
     x: _ArrayLikeFloat_co,
-    out: None = ...,
+    out: None = None,
 ) -> NDArray[floating]: ...
 @overload
 def fix(
     x: _ArrayLikeObject_co,
-    out: None = ...,
+    out: None = None,
 ) -> NDArray[object_]: ...
 @overload
 def fix(
@@ -37,12 +37,12 @@ def fix(
 @overload
 def isposinf(  # type: ignore[misc]
     x: _FloatLike_co,
-    out: None = ...,
+    out: None = None,
 ) -> np.bool: ...
 @overload
 def isposinf(
     x: _ArrayLikeFloat_co,
-    out: None = ...,
+    out: None = None,
 ) -> NDArray[np.bool]: ...
 @overload
 def isposinf(
@@ -53,12 +53,12 @@ def isposinf(
 @overload
 def isneginf(  # type: ignore[misc]
     x: _FloatLike_co,
-    out: None = ...,
+    out: None = None,
 ) -> np.bool: ...
 @overload
 def isneginf(
     x: _ArrayLikeFloat_co,
-    out: None = ...,
+    out: None = None,
 ) -> NDArray[np.bool]: ...
 @overload
 def isneginf(

From 71eebaf8513dfd6ed40b0c689702d81e3f6f2527 Mon Sep 17 00:00:00 2001
From: Evgeni Burovski
Date: Fri, 22 Aug 2025 15:49:05 +0200
Subject: [PATCH 0343/1018] BUG: make round consistently return a copy (#29137)

* BUG: make round consistently return a copy

Previously, `round` returned a view for integer arguments and a copy
otherwise. All other "rounding" functions (ceil, floor, trunc, rint)
always return copies. Thus, make `round` consistent with them.
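A minimal sketch of the contract this change establishes (illustrative
only, not part of the original patch; the array `a` below is an arbitrary
integer-typed example, mirroring the `test_round_copies` test added here):

    import numpy as np

    a = np.arange(3)                    # integer input: round() used to
                                        # return `a` itself for decimals >= 0
    assert not np.shares_memory(np.round(a), a)  # now always a fresh copy
    assert not np.shares_memory(np.rint(a), a)   # consistent with rint,
                                                 # ceil, floor, and trunc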
* DOC: add a release note snippet for the `round` change * TST: add a test for round preserving the order * MAINT: address review comments * BUG: round: NPY_KEEPORDER for complex arrays, too --- .../upcoming_changes/29137.compatibility.rst | 3 +++ numpy/_core/src/multiarray/calculation.c | 5 ++--- numpy/_core/tests/test_multiarray.py | 15 +++++++++++++++ 3 files changed, 20 insertions(+), 3 deletions(-) create mode 100644 doc/release/upcoming_changes/29137.compatibility.rst diff --git a/doc/release/upcoming_changes/29137.compatibility.rst b/doc/release/upcoming_changes/29137.compatibility.rst new file mode 100644 index 000000000000..3ac9da2a4c48 --- /dev/null +++ b/doc/release/upcoming_changes/29137.compatibility.rst @@ -0,0 +1,3 @@ +* `numpy.round` now always returns a copy. Previously, it returned a view + for integer inputs for ``decimals >= 0`` and a copy in all other cases. + This change brings ``round`` in line with ``ceil``, ``floor`` and ``trunc``. diff --git a/numpy/_core/src/multiarray/calculation.c b/numpy/_core/src/multiarray/calculation.c index 87f03a94fa5f..0d855281d57a 100644 --- a/numpy/_core/src/multiarray/calculation.c +++ b/numpy/_core/src/multiarray/calculation.c @@ -576,7 +576,7 @@ PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out) Py_INCREF(arr); } else { - arr = PyArray_Copy(a); + arr = PyArray_NewCopy(a, NPY_KEEPORDER); if (arr == NULL) { return NULL; } @@ -637,8 +637,7 @@ PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out) return (PyObject *)out; } else { - Py_INCREF(a); - return (PyObject *)a; + return PyArray_NewCopy(a, NPY_KEEPORDER); } } if (decimals == 0) { diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index da4eeb91cfc2..d5aac78c4a4d 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -2153,6 +2153,7 @@ def check_round(arr, expected, *round_args): assert_equal(out, expected) assert out is res + check_round(np.array([1, 2, 3]), [1, 2, 3]) check_round(np.array([1.2, 1.5]), [1, 2]) check_round(np.array(1.5), 2) check_round(np.array([12.2, 15.5]), [10, 20], -1) @@ -2161,6 +2162,20 @@ def check_round(arr, expected, *round_args): check_round(np.array([4.5 + 1.5j]), [4 + 2j]) check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1) + @pytest.mark.parametrize('dt', ['uint8', int, float, complex]) + def test_round_copies(self, dt): + a = np.arange(3, dtype=dt) + assert not np.shares_memory(a.round(), a) + assert not np.shares_memory(a.round(decimals=2), a) + + out = np.empty(3, dtype=dt) + assert not np.shares_memory(a.round(out=out), a) + + a = np.arange(12).astype(dt).reshape(3, 4).T + + assert a.flags.f_contiguous + assert np.round(a).flags.f_contiguous + def test_squeeze(self): a = np.array([[[1], [2], [3]]]) assert_equal(a.squeeze(), [1, 2, 3]) From 0034f453d126bd7e29ac0eb055013cead6ebb537 Mon Sep 17 00:00:00 2001 From: Mohit Deoli <141480197+devmt04@users.noreply.github.com> Date: Fri, 22 Aug 2025 20:39:33 +0530 Subject: [PATCH 0344/1018] Update doc/source/reference/arrays.datetime.rst Co-authored-by: Joren Hammudoglu --- doc/source/reference/arrays.datetime.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index 59e11fa91f5a..895d725c1f28 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -350,7 +350,7 @@ The protocol is described in the following table: -In the casea where conversion of 
`datetime64` and/or `timedelta64` is done against Python types like ``int``, ``float``, and ``str`` the corresponding return types will be ``np.str_``, ``np.int64`` and ``np.float64``.
+In the case where conversion of `datetime64` and/or `timedelta64` is done against Python types like ``int``, ``float``, and ``str``, the corresponding return types will be ``np.int64``, ``np.float64``, and ``np.str_``, respectively.
 
 .. admonition:: Example

From f5234ec0f62850b0944ef1429e7ef93e3f557c01 Mon Sep 17 00:00:00 2001
From: Mohit Deoli
Date: Fri, 22 Aug 2025 21:10:25 +0530
Subject: [PATCH 0345/1018] DOC: Update doc/source/reference/arrays.datetime.rst

---
 doc/source/reference/arrays.datetime.rst | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst
index 895d725c1f28..2235f92e0bab 100644
--- a/doc/source/reference/arrays.datetime.rst
+++ b/doc/source/reference/arrays.datetime.rst
@@ -324,8 +324,8 @@ The protocol is described in the following table:
    ns/ps/fs/as                      ``int``                           ``int``
    μs/ms/s/m/h                      `datetime.datetime`               `datetime.timedelta`
    D/W (Linear units)               `datetime.date`                   `datetime.timedelta`
-   Y/M (Non-linear units)           `datetime.date`                  ``int``
-   Generic units                    `datetime.date`                  ``int``
+   Y/M (Non-linear units)           `datetime.date`                   ``int``
+   Generic units                    `datetime.date`                   ``int``
    ================================ ================================= ==================================

From 1aa051052ca0d8b4d3350d19a0d99a8091454ec3 Mon Sep 17 00:00:00 2001
From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com>
Date: Fri, 22 Aug 2025 12:05:07 -0400
Subject: [PATCH 0346/1018] TST: Replace xunit setup with methods (#29605)

---
 numpy/_core/tests/test_half.py         | 98 ++++++++++++++------------
 numpy/_core/tests/test_indexing.py     | 54 +++++++-------
 numpy/_core/tests/test_numerictypes.py |  9 ++-
 numpy/_core/tests/test_records.py      | 18 ++---
 4 files changed, 97 insertions(+), 82 deletions(-)

diff --git a/numpy/_core/tests/test_half.py b/numpy/_core/tests/test_half.py
index 711c13655b7a..ef13c5fbcde6 100644
--- a/numpy/_core/tests/test_half.py
+++ b/numpy/_core/tests/test_half.py
@@ -18,56 +18,63 @@ def assert_raises_fpe(strmatch, callable, *args, **kwargs):
                 f"Did not raise floating point {strmatch} error")

 class TestHalf:
-    def setup_method(self):
+    def _create_arrays_all(self):
         # An array of all possible float16 values
-        self.all_f16 = np.arange(0x10000, dtype=uint16)
-        self.all_f16 = self.all_f16.view(float16)
+        all_f16 = np.arange(0x10000, dtype=uint16)
+        all_f16 = all_f16.view(float16)

         # NaN value can cause an invalid FP exception if HW is being used
         with np.errstate(invalid='ignore'):
-            self.all_f32 = np.array(self.all_f16, dtype=float32)
-            self.all_f64 = np.array(self.all_f16, dtype=float64)
+            all_f32 = np.array(all_f16, dtype=float32)
+            all_f64 = np.array(all_f16, dtype=float64)
+        return all_f16, all_f32, all_f64

+    def _create_arrays_nonan(self):
         # An array of all non-NaN float16 values, in sorted order
-        self.nonan_f16 = np.concatenate(
+        nonan_f16 = np.concatenate(
             (np.arange(0xfc00, 0x7fff, -1, dtype=uint16),
              np.arange(0x0000, 0x7c01, 1, dtype=uint16)))
-        self.nonan_f16 = self.nonan_f16.view(float16)
-        self.nonan_f32 = np.array(self.nonan_f16, dtype=float32)
-        self.nonan_f64 = np.array(self.nonan_f16, dtype=float64)
-
-        # An array of all finite float16 values, in sorted order
-        self.finite_f16 = self.nonan_f16[1:-1]
-        self.finite_f32 = self.nonan_f32[1:-1]
-        self.finite_f64 = self.nonan_f64[1:-1]
+        nonan_f16 
= nonan_f16.view(float16) + nonan_f32 = np.array(nonan_f16, dtype=float32) + nonan_f64 = np.array(nonan_f16, dtype=float64) + return nonan_f16, nonan_f32, nonan_f64 + + def _create_arrays_finite(self): + nonan_f16, nonan_f32, nonan_f64 = self._create_arrays_nonan() + finite_f16 = nonan_f16[1:-1] + finite_f32 = nonan_f32[1:-1] + finite_f64 = nonan_f64[1:-1] + return finite_f16, finite_f32, finite_f64 def test_half_conversions(self): """Checks that all 16-bit values survive conversion to/from 32-bit and 64-bit float""" # Because the underlying routines preserve the NaN bits, every # value is preserved when converting to/from other floats. + all_f16, all_f32, all_f64 = self._create_arrays_all() + nonan_f16, _, _ = self._create_arrays_nonan() # Convert from float32 back to float16 with np.errstate(invalid='ignore'): - b = np.array(self.all_f32, dtype=float16) + b = np.array(all_f32, dtype=float16) # avoid testing NaNs due to differing bit patterns in Q/S NaNs b_nn = b == b - assert_equal(self.all_f16[b_nn].view(dtype=uint16), + assert_equal(all_f16[b_nn].view(dtype=uint16), b[b_nn].view(dtype=uint16)) # Convert from float64 back to float16 with np.errstate(invalid='ignore'): - b = np.array(self.all_f64, dtype=float16) + b = np.array(all_f64, dtype=float16) b_nn = b == b - assert_equal(self.all_f16[b_nn].view(dtype=uint16), + assert_equal(all_f16[b_nn].view(dtype=uint16), b[b_nn].view(dtype=uint16)) # Convert float16 to longdouble and back # This doesn't necessarily preserve the extra NaN bits, # so exclude NaNs. - a_ld = np.array(self.nonan_f16, dtype=np.longdouble) + a_ld = np.array(nonan_f16, dtype=np.longdouble) b = np.array(a_ld, dtype=float16) - assert_equal(self.nonan_f16.view(dtype=uint16), + assert_equal(nonan_f16.view(dtype=uint16), b.view(dtype=uint16)) # Check the range for which all integers can be represented @@ -171,34 +178,35 @@ def test_half_conversion_denormal_round_even(self, float_t, uint_t, bits): assert larger_value.astype(np.float16) == smallest_value def test_nans_infs(self): + all_f16, all_f32, _ = self._create_arrays_all() with np.errstate(all='ignore'): # Check some of the ufuncs - assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32)) - assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32)) - assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32)) - assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32)) + assert_equal(np.isnan(all_f16), np.isnan(all_f32)) + assert_equal(np.isinf(all_f16), np.isinf(all_f32)) + assert_equal(np.isfinite(all_f16), np.isfinite(all_f32)) + assert_equal(np.signbit(all_f16), np.signbit(all_f32)) assert_equal(np.spacing(float16(65504)), np.inf) # Check comparisons of all values with NaN nan = float16(np.nan) - assert_(not (self.all_f16 == nan).any()) - assert_(not (nan == self.all_f16).any()) + assert_(not (all_f16 == nan).any()) + assert_(not (nan == all_f16).any()) - assert_((self.all_f16 != nan).all()) - assert_((nan != self.all_f16).all()) + assert_((all_f16 != nan).all()) + assert_((nan != all_f16).all()) - assert_(not (self.all_f16 < nan).any()) - assert_(not (nan < self.all_f16).any()) + assert_(not (all_f16 < nan).any()) + assert_(not (nan < all_f16).any()) - assert_(not (self.all_f16 <= nan).any()) - assert_(not (nan <= self.all_f16).any()) + assert_(not (all_f16 <= nan).any()) + assert_(not (nan <= all_f16).any()) - assert_(not (self.all_f16 > nan).any()) - assert_(not (nan > self.all_f16).any()) + assert_(not (all_f16 > nan).any()) + assert_(not (nan > all_f16).any()) - assert_(not (self.all_f16 >= 
nan).any()) - assert_(not (nan >= self.all_f16).any()) + assert_(not (all_f16 >= nan).any()) + assert_(not (nan >= all_f16).any()) def test_half_values(self): """Confirms a small number of known half values""" @@ -255,9 +263,10 @@ def test_half_rounding(self): def test_half_correctness(self): """Take every finite float16, and check the casting functions with a manual conversion.""" + finite_f16, finite_f32, finite_f64 = self._create_arrays_finite() # Create an array of all finite float16s - a_bits = self.finite_f16.view(dtype=uint16) + a_bits = finite_f16.view(dtype=uint16) # Convert to 64-bit float manually a_sgn = (-1.0)**((a_bits & 0x8000) >> 15) @@ -270,29 +279,30 @@ def test_half_correctness(self): a_manual = a_sgn * a_man * 2.0**a_exp - a32_fail = np.nonzero(self.finite_f32 != a_manual)[0] + a32_fail = np.nonzero(finite_f32 != a_manual)[0] if len(a32_fail) != 0: bad_index = a32_fail[0] - assert_equal(self.finite_f32, a_manual, + assert_equal(finite_f32, a_manual, "First non-equal is half value 0x%x -> %g != %g" % (a_bits[bad_index], - self.finite_f32[bad_index], + finite_f32[bad_index], a_manual[bad_index])) - a64_fail = np.nonzero(self.finite_f64 != a_manual)[0] + a64_fail = np.nonzero(finite_f64 != a_manual)[0] if len(a64_fail) != 0: bad_index = a64_fail[0] - assert_equal(self.finite_f64, a_manual, + assert_equal(finite_f64, a_manual, "First non-equal is half value 0x%x -> %g != %g" % (a_bits[bad_index], - self.finite_f64[bad_index], + finite_f64[bad_index], a_manual[bad_index])) def test_half_ordering(self): """Make sure comparisons are working right""" + nonan_f16, _, _ = self._create_arrays_nonan() # All non-NaN float16 values in reverse order - a = self.nonan_f16[::-1].copy() + a = nonan_f16[::-1].copy() # 32-bit float copy b = np.array(a, dtype=float32) diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index c65468ebd24a..b4bb53fa71e1 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -776,6 +776,7 @@ def test_boolean_index_cast_assign(self): zero_array.__setitem__, bool_index, np.array([1j])) assert_equal(zero_array[0, 1], 0) + class TestFancyIndexingEquivalence: def test_object_assign(self): # Check that the field and object special case using copyto is active. @@ -846,10 +847,11 @@ class TestMultiIndexingAutomated: """ - def setup_method(self): - self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6) - self.b = np.empty((3, 0, 5, 6)) - self.complex_indices = ['skip', Ellipsis, + def _create_array(self): + return np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6) + + def _create_complex_indices(self): + return ['skip', Ellipsis, 0, # Boolean indices, up to 3-d for some special cases of eating up # dimensions, also need to test all False @@ -869,11 +871,6 @@ def setup_method(self): np.array([2, -1], dtype=np.int8), np.zeros([1] * 31, dtype=int), # trigger too large array. np.array([0., 1.])] # invalid datatype - # Some simpler indices that still cover a bit more - self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]), - 'skip'] - # Very simple ones to fill the rest: - self.fill_indices = [slice(None, None), 0] def _get_multi_index(self, arr, indices): """Mimic multi dimensional indexing. @@ -1206,16 +1203,23 @@ def test_boolean(self): # it is aligned to the left. 
This is probably correct for # consistency with arr[boolean_array,] also no broadcasting # is done at all + a = self._create_array() self._check_multi_index( - self.a, (np.zeros_like(self.a, dtype=bool),)) + a, (np.zeros_like(a, dtype=bool),)) self._check_multi_index( - self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],)) + a, (np.zeros_like(a, dtype=bool)[..., 0],)) self._check_multi_index( - self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],)) + a, (np.zeros_like(a, dtype=bool)[None, ...],)) def test_multidim(self): # Automatically test combinations with complex indexes on 2nd (or 1st) # spot and the simple ones in one other spot. + a = self._create_array() + b = np.empty((3, 0, 5, 6)) + complex_indices = self._create_complex_indices() + simple_indices = [Ellipsis, None, -1, [1], np.array([True]), 'skip'] + fill_indices = [slice(None, None), 0] + with warnings.catch_warnings(): # This is so that np.array(True) is not accepted in a full integer # index, when running the file separately. @@ -1226,28 +1230,30 @@ def isskip(idx): return isinstance(idx, str) and idx == "skip" for simple_pos in [0, 2, 3]: - tocheck = [self.fill_indices, self.complex_indices, - self.fill_indices, self.fill_indices] - tocheck[simple_pos] = self.simple_indices + tocheck = [fill_indices, complex_indices, + fill_indices, fill_indices] + tocheck[simple_pos] = simple_indices for index in product(*tocheck): index = tuple(i for i in index if not isskip(i)) - self._check_multi_index(self.a, index) - self._check_multi_index(self.b, index) + self._check_multi_index(a, index) + self._check_multi_index(b, index) # Check very simple item getting: - self._check_multi_index(self.a, (0, 0, 0, 0)) - self._check_multi_index(self.b, (0, 0, 0, 0)) + self._check_multi_index(a, (0, 0, 0, 0)) + self._check_multi_index(b, (0, 0, 0, 0)) # Also check (simple cases of) too many indices: - assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0)) - assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0) - assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0)) - assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0) + assert_raises(IndexError, a.__getitem__, (0, 0, 0, 0, 0)) + assert_raises(IndexError, a.__setitem__, (0, 0, 0, 0, 0), 0) + assert_raises(IndexError, a.__getitem__, (0, 0, [1], 0, 0)) + assert_raises(IndexError, a.__setitem__, (0, 0, [1], 0, 0), 0) def test_1d(self): a = np.arange(10) - for index in self.complex_indices: + complex_indices = self._create_complex_indices() + for index in complex_indices: self._check_single_index(a, index) + class TestFloatNonIntegerArgument: """ These test that ``TypeError`` is raised when you try to use diff --git a/numpy/_core/tests/test_numerictypes.py b/numpy/_core/tests/test_numerictypes.py index c9a2ac06472c..c6ecd5327850 100644 --- a/numpy/_core/tests/test_numerictypes.py +++ b/numpy/_core/tests/test_numerictypes.py @@ -347,17 +347,16 @@ def test_assign(self): class TestMultipleFields: - def setup_method(self): - self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8') - def _bad_call(self): - return self.ary['f0', 'f1'] + ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8') + return ary['f0', 'f1'] def test_no_tuple(self): assert_raises(IndexError, self._bad_call) def test_return(self): - res = self.ary[['f0', 'f2']].tolist() + ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8') + res = ary[['f0', 'f2']].tolist() assert_(res == [(1, 3), (5, 7)]) diff --git a/numpy/_core/tests/test_records.py 
b/numpy/_core/tests/test_records.py index b4b93aee4026..7ed6ea7687ff 100644 --- a/numpy/_core/tests/test_records.py +++ b/numpy/_core/tests/test_records.py @@ -359,26 +359,26 @@ def test_tofile_fromfile(self): class TestRecord: - def setup_method(self): - self.data = np.rec.fromrecords([(1, 2, 3), (4, 5, 6)], + def _create_data(self): + return np.rec.fromrecords([(1, 2, 3), (4, 5, 6)], dtype=[("col1", " Date: Fri, 22 Aug 2025 17:23:32 +0000 Subject: [PATCH 0347/1018] MAINT: Bump github/codeql-action from 3.29.10 to 3.29.11 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.10 to 3.29.11. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/96f518a34f7a870018057716cc4d7a5c014bd61c...3c3833e0f8c1c83d449a7478aa59c036a9165498) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.11 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index dce656f39b37..68aead6651e6 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@96f518a34f7a870018057716cc4d7a5c014bd61c # v3.29.10 + uses: github/codeql-action/init@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3.29.11 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@96f518a34f7a870018057716cc4d7a5c014bd61c # v3.29.10 + uses: github/codeql-action/autobuild@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3.29.11 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@96f518a34f7a870018057716cc4d7a5c014bd61c # v3.29.10 + uses: github/codeql-action/analyze@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3.29.11 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index cecb8d3eef4a..d61c6f1dd815 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@96f518a34f7a870018057716cc4d7a5c014bd61c # v2.1.27 + uses: github/codeql-action/upload-sarif@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v2.1.27 with: sarif_file: results.sarif From 0caeebfbb9b9195da81d8afa90f9881662c2dccf Mon Sep 17 00:00:00 2001 From: "Christine P. 
Chai" Date: Fri, 22 Aug 2025 21:10:49 -0700 Subject: [PATCH 0348/1018] DOC: Bold font and backquote don't mix together --- doc/source/building/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/building/index.rst b/doc/source/building/index.rst index d7baeaee9324..d027ecb0ee8f 100644 --- a/doc/source/building/index.rst +++ b/doc/source/building/index.rst @@ -473,7 +473,7 @@ interface is self-documenting, so please see ``spin --help`` and install"). Editable installs are supported. It is important to understand that **you - may use either an editable install or ``spin`` in a given repository clone, + may use either an editable install or** ``spin`` **in a given repository clone, but not both**. If you use editable installs, you have to use ``pytest`` and other development tools directly instead of using ``spin``. From 0eda83e198d61517b2e07df29d4e55e1c5048fb9 Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Fri, 22 Aug 2025 21:13:36 -0700 Subject: [PATCH 0349/1018] DOC: Correct typos in symbol formatting --- doc/source/user/numpy-for-matlab-users.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst index 9e8093b20f02..e05e123e224c 100644 --- a/doc/source/user/numpy-for-matlab-users.rst +++ b/doc/source/user/numpy-for-matlab-users.rst @@ -674,10 +674,10 @@ are only a handful of key differences between the two. - Operators ``*`` and ``@``, functions ``dot()``, and ``multiply()``: - - For ``array``, **``*`` means element-wise multiplication**, while - **``@`` means matrix multiplication**; they have associated functions + - For ``array``, ``*`` **means element-wise multiplication**, while + ``@`` **means matrix multiplication**; they have associated functions ``multiply()`` and ``dot()``. - - For ``matrix``, **``*`` means matrix multiplication**, and for + - For ``matrix``, ``*`` **means matrix multiplication**, and for element-wise multiplication one has to use the ``multiply()`` function. - Handling of vectors (one-dimensional arrays) From 6f7297004e29ec119d2b642a76018e069bc115c4 Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Fri, 22 Aug 2025 21:15:58 -0700 Subject: [PATCH 0350/1018] DOC: Correct a bold font typo [skip azp] [skip cirrus] [skip actions] --- doc/source/f2py/buildtools/distutils-to-meson.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/f2py/buildtools/distutils-to-meson.rst b/doc/source/f2py/buildtools/distutils-to-meson.rst index 32f2b599f6e5..585bfba57246 100644 --- a/doc/source/f2py/buildtools/distutils-to-meson.rst +++ b/doc/source/f2py/buildtools/distutils-to-meson.rst @@ -10,7 +10,7 @@ collects common workflows in both formats. .. note:: - This is a ****living**** document, `pull requests `_ are very welcome! + This is a **living** document, `pull requests `_ are very welcome! 
1.1 Baseline ~~~~~~~~~~~~ From ddb94ee4897166e244ef098aafcbade20858c9a2 Mon Sep 17 00:00:00 2001 From: Richard Smythe Date: Sat, 23 Aug 2025 09:22:02 +0100 Subject: [PATCH 0351/1018] MAINT: fix typo in cmds.py --- .spin/cmds.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.spin/cmds.py b/.spin/cmds.py index 66885de630e0..9490aca297d9 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -455,7 +455,7 @@ def bench(ctx, tests, compare, verbose, quick, commits, build_dir): ] + bench_args _run_asv(cmd) else: - # Ensure that we don't have uncommited changes + # Ensure that we don't have uncommitted changes commit_a, commit_b = [_commit_to_sha(c) for c in commits] if commit_b == 'HEAD' and _dirty_git_working_dir(): From 7f9339b1bcb216b4797c98f802cc8ff05d5640e8 Mon Sep 17 00:00:00 2001 From: zuhu2195 <56065368+zuhu2195@users.noreply.github.com> Date: Sat, 23 Aug 2025 16:45:45 +0530 Subject: [PATCH 0352/1018] MAINT: Eliminate ambiguous order of evaluation in the ratios of random variates. (#29598) In C, in an expression such as f1()/f2(), the order of evaluation of f1() and f2() is unspecified. When such a ratio was used in the C functions random_f and random_standard_cauchy, it meant the streams of random variates for the f and standard_cauchy distributions depended on how the compiler chose to order the evaluation. This could result in different streams of variates when numpy is compiled with different compilers. By evaluating the numerator and denominator in separate statements, the ambiguity is eliminated. Co-authored-by: Joren Hammudoglu --- numpy/random/src/distributions/distributions.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c index 5ff694b2cde9..2b7c69481064 100644 --- a/numpy/random/src/distributions/distributions.c +++ b/numpy/random/src/distributions/distributions.c @@ -468,12 +468,15 @@ double random_chisquare(bitgen_t *bitgen_state, double df) { } double random_f(bitgen_t *bitgen_state, double dfnum, double dfden) { - return ((random_chisquare(bitgen_state, dfnum) * dfden) / - (random_chisquare(bitgen_state, dfden) * dfnum)); + double subexpr1 = random_chisquare(bitgen_state, dfnum) * dfden; + double subexpr2 = random_chisquare(bitgen_state, dfden) * dfnum; + return subexpr1 / subexpr2; } double random_standard_cauchy(bitgen_t *bitgen_state) { - return random_standard_normal(bitgen_state) / random_standard_normal(bitgen_state); + double subexpr1 = random_standard_normal(bitgen_state); + double subexpr2 = random_standard_normal(bitgen_state); + return subexpr1 / subexpr2; } double random_pareto(bitgen_t *bitgen_state, double a) { From d5692ba8660ef664f51067b1f541ee7c14a58cff Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Mon, 25 Aug 2025 02:43:56 +0900 Subject: [PATCH 0353/1018] enh: extend coverage --- benchmarks/benchmarks/bench_lib.py | 45 +++++++++++++++++++++++------- 1 file changed, 35 insertions(+), 10 deletions(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index 0e60468308bb..738be22c54d6 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -1,5 +1,6 @@ """Benchmarks for `numpy.lib`.""" +import string import numpy as np @@ -119,37 +120,61 @@ def time_nanpercentile(self, array_size, percent_nans): class Unique(Benchmark): """Benchmark for np.unique with np.nan values.""" - param_names = ["array_size", "percent_nans"] + param_names = ["array_size", 
"percent_nans", "percent_unique_values", "dtype"] params = [ # sizes of the 1D arrays - [200, int(2e5)], + [200, int(2e5), int(1e9)], # percent of np.nan in arrays [0, 0.1, 2., 50., 90.], + # percent of unique values in arrays + [2., 50., 90.], + # dtypes of the arrays + [np.float64, np.complex128, np.dtypes.StringDType(na_object=np.nan)], ] - def setup(self, array_size, percent_nans): - np.random.seed(123) + def setup(self, array_size, percent_nans, percent_unique_values, dtype): + rng = np.random.default_rng(123) # produce a randomly shuffled array with the # approximate desired percentage np.nan content - base_array = np.random.uniform(size=array_size) + match dtype: + case np.float64: + base_array = rng.uniform(size=array_size, dtype=dtype) + case np.complex128: + base_array = np.array( + [ + complex(*rng.uniform(size=2, dtype=np.float64)) + for _ in range(array_size) + ], + dtype=dtype, + ) + case np.dtypes.StringDType: + chars = string.ascii_letters + string.digits + base_array = np.array( + [ + ''.join(rng.choice(list(chars), size=10)) + for _ in range(array_size) + ], + dtype=dtype, + ) + n_nan = int(percent_nans * array_size) nan_indices = np.random.choice(np.arange(array_size), size=n_nan) base_array[nan_indices] = np.nan self.arr = base_array - def time_unique_values(self, array_size, percent_nans): + def time_unique_values(self, array_size, percent_nans, dtype): np.unique(self.arr, return_index=False, return_inverse=False, return_counts=False) - def time_unique_counts(self, array_size, percent_nans): + def time_unique_counts(self, array_size, percent_nans, dtype): np.unique(self.arr, return_index=False, - return_inverse=False, return_counts=True) + return_inverse=False, return_counts=True,) - def time_unique_inverse(self, array_size, percent_nans): + def time_unique_inverse(self, array_size, percent_nans, dtype): np.unique(self.arr, return_index=False, return_inverse=True, return_counts=False) - def time_unique_all(self, array_size, percent_nans): + def time_unique_all(self, array_size, percent_nans, dtype): np.unique(self.arr, return_index=True, return_inverse=True, return_counts=True) From 723e2c6224ec4fd6705049b317047f516861cb40 Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Mon, 25 Aug 2025 13:37:06 +0900 Subject: [PATCH 0354/1018] enh: coverage --- benchmarks/benchmarks/bench_lib.py | 40 +++++++++++++++++------------- 1 file changed, 23 insertions(+), 17 deletions(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index 738be22c54d6..9d075b3fb017 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -123,11 +123,11 @@ class Unique(Benchmark): param_names = ["array_size", "percent_nans", "percent_unique_values", "dtype"] params = [ # sizes of the 1D arrays - [200, int(2e5), int(1e9)], + [200, int(2e5), int(2e8)], # percent of np.nan in arrays [0, 0.1, 2., 50., 90.], # percent of unique values in arrays - [2., 50., 90.], + [0.002, 0.2, 20.], # dtypes of the arrays [np.float64, np.complex128, np.dtypes.StringDType(na_object=np.nan)], ] @@ -136,45 +136,51 @@ def setup(self, array_size, percent_nans, percent_unique_values, dtype): rng = np.random.default_rng(123) # produce a randomly shuffled array with the # approximate desired percentage np.nan content + unique_values_size = max(int(percent_unique_values / 100. 
* array_size), 2) match dtype: case np.float64: - base_array = rng.uniform(size=array_size, dtype=dtype) + unique_array = rng.uniform(size=unique_values_size) case np.complex128: - base_array = np.array( + unique_array = np.array( [ - complex(*rng.uniform(size=2, dtype=np.float64)) - for _ in range(array_size) + complex(*rng.uniform(size=2)) + for _ in range(unique_values_size) ], dtype=dtype, ) - case np.dtypes.StringDType: + case np.dtypes.StringDType(): chars = string.ascii_letters + string.digits - base_array = np.array( + unique_array = np.array( [ - ''.join(rng.choice(list(chars), size=10)) - for _ in range(array_size) + ''.join(rng.choice(list(chars), size=rng.integers(5, 20))) + for _ in range(unique_values_size) ], dtype=dtype, ) - - n_nan = int(percent_nans * array_size) - nan_indices = np.random.choice(np.arange(array_size), size=n_nan) + case _: + raise ValueError(f"Unsupported dtype {dtype}") + + base_array = np.resize(unique_array, array_size) + rng.shuffle(base_array) + # insert nans in random places + n_nan = int(percent_nans / 100. * array_size) + nan_indices = rng.choice(np.arange(array_size), size=n_nan, replace=False) base_array[nan_indices] = np.nan self.arr = base_array - def time_unique_values(self, array_size, percent_nans, dtype): + def time_unique_values(self, array_size, percent_nans, percent_unique_values, dtype): np.unique(self.arr, return_index=False, return_inverse=False, return_counts=False) - def time_unique_counts(self, array_size, percent_nans, dtype): + def time_unique_counts(self, array_size, percent_nans, percent_unique_values, dtype): np.unique(self.arr, return_index=False, return_inverse=False, return_counts=True,) - def time_unique_inverse(self, array_size, percent_nans, dtype): + def time_unique_inverse(self, array_size, percent_nans, percent_unique_values, dtype): np.unique(self.arr, return_index=False, return_inverse=True, return_counts=False) - def time_unique_all(self, array_size, percent_nans, dtype): + def time_unique_all(self, array_size, percent_nans, percent_unique_values, dtype): np.unique(self.arr, return_index=True, return_inverse=True, return_counts=True) From 45c9779b0dc1f3304435454ec65e5508b8f6005d Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Mon, 25 Aug 2025 13:42:24 +0900 Subject: [PATCH 0355/1018] fix: lint --- benchmarks/benchmarks/bench_lib.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index 9d075b3fb017..7a9cdb1c1d2c 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -168,19 +168,23 @@ def setup(self, array_size, percent_nans, percent_unique_values, dtype): base_array[nan_indices] = np.nan self.arr = base_array - def time_unique_values(self, array_size, percent_nans, percent_unique_values, dtype): + def time_unique_values(self, array_size, percent_nans, + percent_unique_values, dtype): np.unique(self.arr, return_index=False, return_inverse=False, return_counts=False) - def time_unique_counts(self, array_size, percent_nans, percent_unique_values, dtype): + def time_unique_counts(self, array_size, percent_nans, + percent_unique_values, dtype): np.unique(self.arr, return_index=False, return_inverse=False, return_counts=True,) - def time_unique_inverse(self, array_size, percent_nans, percent_unique_values, dtype): + def time_unique_inverse(self, array_size, percent_nans, + percent_unique_values, dtype): np.unique(self.arr, return_index=False, return_inverse=True, 
return_counts=False) - def time_unique_all(self, array_size, percent_nans, percent_unique_values, dtype): + def time_unique_all(self, array_size, percent_nans, + percent_unique_values, dtype): np.unique(self.arr, return_index=True, return_inverse=True, return_counts=True) From eba1760b125e2607555c9aa4943046572bd3a071 Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Mon, 25 Aug 2025 14:48:20 +0900 Subject: [PATCH 0356/1018] fix: change array_size --- benchmarks/benchmarks/bench_lib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index 7a9cdb1c1d2c..0b755f61adfd 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -123,7 +123,7 @@ class Unique(Benchmark): param_names = ["array_size", "percent_nans", "percent_unique_values", "dtype"] params = [ # sizes of the 1D arrays - [200, int(2e5), int(2e8)], + [200, int(2e5), int(2e7)], # percent of np.nan in arrays [0, 0.1, 2., 50., 90.], # percent of unique values in arrays From 24459e7b4a9997e542c130b09b52720e4989ee68 Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Mon, 25 Aug 2025 15:46:01 +0900 Subject: [PATCH 0357/1018] fix: change length of string --- benchmarks/benchmarks/bench_lib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index 0b755f61adfd..c2c4bc05b610 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -152,7 +152,7 @@ def setup(self, array_size, percent_nans, percent_unique_values, dtype): chars = string.ascii_letters + string.digits unique_array = np.array( [ - ''.join(rng.choice(list(chars), size=rng.integers(5, 20))) + ''.join(rng.choice(list(chars), size=rng.integers(5, 10))) for _ in range(unique_values_size) ], dtype=dtype, From 85a9c102894a800244ed912b7e52f48284c4ba73 Mon Sep 17 00:00:00 2001 From: mattip Date: Mon, 25 Aug 2025 10:44:41 +0300 Subject: [PATCH 0358/1018] DOC: more accurately describe 'cache' [skip actions][skip azp][skip cirrus] --- numpy/lib/_function_base_impl.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 0379c3c60a1d..47136b93e86d 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -2331,8 +2331,8 @@ class vectorize: passed directly to `pyfunc` unmodified. cache : bool, optional - If `True`, then cache the first function call that determines the number - of outputs if `otypes` and `signature` are not provided. + If neither `otypes` nor `signature` are provided, and `cache` is ``True``, then + cache the number of outputs. signature : string, optional Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for @@ -2356,12 +2356,12 @@ class vectorize: The `vectorize` function is provided primarily for convenience, not for performance. The implementation is essentially a for loop. - If `otypes` and `signature` are not specified, then a call to the function with the - first argument will be used to determine the number of outputs. The - results of this call will be cached if `cache` is `True` to prevent - calling the function twice. However, to implement the cache, the - original function must be wrapped which will slow down subsequent - calls, so only do this if your function is expensive. 
+ If neither `otypes` nor `signature` are specified, then a call to the function with + the first argument will be used to determine the number of outputs. The results of + this call will be cached if `cache` is `True` to prevent calling the function + twice. However, to implement the cache, the original function must be wrapped + which will slow down subsequent calls, so only do this if your function is + expensive. The new keyword argument interface and `excluded` argument support further degrades performance. From c9d6050f5dd12d7bd61a3345334e080689fdd891 Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Mon, 25 Aug 2025 17:19:39 +0900 Subject: [PATCH 0359/1018] fix: change length of string --- benchmarks/benchmarks/bench_lib.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index c2c4bc05b610..af7d65da542c 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -127,7 +127,7 @@ class Unique(Benchmark): # percent of np.nan in arrays [0, 0.1, 2., 50., 90.], # percent of unique values in arrays - [0.002, 0.2, 20.], + [0.001, 0.1, 10.], # dtypes of the arrays [np.float64, np.complex128, np.dtypes.StringDType(na_object=np.nan)], ] @@ -152,7 +152,7 @@ def setup(self, array_size, percent_nans, percent_unique_values, dtype): chars = string.ascii_letters + string.digits unique_array = np.array( [ - ''.join(rng.choice(list(chars), size=rng.integers(5, 10))) + ''.join(rng.choice(list(chars), size=rng.integers(4, 8))) for _ in range(unique_values_size) ], dtype=dtype, From bbb244233a7d6f6cd8e4afc370d437cef67b6cfe Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Mon, 25 Aug 2025 18:13:00 +0900 Subject: [PATCH 0360/1018] fix: change parameter --- benchmarks/benchmarks/bench_lib.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index af7d65da542c..f7c21d6b1b3e 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -123,11 +123,11 @@ class Unique(Benchmark): param_names = ["array_size", "percent_nans", "percent_unique_values", "dtype"] params = [ # sizes of the 1D arrays - [200, int(2e5), int(2e7)], + [500, int(5e4), int(5e6)], # percent of np.nan in arrays [0, 0.1, 2., 50., 90.], # percent of unique values in arrays - [0.001, 0.1, 10.], + [0.02, 0.2, 20.], # dtypes of the arrays [np.float64, np.complex128, np.dtypes.StringDType(na_object=np.nan)], ] @@ -139,7 +139,7 @@ def setup(self, array_size, percent_nans, percent_unique_values, dtype): unique_values_size = max(int(percent_unique_values / 100. 
* array_size), 2) match dtype: case np.float64: - unique_array = rng.uniform(size=unique_values_size) + unique_array = rng.uniform(size=unique_values_size).astype(dtype) case np.complex128: unique_array = np.array( [ From 3036cc749c74973987880353e4e9b35de9598f33 Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Mon, 25 Aug 2025 18:57:28 +0900 Subject: [PATCH 0361/1018] fix: change parameter --- benchmarks/benchmarks/bench_lib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index f7c21d6b1b3e..11076b3de134 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -123,7 +123,7 @@ class Unique(Benchmark): param_names = ["array_size", "percent_nans", "percent_unique_values", "dtype"] params = [ # sizes of the 1D arrays - [500, int(5e4), int(5e6)], + [200, int(5e4), int(1e7)], # percent of np.nan in arrays [0, 0.1, 2., 50., 90.], # percent of unique values in arrays From 4754c87b9b2ae004797ace59c97c70bd1c73d5cc Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Mon, 25 Aug 2025 20:16:47 +0900 Subject: [PATCH 0362/1018] fix: change parameter --- benchmarks/benchmarks/bench_lib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index 11076b3de134..e816017496a4 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -127,7 +127,7 @@ class Unique(Benchmark): # percent of np.nan in arrays [0, 0.1, 2., 50., 90.], # percent of unique values in arrays - [0.02, 0.2, 20.], + [0.02, 0.5, 10.], # dtypes of the arrays [np.float64, np.complex128, np.dtypes.StringDType(na_object=np.nan)], ] From 47430c795c406154eda575bcad706875b595acff Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Mon, 25 Aug 2025 17:05:19 -0400 Subject: [PATCH 0363/1018] TST: Replace xunit setup with methods (#29616) * TST: Replace xunit setup with methods * STY: Method rename, code cleanup --- numpy/_core/tests/test_numeric.py | 379 ++++++++++++++-------------- numpy/_core/tests/test_overrides.py | 59 +++-- 2 files changed, 218 insertions(+), 220 deletions(-) diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 560d7cf71543..db5269c507d5 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -747,27 +747,29 @@ def test_bitwise_xor(self): class TestBoolArray: - def setup_method(self): + def _create_bool_arrays(self): # offset for simd tests - self.t = np.array([True] * 41, dtype=bool)[1::] - self.f = np.array([False] * 41, dtype=bool)[1::] - self.o = np.array([False] * 42, dtype=bool)[2::] - self.nm = self.f.copy() - self.im = self.t.copy() - self.nm[3] = True - self.nm[-2] = True - self.im[3] = False - self.im[-2] = False + t = np.array([True] * 41, dtype=bool)[1::] + f = np.array([False] * 41, dtype=bool)[1::] + o = np.array([False] * 42, dtype=bool)[2::] + nm = f.copy() + im = t.copy() + nm[3] = True + nm[-2] = True + im[3] = False + im[-2] = False + return t, f, o, nm, im def test_all_any(self): - assert_(self.t.all()) - assert_(self.t.any()) - assert_(not self.f.all()) - assert_(not self.f.any()) - assert_(self.nm.any()) - assert_(self.im.any()) - assert_(not self.nm.all()) - assert_(not self.im.all()) + t, f, _, nm, im = self._create_bool_arrays() + assert_(t.all()) + assert_(t.any()) + assert_(not f.all()) + assert_(not f.any()) + assert_(nm.any()) + 
assert_(im.any()) + assert_(not nm.all()) + assert_(not im.all()) # check bad element in all positions for i in range(256 - 7): d = np.array([False] * 256, dtype=bool)[7::] @@ -787,118 +789,103 @@ def test_all_any(self): assert_(not np.all(e), msg=f"{i!r}") def test_logical_not_abs(self): - assert_array_equal(~self.t, self.f) - assert_array_equal(np.abs(~self.t), self.f) - assert_array_equal(np.abs(~self.f), self.t) - assert_array_equal(np.abs(self.f), self.f) - assert_array_equal(~np.abs(self.f), self.t) - assert_array_equal(~np.abs(self.t), self.f) - assert_array_equal(np.abs(~self.nm), self.im) - np.logical_not(self.t, out=self.o) - assert_array_equal(self.o, self.f) - np.abs(self.t, out=self.o) - assert_array_equal(self.o, self.t) + t, f, o, nm, im = self._create_bool_arrays() + assert_array_equal(~t, f) + assert_array_equal(np.abs(~t), f) + assert_array_equal(np.abs(~f), t) + assert_array_equal(np.abs(f), f) + assert_array_equal(~np.abs(f), t) + assert_array_equal(~np.abs(t), f) + assert_array_equal(np.abs(~nm), im) + np.logical_not(t, out=o) + assert_array_equal(o, f) + np.abs(t, out=o) + assert_array_equal(o, t) def test_logical_and_or_xor(self): - assert_array_equal(self.t | self.t, self.t) - assert_array_equal(self.f | self.f, self.f) - assert_array_equal(self.t | self.f, self.t) - assert_array_equal(self.f | self.t, self.t) - np.logical_or(self.t, self.t, out=self.o) - assert_array_equal(self.o, self.t) - assert_array_equal(self.t & self.t, self.t) - assert_array_equal(self.f & self.f, self.f) - assert_array_equal(self.t & self.f, self.f) - assert_array_equal(self.f & self.t, self.f) - np.logical_and(self.t, self.t, out=self.o) - assert_array_equal(self.o, self.t) - assert_array_equal(self.t ^ self.t, self.f) - assert_array_equal(self.f ^ self.f, self.f) - assert_array_equal(self.t ^ self.f, self.t) - assert_array_equal(self.f ^ self.t, self.t) - np.logical_xor(self.t, self.t, out=self.o) - assert_array_equal(self.o, self.f) - - assert_array_equal(self.nm & self.t, self.nm) - assert_array_equal(self.im & self.f, False) - assert_array_equal(self.nm & True, self.nm) - assert_array_equal(self.im & False, self.f) - assert_array_equal(self.nm | self.t, self.t) - assert_array_equal(self.im | self.f, self.im) - assert_array_equal(self.nm | True, self.t) - assert_array_equal(self.im | False, self.im) - assert_array_equal(self.nm ^ self.t, self.im) - assert_array_equal(self.im ^ self.f, self.im) - assert_array_equal(self.nm ^ True, self.im) - assert_array_equal(self.im ^ False, self.im) + t, f, o, nm, im = self._create_bool_arrays() + assert_array_equal(t | t, t) + assert_array_equal(f | f, f) + assert_array_equal(t | f, t) + assert_array_equal(f | t, t) + np.logical_or(t, t, out=o) + assert_array_equal(o, t) + assert_array_equal(t & t, t) + assert_array_equal(f & f, f) + assert_array_equal(t & f, f) + assert_array_equal(f & t, f) + np.logical_and(t, t, out=o) + assert_array_equal(o, t) + assert_array_equal(t ^ t, f) + assert_array_equal(f ^ f, f) + assert_array_equal(t ^ f, t) + assert_array_equal(f ^ t, t) + np.logical_xor(t, t, out=o) + assert_array_equal(o, f) + + assert_array_equal(nm & t, nm) + assert_array_equal(im & f, False) + assert_array_equal(nm & True, nm) + assert_array_equal(im & False, f) + assert_array_equal(nm | t, t) + assert_array_equal(im | f, im) + assert_array_equal(nm | True, t) + assert_array_equal(im | False, im) + assert_array_equal(nm ^ t, im) + assert_array_equal(im ^ f, im) + assert_array_equal(nm ^ True, im) + assert_array_equal(im ^ False, im) class 
TestBoolCmp: - def setup_method(self): - self.f = np.ones(256, dtype=np.float32) - self.ef = np.ones(self.f.size, dtype=bool) - self.d = np.ones(128, dtype=np.float64) - self.ed = np.ones(self.d.size, dtype=bool) + def _create_data(self, dtype, size): + # generate data using given dtype and num for size of array + a = np.ones(size, dtype=dtype) + e = np.ones(a.size, dtype=bool) # generate values for all permutation of 256bit simd vectors s = 0 - for i in range(32): - self.f[s:s + 8] = [i & 2**x for x in range(8)] - self.ef[s:s + 8] = [(i & 2**x) != 0 for x in range(8)] - s += 8 - s = 0 - for i in range(16): - self.d[s:s + 4] = [i & 2**x for x in range(4)] - self.ed[s:s + 4] = [(i & 2**x) != 0 for x in range(4)] - s += 4 - - self.nf = self.f.copy() - self.nd = self.d.copy() - self.nf[self.ef] = np.nan - self.nd[self.ed] = np.nan - - self.inff = self.f.copy() - self.infd = self.d.copy() - self.inff[::3][self.ef[::3]] = np.inf - self.infd[::3][self.ed[::3]] = np.inf - self.inff[1::3][self.ef[1::3]] = -np.inf - self.infd[1::3][self.ed[1::3]] = -np.inf - self.inff[2::3][self.ef[2::3]] = np.nan - self.infd[2::3][self.ed[2::3]] = np.nan - self.efnonan = self.ef.copy() - self.efnonan[2::3] = False - self.ednonan = self.ed.copy() - self.ednonan[2::3] = False - - self.signf = self.f.copy() - self.signd = self.d.copy() - self.signf[self.ef] *= -1. - self.signd[self.ed] *= -1. - self.signf[1::6][self.ef[1::6]] = -np.inf - self.signd[1::6][self.ed[1::6]] = -np.inf + r = int(size / 32) + for i in range(int(size / 8)): + a[s:s + r] = [i & 2**x for x in range(r)] + e[s:s + r] = [(i & 2**x) != 0 for x in range(r)] + s += r + n = a.copy() + n[e] = np.nan + + inf = a.copy() + inf[::3][e[::3]] = np.inf + inf[1::3][e[1::3]] = -np.inf + inf[2::3][e[2::3]] = np.nan + enonan = e.copy() + enonan[2::3] = False + + sign = a.copy() + sign[e] *= -1. + sign[1::6][e[1::6]] = -np.inf # On RISC-V, many operations that produce NaNs, such as converting # a -NaN from f64 to f32, return a canonical NaN. The canonical # NaNs are always positive. See section 11.3 NaN Generation and # Propagation of the RISC-V Unprivileged ISA for more details. # We disable the float32 sign test on riscv64 for -np.nan as the sign # of the NaN will be lost when it's converted to a float32. - if platform.machine() != 'riscv64': - self.signf[3::6][self.ef[3::6]] = -np.nan - self.signd[3::6][self.ed[3::6]] = -np.nan - self.signf[4::6][self.ef[4::6]] = -0. - self.signd[4::6][self.ed[4::6]] = -0. + if not (dtype == np.float32 and platform.machine() == 'riscv64'): + sign[3::6][e[3::6]] = -np.nan + sign[4::6][e[4::6]] = -0. 
+ return a, e, n, inf, enonan, sign def test_float(self): # offset for alignment test + f, ef, nf, inff, efnonan, signf = self._create_data(np.float32, 256) for i in range(4): - assert_array_equal(self.f[i:] > 0, self.ef[i:]) - assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:]) - assert_array_equal(self.f[i:] == 0, ~self.ef[i:]) - assert_array_equal(-self.f[i:] < 0, self.ef[i:]) - assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:]) - r = self.f[i:] != 0 - assert_array_equal(r, self.ef[i:]) - r2 = self.f[i:] != np.zeros_like(self.f[i:]) - r3 = 0 != self.f[i:] + assert_array_equal(f[i:] > 0, ef[i:]) + assert_array_equal(f[i:] - 1 >= 0, ef[i:]) + assert_array_equal(f[i:] == 0, ~ef[i:]) + assert_array_equal(-f[i:] < 0, ef[i:]) + assert_array_equal(-f[i:] + 1 <= 0, ef[i:]) + r = f[i:] != 0 + assert_array_equal(r, ef[i:]) + r2 = f[i:] != np.zeros_like(f[i:]) + r3 = 0 != f[i:] assert_array_equal(r, r2) assert_array_equal(r, r3) # check bool == 0x1 @@ -907,24 +894,25 @@ def test_float(self): assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) # isnan on amd64 takes the same code path - assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:]) - assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:]) - assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:]) - assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:]) - assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:]) + assert_array_equal(np.isnan(nf[i:]), ef[i:]) + assert_array_equal(np.isfinite(nf[i:]), ~ef[i:]) + assert_array_equal(np.isfinite(inff[i:]), ~ef[i:]) + assert_array_equal(np.isinf(inff[i:]), efnonan[i:]) + assert_array_equal(np.signbit(signf[i:]), ef[i:]) def test_double(self): # offset for alignment test + d, ed, nd, infd, ednonan, signd = self._create_data(np.float64, 128) for i in range(2): - assert_array_equal(self.d[i:] > 0, self.ed[i:]) - assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:]) - assert_array_equal(self.d[i:] == 0, ~self.ed[i:]) - assert_array_equal(-self.d[i:] < 0, self.ed[i:]) - assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:]) - r = self.d[i:] != 0 - assert_array_equal(r, self.ed[i:]) - r2 = self.d[i:] != np.zeros_like(self.d[i:]) - r3 = 0 != self.d[i:] + assert_array_equal(d[i:] > 0, ed[i:]) + assert_array_equal(d[i:] - 1 >= 0, ed[i:]) + assert_array_equal(d[i:] == 0, ~ed[i:]) + assert_array_equal(-d[i:] < 0, ed[i:]) + assert_array_equal(-d[i:] + 1 <= 0, ed[i:]) + r = d[i:] != 0 + assert_array_equal(r, ed[i:]) + r2 = d[i:] != np.zeros_like(d[i:]) + r3 = 0 != d[i:] assert_array_equal(r, r2) assert_array_equal(r, r3) # check bool == 0x1 @@ -933,11 +921,11 @@ def test_double(self): assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) # isnan on amd64 takes the same code path - assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:]) - assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:]) - assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:]) - assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:]) - assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:]) + assert_array_equal(np.isnan(nd[i:]), ed[i:]) + assert_array_equal(np.isfinite(nd[i:]), ~ed[i:]) + assert_array_equal(np.isfinite(infd[i:]), ~ed[i:]) + assert_array_equal(np.isinf(infd[i:]), ednonan[i:]) + assert_array_equal(np.signbit(signd[i:]), ed[i:]) class TestSeterr: @@ -1646,6 +1634,7 @@ def test_failed_itemsetting(self): with pytest.raises(ValueError): np.fromiter(iterable, dtype=np.dtype((int, 2))) + class TestNonzero: def test_nonzero_trivial(self): assert_equal(np.count_nonzero(np.array([])), 0) 
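A minimal sketch of the pattern this commit applies throughout: pytest ``setup_method`` state is replaced by per-test factory helpers, so each test builds exactly the fixture it needs instead of sharing mutable state through ``self`` (illustrative class and data, not code from this diff):

    # Before: pytest runs setup_method before every test and the fixture
    # travels implicitly through instance attributes.
    class TestWithSetup:
        def setup_method(self):
            self.data = [1, 2, 3]

        def test_sum(self):
            assert sum(self.data) == 6

    # After: the test constructs its own fixture via an explicit helper,
    # so nothing leaks between tests and the dependency is visible at the
    # call site.
    class TestWithHelper:
        def _create_data(self):
            return [1, 2, 3]

        def test_sum(self):
            data = self._create_data()
            assert sum(data) == 6

A side effect of the helper style is that a test which does not need a given fixture never pays for building it, which matters for expensive arrays like the ones constructed above.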
@@ -2315,9 +2304,8 @@ def assert_array_strict_equal(x, y): class TestClip: - def setup_method(self): - self.nr = 5 - self.nc = 3 + nr = 5 + nc = 3 def fastclip(self, a, m, M, out=None, **kwargs): return a.clip(m, M, out=out, **kwargs) @@ -2950,6 +2938,7 @@ def test_out_of_bound_pyints(self, dtype, min, max): if max is not None: assert (c <= max).all() + class TestAllclose: rtol = 1e-5 atol = 1e-8 @@ -3243,45 +3232,50 @@ def test_tol_warnings(self): class TestStdVar: - def setup_method(self): - self.A = np.array([1, -1, 1, -1]) - self.real_var = 1 + def _create_data(self): + A = np.array([1, -1, 1, -1]) + real_var = 1 + return A, real_var def test_basic(self): - assert_almost_equal(np.var(self.A), self.real_var) - assert_almost_equal(np.std(self.A)**2, self.real_var) + A, real_var = self._create_data() + assert_almost_equal(np.var(A), real_var) + assert_almost_equal(np.std(A)**2, real_var) def test_scalars(self): assert_equal(np.var(1), 0) assert_equal(np.std(1), 0) def test_ddof1(self): - assert_almost_equal(np.var(self.A, ddof=1), - self.real_var * len(self.A) / (len(self.A) - 1)) - assert_almost_equal(np.std(self.A, ddof=1)**2, - self.real_var * len(self.A) / (len(self.A) - 1)) + A, real_var = self._create_data() + assert_almost_equal(np.var(A, ddof=1), + real_var * len(A) / (len(A) - 1)) + assert_almost_equal(np.std(A, ddof=1)**2, + real_var * len(A) / (len(A) - 1)) def test_ddof2(self): - assert_almost_equal(np.var(self.A, ddof=2), - self.real_var * len(self.A) / (len(self.A) - 2)) - assert_almost_equal(np.std(self.A, ddof=2)**2, - self.real_var * len(self.A) / (len(self.A) - 2)) + A, real_var = self._create_data() + assert_almost_equal(np.var(A, ddof=2), + real_var * len(A) / (len(A) - 2)) + assert_almost_equal(np.std(A, ddof=2)**2, + real_var * len(A) / (len(A) - 2)) def test_correction(self): + A, _ = self._create_data() assert_almost_equal( - np.var(self.A, correction=1), np.var(self.A, ddof=1) + np.var(A, correction=1), np.var(A, ddof=1) ) assert_almost_equal( - np.std(self.A, correction=1), np.std(self.A, ddof=1) + np.std(A, correction=1), np.std(A, ddof=1) ) err_msg = "ddof and correction can't be provided simultaneously." with assert_raises_regex(ValueError, err_msg): - np.var(self.A, ddof=1, correction=0) + np.var(A, ddof=1, correction=0) with assert_raises_regex(ValueError, err_msg): - np.std(self.A, ddof=1, correction=1) + np.std(A, ddof=1, correction=1) def test_out_scalar(self): d = np.arange(10) @@ -3310,26 +3304,22 @@ def test_scalars(self): class TestCreationFuncs: - # Test ones, zeros, empty and full. 
- - def setup_method(self): - dtypes = {np.dtype(tp) for tp in itertools.chain(*sctypes.values())} - # void, bytes, str - variable_sized = {tp for tp in dtypes if tp.str.endswith('0')} + def check_function(self, func, fill_value=None): + dtypes_info = {np.dtype(tp) for tp in itertools.chain(*sctypes.values())} keyfunc = lambda dtype: dtype.str - self.dtypes = sorted(dtypes - variable_sized | + variable_sized = {tp for tp in dtypes_info if tp.str.endswith('0')} + dtypes = sorted(dtypes_info - variable_sized | {np.dtype(tp.str.replace("0", str(i))) for tp in variable_sized for i in range(1, 10)}, key=keyfunc) - self.dtypes += [type(dt) for dt in sorted(dtypes, key=keyfunc)] - self.orders = {'C': 'c_contiguous', 'F': 'f_contiguous'} - self.ndims = 10 + dtypes += [type(dt) for dt in sorted(dtypes_info, key=keyfunc)] + orders = {'C': 'c_contiguous', 'F': 'f_contiguous'} + ndims = 10 - def check_function(self, func, fill_value=None): par = ((0, 1, 2), - range(self.ndims), - self.orders, - self.dtypes) + range(ndims), + orders, + dtypes) fill_kwarg = {} if fill_value is not None: fill_kwarg = {'fill_value': fill_value} @@ -3355,7 +3345,7 @@ def check_function(self, func, fill_value=None): assert_equal(arr.dtype, np.dtype(dtype_str)) else: assert_equal(arr.dtype, np.dtype(dtype.type)) - assert_(getattr(arr.flags, self.orders[order])) + assert_(getattr(arr.flags, orders[order])) if fill_value is not None: if arr.dtype.str.startswith('|S'): @@ -3395,32 +3385,6 @@ def test_for_reference_leak(self): class TestLikeFuncs: '''Test ones_like, zeros_like, empty_like and full_like''' - def setup_method(self): - self.data = [ - # Array scalars - (np.array(3.), None), - (np.array(3), 'f8'), - # 1D arrays - (np.arange(6, dtype='f4'), None), - (np.arange(6), 'c16'), - # 2D C-layout arrays - (np.arange(6).reshape(2, 3), None), - (np.arange(6).reshape(3, 2), 'i1'), - # 2D F-layout arrays - (np.arange(6).reshape((2, 3), order='F'), None), - (np.arange(6).reshape((3, 2), order='F'), 'i1'), - # 3D C-layout arrays - (np.arange(24).reshape(2, 3, 4), None), - (np.arange(24).reshape(4, 3, 2), 'f4'), - # 3D F-layout arrays - (np.arange(24).reshape((2, 3, 4), order='F'), None), - (np.arange(24).reshape((4, 3, 2), order='F'), 'f4'), - # 3D non-C/F-layout arrays - (np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None), - (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'), - ] - self.shapes = [(), (5,), (5, 6,), (5, 6, 7,)] - def compare_array_value(self, dz, value, fill_value): if value is not None: if fill_value: @@ -3433,11 +3397,36 @@ def compare_array_value(self, dz, value, fill_value): assert_(np.all(dz == value)) def check_like_function(self, like_function, value, fill_value=False): + data = [ + # Array scalars + (np.array(3.), None), + (np.array(3), 'f8'), + # 1D arrays + (np.arange(6, dtype='f4'), None), + (np.arange(6), 'c16'), + # 2D C-layout arrays + (np.arange(6).reshape(2, 3), None), + (np.arange(6).reshape(3, 2), 'i1'), + # 2D F-layout arrays + (np.arange(6).reshape((2, 3), order='F'), None), + (np.arange(6).reshape((3, 2), order='F'), 'i1'), + # 3D C-layout arrays + (np.arange(24).reshape(2, 3, 4), None), + (np.arange(24).reshape(4, 3, 2), 'f4'), + # 3D F-layout arrays + (np.arange(24).reshape((2, 3, 4), order='F'), None), + (np.arange(24).reshape((4, 3, 2), order='F'), 'f4'), + # 3D non-C/F-layout arrays + (np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None), + (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'), + ] + shapes = [(), (5,), (5, 6,), (5, 6, 7,)] + if fill_value: fill_kwarg = {'fill_value': 
value} else: fill_kwarg = {} - for d, dtype in self.data: + for d, dtype in data: # default (K) order, dtype dz = like_function(d, dtype=dtype, **fill_kwarg) assert_equal(dz.shape, d.shape) @@ -3485,7 +3474,7 @@ def check_like_function(self, like_function, value, fill_value=False): self.compare_array_value(dz, value, fill_value) # Test the 'shape' parameter - for s in self.shapes: + for s in shapes: for o in 'CFA': sz = like_function(d, dtype=dtype, shape=s, order=o, **fill_kwarg) diff --git a/numpy/_core/tests/test_overrides.py b/numpy/_core/tests/test_overrides.py index b0d73375ed10..ebcf2f0ce112 100644 --- a/numpy/_core/tests/test_overrides.py +++ b/numpy/_core/tests/test_overrides.py @@ -550,7 +550,7 @@ def __array_function__(self, func, types, args, kwargs): class TestArrayLike: - def setup_method(self): + def _create_MyArray(self): class MyArray: def __init__(self, function=None): self.function = function @@ -563,20 +563,22 @@ def __array_function__(self, func, types, args, kwargs): return NotImplemented return my_func(*args, **kwargs) - self.MyArray = MyArray + return MyArray + def _create_MyNoArrayFunctionArray(self): class MyNoArrayFunctionArray: def __init__(self, function=None): self.function = function - self.MyNoArrayFunctionArray = MyNoArrayFunctionArray + return MyNoArrayFunctionArray + def _create_MySubclass(self): class MySubclass(np.ndarray): def __array_function__(self, func, types, args, kwargs): result = super().__array_function__(func, types, args, kwargs) return result.view(self.__class__) - self.MySubclass = MySubclass + return MySubclass def add_method(self, name, arr_class, enable_value_error=False): def _definition(*args, **kwargs): @@ -593,9 +595,10 @@ def func_args(*args, **kwargs): return args, kwargs def test_array_like_not_implemented(self): - self.add_method('array', self.MyArray) + MyArray = self._create_MyArray() + self.add_method('array', MyArray) - ref = self.MyArray.array() + ref = MyArray.array() with assert_raises_regex(TypeError, 'no implementation found'): array_like = np.asarray(1, like=ref) @@ -646,15 +649,16 @@ def test_nep35_functions_as_array_functions(self,): @pytest.mark.parametrize('function, args, kwargs', _array_tests) @pytest.mark.parametrize('numpy_ref', [True, False]) def test_array_like(self, function, args, kwargs, numpy_ref): - self.add_method('array', self.MyArray) - self.add_method(function, self.MyArray) + MyArray = self._create_MyArray() + self.add_method('array', MyArray) + self.add_method(function, MyArray) np_func = getattr(np, function) - my_func = getattr(self.MyArray, function) + my_func = getattr(MyArray, function) if numpy_ref is True: ref = np.array(1) else: - ref = self.MyArray.array() + ref = MyArray.array() like_args = tuple(a() if callable(a) else a for a in args) array_like = np_func(*like_args, **kwargs, like=ref) @@ -672,19 +676,20 @@ def test_array_like(self, function, args, kwargs, numpy_ref): assert_equal(array_like, np_arr) else: - assert type(array_like) is self.MyArray + assert type(array_like) is MyArray assert array_like.function is my_func @pytest.mark.parametrize('function, args, kwargs', _array_tests) @pytest.mark.parametrize('ref', [1, [1], "MyNoArrayFunctionArray"]) def test_no_array_function_like(self, function, args, kwargs, ref): - self.add_method('array', self.MyNoArrayFunctionArray) - self.add_method(function, self.MyNoArrayFunctionArray) + MyNoArrayFunctionArray = self._create_MyNoArrayFunctionArray() + self.add_method('array', MyNoArrayFunctionArray) + self.add_method(function, 
MyNoArrayFunctionArray) np_func = getattr(np, function) # Instantiate ref if it's the MyNoArrayFunctionArray class if ref == "MyNoArrayFunctionArray": - ref = self.MyNoArrayFunctionArray.array() + ref = MyNoArrayFunctionArray.array() like_args = tuple(a() if callable(a) else a for a in args) @@ -694,11 +699,12 @@ def test_no_array_function_like(self, function, args, kwargs, ref): @pytest.mark.parametrize('function, args, kwargs', _array_tests) def test_subclass(self, function, args, kwargs): - ref = np.array(1).view(self.MySubclass) + MySubclass = self._create_MySubclass() + ref = np.array(1).view(MySubclass) np_func = getattr(np, function) like_args = tuple(a() if callable(a) else a for a in args) array_like = np_func(*like_args, **kwargs, like=ref) - assert type(array_like) is self.MySubclass + assert type(array_like) is MySubclass if np_func is np.empty: return np_args = tuple(a() if callable(a) else a for a in args) @@ -707,13 +713,14 @@ def test_subclass(self, function, args, kwargs): @pytest.mark.parametrize('numpy_ref', [True, False]) def test_array_like_fromfile(self, numpy_ref): - self.add_method('array', self.MyArray) - self.add_method("fromfile", self.MyArray) + MyArray = self._create_MyArray() + self.add_method('array', MyArray) + self.add_method("fromfile", MyArray) if numpy_ref is True: ref = np.array(1) else: - ref = self.MyArray.array() + ref = MyArray.array() data = np.random.random(5) @@ -728,13 +735,14 @@ def test_array_like_fromfile(self, numpy_ref): assert_equal(np_res, data) assert_equal(array_like, np_res) else: - assert type(array_like) is self.MyArray - assert array_like.function is self.MyArray.fromfile + assert type(array_like) is MyArray + assert array_like.function is MyArray.fromfile def test_exception_handling(self): - self.add_method('array', self.MyArray, enable_value_error=True) + MyArray = self._create_MyArray() + self.add_method('array', MyArray, enable_value_error=True) - ref = self.MyArray.array() + ref = MyArray.array() with assert_raises(TypeError): # Raises the error about `value_error` being invalid first @@ -742,8 +750,9 @@ def test_exception_handling(self): @pytest.mark.parametrize('function, args, kwargs', _array_tests) def test_like_as_none(self, function, args, kwargs): - self.add_method('array', self.MyArray) - self.add_method(function, self.MyArray) + MyArray = self._create_MyArray() + self.add_method('array', MyArray) + self.add_method(function, MyArray) np_func = getattr(np, function) like_args = tuple(a() if callable(a) else a for a in args) From 3cbaf3f37ec08843c43e5170a80aca6e103bf32d Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Tue, 26 Aug 2025 23:40:00 +0900 Subject: [PATCH 0364/1018] fix: change parameter --- benchmarks/benchmarks/bench_lib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index e816017496a4..e2c9c49944d0 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -125,7 +125,7 @@ class Unique(Benchmark): # sizes of the 1D arrays [200, int(5e4), int(1e7)], # percent of np.nan in arrays - [0, 0.1, 2., 50., 90.], + [0, 10., 90.], # percent of unique values in arrays [0.02, 0.5, 10.], # dtypes of the arrays From 9566c7787c923e64e398ba515e375974d7546dc3 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 27 Aug 2025 01:59:17 +0200 Subject: [PATCH 0365/1018] TYP: replace scalar type ``__init__`` with ``__new__`` ported from numpy/numtype#692 --- numpy/__init__.pyi | 121 +++++++++-------------- 
numpy/typing/tests/data/fail/scalars.pyi | 12 +-- 2 files changed, 55 insertions(+), 78 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 0982a502f018..00c1a9494931 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3582,7 +3582,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # See https://github.com/numpy/numpy-stubs/pull/80 for more details. class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @abstractmethod - def __init__(self, *args: Any, **kwargs: Any) -> None: ... + def __new__(self) -> None: ... def __hash__(self) -> int: ... @overload def __array__(self, dtype: None = None, /) -> ndarray[tuple[()], dtype[Self]]: ... @@ -3853,7 +3853,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): class number(generic[_NumberItemT_co], Generic[_NBit, _NumberItemT_co]): @abstractmethod - def __init__(self, value: _NumberItemT_co, /) -> None: ... + def __new__(cls) -> Self: ... def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... def __neg__(self) -> Self: ... @@ -3889,13 +3889,13 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def imag(self) -> np.bool[L[False]]: ... @overload # mypy bug workaround: https://github.com/numpy/numpy/issues/29245 - def __init__(self: np.bool[builtins.bool], value: Never, /) -> None: ... + def __new__(cls, value: Never, /) -> np.bool[builtins.bool]: ... @overload - def __init__(self: np.bool[L[False]], value: _Falsy = ..., /) -> None: ... + def __new__(cls, value: _Falsy = ..., /) -> np.bool[L[False]]: ... @overload - def __init__(self: np.bool[L[True]], value: _Truthy, /) -> None: ... + def __new__(cls, value: _Truthy, /) -> np.bool[L[True]]: ... @overload - def __init__(self: np.bool[builtins.bool], value: object, /) -> None: ... + def __new__(cls, value: object, /) -> np.bool[builtins.bool]: ... def __bool__(self, /) -> _BoolItemT_co: ... @overload @@ -4004,7 +4004,7 @@ class object_(_RealMixin, generic): def __new__(cls, value: _T, /) -> _T: ... # type: ignore[misc] @overload # catch-all def __new__(cls, value: Any = ..., /) -> object | NDArray[Self]: ... # type: ignore[misc] - def __init__(self, value: object = ..., /) -> None: ... + def __hash__(self, /) -> int: ... def __abs__(self, /) -> object_: ... # this affects NDArray[object_].__abs__ def __call__(self, /, *args: object, **kwargs: object) -> Any: ... @@ -4014,7 +4014,7 @@ class object_(_RealMixin, generic): class integer(_IntegralMixin, _RoundMixin, number[_NBit, int]): @abstractmethod - def __init__(self, value: _ConvertibleToInt = ..., /) -> None: ... + def __new__(cls) -> Self: ... # NOTE: `bit_count` and `__index__` are technically defined in the concrete subtypes def bit_count(self, /) -> int: ... @@ -4038,7 +4038,7 @@ class integer(_IntegralMixin, _RoundMixin, number[_NBit, int]): def __rxor__(self, other: _IntLike_co, /) -> integer: ... class signedinteger(integer[_NBit1]): - def __init__(self, value: _ConvertibleToInt = ..., /) -> None: ... + def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... __add__: _SignedIntOp[_NBit1] __radd__: _SignedIntOp[_NBit1] @@ -4079,9 +4079,9 @@ long = signedinteger[_NBitLong] longlong = signedinteger[_NBitLongLong] class unsignedinteger(integer[_NBit1]): - # NOTE: `uint64 + signedinteger -> float64` - def __init__(self, value: _ConvertibleToInt = ..., /) -> None: ... + def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... 
+ # NOTE: `uint64 + signedinteger -> float64` __add__: _UnsignedIntOp[_NBit1] __radd__: _UnsignedIntOp[_NBit1] __sub__: _UnsignedIntOp[_NBit1] @@ -4122,10 +4122,10 @@ ulonglong: TypeAlias = unsignedinteger[_NBitLongLong] class inexact(number[_NBit, _InexactItemT_co], Generic[_NBit, _InexactItemT_co]): @abstractmethod - def __init__(self, value: _InexactItemT_co | None = ..., /) -> None: ... + def __new__(cls) -> Self: ... class floating(_RealMixin, _RoundMixin, inexact[_NBit1, float]): - def __init__(self, value: _ConvertibleToFloat | None = ..., /) -> None: ... + def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... __add__: _FloatOp[_NBit1] __radd__: _FloatOp[_NBit1] @@ -4153,9 +4153,6 @@ float32: TypeAlias = floating[_32Bit] # either a C `double`, `float`, or `longdouble` class float64(floating[_64Bit], float): # type: ignore[misc] - def __new__(cls, x: _ConvertibleToFloat | None = ..., /) -> Self: ... - - # @property def itemsize(self) -> L[8]: ... @property @@ -4294,19 +4291,19 @@ longdouble: TypeAlias = floating[_NBitLongDouble] class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]): @overload - def __init__( - self, - real: complex | SupportsComplex | SupportsFloat | SupportsIndex = ..., - imag: complex | SupportsFloat | SupportsIndex = ..., + def __new__( + cls, + real: complex | SupportsComplex | SupportsFloat | SupportsIndex = 0, + imag: complex | SupportsFloat | SupportsIndex = 0, /, - ) -> None: ... + ) -> Self: ... @overload - def __init__(self, real: _ConvertibleToComplex | None = ..., /) -> None: ... + def __new__(cls, real: _ConvertibleToComplex | None = 0, /) -> Self: ... @property - def real(self) -> floating[_NBit1]: ... # type: ignore[override] + def real(self) -> floating[_NBit1]: ... @property - def imag(self) -> floating[_NBit2]: ... # type: ignore[override] + def imag(self) -> floating[_NBit2]: ... # NOTE: `__complex__` is technically defined in the concrete subtypes def __complex__(self, /) -> complex: ... @@ -4385,18 +4382,7 @@ class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]): complex64: TypeAlias = complexfloating[_32Bit] -class complex128(complexfloating[_64Bit, _64Bit], complex): # type: ignore[misc] - @overload - def __new__( - cls, - real: complex | SupportsComplex | SupportsFloat | SupportsIndex = ..., - imag: complex | SupportsFloat | SupportsIndex = ..., - /, - ) -> Self: ... - @overload - def __new__(cls, real: _ConvertibleToComplex | None = ..., /) -> Self: ... - - # +class complex128(complexfloating[_64Bit, _64Bit], complex): @property def itemsize(self) -> L[16]: ... @property @@ -4455,26 +4441,26 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] def nbytes(self) -> L[8]: ... @overload - def __init__(self, value: _TD64ItemT_co | timedelta64[_TD64ItemT_co], /) -> None: ... + def __new__(cls, value: _TD64ItemT_co | timedelta64[_TD64ItemT_co], /) -> Self: ... @overload - def __init__(self: timedelta64[L[0]], /) -> None: ... + def __new__(cls, /) -> timedelta64[L[0]]: ... @overload - def __init__(self: timedelta64[None], value: _NaTValue | None, format: _TimeUnitSpec, /) -> None: ... + def __new__(cls, value: _NaTValue | None, format: _TimeUnitSpec, /) -> timedelta64[None]: ... @overload - def __init__(self: timedelta64[L[0]], value: L[0], format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> None: ... + def __new__(cls, value: L[0], format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> timedelta64[L[0]]: ... 
@overload - def __init__(self: timedelta64[int], value: _IntLike_co, format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> None: ... + def __new__(cls, value: _IntLike_co, format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> timedelta64[int]: ... @overload - def __init__(self: timedelta64[int], value: dt.timedelta, format: _TimeUnitSpec[_IntTimeUnit], /) -> None: ... + def __new__(cls, value: dt.timedelta, format: _TimeUnitSpec[_IntTimeUnit], /) -> timedelta64[int]: ... @overload - def __init__( - self: timedelta64[dt.timedelta], + def __new__( + cls, value: dt.timedelta | _IntLike_co, format: _TimeUnitSpec[_NativeTD64Unit] = ..., /, - ) -> None: ... + ) -> timedelta64[dt.timedelta]: ... @overload - def __init__(self, value: _ConvertibleToTD64, format: _TimeUnitSpec = ..., /) -> None: ... + def __new__(cls, value: _ConvertibleToTD64, format: _TimeUnitSpec = ..., /) -> Self: ... # inherited at runtime from `signedinteger` def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... @@ -4639,25 +4625,25 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): def nbytes(self) -> L[8]: ... @overload - def __init__(self, value: datetime64[_DT64ItemT_co], /) -> None: ... + def __new__(cls, value: datetime64[_DT64ItemT_co], /) -> Self: ... @overload - def __init__(self: datetime64[_AnyDT64Arg], value: _AnyDT64Arg, /) -> None: ... + def __new__(cls, value: _AnyDT64Arg, /) -> datetime64[_AnyDT64Arg]: ... @overload - def __init__(self: datetime64[None], value: _NaTValue | None = ..., format: _TimeUnitSpec = ..., /) -> None: ... + def __new__(cls, value: _NaTValue | None = ..., format: _TimeUnitSpec = ..., /) -> datetime64[None]: ... @overload - def __init__(self: datetime64[dt.datetime], value: _DT64Now, format: _TimeUnitSpec[_NativeTimeUnit] = ..., /) -> None: ... + def __new__(cls, value: _DT64Now, format: _TimeUnitSpec[_NativeTimeUnit] = ..., /) -> datetime64[dt.datetime]: ... @overload - def __init__(self: datetime64[dt.date], value: _DT64Date, format: _TimeUnitSpec[_DateUnit] = ..., /) -> None: ... + def __new__(cls, value: _DT64Date, format: _TimeUnitSpec[_DateUnit] = ..., /) -> datetime64[dt.date]: ... @overload - def __init__(self: datetime64[int], value: int | bytes | str | dt.date, format: _TimeUnitSpec[_IntTimeUnit], /) -> None: ... + def __new__(cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_IntTimeUnit], /) -> datetime64[int]: ... @overload - def __init__( - self: datetime64[dt.datetime], value: int | bytes | str | dt.date, format: _TimeUnitSpec[_NativeTimeUnit], / - ) -> None: ... + def __new__( + cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_NativeTimeUnit], / + ) -> datetime64[dt.datetime]: ... @overload - def __init__(self: datetime64[dt.date], value: int | bytes | str | dt.date, format: _TimeUnitSpec[_DateUnit], /) -> None: ... + def __new__(cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_DateUnit], /) -> datetime64[dt.date]: ... @overload - def __init__(self, value: bytes | str | dt.date | None, format: _TimeUnitSpec = ..., /) -> None: ... + def __new__(cls, value: bytes | str | dt.date | None, format: _TimeUnitSpec = ..., /) -> Self: ... @overload def __add__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ... 
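The `timedelta64`/`datetime64` overloads above encode how the runtime constructor maps `value` and `format` onto the wrapped item type. The expected correspondences are observable at runtime through `.item()`; a small self-check, assuming NumPy's usual unit-conversion rules:

    import datetime as dt
    import numpy as np

    assert np.timedelta64().item() == 0                                # timedelta64[L[0]]
    assert np.timedelta64(3, "D").item() == dt.timedelta(days=3)       # timedelta64[dt.timedelta]
    assert np.timedelta64(3, "Y").item() == 3                          # timedelta64[int]: years don't convert
    assert np.timedelta64("NaT").item() is None                        # timedelta64[None]
    assert np.datetime64("2025-08-27").item() == dt.date(2025, 8, 27)  # datetime64[dt.date]
    assert np.datetime64("NaT").item() is None                         # datetime64[None]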
@@ -4732,13 +4718,15 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): __gt__: _ComparisonOpGT[datetime64, _ArrayLikeDT64_co] __ge__: _ComparisonOpGE[datetime64, _ArrayLikeDT64_co] -class flexible(_RealMixin, generic[_FlexibleItemT_co], Generic[_FlexibleItemT_co]): ... +class flexible(_RealMixin, generic[_FlexibleItemT_co], Generic[_FlexibleItemT_co]): + @abstractmethod + def __new__(cls) -> Self: ... class void(flexible[bytes | tuple[Any, ...]]): @overload - def __init__(self, value: _IntLike_co | bytes, /, dtype: None = None) -> None: ... + def __new__(cls, value: _IntLike_co | bytes, /, dtype: None = None) -> Self: ... @overload - def __init__(self, value: Any, /, dtype: _DTypeLikeVoid) -> None: ... + def __new__(cls, value: Any, /, dtype: _DTypeLikeVoid) -> Self: ... @overload def __getitem__(self, key: str | SupportsIndex, /) -> Any: ... @@ -4750,7 +4738,7 @@ class void(flexible[bytes | tuple[Any, ...]]): class character(flexible[_CharacterItemT_co], Generic[_CharacterItemT_co]): @abstractmethod - def __init__(self, value: _CharacterItemT_co = ..., /) -> None: ... + def __new__(cls) -> Self: ... # NOTE: Most `np.bytes_` / `np.str_` methods return their builtin `bytes` / `str` counterpart @@ -4760,12 +4748,6 @@ class bytes_(character[bytes], bytes): @overload def __new__(cls, s: str, /, encoding: str, errors: str = ...) -> Self: ... - # - @overload - def __init__(self, o: object = ..., /) -> None: ... - @overload - def __init__(self, s: str, /, encoding: str, errors: str = ...) -> None: ... - # def __bytes__(self, /) -> bytes: ... @@ -4775,11 +4757,6 @@ class str_(character[str], str): @overload def __new__(cls, value: bytes, /, encoding: str = ..., errors: str = ...) -> Self: ... - # - @overload - def __init__(self, value: object = ..., /) -> None: ... - @overload - def __init__(self, value: bytes, /, encoding: str = ..., errors: str = ...) -> None: ... 
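Dropping the redundant `__init__` overloads also changes the error codes expected in the `fail/` suite below: instantiating one of the abstract scalar superclasses is now flagged as both `abstract` and `call-arg`. The runtime already agrees, since these classes refuse construction outright; a quick sanity check:

    import numpy as np
    import pytest

    # Each abstract scalar superclass raises TypeError when instantiated:
    for cls, arg in [
        (np.generic, 1), (np.number, 1), (np.integer, 1),
        (np.inexact, 1), (np.character, "test"), (np.flexible, b"test"),
    ]:
        with pytest.raises(TypeError):
            cls(arg)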
# See `numpy._typing._ufunc` for more concrete nin-/nout-specific stubs @final diff --git a/numpy/typing/tests/data/fail/scalars.pyi b/numpy/typing/tests/data/fail/scalars.pyi index 018a88e652ae..02043e06e8fe 100644 --- a/numpy/typing/tests/data/fail/scalars.pyi +++ b/numpy/typing/tests/data/fail/scalars.pyi @@ -46,12 +46,12 @@ np.uint64(A()) # type: ignore[arg-type] np.void("test") # type: ignore[call-overload] np.void("test", dtype=None) # type: ignore[call-overload] -np.generic(1) # type: ignore[abstract] -np.number(1) # type: ignore[abstract] -np.integer(1) # type: ignore[abstract] -np.inexact(1) # type: ignore[abstract] -np.character("test") # type: ignore[abstract] -np.flexible(b"test") # type: ignore[abstract] +np.generic(1) # type: ignore[abstract, call-arg] +np.number(1) # type: ignore[abstract, call-arg] +np.integer(1) # type: ignore[abstract, call-arg] +np.inexact(1) # type: ignore[abstract, call-arg] +np.character("test") # type: ignore[abstract, call-arg] +np.flexible(b"test") # type: ignore[abstract, call-arg] np.float64(value=0.0) # type: ignore[call-arg] np.int64(value=0) # type: ignore[call-arg] From 2f35e84809586e233d1f8fd86c143e1c88dd4850 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 27 Aug 2025 02:00:55 +0200 Subject: [PATCH 0366/1018] TYP: appease ruff --- numpy/__init__.pyi | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 00c1a9494931..8dc1daf7f797 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -4757,7 +4757,6 @@ class str_(character[str], str): @overload def __new__(cls, value: bytes, /, encoding: str = ..., errors: str = ...) -> Self: ... - # See `numpy._typing._ufunc` for more concrete nin-/nout-specific stubs @final class ufunc: From 9c29c3226eaa3c4879bf73ca4d9267285d7c2589 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 27 Aug 2025 02:10:59 +0200 Subject: [PATCH 0367/1018] TYP: fix slightly incorrect ``memoryview`` type argument in ``ScalarType`` Ported from numpy/numtype#693 --- numpy/_core/numerictypes.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/numerictypes.pyi b/numpy/_core/numerictypes.pyi index 26ed4b9d9524..0a6ac988163d 100644 --- a/numpy/_core/numerictypes.pyi +++ b/numpy/_core/numerictypes.pyi @@ -1,5 +1,5 @@ from builtins import bool as py_bool -from typing import Final, Literal as L, TypedDict, type_check_only +from typing import Any, Final, Literal as L, TypedDict, type_check_only import numpy as np from numpy import ( @@ -166,7 +166,7 @@ ScalarType: Final[ type[py_bool], type[bytes], type[str], - type[memoryview], + type[memoryview[Any]], type[np.bool], type[csingle], type[cdouble], From 6089ecbec5b25f1c59993755813d59d40e26af5f Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 27 Aug 2025 02:11:46 +0200 Subject: [PATCH 0368/1018] TYP: remove trivial ``ScalarType`` test case --- .../typing/tests/data/reveal/numerictypes.pyi | 36 ------------------- 1 file changed, 36 deletions(-) diff --git a/numpy/typing/tests/data/reveal/numerictypes.pyi b/numpy/typing/tests/data/reveal/numerictypes.pyi index 784167467532..aa5cf10410d4 100644 --- a/numpy/typing/tests/data/reveal/numerictypes.pyi +++ b/numpy/typing/tests/data/reveal/numerictypes.pyi @@ -2,42 +2,6 @@ from typing import Literal, assert_type import numpy as np -assert_type( - np.ScalarType, - tuple[ - type[int], - type[float], - type[complex], - type[bool], - type[bytes], - type[str], - type[memoryview], - type[np.bool], - type[np.csingle], - type[np.cdouble], - type[np.clongdouble], - 
type[np.half], - type[np.single], - type[np.double], - type[np.longdouble], - type[np.byte], - type[np.short], - type[np.intc], - type[np.long], - type[np.longlong], - type[np.timedelta64], - type[np.datetime64], - type[np.object_], - type[np.bytes_], - type[np.str_], - type[np.ubyte], - type[np.ushort], - type[np.uintc], - type[np.ulong], - type[np.ulonglong], - type[np.void], - ], -) assert_type(np.ScalarType[0], type[int]) assert_type(np.ScalarType[3], type[bool]) assert_type(np.ScalarType[8], type[np.csingle]) From ef9edae407263110258d974202acf35c4362b91c Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 27 Aug 2025 03:04:39 +0200 Subject: [PATCH 0369/1018] TYP: Make ``datetime64`` a generic type at runtime --- numpy/_core/src/multiarray/scalartypes.c.src | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index f38a16300cfd..f84c120ad409 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -2853,11 +2853,15 @@ static PyMethodDef numbertype_methods[] = { {NULL, NULL, 0, NULL} /* sentinel */ }; -static PyMethodDef booleantype_methods[] = { - /* for typing */ - {"__class_getitem__", Py_GenericAlias, METH_CLASS | METH_O, NULL}, - {NULL, NULL, 0, NULL} /* sentinel */ +/**begin repeat + * #name = boolean,datetime# + */ +static PyMethodDef @name@type_methods[] = { + /* for typing */ + {"__class_getitem__", Py_GenericAlias, METH_CLASS | METH_O, NULL}, + {NULL, NULL, 0, NULL} /* sentinel */ }; +/**end repeat**/ /**begin repeat * #name = cfloat,clongdouble# @@ -4564,6 +4568,7 @@ initialize_numeric_types(void) /**end repeat**/ + PyDatetimeArrType_Type.tp_methods = datetimetype_methods; /**begin repeat * #Type = Byte, UByte, Short, UShort, Int, UInt, Long, From 99f26d7a5824a57cf015dc90dcfd1a3474a5a103 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 27 Aug 2025 03:14:03 +0200 Subject: [PATCH 0370/1018] TST: expect the ``datetime64`` type to be subscriptable --- numpy/_core/tests/test_scalar_methods.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/tests/test_scalar_methods.py b/numpy/_core/tests/test_scalar_methods.py index 26dad71794e3..6fd4846006d0 100644 --- a/numpy/_core/tests/test_scalar_methods.py +++ b/numpy/_core/tests/test_scalar_methods.py @@ -171,8 +171,8 @@ def test_abc_non_numeric(self, cls: type[np.generic]) -> None: @pytest.mark.parametrize("code", np.typecodes["All"]) def test_concrete(self, code: str) -> None: cls = np.dtype(code).type - if cls == np.bool: - # np.bool allows subscript + if cls in {np.bool, np.datetime64}: + # these are intentionally subscriptable assert cls[Any] else: with pytest.raises(TypeError): From aa5360d109af681cdc7c82627bf7fe7cc4889a76 Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Wed, 27 Aug 2025 22:47:48 +0900 Subject: [PATCH 0371/1018] fix: change parameter --- benchmarks/benchmarks/bench_lib.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index e2c9c49944d0..37f599f075b3 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -123,11 +123,11 @@ class Unique(Benchmark): param_names = ["array_size", "percent_nans", "percent_unique_values", "dtype"] params = [ # sizes of the 1D arrays - [200, int(5e4), int(1e7)], + [int(2e3), int(2e6)], # percent of np.nan in arrays - [0, 10., 90.], + [10., 90.], # 
percent of unique values in arrays - [0.02, 0.5, 10.], + [0.2, 20.], # dtypes of the arrays [np.float64, np.complex128, np.dtypes.StringDType(na_object=np.nan)], ] From ad6302aef8d280e801628b72f8f6dd0e8105ffb2 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 27 Aug 2025 17:46:23 +0200 Subject: [PATCH 0372/1018] TYP: add missing ``_NoValue`` annotations in ``_core.fromnumeric`` --- numpy/_core/fromnumeric.pyi | 348 ++++++++++++++++++------------------ 1 file changed, 177 insertions(+), 171 deletions(-) diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 0c96ac8c0aa4..05816088f041 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -8,7 +8,9 @@ from typing import ( Protocol, SupportsIndex, TypeAlias, + TypedDict, TypeVar, + Unpack, overload, type_check_only, ) @@ -119,12 +121,21 @@ class _SupportsShape(Protocol[_ShapeT_co]): @property def shape(self, /) -> _ShapeT_co: ... +@type_check_only +class _UFuncKwargs(TypedDict, total=False): + where: _ArrayLikeBool_co | None + order: _OrderKACF + subok: bool + signature: str | tuple[str | None, ...] + casting: _CastingKind + # a "sequence" that isn't a string, bytes, bytearray, or memoryview _T = TypeVar("_T") _PyArray: TypeAlias = list[_T] | tuple[_T, ...] # `int` also covers `bool` _PyScalar: TypeAlias = complex | bytes | str +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def take( a: _ArrayLike[_ScalarT], @@ -405,7 +416,7 @@ def argmax( axis: None = None, out: None = None, *, - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., ) -> intp: ... @overload def argmax( @@ -413,7 +424,7 @@ def argmax( axis: SupportsIndex | None = None, out: None = None, *, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload def argmax( @@ -421,7 +432,7 @@ def argmax( axis: SupportsIndex | None, out: _BoolOrIntArrayT, *, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., ) -> _BoolOrIntArrayT: ... @overload def argmax( @@ -429,7 +440,7 @@ def argmax( axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., ) -> _BoolOrIntArrayT: ... @overload @@ -438,7 +449,7 @@ def argmin( axis: None = None, out: None = None, *, - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., ) -> intp: ... @overload def argmin( @@ -446,7 +457,7 @@ def argmin( axis: SupportsIndex | None = None, out: None = None, *, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload def argmin( @@ -454,7 +465,7 @@ def argmin( axis: SupportsIndex | None, out: _BoolOrIntArrayT, *, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., ) -> _BoolOrIntArrayT: ... @overload def argmin( @@ -462,9 +473,10 @@ def argmin( axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., ) -> _BoolOrIntArrayT: ... +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def searchsorted( a: ArrayLike, @@ -480,7 +492,7 @@ def searchsorted( sorter: _ArrayLikeInt_co | None = None, # 1D int array ) -> NDArray[intp]: ... -# +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def resize(a: _ArrayLike[_ScalarT], new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ... 
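The `_UFuncKwargs` TypedDict added above, combined with `Unpack`, replaces the previously repeated `where`/`order`/`subok`/`signature`/`casting` parameters with a single typed `**kwargs`. The general pattern, as a standalone sketch (Python 3.11+, or `typing_extensions` on older versions):

    from typing import TypedDict, Unpack

    class _Options(TypedDict, total=False):
        # total=False makes every key optional, like the ufunc kwargs
        order: str
        subok: bool

    def apply(x: float, **kwargs: Unpack[_Options]) -> float:
        # a checker now rejects any keyword other than 'order' or 'subok'
        return x

    apply(1.0, order="K", subok=True)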
@overload @@ -494,6 +506,7 @@ def resize(a: ArrayLike, new_shape: _AnyShapeT) -> np.ndarray[_AnyShapeT, np.dty @overload def resize(a: ArrayLike, new_shape: _ShapeLike) -> NDArray[Any]: ... +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def squeeze( a: _ScalarT, @@ -627,69 +640,54 @@ def compress( out: _ArrayT, ) -> _ArrayT: ... +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def clip( a: _ScalarT, - a_min: ArrayLike | None, - a_max: ArrayLike | None, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., out: None = None, *, - min: ArrayLike | None = ..., - max: ArrayLike | None = ..., - dtype: None = ..., - where: _ArrayLikeBool_co | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[str | None, ...] = ..., - casting: _CastingKind = ..., + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], ) -> _ScalarT: ... @overload def clip( a: _ScalarLike_co, - a_min: ArrayLike | None, - a_max: ArrayLike | None, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., out: None = None, *, - min: ArrayLike | None = ..., - max: ArrayLike | None = ..., - dtype: None = ..., - where: _ArrayLikeBool_co | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[str | None, ...] = ..., - casting: _CastingKind = ..., + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], ) -> Any: ... @overload def clip( a: _ArrayLike[_ScalarT], - a_min: ArrayLike | None, - a_max: ArrayLike | None, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., out: None = None, *, - min: ArrayLike | None = ..., - max: ArrayLike | None = ..., - dtype: None = ..., - where: _ArrayLikeBool_co | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[str | None, ...] = ..., - casting: _CastingKind = ..., + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], ) -> NDArray[_ScalarT]: ... @overload def clip( a: ArrayLike, - a_min: ArrayLike | None, - a_max: ArrayLike | None, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., out: None = None, *, - min: ArrayLike | None = ..., - max: ArrayLike | None = ..., - dtype: None = ..., - where: _ArrayLikeBool_co | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[str | None, ...] = ..., - casting: _CastingKind = ..., + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], ) -> NDArray[Any]: ... @overload def clip( @@ -698,30 +696,34 @@ def clip( a_max: ArrayLike | None, out: _ArrayT, *, - min: ArrayLike | None = ..., - max: ArrayLike | None = ..., - dtype: DTypeLike = ..., - where: _ArrayLikeBool_co | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[str | None, ...] = ..., - casting: _CastingKind = ..., + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], ) -> _ArrayT: ... 
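With `_NoValueType` in the `clip` signatures every bound becomes optional, mirroring the runtime, where omitted bounds default to the `np._NoValue` sentinel, `None` disables a bound, and `min=`/`max=` keyword aliases exist (the keyword spellings assume NumPy >= 2.1):

    import numpy as np

    a = np.arange(5)
    print(np.clip(a, 1, None))  # [1 1 2 3 4] -- None means "no upper bound"
    print(np.clip(a, None, 3))  # [0 1 2 3 3]
    print(np.clip(a, min=1))    # keyword-only spelling of the lower bound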
@overload def clip( a: ArrayLike, - a_min: ArrayLike | None, - a_max: ArrayLike | None, - out: ArrayLike = None, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., *, - min: ArrayLike | None = ..., - max: ArrayLike | None = ..., - dtype: DTypeLike, - where: _ArrayLikeBool_co | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[str | None, ...] = ..., - casting: _CastingKind = ..., + out: _ArrayT, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> _ArrayT: ... +@overload +def clip( + a: ArrayLike, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], ) -> Any: ... @overload @@ -730,9 +732,9 @@ def sum( axis: None = None, dtype: None = None, out: None = None, - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ScalarT: ... @overload def sum( @@ -740,9 +742,9 @@ def sum( axis: None = None, dtype: None = None, out: None = None, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ScalarT | NDArray[_ScalarT]: ... @overload def sum( @@ -750,9 +752,9 @@ def sum( axis: None, dtype: _DTypeLike[_ScalarT], out: None = None, - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ScalarT: ... @overload def sum( @@ -761,9 +763,9 @@ def sum( *, dtype: _DTypeLike[_ScalarT], out: None = None, - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ScalarT: ... @overload def sum( @@ -771,9 +773,9 @@ def sum( axis: _ShapeLike | None, dtype: _DTypeLike[_ScalarT], out: None = None, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ScalarT | NDArray[_ScalarT]: ... @overload def sum( @@ -782,9 +784,9 @@ def sum( *, dtype: _DTypeLike[_ScalarT], out: None = None, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ScalarT | NDArray[_ScalarT]: ... 
@overload def sum( @@ -792,9 +794,9 @@ def sum( axis: _ShapeLike | None = None, dtype: DTypeLike = None, out: None = None, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload def sum( @@ -802,9 +804,9 @@ def sum( axis: _ShapeLike | None, dtype: DTypeLike, out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload def sum( @@ -813,9 +815,9 @@ def sum( dtype: DTypeLike = None, *, out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... # keep in sync with `any` @@ -1003,21 +1005,21 @@ def ptp( a: _ArrayLike[_ScalarT], axis: None = None, out: None = None, - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., ) -> _ScalarT: ... @overload def ptp( a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload def ptp( a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... @overload def ptp( @@ -1025,7 +1027,7 @@ def ptp( axis: _ShapeLike | None = None, *, out: _ArrayT, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... @overload @@ -1033,27 +1035,27 @@ def amax( a: _ArrayLike[_ScalarT], axis: None = None, out: None = None, - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ScalarT: ... @overload def amax( a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload def amax( a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload def amax( @@ -1061,9 +1063,9 @@ def amax( axis: _ShapeLike | None = None, *, out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload @@ -1071,27 +1073,27 @@ def amin( a: _ArrayLike[_ScalarT], axis: None = None, out: None = None, - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ScalarT: ... 
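`keepdims`, `initial` and `where` all default to the `np._NoValue` sentinel at runtime, which is what the `| _NoValueType` unions express; passed explicitly they behave like this:

    import numpy as np

    a = np.array([[1.0, 2.0], [3.0, 4.0]])
    print(np.sum(a, axis=1, keepdims=True).shape)  # (2, 1)
    print(np.sum(a, initial=100.0))                # 110.0 -- initial seeds the reduction
    print(np.sum(a, where=[True, False]))          # 4.0 -- only the first column counts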
@overload def amin( a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload def amin( a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload def amin( @@ -1099,27 +1101,27 @@ def amin( axis: _ShapeLike | None = None, *, out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... # TODO: `np.prod()``: For object arrays `initial` does not necessarily # have to be a numerical scalar. # The only requirement is that it is compatible # with the `.__mul__()` method(s) of the passed array's elements. - # Note that the same situation holds for all wrappers around # `np.ufunc.reduce`, e.g. `np.sum()` (`.__add__()`). +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def prod( a: _ArrayLikeBool_co, axis: None = None, dtype: None = None, out: None = None, - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> int_: ... @overload def prod( @@ -1127,9 +1129,9 @@ def prod( axis: None = None, dtype: None = None, out: None = None, - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> uint64: ... @overload def prod( @@ -1137,9 +1139,9 @@ def prod( axis: None = None, dtype: None = None, out: None = None, - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> int64: ... @overload def prod( @@ -1147,9 +1149,9 @@ def prod( axis: None = None, dtype: None = None, out: None = None, - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> floating: ... @overload def prod( @@ -1157,9 +1159,9 @@ def prod( axis: None = None, dtype: None = None, out: None = None, - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> complexfloating: ... 
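The dtype-specific `prod` overloads (bool to `int_`, small unsigned to `uint64`, small signed to `int64`) track the accumulator promotion the runtime applies to integer inputs. A check of those assumptions, which presumes a 64-bit platform:

    import numpy as np

    assert np.prod(np.array([True, True])).dtype == np.dtype(np.int_)
    assert np.prod(np.array([1, 2], dtype=np.uint8)).dtype == np.dtype(np.uint64)
    assert np.prod(np.array([1, 2], dtype=np.int8)).dtype == np.dtype(np.int64)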
@overload def prod( @@ -1167,9 +1169,9 @@ def prod( axis: _ShapeLike | None = None, dtype: None = None, out: None = None, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload def prod( @@ -1177,9 +1179,9 @@ def prod( axis: None, dtype: _DTypeLike[_ScalarT], out: None = None, - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ScalarT: ... @overload def prod( @@ -1188,9 +1190,9 @@ def prod( *, dtype: _DTypeLike[_ScalarT], out: None = None, - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ScalarT: ... @overload def prod( @@ -1198,9 +1200,9 @@ def prod( axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, out: None = None, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload def prod( @@ -1208,9 +1210,9 @@ def prod( axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload def prod( @@ -1219,11 +1221,12 @@ def prod( dtype: DTypeLike | None = None, *, out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def cumprod( a: _ArrayLikeBool_co, @@ -1304,6 +1307,7 @@ def cumprod( out: _ArrayT, ) -> _ArrayT: ... +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def cumulative_prod( x: _ArrayLikeBool_co, @@ -1399,6 +1403,7 @@ def ndim(a: ArrayLike) -> int: ... def size(a: ArrayLike, axis: int | tuple[int, ...] | None = None) -> int: ... +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def around( a: _BoolLike_co, @@ -1449,6 +1454,7 @@ def around( out: _ArrayT, ) -> _ArrayT: ... 
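The `mean`/`std`/`var` stubs that follow thread `_NoValueType` through `keepdims=`, `where=`, the precomputed `mean=` and the array-API `correction=` parameters in the same way. Those last two keywords assume NumPy >= 2.0 at runtime:

    import numpy as np

    a = np.arange(6.0)
    m = a.mean()
    assert np.isclose(np.std(a, mean=m, correction=1), a.std(ddof=1))
    assert np.isclose(np.var(a, correction=1), a.var(ddof=1))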
+# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def mean( a: _ArrayLikeFloat_co, @@ -1567,7 +1573,7 @@ def std( dtype: None = None, out: None = None, ddof: float = 0, - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _NoValueType = ..., @@ -1580,7 +1586,7 @@ def std( dtype: None = None, out: None = None, ddof: float = 0, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., @@ -1593,7 +1599,7 @@ def std( dtype: _DTypeLike[_ScalarT], out: None = None, ddof: float = 0, - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., @@ -1607,7 +1613,7 @@ def std( dtype: _DTypeLike[_ScalarT], out: None = None, ddof: float = 0, - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., @@ -1619,7 +1625,7 @@ def std( dtype: DTypeLike = None, out: None = None, ddof: float = 0, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., @@ -1632,7 +1638,7 @@ def std( dtype: DTypeLike, out: _ArrayT, ddof: float = 0, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., @@ -1646,7 +1652,7 @@ def std( *, out: _ArrayT, ddof: float = 0, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., @@ -1659,7 +1665,7 @@ def var( dtype: None = None, out: None = None, ddof: float = 0, - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _NoValueType = ..., @@ -1672,7 +1678,7 @@ def var( dtype: None = None, out: None = None, ddof: float = 0, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., @@ -1685,7 +1691,7 @@ def var( dtype: _DTypeLike[_ScalarT], out: None = None, ddof: float = 0, - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., @@ -1699,7 +1705,7 @@ def var( dtype: _DTypeLike[_ScalarT], out: None = None, ddof: float = 0, - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., @@ -1711,7 +1717,7 @@ def var( dtype: DTypeLike = None, out: None = None, ddof: float = 0, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | 
_NoValueType = ..., @@ -1724,7 +1730,7 @@ def var( dtype: DTypeLike, out: _ArrayT, ddof: float = 0, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., @@ -1738,7 +1744,7 @@ def var( *, out: _ArrayT, ddof: float = 0, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., From bde097d548a1dddcd21084400a8fdddd6c0683d2 Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Wed, 27 Aug 2025 14:17:41 -0400 Subject: [PATCH 0373/1018] TST: Replace xunit setup with methods (#29628) * TST: Replace xunit setup with methods * TST: Remove markers, err_status fixture to autouse * TST: err_status fixture scope, moved vars --- numpy/lib/tests/test_recfunctions.py | 84 ++++------ numpy/ma/tests/test_core.py | 235 ++++++++++++++------------- 2 files changed, 159 insertions(+), 160 deletions(-) diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py index 72377b8f7c35..3991f92b16a3 100644 --- a/numpy/lib/tests/test_recfunctions.py +++ b/numpy/lib/tests/test_recfunctions.py @@ -1,4 +1,3 @@ - import numpy as np import numpy.ma as ma from numpy.lib.recfunctions import ( @@ -31,19 +30,14 @@ class TestRecFunctions: # Misc tests - - def setup_method(self): + def test_zip_descr(self): + # Test zip_descr x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array([('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) - self.data = (w, x, y, z) - - def test_zip_descr(self): - # Test zip_descr - (w, x, y, z) = self.data # Std array test = zip_descr((x, x), flatten=True) @@ -448,7 +442,7 @@ def test_masked_flexible(self): class TestMergeArrays: # Test merge_arrays - def setup_method(self): + def _create_arrays(self): x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array( @@ -456,11 +450,11 @@ def setup_method(self): w = np.array( [(1, (2, 3.0, ())), (4, (5, 6.0, ()))], dtype=[('a', int), ('b', [('ba', float), ('bb', int), ('bc', [])])]) - self.data = (w, x, y, z) + return w, x, y, z def test_solo(self): # Test merge_arrays on a single array. 
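The refactor replaces shared `setup_method` state with data built inside each test, so tests no longer read or mutate a common `self.data` tuple. Reduced to a sketch of the before/after shape:

    class TestExample:
        # before: setup_method stored arrays on self and every test unpacked
        # self.data; after: a plain helper returns fresh arrays per test
        def _create_arrays(self):
            x = [1, 2]
            y = [10, 20, 30]
            return x, y

        def test_lengths(self):
            x, y = self._create_arrays()
            assert len(x) == 2 and len(y) == 3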
- (_, x, _, z) = self.data + _, x, _, z = self._create_arrays() test = merge_arrays(x) control = np.array([(1,), (2,)], dtype=[('f0', int)]) @@ -475,7 +469,7 @@ def test_solo(self): def test_solo_w_flatten(self): # Test merge_arrays on a single array w & w/o flattening - w = self.data[0] + w = self._create_arrays()[0] test = merge_arrays(w, flatten=False) assert_equal(test, w) @@ -487,7 +481,7 @@ def test_solo_w_flatten(self): def test_standard(self): # Test standard & standard # Test merge arrays - (_, x, y, _) = self.data + _, x, y, _ = self._create_arrays() test = merge_arrays((x, y), usemask=False) control = np.array([(1, 10), (2, 20), (-1, 30)], dtype=[('f0', int), ('f1', int)]) @@ -502,7 +496,7 @@ def test_standard(self): def test_flatten(self): # Test standard & flexible - (_, x, _, z) = self.data + _, x, _, z = self._create_arrays() test = merge_arrays((x, z), flatten=True) control = np.array([(1, 'A', 1.), (2, 'B', 2.)], dtype=[('f0', int), ('A', '|S3'), ('B', float)]) @@ -516,7 +510,7 @@ def test_flatten(self): def test_flatten_wflexible(self): # Test flatten standard & nested - (w, x, _, _) = self.data + w, x, _, _ = self._create_arrays() test = merge_arrays((x, w), flatten=True) control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)], dtype=[('f0', int), @@ -532,7 +526,7 @@ def test_flatten_wflexible(self): def test_wmasked_arrays(self): # Test merge_arrays masked arrays - (_, x, _, _) = self.data + x = self._create_arrays()[1] mx = ma.array([1, 2, 3], mask=[1, 0, 0]) test = merge_arrays((x, mx), usemask=True) control = ma.array([(1, 1), (2, 2), (-1, 3)], @@ -554,7 +548,7 @@ def test_w_singlefield(self): def test_w_shorter_flex(self): # Test merge_arrays w/ a shorter flexndarray. - z = self.data[-1] + z = self._create_arrays()[-1] # Fixme, this test looks incomplete and broken #test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) @@ -567,7 +561,7 @@ def test_w_shorter_flex(self): dtype=[('A', '|S3'), ('B', float), ('C', int)]) def test_singlerecord(self): - (_, x, y, z) = self.data + _, x, y, z = self._create_arrays() test = merge_arrays((x[0], y[0], z[0]), usemask=False) control = np.array([(1, 10, ('A', 1))], dtype=[('f0', int), @@ -579,18 +573,18 @@ def test_singlerecord(self): class TestAppendFields: # Test append_fields - def setup_method(self): + def _create_arrays(self): x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array( [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) - self.data = (w, x, y, z) + return w, x, y, z def test_append_single(self): # Test simple case - (_, x, _, _) = self.data + x = self._create_arrays()[1] test = append_fields(x, 'A', data=[10, 20, 30]) control = ma.array([(1, 10), (2, 20), (-1, 30)], mask=[(0, 0), (0, 0), (1, 0)], @@ -599,7 +593,7 @@ def test_append_single(self): def test_append_double(self): # Test simple case - (_, x, _, _) = self.data + x = self._create_arrays()[1] test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]]) control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)], mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)], @@ -608,7 +602,7 @@ def test_append_double(self): def test_append_on_flex(self): # Test append_fields on flexible type arrays - z = self.data[-1] + z = self._create_arrays()[-1] test = append_fields(z, 'C', data=[10, 20, 30]) control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)], mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)], @@ -617,7 +611,7 @@ def 
test_append_on_flex(self): def test_append_on_nested(self): # Test append_fields on nested fields - w = self.data[0] + w = self._create_arrays()[0] test = append_fields(w, 'C', data=[10, 20, 30]) control = ma.array([(1, (2, 3.0), 10), (4, (5, 6.0), 20), @@ -632,18 +626,18 @@ def test_append_on_nested(self): class TestStackArrays: # Test stack_arrays - def setup_method(self): + def _create_arrays(self): x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array( [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) - self.data = (w, x, y, z) + return w, x, y, z def test_solo(self): # Test stack_arrays on single arrays - (_, x, _, _) = self.data + x = self._create_arrays()[1] test = stack_arrays((x,)) assert_equal(test, x) assert_(test is x) @@ -654,7 +648,7 @@ def test_solo(self): def test_unnamed_fields(self): # Tests combinations of arrays w/o named fields - (_, x, y, _) = self.data + _, x, y, _ = self._create_arrays() test = stack_arrays((x, x), usemask=False) control = np.array([1, 2, 1, 2]) @@ -670,7 +664,7 @@ def test_unnamed_fields(self): def test_unnamed_and_named_fields(self): # Test combination of arrays w/ & w/o named fields - (_, x, _, z) = self.data + _, x, _, z = self._create_arrays() test = stack_arrays((x, z)) control = ma.array([(1, -1, -1), (2, -1, -1), @@ -702,7 +696,7 @@ def test_unnamed_and_named_fields(self): def test_matching_named_fields(self): # Test combination of arrays w/ matching field names - (_, x, _, z) = self.data + _, x, _, z = self._create_arrays() zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], dtype=[('A', '|S3'), ('B', float), ('C', float)]) test = stack_arrays((z, zz)) @@ -730,7 +724,7 @@ def test_matching_named_fields(self): def test_defaults(self): # Test defaults: no exception raised if keys of defaults are not fields. 
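`stack_arrays` fills fields that are missing from one of its inputs with masked entries, which is what the control arrays in these tests spell out; the same behaviour in isolation:

    import numpy as np
    from numpy.lib.recfunctions import stack_arrays

    z = np.array([(b"A", 1.0)], dtype=[("A", "|S3"), ("B", float)])
    zz = np.array([(b"a", 10.0, 100.0)],
                  dtype=[("A", "|S3"), ("B", float), ("C", float)])
    res = stack_arrays((z, zz))
    assert res.mask["C"][0]  # 'C' does not exist in z, so it comes back masked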
- (_, _, _, z) = self.data + z = self._create_arrays()[-1] zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], dtype=[('A', '|S3'), ('B', float), ('C', float)]) defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.} @@ -802,18 +796,18 @@ def test_subdtype(self): class TestJoinBy: - def setup_method(self): - self.a = np.array(list(zip(np.arange(10), np.arange(50, 60), + def _create_arrays(self): + a = np.array(list(zip(np.arange(10), np.arange(50, 60), np.arange(100, 110))), dtype=[('a', int), ('b', int), ('c', int)]) - self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75), + b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75), np.arange(100, 110))), dtype=[('a', int), ('b', int), ('d', int)]) + return a, b def test_inner_join(self): # Basic test of join_by - a, b = self.a, self.b - + a, b = self._create_arrays() test = join_by('a', a, b, jointype='inner') control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101), (7, 57, 67, 107, 102), (8, 58, 68, 108, 103), @@ -823,8 +817,7 @@ def test_inner_join(self): assert_equal(test, control) def test_join(self): - a, b = self.a, self.b - + a, b = self._create_arrays() # Fixme, this test is broken #test = join_by(('a', 'b'), a, b) #control = np.array([(5, 55, 105, 100), (6, 56, 106, 101), @@ -833,7 +826,6 @@ def test_join(self): # dtype=[('a', int), ('b', int), # ('c', int), ('d', int)]) #assert_equal(test, control) - join_by(('a', 'b'), a, b) np.array([(5, 55, 105, 100), (6, 56, 106, 101), (7, 57, 107, 102), (8, 58, 108, 103), @@ -851,8 +843,7 @@ def test_join_subdtype(self): assert_equal(res, bar.view(ma.MaskedArray)) def test_outer_join(self): - a, b = self.a, self.b - + a, b = self._create_arrays() test = join_by(('a', 'b'), a, b, 'outer') control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), (2, 52, 102, -1), (3, 53, 103, -1), @@ -879,8 +870,7 @@ def test_outer_join(self): assert_equal(test, control) def test_leftouter_join(self): - a, b = self.a, self.b - + a, b = self._create_arrays() test = join_by(('a', 'b'), a, b, 'leftouter') control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), (2, 52, 102, -1), (3, 53, 103, -1), @@ -1029,19 +1019,17 @@ def test_two_keys_two_vars(self): assert_equal(test.dtype, control.dtype) assert_equal(test, control) + class TestAppendFieldsObj: """ Test append_fields with arrays containing objects """ # https://github.com/numpy/numpy/issues/2346 - def setup_method(self): - from datetime import date - self.data = {'obj': date(2000, 1, 1)} - def test_append_to_objects(self): "Test append_fields when the base array contains objects" - obj = self.data['obj'] + from datetime import date + obj = date(2000, 1, 1) x = np.array([(obj, 1.), (obj, 2.)], dtype=[('A', object), ('B', float)]) y = np.array([10, 20], dtype=int) diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index bdef61dac3ba..fbebd9fccc37 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -152,12 +152,11 @@ WARNING_MESSAGE = ("setting an item on a masked array which has a shared " "mask will not copy") WARNING_MARK_SPEC = f"ignore:.*{WARNING_MESSAGE}:numpy.ma.core.MaskedArrayFutureWarning" - class TestMaskedArray: # Base test class for MaskedArrays. # message for warning filters - def setup_method(self): + def _create_data(self): # Base data definition. 
x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) @@ -170,7 +169,7 @@ def setup_method(self): zm = masked_array(z, mask=[0, 1, 0, 0]) xf = np.where(m1, 1e+20, x) xm.set_fill_value(1e+20) - self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf) + return x, y, a10, m1, m2, xm, ym, z, zm, xf def test_basicattributes(self): # Tests some basic array attributes. @@ -196,7 +195,7 @@ def test_basic0d(self): def test_basic1d(self): # Test of basic array creation and properties in 1 dimension. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, _, _, m1, _, xm, ym, z, zm, xf = self._create_data() assert_(not isMaskedArray(x)) assert_(isMaskedArray(xm)) assert_((xm - ym).filled(0).any()) @@ -214,7 +213,7 @@ def test_basic1d(self): def test_basic2d(self): # Test of basic array creation and properties in 2 dimensions. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, _, m1, _, xm, ym, _, _, xf = self._create_data() for s in [(4, 3), (6, 2)]: x = x.reshape(s) y = y.reshape(s) @@ -234,7 +233,7 @@ def test_basic2d(self): def test_concatenate_basic(self): # Tests concatenations. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, _, _, _, xm, ym, _, _, _ = self._create_data() # basic concatenation assert_equal(np.concatenate((x, y)), concatenate((xm, ym))) assert_equal(np.concatenate((x, y)), concatenate((x, y))) @@ -243,7 +242,7 @@ def test_concatenate_basic(self): def test_concatenate_alongaxis(self): # Tests concatenations. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, _, m1, m2, xm, ym, z, _, xf = self._create_data() # Concatenation along an axis s = (3, 4) x = x.reshape(s) @@ -367,7 +366,7 @@ def test_unknown_keyword_parameter(self): MaskedArray([1, 2, 3], maks=[0, 1, 0]) # `mask` is misspelled. def test_asarray(self): - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + xm = self._create_data()[5] xm.fill_value = -9999 xm._hardmask = True xmm = asarray(xm) @@ -1123,8 +1122,7 @@ def test_maskedarray_tofile_raises_notimplementederror(self): class TestMaskedArrayArithmetic: # Base test class for MaskedArrays. - - def setup_method(self): + def _create_data(self): # Base data definition. x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) @@ -1137,16 +1135,18 @@ def setup_method(self): zm = masked_array(z, mask=[0, 1, 0, 0]) xf = np.where(m1, 1e+20, x) xm.set_fill_value(1e+20) - self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf) - self.err_status = np.geterr() - np.seterr(divide='ignore', invalid='ignore') + return x, y, a10, m1, m2, xm, ym, z, zm, xf - def teardown_method(self): - np.seterr(**self.err_status) + @pytest.fixture(autouse=True, scope="class") + def err_status(self): + err = np.geterr() + np.seterr(divide='ignore', invalid='ignore') + yield err + np.seterr(**err) def test_basic_arithmetic(self): # Test of basic arithmetic. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, a10, _, _, xm, ym, _, _, xf = self._create_data() a2d = array([[1, 2], [0, 4]]) a2dm = masked_array(a2d, [[0, 0], [1, 0]]) assert_equal(a2d * a2d, a2d * a2dm) @@ -1256,7 +1256,7 @@ def test_scalar_arithmetic(self): def test_basic_ufuncs(self): # Test various functions such as sin, cos. 
- (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, _, _, _, xm, ym, z, zm, _ = self._create_data() assert_equal(np.cos(x), cos(xm)) assert_equal(np.cosh(x), cosh(xm)) assert_equal(np.sin(x), sin(xm)) @@ -1318,7 +1318,7 @@ def test_count_on_python_builtins(self): def test_minmax_func(self): # Tests minimum and maximum. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, _, _, _, xm, _, _, _, _ = self._create_data() # max doesn't work if shaped xr = np.ravel(x) xmr = ravel(xm) @@ -1390,7 +1390,7 @@ def test_minmax_funcs_with_output(self): def test_minmax_methods(self): # Additional tests on max/min - (_, _, _, _, _, xm, _, _, _, _) = self.d + xm = self._create_data()[5] xm.shape = (xm.size,) assert_equal(xm.max(), 10) assert_(xm[0].max() is masked) @@ -1491,7 +1491,7 @@ def minmax_with_mask(arr, mask): def test_addsumprod(self): # Tests add, sum, product. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, _, _, _, xm, ym, _, _, _ = self._create_data() assert_equal(np.add.reduce(x), add.reduce(x)) assert_equal(np.add.accumulate(x), add.accumulate(x)) assert_equal(4, sum(array(4), axis=0)) @@ -1612,7 +1612,7 @@ def test_noshink_on_creation(self): def test_mod(self): # Tests mod - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, _, _, _, xm, ym, _, _, _ = self._create_data() assert_equal(mod(x, y), mod(xm, ym)) test = mod(ym, xm) assert_equal(test, np.mod(ym, xm)) @@ -2590,16 +2590,17 @@ def test_fillvalue_bytes_or_str(self): class TestUfuncs: # Test class for the application of ufuncs on MaskedArrays. - - def setup_method(self): + def _create_data(self): # Base data definition. - self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6), + return (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6), array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),) - self.err_status = np.geterr() - np.seterr(divide='ignore', invalid='ignore') - def teardown_method(self): - np.seterr(**self.err_status) + @pytest.fixture(autouse=True, scope="class") + def err_status(self): + err = np.geterr() + np.seterr(divide='ignore', invalid='ignore') + yield err + np.seterr(**err) def test_testUfuncRegression(self): # Tests new ufuncs on MaskedArrays. @@ -2625,7 +2626,7 @@ def test_testUfuncRegression(self): except AttributeError: uf = getattr(fromnumeric, f) mf = getattr(numpy.ma.core, f) - args = self.d[:uf.nin] + args = self._create_data()[:uf.nin] ur = uf(*args) mr = mf(*args) assert_equal(ur.filled(0), mr.filled(0), f) @@ -2633,7 +2634,7 @@ def test_testUfuncRegression(self): def test_reduce(self): # Tests reduce on MaskedArrays. 
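The class-scoped autouse fixture is the pytest-native replacement for the old `setup_method`/`teardown_method` pair around the global error state: it runs once per test class and restores the previous state afterwards. The pattern in isolation:

    import numpy as np
    import pytest

    class TestWithSilencedErrors:
        @pytest.fixture(autouse=True, scope="class")
        def err_status(self):
            old = np.geterr()
            np.seterr(divide="ignore", invalid="ignore")
            yield
            np.seterr(**old)

        def test_divide_by_zero(self):
            assert np.isinf(np.float64(1.0) / np.float64(0.0))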
- a = self.d[0] + a = self._create_data()[0] assert_(not alltrue(a, axis=0)) assert_(sometrue(a, axis=0)) assert_equal(sum(a[:3], axis=0), 0) @@ -2737,34 +2738,41 @@ def test_masked_array_underflow(self): X2 = X / 2.0 np.testing.assert_array_equal(X2, x / 2) + class TestMaskedArrayInPlaceArithmetic: # Test MaskedArray Arithmetic - - def setup_method(self): + def _create_intdata(self): x = arange(10) y = arange(10) xm = arange(10) xm[2] = masked - self.intdata = (x, y, xm) - self.floatdata = (x.astype(float), y.astype(float), xm.astype(float)) - self.othertypes = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] - self.othertypes = [np.dtype(_).type for _ in self.othertypes] - self.uint8data = ( + return x, y, xm + + def _create_floatdata(self): + x, y, xm = self._create_intdata() + return x.astype(float), y.astype(float), xm.astype(float) + + def _create_otherdata(self): + o = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + othertypes = [np.dtype(_).type for _ in o] + x, y, xm = self._create_intdata() + uint8data = ( x.astype(np.uint8), y.astype(np.uint8), xm.astype(np.uint8) ) + return othertypes, uint8data def test_inplace_addition_scalar(self): # Test of inplace additions - (x, y, xm) = self.intdata + x, y, xm = self._create_intdata() xm[2] = masked x += 1 assert_equal(x, y + 1) xm += 1 assert_equal(xm, y + 1) - (x, _, xm) = self.floatdata + x, _, xm = self._create_floatdata() id1 = x.data.ctypes.data x += 1. assert_(id1 == x.data.ctypes.data) @@ -2772,7 +2780,7 @@ def test_inplace_addition_scalar(self): def test_inplace_addition_array(self): # Test of inplace additions - (x, y, xm) = self.intdata + x, y, xm = self._create_intdata() m = xm.mask a = arange(10, dtype=np.int16) a[-1] = masked @@ -2784,7 +2792,7 @@ def test_inplace_addition_array(self): def test_inplace_subtraction_scalar(self): # Test of inplace subtractions - (x, y, xm) = self.intdata + x, y, xm = self._create_intdata() x -= 1 assert_equal(x, y - 1) xm -= 1 @@ -2792,7 +2800,7 @@ def test_inplace_subtraction_scalar(self): def test_inplace_subtraction_array(self): # Test of inplace subtractions - (x, y, xm) = self.floatdata + x, y, xm = self._create_floatdata() m = xm.mask a = arange(10, dtype=float) a[-1] = masked @@ -2804,7 +2812,7 @@ def test_inplace_subtraction_array(self): def test_inplace_multiplication_scalar(self): # Test of inplace multiplication - (x, y, xm) = self.floatdata + x, y, xm = self._create_floatdata() x *= 2.0 assert_equal(x, y * 2) xm *= 2.0 @@ -2812,7 +2820,7 @@ def test_inplace_multiplication_scalar(self): def test_inplace_multiplication_array(self): # Test of inplace multiplication - (x, y, xm) = self.floatdata + x, y, xm = self._create_floatdata() m = xm.mask a = arange(10, dtype=float) a[-1] = masked @@ -2824,7 +2832,7 @@ def test_inplace_multiplication_array(self): def test_inplace_division_scalar_int(self): # Test of inplace division - (x, y, xm) = self.intdata + x, y, xm = self._create_intdata() x = arange(10) * 2 xm = arange(10) * 2 xm[2] = masked @@ -2835,7 +2843,7 @@ def test_inplace_division_scalar_int(self): def test_inplace_division_scalar_float(self): # Test of inplace division - (x, y, xm) = self.floatdata + x, y, xm = self._create_floatdata() x /= 2.0 assert_equal(x, y / 2.0) xm /= arange(10) @@ -2843,7 +2851,7 @@ def test_inplace_division_scalar_float(self): def test_inplace_division_array_float(self): # Test of inplace division - (x, y, xm) = self.floatdata + x, y, xm = self._create_floatdata() m = xm.mask a = arange(10, dtype=float) a[-1] = masked @@ -3020,10 +3028,11 @@ 
def test_datafriendly_mul_arrays(self): def test_inplace_addition_scalar_type(self): # Test of inplace additions - for t in self.othertypes: + othertypes, uint8data = self._create_otherdata() + for t in othertypes: with warnings.catch_warnings(): warnings.filterwarnings("error") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x, y, xm = (_.astype(t) for _ in uint8data) xm[2] = masked x += t(1) assert_equal(x, y + t(1)) @@ -3032,10 +3041,11 @@ def test_inplace_addition_scalar_type(self): def test_inplace_addition_array_type(self): # Test of inplace additions - for t in self.othertypes: + othertypes, uint8data = self._create_otherdata() + for t in othertypes: with warnings.catch_warnings(): warnings.filterwarnings("error") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x, y, xm = (_.astype(t) for _ in uint8data) m = xm.mask a = arange(10, dtype=t) a[-1] = masked @@ -3047,10 +3057,11 @@ def test_inplace_addition_array_type(self): def test_inplace_subtraction_scalar_type(self): # Test of inplace subtractions - for t in self.othertypes: + othertypes, uint8data = self._create_otherdata() + for t in othertypes: with warnings.catch_warnings(): warnings.filterwarnings("error") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x, y, xm = (_.astype(t) for _ in uint8data) x -= t(1) assert_equal(x, y - t(1)) xm -= t(1) @@ -3058,10 +3069,11 @@ def test_inplace_subtraction_scalar_type(self): def test_inplace_subtraction_array_type(self): # Test of inplace subtractions - for t in self.othertypes: + othertypes, uint8data = self._create_otherdata() + for t in othertypes: with warnings.catch_warnings(): warnings.filterwarnings("error") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x, y, xm = (_.astype(t) for _ in uint8data) m = xm.mask a = arange(10, dtype=t) a[-1] = masked @@ -3073,10 +3085,11 @@ def test_inplace_subtraction_array_type(self): def test_inplace_multiplication_scalar_type(self): # Test of inplace multiplication - for t in self.othertypes: + othertypes, uint8data = self._create_otherdata() + for t in othertypes: with warnings.catch_warnings(): warnings.filterwarnings("error") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x, y, xm = (_.astype(t) for _ in uint8data) x *= t(2) assert_equal(x, y * t(2)) xm *= t(2) @@ -3084,10 +3097,11 @@ def test_inplace_multiplication_scalar_type(self): def test_inplace_multiplication_array_type(self): # Test of inplace multiplication - for t in self.othertypes: + othertypes, uint8data = self._create_otherdata() + for t in othertypes: with warnings.catch_warnings(): warnings.filterwarnings("error") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x, y, xm = (_.astype(t) for _ in uint8data) m = xm.mask a = arange(10, dtype=t) a[-1] = masked @@ -3100,11 +3114,12 @@ def test_inplace_multiplication_array_type(self): def test_inplace_floor_division_scalar_type(self): # Test of inplace division # Check for TypeError in case of unsupported types + othertypes, uint8data = self._create_otherdata() unsupported = {np.dtype(t).type for t in np.typecodes["Complex"]} - for t in self.othertypes: + for t in othertypes: with warnings.catch_warnings(): warnings.filterwarnings("error") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x, y, xm = (_.astype(t) for _ in uint8data) x = arange(10, dtype=t) * t(2) xm = arange(10, dtype=t) * t(2) xm[2] = masked @@ -3120,11 +3135,12 @@ def test_inplace_floor_division_scalar_type(self): def test_inplace_floor_division_array_type(self): # Test of inplace division # Check for TypeError in case 
of unsupported types + othertypes, uint8data = self._create_otherdata() unsupported = {np.dtype(t).type for t in np.typecodes["Complex"]} - for t in self.othertypes: + for t in othertypes: with warnings.catch_warnings(): warnings.filterwarnings("error") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x, y, xm = (_.astype(t) for _ in uint8data) m = xm.mask a = arange(10, dtype=t) a[-1] = masked @@ -3143,10 +3159,11 @@ def test_inplace_floor_division_array_type(self): def test_inplace_division_scalar_type(self): # Test of inplace division + othertypes, uint8data = self._create_otherdata() with warnings.catch_warnings(): warnings.simplefilter('error', DeprecationWarning) - for t in self.othertypes: - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + for t in othertypes: + x, y, xm = (_.astype(t) for _ in uint8data) x = arange(10, dtype=t) * t(2) xm = arange(10, dtype=t) * t(2) xm[2] = masked @@ -3179,10 +3196,11 @@ def test_inplace_division_scalar_type(self): def test_inplace_division_array_type(self): # Test of inplace division + othertypes, uint8data = self._create_otherdata() with warnings.catch_warnings(): warnings.simplefilter('error', DeprecationWarning) - for t in self.othertypes: - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + for t in othertypes: + x, y, xm = (_.astype(t) for _ in uint8data) m = xm.mask a = arange(10, dtype=t) a[-1] = masked @@ -3219,7 +3237,8 @@ def test_inplace_division_array_type(self): def test_inplace_pow_type(self): # Test keeping data w/ (inplace) power - for t in self.othertypes: + othertypes = self._create_otherdata()[0] + for t in othertypes: with warnings.catch_warnings(): warnings.filterwarnings("error") # Test pow on scalar @@ -3236,7 +3255,7 @@ def test_inplace_pow_type(self): class TestMaskedArrayMethods: # Test class for miscellaneous MaskedArrays methods. - def setup_method(self): + def _create_data(self): # Base data definition. x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, @@ -3266,7 +3285,7 @@ def setup_method(self): m2x = array(data=x, mask=m2) m2X = array(data=X, mask=m2.reshape(X.shape)) m2XX = array(data=XX, mask=m2.reshape(XX.shape)) - self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + return x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX def test_generic_methods(self): # Tests some MaskedArray methods. @@ -3363,7 +3382,7 @@ def test_allany_oddities(self): def test_argmax_argmin(self): # Tests argmin & argmax on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + _, _, _, _, mx, mX, _, m2x, m2X, _ = self._create_data() assert_equal(mx.argmin(), 35) assert_equal(mX.argmin(), 35) @@ -4015,8 +4034,7 @@ def test_diagonal_view(self): class TestMaskedArrayMathMethods: - - def setup_method(self): + def _create_data(self): # Base data definition. x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, @@ -4046,11 +4064,11 @@ def setup_method(self): m2x = array(data=x, mask=m2) m2X = array(data=X, mask=m2.reshape(X.shape)) m2XX = array(data=XX, mask=m2.reshape(XX.shape)) - self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + return x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX def test_cumsumprod(self): # Tests cumsum & cumprod on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + mX = self._create_data()[5] mXcp = mX.cumsum(0) assert_equal(mXcp._data, mX.filled(0).cumsum(0)) mXcp = mX.cumsum(1) @@ -4084,7 +4102,7 @@ def test_cumsumprod_with_output(self): def test_ptp(self): # Tests ptp on MaskedArrays. 
- (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + _, X, _, m, mx, mX, _, _, _, _ = self._create_data() (n, m) = X.shape assert_equal(mx.ptp(), np.ptp(mx.compressed())) rows = np.zeros(n, float) @@ -4148,7 +4166,7 @@ def test_anom(self): def test_trace(self): # Tests trace on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + _, X, _, _, _, mX, _, _, _, _ = self._create_data() mXdiag = mX.diagonal() assert_equal(mX.trace(), mX.diagonal().compressed().sum()) assert_almost_equal(mX.trace(), @@ -4163,7 +4181,7 @@ def test_trace(self): def test_dot(self): # Tests dot on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + _, _, _, _, mx, mX, mXX, _, _, _ = self._create_data() fx = mx.filled(0) r = mx.dot(mx) assert_almost_equal(r.filled(0), fx.dot(fx)) @@ -4212,7 +4230,7 @@ def test_varmean_nomask(self): def test_varstd(self): # Tests var & std on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + _, X, XX, _, _, mX, mXX, _, _, _ = self._create_data() assert_almost_equal(mX.var(axis=None), mX.compressed().var()) assert_almost_equal(mX.std(axis=None), mX.compressed().std()) assert_almost_equal(mX.std(axis=None, ddof=1), @@ -4363,7 +4381,7 @@ def test_diff_with_n_0(self): class TestMaskedArrayMathMethodsComplex: # Test class for miscellaneous MaskedArrays methods. - def setup_method(self): + def _create_data(self): # Base data definition. x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928, 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, @@ -4393,11 +4411,11 @@ def setup_method(self): m2x = array(data=x, mask=m2) m2X = array(data=X, mask=m2.reshape(X.shape)) m2XX = array(data=XX, mask=m2.reshape(XX.shape)) - self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + return x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX def test_varstd(self): # Tests var & std on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + _, X, XX, _, _, mX, mXX, _, _, _ = self._create_data() assert_almost_equal(mX.var(axis=None), mX.compressed().var()) assert_almost_equal(mX.std(axis=None), mX.compressed().std()) assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape) @@ -4416,17 +4434,6 @@ def test_varstd(self): class TestMaskedArrayFunctions: # Test class for miscellaneous functions. 
- - def setup_method(self): - x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) - y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) - m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] - xm = masked_array(x, mask=m1) - ym = masked_array(y, mask=m2) - xm.set_fill_value(1e+20) - self.info = (xm, ym) - def test_masked_where_bool(self): x = [1, 2] y = masked_where(False, x) @@ -5131,8 +5138,7 @@ def test_convolve(self): class TestMaskedFields: - - def setup_method(self): + def _create_data(self): ilist = [1, 2, 3, 4, 5] flist = [1.1, 2.2, 3.3, 4.4, 5.5] slist = ['one', 'two', 'three', 'four', 'five'] @@ -5140,11 +5146,12 @@ def setup_method(self): mdtype = [('a', bool), ('b', bool), ('c', bool)] mask = [0, 1, 0, 0, 1] base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype) - self.data = {"base": base, "mask": mask, "ddtype": ddtype, "mdtype": mdtype} + return {"base": base, "mask": mask, "ddtype": ddtype, "mdtype": mdtype} def test_set_records_masks(self): - base = self.data['base'] - mdtype = self.data['mdtype'] + data = self._create_data() + base = data['base'] + mdtype = data['mdtype'] # Set w/ nomask or masked base.mask = nomask assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) @@ -5163,7 +5170,7 @@ def test_set_records_masks(self): def test_set_record_element(self): # Check setting an element of a record) - base = self.data['base'] + base = self._create_data()['base'] (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) base[0] = (pi, pi, 'pi') @@ -5178,7 +5185,7 @@ def test_set_record_element(self): [b'pi', b'two', b'three', b'four', b'five']) def test_set_record_slice(self): - base = self.data['base'] + base = self._create_data()['base'] (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) base[:3] = (pi, pi, 'pi') @@ -5194,7 +5201,7 @@ def test_set_record_slice(self): def test_mask_element(self): "Check record access" - base = self.data['base'] + base = self._create_data()['base'] base[0] = masked for n in ('a', 'b', 'c'): @@ -5287,9 +5294,10 @@ def test_setitem_scalar(self): assert_array_equal(arr.mask, [True, False, False]) def test_element_len(self): + data = self._create_data() # check that len() works for mvoid (Github issue #576) - for rec in self.data['base']: - assert_equal(len(rec), len(self.data['ddtype'])) + for rec in data['base']: + assert_equal(len(rec), len(data['ddtype'])) class TestMaskedObjectArray: @@ -5341,31 +5349,30 @@ def test_nested_ma(self): class TestMaskedView: - - def setup_method(self): + def _create_data(self): iterator = list(zip(np.arange(10), np.random.rand(10))) data = np.array(iterator) a = array(iterator, dtype=[('a', float), ('b', float)]) a.mask[0] = (1, 0) controlmask = np.array([1] + 19 * [0], dtype=bool) - self.data = (data, a, controlmask) + return data, a, controlmask def test_view_to_nothing(self): - (data, a, controlmask) = self.data + a = self._create_data()[1] test = a.view() assert_(isinstance(test, MaskedArray)) assert_equal(test._data, a._data) assert_equal(test._mask, a._mask) def test_view_to_type(self): - (data, a, controlmask) = self.data + data, a, _ = self._create_data() test = a.view(np.ndarray) assert_(not isinstance(test, MaskedArray)) assert_equal(test, a._data) assert_equal_records(test, data.view(a.dtype).squeeze()) def test_view_to_simple_dtype(self): - (data, a, controlmask) = self.data + data, a, controlmask = self._create_data() # View globally test = a.view(float) assert_(isinstance(test, 
MaskedArray)) @@ -5373,7 +5380,7 @@ def test_view_to_simple_dtype(self): assert_equal(test.mask, controlmask) def test_view_to_flexible_dtype(self): - (data, a, controlmask) = self.data + a = self._create_data()[1] test = a.view([('A', float), ('B', float)]) assert_equal(test.mask.dtype.names, ('A', 'B')) @@ -5393,7 +5400,7 @@ def test_view_to_flexible_dtype(self): assert_equal(test['B'], a['b'][-1]) def test_view_to_subdtype(self): - (data, a, controlmask) = self.data + data, a, controlmask = self._create_data() # View globally test = a.view((float, 2)) assert_(isinstance(test, MaskedArray)) @@ -5410,7 +5417,7 @@ def test_view_to_subdtype(self): assert_equal(test, data[-1]) def test_view_to_dtype_and_type(self): - (data, a, controlmask) = self.data + data, a, _ = self._create_data() test = a.view((float, 2), np.recarray) assert_equal(test, data) @@ -5643,6 +5650,7 @@ def test_masked_array(): a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0]) assert_equal(np.argwhere(a), [[1], [3]]) + def test_masked_array_no_copy(): # check nomask array is updated in place a = np.ma.array([1, 2, 3, 4]) @@ -5657,6 +5665,7 @@ def test_masked_array_no_copy(): _ = np.ma.masked_invalid(a, copy=False) assert_array_equal(a.mask, [True, False, False, False, False]) + def test_append_masked_array(): a = np.ma.masked_equal([1, 2, 3], value=2) b = np.ma.masked_equal([4, 3, 2], value=2) @@ -5695,6 +5704,7 @@ def test_append_masked_array_along_axis(): assert_array_equal(result.data, expected.data) assert_array_equal(result.mask, expected.mask) + def test_default_fill_value_complex(): # regression test for Python 3, where 'unicode' was not defined assert_(default_fill_value(1 + 1j) == 1.e20 + 0.0j) @@ -5864,6 +5874,7 @@ def test_mask_shape_assignment_does_not_break_masked(): b.shape = (1,) assert_equal(a.mask.shape, ()) + @pytest.mark.skipif(sys.flags.optimize > 1, reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1") # noqa: E501 def test_doc_note(): From a1098a8279fb960f53c8f3cfdaa3b0fba5203a6b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 27 Aug 2025 23:14:41 +0000 Subject: [PATCH 0374/1018] MAINT: Bump actions/dependency-review-action from 4.7.2 to 4.7.3 Bumps [actions/dependency-review-action](https://github.com/actions/dependency-review-action) from 4.7.2 to 4.7.3. - [Release notes](https://github.com/actions/dependency-review-action/releases) - [Commits](https://github.com/actions/dependency-review-action/compare/bc41886e18ea39df68b1b1245f4184881938e050...595b5aeba73380359d98a5e087f648dbb0edce1b) --- updated-dependencies: - dependency-name: actions/dependency-review-action dependency-version: 4.7.3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot]
---
 .github/workflows/dependency-review.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml
index 248b69a0d939..be1c6f7c4bb1 100644
--- a/.github/workflows/dependency-review.yml
+++ b/.github/workflows/dependency-review.yml
@@ -19,6 +19,6 @@ jobs:
 with:
 persist-credentials: false
 - name: 'Dependency Review'
- uses: actions/dependency-review-action@bc41886e18ea39df68b1b1245f4184881938e050 # v4.7.2
+ uses: actions/dependency-review-action@595b5aeba73380359d98a5e087f648dbb0edce1b # v4.7.3
 with:
 allow-ghsas: GHSA-cx63-2mw6-8hw5

From 447a903b95f885760cf8833f2787f016a5dd1b30 Mon Sep 17 00:00:00 2001
From: AnkitAhlawat
Date: Thu, 28 Aug 2025 22:34:16 +0530
Subject: [PATCH 0375/1018] BUG: Fix np.unique with axis=0 and 1D input not
 collapsing NaNs with equal_nan=True #29336 (#29372)

* BUG: Fix np.unique with axis=0 and 1D input not collapsing NaNs with equal_nan=True

* Update test_arraysetops.py

corrected white space

* MAINT: Fix lint issue (W293) in test_arraysetops.py

Review comment implementation

* Fix: found failure with review comments, so reverted the changes

* Review comments implemented

* doc test update

* Apply suggestions from code review

* Apply suggestions from code review

---------

Co-authored-by: Sebastian Berg
---
 numpy/lib/_arraysetops_impl.py | 5 ++++-
 numpy/lib/tests/test_arraysetops.py | 18 ++++++++++++++++++
 2 files changed, 22 insertions(+), 1 deletion(-)

diff --git a/numpy/lib/_arraysetops_impl.py b/numpy/lib/_arraysetops_impl.py
index c4788385b924..f85a3d55aae9 100644
--- a/numpy/lib/_arraysetops_impl.py
+++ b/numpy/lib/_arraysetops_impl.py
@@ -21,6 +21,7 @@
 import numpy as np
 from numpy._core import overrides
 from numpy._core._multiarray_umath import _array_converter, _unique_hash
+from numpy.lib.array_utils import normalize_axis_index

 array_function_dispatch = functools.partial(
 overrides.array_function_dispatch, module='numpy')
@@ -290,7 +291,9 @@ def unique(ar, return_index=False, return_inverse=False,
 """
 ar = np.asanyarray(ar)

- if axis is None:
+ if axis is None or ar.ndim == 1:
+ if axis is not None:
+ normalize_axis_index(axis, ar.ndim)
 ret = _unique1d(ar, return_index, return_inverse, return_counts,
 equal_nan=equal_nan, inverse_shape=ar.shape,
 axis=None, sorted=sorted)
diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py
index b3e2bfa279b0..86d93a569d98 100644
--- a/numpy/lib/tests/test_arraysetops.py
+++ b/numpy/lib/tests/test_arraysetops.py
@@ -1256,3 +1256,21 @@ def test_unique_with_matrix(self, data, transpose, dtype):
 u = np.unique(mat)
 expected = np.unique(np.asarray(mat))
 assert_array_equal(u, expected, strict=True)
+
+ def test_unique_axis0_equal_nan_on_1d_array(self):
+ # Test Issue #29336
+ arr1d = np.array([np.nan, 0, 0, np.nan])
+ expected = np.array([0., np.nan])
+ result = np.unique(arr1d, axis=0, equal_nan=True)
+ assert_array_equal(result, expected)
+
+ def test_unique_axis_minus1_eq_on_1d_array(self):
+ arr1d = np.array([np.nan, 0, 0, np.nan])
+ expected = np.array([0., np.nan])
+ result = np.unique(arr1d, axis=-1, equal_nan=True)
+ assert_array_equal(result, expected)
+
+ def test_unique_axis_float_raises_typeerror(self):
+ arr1d = np.array([np.nan, 0, 0, np.nan])
+ with pytest.raises(TypeError, match="integer argument expected"):
+ np.unique(arr1d, axis=0.0, equal_nan=False)

From 2a04da3a996fe389127342f3b37a13b669e6c7ff Mon Sep 17 00:00:00 2001
From: Christian Bourjau
Date: Sun, 31 Aug
2025 22:16:51 +0200 Subject: [PATCH 0376/1018] Update basics.strings.rst --- doc/source/user/basics.strings.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/user/basics.strings.rst b/doc/source/user/basics.strings.rst index 460bc1fe589f..cbbaa8f6e3b3 100644 --- a/doc/source/user/basics.strings.rst +++ b/doc/source/user/basics.strings.rst @@ -109,7 +109,7 @@ that empty strings are used to populate empty arrays: >>> np.empty(3, dtype=StringDType()) array(['', '', ''], dtype=StringDType()) -Optionally, you can pass create an instance of ``StringDType`` with +Optionally, you can create an instance of ``StringDType`` with support for missing values by passing ``na_object`` as a keyword argument for the initializer: From 17632fa4472e51e24037a19650a6508452667183 Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Mon, 1 Sep 2025 10:36:30 -0400 Subject: [PATCH 0377/1018] TST: Replace xunit setup with methods (#29641) * TST: Replace xunit setup with methods * TST: Replace xunit setup with methods * STY: Reverted spacing --- numpy/ma/tests/test_extras.py | 35 +++++++-------- numpy/ma/tests/test_mrecords.py | 12 +++--- numpy/ma/tests/test_old_ma.py | 47 +++++++++++---------- numpy/ma/tests/test_subclassing.py | 20 ++++----- numpy/matrixlib/tests/test_masked_matrix.py | 12 +++--- 5 files changed, 64 insertions(+), 62 deletions(-) diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index f9deb33fc2c5..230c9d211f19 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -1314,11 +1314,11 @@ def test_object(self): class TestCov: - def setup_method(self): - self.data = array(np.random.rand(12)) + def _create_data(self): + return array(np.random.rand(12)) def test_covhelper(self): - x = self.data + x = self._create_data() # Test not mask output type is a float. 
assert_(_covhelper(x, rowvar=True)[1].dtype, np.float32) assert_(_covhelper(x, y=x, rowvar=False)[1].dtype, np.float32) @@ -1339,7 +1339,7 @@ def test_covhelper(self): def test_1d_without_missing(self): # Test cov on 1D variable w/o missing values - x = self.data + x = self._create_data() assert_almost_equal(np.cov(x), cov(x)) assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) assert_almost_equal(np.cov(x, rowvar=False, bias=True), @@ -1347,7 +1347,7 @@ def test_1d_without_missing(self): def test_2d_without_missing(self): # Test cov on 1 2D variable w/o missing values - x = self.data.reshape(3, 4) + x = self._create_data().reshape(3, 4) assert_almost_equal(np.cov(x), cov(x)) assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) assert_almost_equal(np.cov(x, rowvar=False, bias=True), @@ -1355,7 +1355,7 @@ def test_2d_without_missing(self): def test_1d_with_missing(self): # Test cov 1 1D variable w/missing values - x = self.data + x = self._create_data() x[-1] = masked x -= x.mean() nx = x.compressed() @@ -1379,7 +1379,7 @@ def test_1d_with_missing(self): def test_2d_with_missing(self): # Test cov on 2D variable w/ missing value - x = self.data + x = self._create_data() x[-1] = masked x = x.reshape(3, 4) valid = np.logical_not(getmaskarray(x)).astype(int) @@ -1401,13 +1401,14 @@ def test_2d_with_missing(self): class TestCorrcoef: - def setup_method(self): - self.data = array(np.random.rand(12)) - self.data2 = array(np.random.rand(12)) + def _create_data(self): + data = array(np.random.rand(12)) + data2 = array(np.random.rand(12)) + return data, data2 def test_ddof(self): # ddof raises DeprecationWarning - x, y = self.data, self.data2 + x, y = self._create_data() expected = np.corrcoef(x) expected2 = np.corrcoef(x, y) with pytest.warns(DeprecationWarning): @@ -1425,7 +1426,7 @@ def test_ddof(self): assert_almost_equal(corrcoef(x, y, ddof=3), expected2) def test_bias(self): - x, y = self.data, self.data2 + x, y = self._create_data() expected = np.corrcoef(x) # bias raises DeprecationWarning with pytest.warns(DeprecationWarning): @@ -1443,7 +1444,7 @@ def test_bias(self): def test_1d_without_missing(self): # Test cov on 1D variable w/o missing values - x = self.data + x = self._create_data()[0] assert_almost_equal(np.corrcoef(x), corrcoef(x)) assert_almost_equal(np.corrcoef(x, rowvar=False), corrcoef(x, rowvar=False)) @@ -1455,7 +1456,7 @@ def test_1d_without_missing(self): def test_2d_without_missing(self): # Test corrcoef on 1 2D variable w/o missing values - x = self.data.reshape(3, 4) + x = self._create_data()[0].reshape(3, 4) assert_almost_equal(np.corrcoef(x), corrcoef(x)) assert_almost_equal(np.corrcoef(x, rowvar=False), corrcoef(x, rowvar=False)) @@ -1467,7 +1468,7 @@ def test_2d_without_missing(self): def test_1d_with_missing(self): # Test corrcoef 1 1D variable w/missing values - x = self.data + x = self._create_data()[0] x[-1] = masked x -= x.mean() nx = x.compressed() @@ -1499,7 +1500,7 @@ def test_1d_with_missing(self): def test_2d_with_missing(self): # Test corrcoef on 2D variable w/ missing value - x = self.data + x = self._create_data()[0] x[-1] = masked x = x.reshape(3, 4) @@ -1519,7 +1520,7 @@ def test_2d_with_missing(self): class TestPolynomial: - # + def test_polyfit(self): # Tests polyfit # On ndarrays diff --git a/numpy/ma/tests/test_mrecords.py b/numpy/ma/tests/test_mrecords.py index e0b0db24904c..b4070df0f9a3 100644 --- a/numpy/ma/tests/test_mrecords.py +++ b/numpy/ma/tests/test_mrecords.py @@ -350,24 +350,24 @@ def test_exotic_formats(self): 
class TestView: - def setup_method(self): - (a, b) = (np.arange(10), np.random.rand(10)) + def _create_data(self): + a, b = (np.arange(10), np.random.rand(10)) ndtype = [('a', float), ('b', float)] arr = np.array(list(zip(a, b)), dtype=ndtype) mrec = fromarrays([a, b], dtype=ndtype, fill_value=(-9., -99.)) mrec.mask[3] = (False, True) - self.data = (mrec, a, b, arr) + return mrec, a, b, arr def test_view_by_itself(self): - (mrec, a, b, arr) = self.data + mrec = self._create_data()[0] test = mrec.view() assert_(isinstance(test, MaskedRecords)) assert_equal_records(test, mrec) assert_equal_records(test._mask, mrec._mask) def test_view_simple_dtype(self): - (mrec, a, b, arr) = self.data + mrec, a, b, _ = self._create_data() ntype = (float, 2) test = mrec.view(ntype) assert_(isinstance(test, ma.MaskedArray)) @@ -375,7 +375,7 @@ def test_view_simple_dtype(self): assert_(test[3, 1] is ma.masked) def test_view_flexible_type(self): - (mrec, a, b, arr) = self.data + mrec, _, _, arr = self._create_data() alttype = [('A', float), ('B', float)] test = mrec.view(alttype) assert_(isinstance(test, MaskedRecords)) diff --git a/numpy/ma/tests/test_old_ma.py b/numpy/ma/tests/test_old_ma.py index f5d6d3ec27b9..fcf02fa2dccb 100644 --- a/numpy/ma/tests/test_old_ma.py +++ b/numpy/ma/tests/test_old_ma.py @@ -97,7 +97,7 @@ def eq(v, w, msg=''): class TestMa: - def setup_method(self): + def _create_data(self): x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) a10 = 10. @@ -110,11 +110,11 @@ def setup_method(self): xf = np.where(m1, 1e+20, x) s = x.shape xm.set_fill_value(1e+20) - self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) + return x, y, a10, m1, m2, xm, ym, z, zm, xf, s def test_testBasic1d(self): # Test of basic array creation and properties in 1 dimension. - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + x, _, _, m1, _, xm, _, _, _, xf, s = self._create_data() assert_(not isMaskedArray(x)) assert_(isMaskedArray(xm)) assert_equal(shape(xm), s) @@ -129,7 +129,7 @@ def test_testBasic1d(self): @pytest.mark.parametrize("s", [(4, 3), (6, 2)]) def test_testBasic2d(self, s): # Test of basic array creation and properties in 2 dimensions. - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + x, y, _, m1, _, xm, ym, _, _, xf, s = self._create_data() x.shape = s y.shape = s xm.shape = s @@ -148,7 +148,7 @@ def test_testBasic2d(self, s): def test_testArithmetic(self): # Test of basic arithmetic. - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + x, y, a10, _, _, xm, ym, _, _, xf, s = self._create_data() a2d = array([[1, 2], [0, 4]]) a2dm = masked_array(a2d, [[0, 0], [1, 0]]) assert_(eq(a2d * a2d, a2d * a2dm)) @@ -192,7 +192,7 @@ def test_testMixedArithmetic(self): def test_testUfuncs1(self): # Test various functions such as sin, cos. - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + x, y, _, _, _, xm, ym, z, zm, _, _ = self._create_data() assert_(eq(np.cos(x), cos(xm))) assert_(eq(np.cosh(x), cosh(xm))) assert_(eq(np.sin(x), sin(xm))) @@ -238,7 +238,7 @@ def test_xtestCount(self): def test_testMinMax(self): # Test minimum and maximum. - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + x, _, _, _, _, xm, _, _, _, _, _ = self._create_data() xr = np.ravel(x) # max doesn't work if shaped xmr = ravel(xm) @@ -248,7 +248,7 @@ def test_testMinMax(self): def test_testAddSumProd(self): # Test add, sum, product. 
- (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + x, y, _, _, _, xm, ym, _, _, _, s = self._create_data() assert_(eq(np.add.reduce(x), add.reduce(x))) assert_(eq(np.add.accumulate(x), add.accumulate(x))) assert_(eq(4, sum(array(4), axis=0))) @@ -417,7 +417,7 @@ def test_testPut2(self): assert_(eq(x, [0, 1, 10, 40, 4])) def test_testMaPut(self): - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + _, _, _, _, _, _, ym, _, zm, _, _ = self._create_data() m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1] i = np.nonzero(m)[0] put(ym, i, zm) @@ -777,8 +777,9 @@ def test_assignment_by_condition_2(self): class TestUfuncs: - def setup_method(self): - self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6), + + def _create_data(self): + return (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6), array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),) def test_testUfuncRegression(self): @@ -807,7 +808,7 @@ def test_testUfuncRegression(self): except AttributeError: uf = getattr(fromnumeric, f) mf = getattr(np.ma, f) - args = self.d[:uf.nin] + args = self._create_data()[:uf.nin] with np.errstate(): if f in f_invalid_ignore: np.seterr(invalid='ignore') @@ -819,7 +820,7 @@ def test_testUfuncRegression(self): assert_(eqmask(ur.mask, mr.mask)) def test_reduce(self): - a = self.d[0] + a = self._create_data()[0] assert_(not alltrue(a, axis=0)) assert_(sometrue(a, axis=0)) assert_equal(sum(a[:3], axis=0), 0) @@ -843,7 +844,7 @@ def test_nonzero(self): class TestArrayMethods: - def setup_method(self): + def _create_data(self): x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, @@ -863,10 +864,10 @@ def setup_method(self): mX = array(data=X, mask=m.reshape(X.shape)) mXX = array(data=XX, mask=m.reshape(XX.shape)) - self.d = (x, X, XX, m, mx, mX, mXX) + return x, X, XX, m, mx, mX, mXX def test_trace(self): - (x, X, XX, m, mx, mX, mXX,) = self.d + _, X, _, _, _, mX, _ = self._create_data() mXdiag = mX.diagonal() assert_equal(mX.trace(), mX.diagonal().compressed().sum()) assert_(eq(mX.trace(), @@ -874,15 +875,15 @@ def test_trace(self): axis=0))) def test_clip(self): - (x, X, XX, m, mx, mX, mXX,) = self.d + x, _, _, _, mx, _, _ = self._create_data() clipped = mx.clip(2, 8) assert_(eq(clipped.mask, mx.mask)) assert_(eq(clipped._data, x.clip(2, 8))) assert_(eq(clipped._data, mx._data.clip(2, 8))) def test_ptp(self): - (x, X, XX, m, mx, mX, mXX,) = self.d - (n, m) = X.shape + _, X, _, m, mx, mX, _ = self._create_data() + n, m = X.shape # print(type(mx), mx.compressed()) # raise Exception() assert_equal(mx.ptp(), np.ptp(mx.compressed())) @@ -896,28 +897,28 @@ def test_ptp(self): assert_(eq(mX.ptp(1), rows)) def test_swapaxes(self): - (x, X, XX, m, mx, mX, mXX,) = self.d + _, _, _, _, _, mX, mXX = self._create_data() mXswapped = mX.swapaxes(0, 1) assert_(eq(mXswapped[-1], mX[:, -1])) mXXswapped = mXX.swapaxes(0, 2) assert_equal(mXXswapped.shape, (2, 2, 3, 3)) def test_cumprod(self): - (x, X, XX, m, mx, mX, mXX,) = self.d + mX = self._create_data()[5] mXcp = mX.cumprod(0) assert_(eq(mXcp._data, mX.filled(1).cumprod(0))) mXcp = mX.cumprod(1) assert_(eq(mXcp._data, mX.filled(1).cumprod(1))) def test_cumsum(self): - (x, X, XX, m, mx, mX, mXX,) = self.d + mX = self._create_data()[5] mXcp = mX.cumsum(0) assert_(eq(mXcp._data, mX.filled(0).cumsum(0))) mXcp = mX.cumsum(1) assert_(eq(mXcp._data, mX.filled(0).cumsum(1))) def test_varstd(self): - (x, X, XX, m, mx, mX, mXX,) = self.d + _, X, XX, _, _, mX, mXX = self._create_data() 
assert_(eq(mX.var(axis=None), mX.compressed().var())) assert_(eq(mX.std(axis=None), mX.compressed().std())) assert_(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape)) diff --git a/numpy/ma/tests/test_subclassing.py b/numpy/ma/tests/test_subclassing.py index 3364e563097e..22bece987cb7 100644 --- a/numpy/ma/tests/test_subclassing.py +++ b/numpy/ma/tests/test_subclassing.py @@ -188,10 +188,10 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): class TestSubclassing: # Test suite for masked subclasses of ndarray. - def setup_method(self): + def _create_data(self): x = np.arange(5, dtype='float') mx = msubarray(x, mask=[0, 1, 0, 0, 0]) - self.data = (x, mx) + return x, mx def test_data_subclassing(self): # Tests whether the subclass is kept. @@ -205,19 +205,19 @@ def test_data_subclassing(self): def test_maskedarray_subclassing(self): # Tests subclassing MaskedArray - (x, mx) = self.data + mx = self._create_data()[1] assert_(isinstance(mx._data, subarray)) def test_masked_unary_operations(self): # Tests masked_unary_operation - (x, mx) = self.data + x, mx = self._create_data() with np.errstate(divide='ignore'): assert_(isinstance(log(mx), msubarray)) assert_equal(log(x), np.log(x)) def test_masked_binary_operations(self): # Tests masked_binary_operation - (x, mx) = self.data + x, mx = self._create_data() # Result should be a msubarray assert_(isinstance(add(mx, mx), msubarray)) assert_(isinstance(add(mx, x), msubarray)) @@ -230,7 +230,7 @@ def test_masked_binary_operations(self): def test_masked_binary_operations2(self): # Tests domained_masked_binary_operation - (x, mx) = self.data + x, mx = self._create_data() xmx = masked_array(mx.data.__array__(), mask=mx.mask) assert_(isinstance(divide(mx, mx), msubarray)) assert_(isinstance(divide(mx, x), msubarray)) @@ -427,20 +427,20 @@ def test_array_no_inheritance(): class TestClassWrapping: # Test suite for classes that wrap MaskedArrays - def setup_method(self): + def _create_data(self): m = np.ma.masked_array([1, 3, 5], mask=[False, True, False]) wm = WrappedArray(m) - self.data = (m, wm) + return m, wm def test_masked_unary_operations(self): # Tests masked_unary_operation - (m, wm) = self.data + wm = self._create_data()[1] with np.errstate(divide='ignore'): assert_(isinstance(np.log(wm), WrappedArray)) def test_masked_binary_operations(self): # Tests masked_binary_operation - (m, wm) = self.data + m, wm = self._create_data() # Result should be a WrappedArray assert_(isinstance(np.add(wm, wm), WrappedArray)) assert_(isinstance(np.add(m, wm), WrappedArray)) diff --git a/numpy/matrixlib/tests/test_masked_matrix.py b/numpy/matrixlib/tests/test_masked_matrix.py index 3f9414ff7d30..ee3dc96b9ac5 100644 --- a/numpy/matrixlib/tests/test_masked_matrix.py +++ b/numpy/matrixlib/tests/test_masked_matrix.py @@ -182,26 +182,26 @@ def test_view(self): class TestSubclassing: # Test suite for masked subclasses of ndarray. 
- def setup_method(self): + def _create_data(self): x = np.arange(5, dtype='float') mx = MMatrix(x, mask=[0, 1, 0, 0, 0]) - self.data = (x, mx) + return x, mx def test_maskedarray_subclassing(self): # Tests subclassing MaskedArray - (x, mx) = self.data + mx = self._create_data()[1] assert_(isinstance(mx._data, np.matrix)) def test_masked_unary_operations(self): # Tests masked_unary_operation - (x, mx) = self.data + x, mx = self._create_data() with np.errstate(divide='ignore'): assert_(isinstance(log(mx), MMatrix)) assert_equal(log(x), np.log(x)) def test_masked_binary_operations(self): # Tests masked_binary_operation - (x, mx) = self.data + x, mx = self._create_data() # Result should be a MMatrix assert_(isinstance(add(mx, mx), MMatrix)) assert_(isinstance(add(mx, x), MMatrix)) @@ -215,7 +215,7 @@ def test_masked_binary_operations(self): def test_masked_binary_operations2(self): # Tests domained_masked_binary_operation - (x, mx) = self.data + x, mx = self._create_data() xmx = masked_array(mx.data.__array__(), mask=mx.mask) assert_(isinstance(divide(mx, mx), MMatrix)) assert_(isinstance(divide(mx, x), MMatrix)) From 263bb871bffec2b99a6f4ecc2b03743d797ad3e2 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 1 Sep 2025 11:33:20 -0600 Subject: [PATCH 0378/1018] BUG: avoid thread-unsafe refcount check in temp elision --- numpy/_core/src/common/pythoncapi-compat | 2 +- numpy/_core/src/multiarray/temp_elide.c | 13 +++++++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/numpy/_core/src/common/pythoncapi-compat b/numpy/_core/src/common/pythoncapi-compat index 0f1d42a10a3f..90c06a4cae55 160000 --- a/numpy/_core/src/common/pythoncapi-compat +++ b/numpy/_core/src/common/pythoncapi-compat @@ -1 +1 @@ -Subproject commit 0f1d42a10a3f594ad48894912396df31b2c2d55d +Subproject commit 90c06a4cae557bdbfa4f231a781d2b5c1a8f6d1c diff --git a/numpy/_core/src/multiarray/temp_elide.c b/numpy/_core/src/multiarray/temp_elide.c index 9236476c4213..dbd469b05654 100644 --- a/numpy/_core/src/multiarray/temp_elide.c +++ b/numpy/_core/src/multiarray/temp_elide.c @@ -5,6 +5,7 @@ #include #include "npy_config.h" +#include "npy_pycompat.h" #include "numpy/arrayobject.h" #define NPY_NUMBER_MAX(a, b) ((a) > (b) ? 
(a) : (b)) @@ -115,10 +116,14 @@ check_unique_temporary(PyObject *lhs) #if PY_VERSION_HEX == 0x030E00A7 && !defined(PYPY_VERSION) #error "NumPy is broken on CPython 3.14.0a7, please update to a newer version" #elif PY_VERSION_HEX >= 0x030E00B1 && !defined(PYPY_VERSION) + // Python 3.14 changed the semantics for reference counting temporaries // see https://github.com/python/cpython/issues/133164 return PyUnstable_Object_IsUniqueReferencedTemporary(lhs); #else - return 1; + // equivalent to Py_REFCNT(lhs) == 1 except on 3.13t + // we need to use the backport on 3.13t because + // this function was first exposed in 3.14 + return PyUnstable_Object_IsUniquelyReferenced(lhs); #endif } @@ -303,13 +308,13 @@ can_elide_temp(PyObject *olhs, PyObject *orhs, int *cannot) * array of a basic type, own its data and size larger than threshold */ PyArrayObject *alhs = (PyArrayObject *)olhs; - if (Py_REFCNT(olhs) != 1 || !PyArray_CheckExact(olhs) || + if (!check_unique_temporary(olhs) || + !PyArray_CheckExact(olhs) || !PyArray_ISNUMBER(alhs) || !PyArray_CHKFLAGS(alhs, NPY_ARRAY_OWNDATA) || !PyArray_ISWRITEABLE(alhs) || PyArray_CHKFLAGS(alhs, NPY_ARRAY_WRITEBACKIFCOPY) || - PyArray_NBYTES(alhs) < NPY_MIN_ELIDE_BYTES || - !check_unique_temporary(olhs)) { + PyArray_NBYTES(alhs) < NPY_MIN_ELIDE_BYTES) { return 0; } if (PyArray_CheckExact(orhs) || From a4eebb1c2b3711ea0a06687ab2222117545adb6f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Sep 2025 07:49:33 +0000 Subject: [PATCH 0379/1018] MAINT: Bump github/codeql-action from 3.29.11 to 3.30.0 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.11 to 3.30.0. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/3c3833e0f8c1c83d449a7478aa59c036a9165498...2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.30.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 68aead6651e6..ecdf8cfeb85e 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3.29.11 + uses: github/codeql-action/init@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v3.30.0 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3.29.11 + uses: github/codeql-action/autobuild@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v3.30.0 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun

@@ -70,6 +70,6 @@ jobs:
 # ./location_of_script_within_repo/buildscript.sh

 - name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3.29.11
+ uses: github/codeql-action/analyze@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v3.30.0
 with:
 category: "/language:${{matrix.language}}"
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index d61c6f1dd815..19acbd619437 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -50,6 +50,6 @@ jobs:

 # Upload the results to GitHub's code scanning dashboard.
 - name: "Upload to code-scanning"
- uses: github/codeql-action/upload-sarif@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v2.1.27
+ uses: github/codeql-action/upload-sarif@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v2.1.27
 with:
 sarif_file: results.sarif

From b0403816009ee50eecbb91a66acf75a446623b8f Mon Sep 17 00:00:00 2001
From: Jonathan Reimer <41432658+jonathimer@users.noreply.github.com>
Date: Tue, 2 Sep 2025 14:48:34 +0200
Subject: [PATCH 0380/1018] Add Linux Foundation Health Badge to README

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index b2d3cffc8978..7bf1e13346ce 100644
--- a/README.md
+++ b/README.md
@@ -13,6 +13,7 @@ https://anaconda.org/conda-forge/numpy)
 https://stackoverflow.com/questions/tagged/numpy)
 [![Nature Paper](https://img.shields.io/badge/DOI-10.1038%2Fs41586--020--2649--2-blue)](
 https://doi.org/10.1038/s41586-020-2649-2)
+[![LFX Health Score](https://insights.linuxfoundation.org/api/badge/health-score?project=numpy)](https://insights.linuxfoundation.org/project/numpy)
 [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/numpy/numpy/badge)](https://securityscorecards.dev/viewer/?uri=github.com/numpy/numpy)
 [![Typing](https://img.shields.io/pypi/types/numpy)](https://pypi.org/project/numpy/)

From 29caecb9d4761938aa80f9b1f01fe3b2e77a6044 Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Tue, 2 Sep 2025 11:40:43 -0600
Subject: [PATCH 0381/1018] TST: delete global env_setup fixture (#29648)

Delete the global env_setup fixture that previously set `PYTHONHASHSEED`,
presumably for historical reasons (when dict order was random).

---
 numpy/conftest.py | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/numpy/conftest.py b/numpy/conftest.py
index 1454b4af59de..f411588748e0 100644
--- a/numpy/conftest.py
+++ b/numpy/conftest.py
@@ -147,10 +147,6 @@ def check_fpu_mode(request):
 def add_np(doctest_namespace):
 doctest_namespace['np'] = numpy

-@pytest.fixture(autouse=True)
-def env_setup(monkeypatch):
- monkeypatch.setenv('PYTHONHASHSEED', '0')
-
 if HAVE_SCPDT:

From 9f42dca359c51cb421e24dcd211f6678b1ff52b0 Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Fri, 29 Aug 2025 12:05:40 -0600
Subject: [PATCH 0382/1018] ENH: Add extended sorting APIs

This PR adds new sorting APIs that can be used for extending our current
sort types. The new C-APIs do not have the *which* argument of the legacy
C-APIs; instead they take a *flags* argument that requests sort properties.
In this transition, heapsort can no longer be called directly; introsort
(quicksort) is called instead. The sort/argsort methods get new keyword
arguments used to set the flags when *kind* is not given, so *flags* is
invisible at the Python level.
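For example, at the Python level (a minimal sketch; the array values here
are hypothetical, and passing the not-yet-implemented keywords raises an
error -- see the keyword list below):

    import numpy as np

    a = np.array([3, 1, 2])

    # No keywords: flags == NPY_SORT_DEFAULT (0); the algorithm chosen
    # may be unstable.
    a.sort()

    # stable=True sets the NPY_SORT_STABLE flag under the hood.
    idx = np.array([3, 1, 2]).argsort(stable=True)

    # `kind` and the new keywords are mutually exclusive:
    # np.array([3, 1, 2]).sort(kind="quicksort", stable=True)  # ValueError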
The new keywords are:

- stable -- whether the sort should be stable, default is False,
- descending -- whether to sort in descending order, default is False,
- nanfirst -- whether NaNs sort to the beginning rather than the end,
  default is False.

The main difference here is that the keywords select by functionality,
whereas *kind* selects by algorithm. Note that descending and nanfirst are
not yet implemented, so an error is raised if either is specified.

The added C-API functions are:

- (API)PyArray_SortEx,
- (API)PyArray_ArgSortEx,

Those are the two functions that need changes depending on how we want to
implement adding new sort algorithms. They should be pretty easy to adapt
to whatever we choose to do.

The legacy C-API functions, PyArray_Sort and PyArray_ArgSort, have been
modified to call through the new C-API functions. In order to do so, the
*kind* argument has been encoded in *flags* like so:

- "quicksort" - default (0)
- "heapsort" - default (0)
- "mergesort" - stable (1)

I note that the partitioning/selection functions have not been modified;
we may want to do that at some point.
---
 doc/release/upcoming_changes/29642.c_api.rst | 14 ++
 doc/release/upcoming_changes/29642.change.rst | 4 +
 doc/source/reference/c-api/array.rst | 82 +++++--
 numpy/_core/include/numpy/ndarraytypes.h | 41 +++-
 .../src/multiarray/_multiarray_tests.c.src | 16 +-
 numpy/_core/src/multiarray/item_selection.c | 231 ++++++++++--------
 numpy/_core/src/multiarray/methods.c | 98 +++++---
 numpy/_core/tests/test_multiarray.py | 4 +-
 8 files changed, 325 insertions(+), 165 deletions(-)
 create mode 100644 doc/release/upcoming_changes/29642.c_api.rst
 create mode 100644 doc/release/upcoming_changes/29642.change.rst

diff --git a/doc/release/upcoming_changes/29642.c_api.rst b/doc/release/upcoming_changes/29642.c_api.rst
new file mode 100644
index 000000000000..0cc6b1bf4bf4
--- /dev/null
+++ b/doc/release/upcoming_changes/29642.c_api.rst
@@ -0,0 +1,14 @@
+The NPY_SORTKIND enum has been enhanced with new values
+----------------------------------------------------------
+This is of interest if you are using ``PyArray_Sort`` or ``PyArray_ArgSort``.
+We have changed the semantics of the old names in the NPY_SORTKIND enum and
+added new ones. The changes are backward compatible, and no recompilation is
+needed. The new names of interest are:
+
+* NPY_SORT_DEFAULT -- default sort (same value as NPY_QUICKSORT)
+* NPY_SORT_STABLE -- the sort must be stable (same value as NPY_MERGESORT)
+* NPY_SORT_DESCENDING -- the sort must be descending
+* NPY_SORT_NANFIRST -- NaNs sort to the beginning
+
+The semantic change is that NPY_HEAPSORT is mapped to NPY_QUICKSORT when used.
+
diff --git a/doc/release/upcoming_changes/29642.change.rst b/doc/release/upcoming_changes/29642.change.rst
new file mode 100644
index 000000000000..6a7f656740fd
--- /dev/null
+++ b/doc/release/upcoming_changes/29642.change.rst
@@ -0,0 +1,4 @@
+Sorting ``kind='heapsort'`` now maps to ``kind='quicksort'``
+------------------------------------------------------------
+It is unlikely that this change will be noticed, but if you do see a change in
+execution time or unstable argsort order, this is likely the cause.
diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst
index dc043b77f187..e09b61974128 100644
--- a/doc/source/reference/c-api/array.rst
+++ b/doc/source/reference/c-api/array.rst
@@ -2303,21 +2303,29 @@ Item selection and manipulation

..
c:function:: PyObject* PyArray_Sort(PyArrayObject* self, int axis, NPY_SORTKIND kind)

- Equivalent to :meth:`ndarray.sort` (*self*, *axis*, *kind*).
- Return an array with the items of *self* sorted along *axis*. The array
- is sorted using the algorithm denoted by *kind*, which is an integer/enum pointing
- to the type of sorting algorithms used.
-
-.. c:function:: PyObject* PyArray_ArgSort(PyArrayObject* self, int axis)
-
- Equivalent to :meth:`ndarray.argsort` (*self*, *axis*).
- Return an array of indices such that selection of these indices
- along the given ``axis`` would return a sorted version of *self*. If *self* ->descr
- is a data-type with fields defined, then self->descr->names is used
- to determine the sort order. A comparison where the first field is equal
- will use the second field and so on. To alter the sort order of a
- structured array, create a new data-type with a different order of names
- and construct a view of the array with that new data-type.
+ Equivalent to :meth:`ndarray.sort` (*self*, *axis*,
+ *kind*). Return an array with the items of *self* sorted along *axis*. The
+ array is sorted using an algorithm whose properties are specified by
+ *kind*, an integer/enum specifying the requirements of the sorting
+ algorithm used. If *self* ->descr is a data-type with fields defined, then
+ self->descr->names is used to determine the sort order. A comparison where
+ the first field is equal will use the second field and so on. To alter the
+ sort order of a structured array, create a new data-type with a different
+ order of names and construct a view of the array with that new data-type.
+
+
+.. c:function:: PyObject* PyArray_ArgSort(PyArrayObject* self, int axis, NPY_SORTKIND kind)
+
+ Equivalent to :meth:`ndarray.argsort` (*self*,
+ *axis*, *kind*). Return an array of indices such that selection of these
+ indices along the given ``axis`` would return a sorted version of *self*.
+ The array is sorted using an algorithm whose properties are specified by
+ *kind*, an integer/enum specifying the requirements of the sorting
+ algorithm used. If *self* ->descr is a data-type with fields defined, then
+ self->descr->names is used to determine the sort order. A comparison where
+ the first field is equal will use the second field and so on. To alter the
+ sort order of a structured array, create a new data-type with a different
+ order of names and construct a view of the array with that new data-type.

.. c:function:: PyObject* PyArray_LexSort(PyObject* sort_keys, int axis)

@@ -4321,7 +4329,11 @@ Enumerated Types

.. c:enum:: NPY_SORTKIND

 A special variable-type which can take on different values to indicate
- the sorting algorithm being used.
+ the sorting algorithm being used. These algorithm types have not been
+ treated strictly for some time, but rather treated as stable/not stable.
+ In NumPy 2.4 they are replaced by requirements (see below), but done in a
+ backwards compatible way. These values will continue to work, except
+ that NPY_HEAPSORT will do the same thing as NPY_QUICKSORT.

 .. c:enumerator:: NPY_QUICKSORT

 .. c:enumerator:: NPY_HEAPSORT

 .. c:enumerator:: NPY_MERGESORT

 .. c:enumerator:: NPY_STABLESORT

 .. c:enumerator:: NPY_NSORTS

- Defined to be the number of sorts. It is fixed at three by the need for
- backwards compatibility, and consequently :c:data:`NPY_MERGESORT` and
- :c:data:`NPY_STABLESORT` are aliased to each other and may refer to one
- of several stable sorting algorithms depending on the data type.
+ Defined to be the number of sorts.
It is fixed at three by the need for
+ backwards compatibility, and consequently :c:data:`NPY_MERGESORT` and
+ :c:data:`NPY_STABLESORT` are aliased to each other and may refer to one
+ of several stable sorting algorithms depending on the data type.
+
+ In NumPy 2.4 the algorithm names are replaced by requirements. You can still use
+ the old values; a recompile is not needed, but they are reinterpreted such that
+
+ * NPY_QUICKSORT and NPY_HEAPSORT -> NPY_SORT_DEFAULT
+ * NPY_MERGESORT and NPY_STABLESORT -> NPY_SORT_STABLE
+
+ .. c:enumerator:: NPY_SORT_DEFAULT
+
+ The default sort for the type. For the NumPy builtin types it may be
+ stable or not, but will be ascending and sort NaNs to the end. It
+ is usually chosen for speed and/or low memory.
+
+ .. c:enumerator:: NPY_SORT_STABLE
+
+ (Requirement) Specifies that the sort must be stable.
+
+ .. c:enumerator:: NPY_SORT_DESCENDING
+
+ (Requirement) Specifies that the sort must be in descending order.
+ This functionality is not yet implemented for any of the NumPy types
+ and cannot yet be set from the Python interface.
+
+ .. c:enumerator:: NPY_SORT_NANFIRST
+
+ (Requirement) Specifies that the sort must sort NaNs to
+ the beginning, i.e. NaNs compare less than any other value for the
+ type. This is only relevant if the type has NaN equivalents.
+ This functionality is not yet implemented for any of the NumPy types
+ and cannot yet be set from the Python interface. It may change in the
+ future.

.. c:enum:: NPY_SCALARKIND

diff --git a/numpy/_core/include/numpy/ndarraytypes.h b/numpy/_core/include/numpy/ndarraytypes.h
index baa42406ac88..3df9a4dbfcf9 100644
--- a/numpy/_core/include/numpy/ndarraytypes.h
+++ b/numpy/_core/include/numpy/ndarraytypes.h
@@ -162,18 +162,39 @@ enum NPY_TYPECHAR {
 };

 /*
- * Changing this may break Numpy API compatibility
- * due to changing offsets in PyArray_ArrFuncs, so be
- * careful. Here we have reused the mergesort slot for
- * any kind of stable sort, the actual implementation will
- * depend on the data type.
+ * Changing this may break Numpy API compatibility due to changing offsets in
+ * PyArray_ArrFuncs, so be careful. Here we have reused the mergesort slot for
+ * any kind of stable sort, the actual implementation will depend on the data
+ * type.
+ *
+ * Updated in NumPy 2.4
+ *
+ * Updated with new names denoting requirements rather than the algorithm. All
+ * the previous values are reused in a way that should be downstream
+ * compatible, but the actual algorithms used may be different than before. The
+ * new approach should be more flexible and easier to update. The idea is that
+ * NPY_SORT_STABLE | NPY_SORT_DESCENDING | NPY_SORT_NANFIRST should provide an
+ * index.
+ *
+ * Names with a leading underscore are private, and should only be used
+ * internally by NumPy.
+ *
+ * NPY_NSORTS remains the same for backwards compatibility; it should not be
+ * changed.
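+ *
+ * For example (an illustrative, hypothetical combination, not a committed
+ * API): NPY_SORT_STABLE | NPY_SORT_NANFIRST == 2 | 8 == 10 could serve as
+ * such an index.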
*/ + typedef enum { - _NPY_SORT_UNDEFINED=-1, - NPY_QUICKSORT=0, - NPY_HEAPSORT=1, - NPY_MERGESORT=2, - NPY_STABLESORT=2, + _NPY_SORT_UNDEFINED = -1, + NPY_QUICKSORT = 0, + NPY_HEAPSORT = 1, + NPY_MERGESORT = 2, + NPY_STABLESORT = 2, + // new style names + _NPY_SORT_HEAPSORT = 1, + NPY_SORT_DEFAULT = 0, + NPY_SORT_STABLE = 2, + NPY_SORT_DESCENDING = 4, + NPY_SORT_NANFIRST = 8, } NPY_SORTKIND; #define NPY_NSORTS (NPY_STABLESORT + 1) diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src index 068fabc7fee8..4b58b2789e65 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -2089,10 +2089,18 @@ run_sortkind_converter(PyObject* NPY_UNUSED(self), PyObject *args) return NULL; } switch (kind) { - case _NPY_SORT_UNDEFINED: return PyUnicode_FromString("_NPY_SORT_UNDEFINED"); - case NPY_QUICKSORT: return PyUnicode_FromString("NPY_QUICKSORT"); - case NPY_HEAPSORT: return PyUnicode_FromString("NPY_HEAPSORT"); - case NPY_STABLESORT: return PyUnicode_FromString("NPY_STABLESORT"); + case _NPY_SORT_UNDEFINED: + return PyUnicode_FromString("_NPY_SORT_UNDEFINED"); + case NPY_QUICKSORT: + return PyUnicode_FromString("NPY_QUICKSORT"); + case NPY_HEAPSORT: + return PyUnicode_FromString("NPY_HEAPSORT"); + case NPY_STABLESORT: + return PyUnicode_FromString("NPY_STABLESORT"); + default: + // the other possible values in NPY_SORTKIND can only + // be set with keywords. + break; } return PyLong_FromLong(kind); } diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index 5c036b704774..9ec70acde6dd 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -1549,56 +1549,6 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, } -/*NUMPY_API - * Sort an array in-place - */ -NPY_NO_EXPORT int -PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which) -{ - PyArray_SortFunc *sort = NULL; - int n = PyArray_NDIM(op); - - if (check_and_adjust_axis(&axis, n) < 0) { - return -1; - } - - if (PyArray_FailUnlessWriteable(op, "sort array") < 0) { - return -1; - } - - if (which < 0 || which >= NPY_NSORTS) { - PyErr_SetString(PyExc_ValueError, "not a valid sort kind"); - return -1; - } - - sort = PyDataType_GetArrFuncs(PyArray_DESCR(op))->sort[which]; - - if (sort == NULL) { - if (PyDataType_GetArrFuncs(PyArray_DESCR(op))->compare) { - switch (which) { - default: - case NPY_QUICKSORT: - sort = npy_quicksort; - break; - case NPY_HEAPSORT: - sort = npy_heapsort; - break; - case NPY_STABLESORT: - sort = npy_timsort; - break; - } - } - else { - PyErr_SetString(PyExc_TypeError, - "type does not have compare function"); - return -1; - } - } - - return _new_sortlike(op, axis, sort, NULL, NULL, 0); -} - - /* * make kth array positive, ravel and sort it */ @@ -1712,52 +1662,6 @@ PyArray_Partition(PyArrayObject *op, PyArrayObject * ktharray, int axis, } -/*NUMPY_API - * ArgSort an array - */ -NPY_NO_EXPORT PyObject * -PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND which) -{ - PyArrayObject *op2; - PyArray_ArgSortFunc *argsort = NULL; - PyObject *ret; - - argsort = PyDataType_GetArrFuncs(PyArray_DESCR(op))->argsort[which]; - - if (argsort == NULL) { - if (PyDataType_GetArrFuncs(PyArray_DESCR(op))->compare) { - switch (which) { - default: - case NPY_QUICKSORT: - argsort = npy_aquicksort; - break; - case NPY_HEAPSORT: - argsort = npy_aheapsort; - break; - case NPY_STABLESORT: 
- argsort = npy_atimsort;
- break;
- }
- }
- else {
- PyErr_SetString(PyExc_TypeError,
- "type does not have compare function");
- return NULL;
- }
- }
-
- op2 = (PyArrayObject *)PyArray_CheckAxis(op, &axis, 0);
- if (op2 == NULL) {
- return NULL;
- }
-
- ret = _new_argsortlike(op2, axis, argsort, NULL, NULL, 0);
-
- Py_DECREF(op2);
- return ret;
-}
-
-
 /*NUMPY_API
 * ArgPartition an array
 */
@@ -3154,3 +3058,138 @@ PyArray_MultiIndexSetItem(PyArrayObject *self, const npy_intp *multi_index,

 return PyArray_Pack(PyArray_DESCR(self), data, obj);
 }
+
+
+/* Table of generic sort functions for use in PyArray_Sort */
+static PyArray_SortFunc* const generic_sort_table[] = {npy_quicksort,
+ npy_heapsort,
+ npy_timsort};
+
+/*NUMPY_API
+ * Sort an array in-place with extended parameters
+ */
+NPY_NO_EXPORT int
+PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND flags)
+{
+ PyArray_SortFunc **sort_table = NULL;
+ PyArray_SortFunc *sort = NULL;
+
+ if (check_and_adjust_axis(&axis, PyArray_NDIM(op)) < 0) {
+ return -1;
+ }
+
+ if (PyArray_FailUnlessWriteable(op, "sort array") < 0) {
+ return -1;
+ }
+
+ // Zero the NPY_HEAPSORT bit, maps NPY_HEAPSORT to NPY_QUICKSORT
+ flags &= ~_NPY_SORT_HEAPSORT;
+
+ sort_table = PyDataType_GetArrFuncs(PyArray_DESCR(op))->sort;
+ switch (flags) {
+ case NPY_SORT_DEFAULT:
+ sort = sort_table[NPY_QUICKSORT];
+ break;
+ case NPY_SORT_STABLE:
+ sort = sort_table[NPY_STABLESORT];
+ break;
+ default:
+ break;
+ }
+
+ // Look for appropriate generic function if no type specific version
+ if (sort == NULL) {
+ if (!PyDataType_GetArrFuncs(PyArray_DESCR(op))->compare) {
+ PyErr_SetString(PyExc_TypeError,
+ "type does not have compare function");
+ return -1;
+ }
+ switch (flags) {
+ case NPY_SORT_DEFAULT:
+ sort = generic_sort_table[NPY_QUICKSORT];
+ break;
+ case NPY_SORT_STABLE:
+ sort = generic_sort_table[NPY_STABLESORT];
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (sort == NULL) {
+ PyErr_SetString(PyExc_TypeError,
+ "no current sort function meets the requirements");
+ return -1;
+ }
+ return _new_sortlike(op, axis, sort, NULL, NULL, 0);
+}
+
+/* Table of generic argsort functions for use by PyArray_ArgSort */
+static PyArray_ArgSortFunc* const generic_argsort_table[] = {npy_aquicksort,
+ npy_aheapsort,
+ npy_atimsort};
+
+/*NUMPY_API
+ * ArgSort an array with extended parameters
+ */
+NPY_NO_EXPORT PyObject *
+PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND flags)
+{
+ PyArrayObject *op2;
+ PyObject *ret;
+ PyArray_ArgSortFunc **argsort_table = NULL;
+ PyArray_ArgSortFunc *argsort = NULL;
+
+ // Zero the NPY_HEAPSORT bit, maps NPY_HEAPSORT to NPY_QUICKSORT
+ flags &= ~_NPY_SORT_HEAPSORT;
+
+ // Look for type specific functions
+ argsort_table = PyDataType_GetArrFuncs(PyArray_DESCR(op))->argsort;
+ switch (flags) {
+ case NPY_SORT_DEFAULT:
+ argsort = argsort_table[NPY_QUICKSORT];
+ break;
+ case NPY_SORT_STABLE:
+ argsort = argsort_table[NPY_STABLESORT];
+ break;
+ default:
+ break;
+ }
+
+ // Look for generic function if no type specific version
+ if (argsort == NULL) {
+ if (!PyDataType_GetArrFuncs(PyArray_DESCR(op))->compare) {
+ PyErr_SetString(PyExc_TypeError,
+ "type does not have compare function");
+ return NULL;
+ }
+ switch (flags) {
+ case NPY_SORT_DEFAULT:
+ argsort = generic_argsort_table[NPY_QUICKSORT];
+ break;
+ case NPY_SORT_STABLE:
+ argsort = generic_argsort_table[NPY_STABLESORT];
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (argsort == NULL) {
+ PyErr_SetString(PyExc_TypeError,
+ "no current argsort function meets the requirements");
return NULL; + } + + op2 = (PyArrayObject *)PyArray_CheckAxis(op, &axis, 0); + if (op2 == NULL) { + return NULL; + } + + ret = _new_argsortlike(op2, axis, argsort, NULL, NULL, 0); + + Py_DECREF(op2); + return ret; +} + + diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 7a9f5d83a57c..84f749acce3f 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -1154,7 +1154,6 @@ array_copy_keeporder(PyArrayObject *self, PyObject *args) return PyArray_NewCopy(self, NPY_KEEPORDER); } -#include static PyObject * array_resize(PyArrayObject *self, PyObject *args, PyObject *kwds) { @@ -1250,11 +1249,13 @@ array_sort(PyArrayObject *self, { int axis = -1; int val; - NPY_SORTKIND sortkind = _NPY_SORT_UNDEFINED; PyObject *order = NULL; PyArray_Descr *saved = NULL; PyArray_Descr *newd; + NPY_SORTKIND sortkind = _NPY_SORT_UNDEFINED; int stable = -1; + int descending = -1; + int nanfirst = -1; NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("sort", args, len_args, kwnames, @@ -1262,18 +1263,42 @@ array_sort(PyArrayObject *self, "|kind", &PyArray_SortkindConverter, &sortkind, "|order", NULL, &order, "$stable", &PyArray_OptionalBoolConverter, &stable, +// "$descending", &PyArray_OptionalBoolConverter, &descending, +// "$nanfirst", &PyArray_OptionalBoolConverter, &nanfirst, NULL, NULL, NULL) < 0) { return NULL; } - if (order == Py_None) { - order = NULL; + + if (sortkind == _NPY_SORT_UNDEFINED) { + // keywords only if sortkind not passed + sortkind = 0; + sortkind |= (stable > 0)? NPY_SORT_STABLE: 0; + sortkind |= (descending > 0)? NPY_SORT_DESCENDING: 0; + sortkind |= (nanfirst > 0)? NPY_SORT_NANFIRST: 0; } + else { + // Check that no keywords are used + int keywords_used = 0; + keywords_used |= (stable != -1); + keywords_used |= (descending != -1); + keywords_used |= (nanfirst != -1); + if (keywords_used) { + PyErr_SetString(PyExc_ValueError, + "`kind` and keyword parameters can't be provided at " + "the same time. Use only one of them."); + return NULL; + } + } + + order = (order != Py_None)? order: NULL; + // Reorder field names if required. if (order != NULL) { PyObject *new_name; PyObject *_numpy_internal; saved = PyArray_DESCR(self); if (!PyDataType_HASFIELDS(saved)) { - PyErr_SetString(PyExc_ValueError, "Cannot specify " \ + PyErr_SetString(PyExc_ValueError, + "Cannot specify " "order when the array has no fields."); return NULL; } @@ -1296,20 +1321,9 @@ array_sort(PyArrayObject *self, ((_PyArray_LegacyDescr *)newd)->names = new_name; ((PyArrayObject_fields *)self)->descr = newd; } - if (sortkind != _NPY_SORT_UNDEFINED && stable != -1) { - PyErr_SetString(PyExc_ValueError, - "`kind` and `stable` parameters can't be provided at " - "the same time. 
Use only one of them."); - return NULL; - } - else if ((sortkind == _NPY_SORT_UNDEFINED && stable == -1) || (stable == 0)) { - sortkind = NPY_QUICKSORT; - } - else if (stable == 1) { - sortkind = NPY_STABLESORT; - } val = PyArray_Sort(self, axis, sortkind); + if (order != NULL) { Py_XDECREF(PyArray_DESCR(self)); ((PyArrayObject_fields *)self)->descr = saved; @@ -1320,6 +1334,7 @@ array_sort(PyArrayObject *self, Py_RETURN_NONE; } + static PyObject * array_partition(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) @@ -1393,15 +1408,20 @@ array_partition(PyArrayObject *self, Py_RETURN_NONE; } + static PyObject * array_argsort(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { int axis = -1; + PyObject *res; + PyObject *order = NULL; + PyArray_Descr *saved = NULL; + PyArray_Descr *newd; NPY_SORTKIND sortkind = _NPY_SORT_UNDEFINED; - PyObject *order = NULL, *res; - PyArray_Descr *newd, *saved=NULL; int stable = -1; + int descending = -1; + int nanfirst = -1; NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("argsort", args, len_args, kwnames, @@ -1409,12 +1429,35 @@ array_argsort(PyArrayObject *self, "|kind", &PyArray_SortkindConverter, &sortkind, "|order", NULL, &order, "$stable", &PyArray_OptionalBoolConverter, &stable, +// "$descending", &PyArray_OptionalBoolConverter, &descending, +// "$nanfirst", &PyArray_OptionalBoolConverter, &nanfirst, NULL, NULL, NULL) < 0) { return NULL; } - if (order == Py_None) { - order = NULL; + + if (sortkind == _NPY_SORT_UNDEFINED) { + // keywords only if sortkind not passed + sortkind = 0; + sortkind |= (stable > 0)? NPY_SORT_STABLE: 0; + sortkind |= (descending > 0)? NPY_SORT_DESCENDING: 0; + sortkind |= (nanfirst > 0)? NPY_SORT_NANFIRST: 0; + } + else { + // Check that no keywords are used + int keywords_used = 0; + keywords_used |= (stable != -1); + keywords_used |= (descending != -1); + keywords_used |= (nanfirst != -1); + if (keywords_used) { + PyErr_SetString(PyExc_ValueError, + "`kind` and keyword parameters can't be provided at " + "the same time. Use only one of them."); + return NULL; + } } + + // Reorder field names if required. + order = (order != Py_None)? order: NULL; if (order != NULL) { PyObject *new_name; PyObject *_numpy_internal; @@ -1443,20 +1486,9 @@ array_argsort(PyArrayObject *self, ((_PyArray_LegacyDescr *)newd)->names = new_name; ((PyArrayObject_fields *)self)->descr = newd; } - if (sortkind != _NPY_SORT_UNDEFINED && stable != -1) { - PyErr_SetString(PyExc_ValueError, - "`kind` and `stable` parameters can't be provided at " - "the same time. 
Use only one of them."); - return NULL; - } - else if ((sortkind == _NPY_SORT_UNDEFINED && stable == -1) || (stable == 0)) { - sortkind = NPY_QUICKSORT; - } - else if (stable == 1) { - sortkind = NPY_STABLESORT; - } res = PyArray_ArgSort(self, axis, sortkind); + if (order != NULL) { Py_XDECREF(PyArray_DESCR(self)); ((PyArrayObject_fields *)self)->descr = saved; diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index d5aac78c4a4d..8e50e71c3741 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -2211,7 +2211,7 @@ def test_sort(self): with assert_raises_regex( ValueError, - "kind` and `stable` parameters can't be provided at the same time" + "`kind` and keyword parameters can't be provided at the same time" ): np.sort(a, kind="stable", stable=True) @@ -2654,7 +2654,7 @@ def test_argsort(self): with assert_raises_regex( ValueError, - "kind` and `stable` parameters can't be provided at the same time" + "`kind` and keyword parameters can't be provided at the same time" ): np.argsort(a, kind="stable", stable=True) From 22be8c5db4d5dec8f3ab336d729695ebcee39601 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 2 Sep 2025 16:16:39 -0600 Subject: [PATCH 0383/1018] DOC: Documentation improvements from review. --- doc/release/upcoming_changes/29642.c_api.rst | 2 +- doc/release/upcoming_changes/29642.change.rst | 5 +- doc/source/reference/c-api/array.rst | 47 +++++++++++-------- numpy/_core/include/numpy/ndarraytypes.h | 11 ++--- 4 files changed, 37 insertions(+), 28 deletions(-) diff --git a/doc/release/upcoming_changes/29642.c_api.rst b/doc/release/upcoming_changes/29642.c_api.rst index 0cc6b1bf4bf4..0a28e32e3530 100644 --- a/doc/release/upcoming_changes/29642.c_api.rst +++ b/doc/release/upcoming_changes/29642.c_api.rst @@ -11,4 +11,4 @@ needed. The new names of interest are: * NPY_SORT_NANFIRST -- NaNs sort to the beginning The semantic change is that NPY_HEAPSORT is mapped to NPY_QUICKSORT when used. - +Note that NPY_SORT_DESCENDING and NPY_SORT_NANFIRST have yet to be implemented. diff --git a/doc/release/upcoming_changes/29642.change.rst b/doc/release/upcoming_changes/29642.change.rst index 6a7f656740fd..4a1706e00bab 100644 --- a/doc/release/upcoming_changes/29642.change.rst +++ b/doc/release/upcoming_changes/29642.change.rst @@ -1,4 +1,7 @@ Sorting ``kind='heapsort'`` now maps to ``kind='quicksort'`` ------------------------------------------------------------ It is unlikely that this change will be noticed, but if you do see a change in -execution time or unstable argsort order, this is likely the cause. +execution time or unstable argsort order, that is likely the cause. Please let +us know if there is a performance regression. Congratulate us if it is +improved :) + diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index e09b61974128..b5c1a08e2abf 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -2303,29 +2303,36 @@ Item selection and manipulation .. c:function:: PyObject* PyArray_Sort(PyArrayObject* self, int axis, NPY_SORTKIND kind) - Equivalent to :meth:`ndarray.sort` (*self*, *axis*, - *kind*). Return an array with the items of *self* sorted along *axis*. The - array is sorted using an algorithm whose properties are specified by - *kind*, an integer/enum specifying the reguirements of the sorting - algorithm used. 
If *self* ->descr is a data-type with fields defined, then
-   self->descr->names is used to determine the sort order. A comparison where
-   the first field is equal will use the second field and so on. To alter the
-   sort order of a structured array, create a new data-type with a different
-   order of names and construct a view of the array with that new data-type.
-
+   Return an array with the items of ``self`` sorted along ``axis``. The array
+   is sorted using an algorithm whose properties are specified by the value of
+   ``kind``, an integer/enum specifying the requirements of the sorting
+   algorithm used. If ``self->descr`` is a data-type with fields defined,
+   then ``self->descr->names`` is used to determine the sort order. A comparison
+   where the first field is equal will use the second field and so on. To
+   alter the sort order of a structured array, create a new data-type with a
+   different order of names and construct a view of the array with that new
+   data-type.
+
+   This is the C level function called by the ndarray method
+   :meth:`ndarray.sort`, though with a different meaning
+   of ``kind`` -- see ``NPY_SORTKIND`` below.

 .. c:function:: PyObject* PyArray_ArgSort(PyArrayObject* self, int axis, NPY_SORTKIND kind)

-   Equivalent to :meth:`ndarray.argsort` (*self*,
-   *axis*, *kind*). Return an array of indices such that selection of these
-   indices along the given ``axis`` would return a sorted version of *self*.
-   The array is sorted using an algorithm whose properties are specified by
-   *kind*, an integer/enum specifying the reguirements of the sorting
-   algorithm used. If *self* ->descr is a data-type with fields defined, then
-   self->descr->names is used to determine the sort order. A comparison where
-   the first field is equal will use the second field and so on. To alter the
-   sort order of a structured array, create a new data-type with a different
-   order of names and construct a view of the array with that new data-type.
+   Return an array of indices such that selection of these indices along the
+   given ``axis`` would return a sorted version of ``self``. The array is
+   sorted using an algorithm whose properties are specified by ``kind``, an
+   integer/enum specifying the requirements of the sorting algorithm used. If
+   ``self->descr`` is a data-type with fields defined, then
+   ``self->descr->names`` is used to determine the sort order. A comparison
+   where the first field is equal will use the second field and so on. To
+   alter the sort order of a structured array, create a new data-type with a
+   different order of names and construct a view of the array with that new
+   data-type.
+
+   This is the C level function called by the ndarray method
+   :meth:`ndarray.argsort`, though with a different
+   meaning of ``kind`` -- see ``NPY_SORTKIND`` below.

 .. c:function:: PyObject* PyArray_LexSort(PyObject* sort_keys, int axis)

diff --git a/numpy/_core/include/numpy/ndarraytypes.h b/numpy/_core/include/numpy/ndarraytypes.h
index 3df9a4dbfcf9..01fe231927f4 100644
--- a/numpy/_core/include/numpy/ndarraytypes.h
+++ b/numpy/_core/include/numpy/ndarraytypes.h
@@ -169,12 +169,11 @@ enum NPY_TYPECHAR {
  *
  * Updated in NumPy 2.4
  *
- * Updated with new names denoting requirements rather than the algorithm. All
- * the previous values are reused in a way that should be downstream
- * compatible, but the actual algorithms used may be different than before. The
- * new approach should be more flexible and easier to update. 
The idea is that - * NPY_SORT_STABLE | NPY_SORT_DESCENDING | NPY_SORT_NANFIRST should provide an - * index. + * Updated with new names denoting requirements rather than specifying a + * particular algorithm. All the previous values are reused in a way that + * should be downstream compatible, but the actual algorithms used may be + * different than before. The new approach should be more flexible and easier + * to update. * * Names with a leading underscore are private, and should only be used * internally by NumPy. From a1fa902f818af4bdd0cdbd4fce85c072cc210760 Mon Sep 17 00:00:00 2001 From: ixgbe <1113177880@qq.com> Date: Wed, 3 Sep 2025 09:34:33 +0800 Subject: [PATCH 0384/1018] ENH, SIMD: Optimize the logical implementation based on Highway Wrapper --- .../src/umath/loops_logical.dispatch.cpp | 231 +++++++++--------- 1 file changed, 118 insertions(+), 113 deletions(-) diff --git a/numpy/_core/src/umath/loops_logical.dispatch.cpp b/numpy/_core/src/umath/loops_logical.dispatch.cpp index ec17f90154c8..a6f174218bd1 100644 --- a/numpy/_core/src/umath/loops_logical.dispatch.cpp +++ b/numpy/_core/src/umath/loops_logical.dispatch.cpp @@ -4,17 +4,16 @@ #include "lowlevel_strided_loops.h" #include "fast_loop_macros.h" #include - +#include "simd/simd.hpp" #include -namespace hn = hwy::HWY_NAMESPACE; struct logical_and_t {}; struct logical_or_t {}; struct absolute_t {}; struct logical_not_t {}; -const hn::ScalableTag u8; -using vec_u8 = hn::Vec; +namespace { +using namespace np::simd; /******************************************************************************* ** Defining the SIMD kernels @@ -24,86 +23,84 @@ using vec_u8 = hn::Vec; * consistent, should not be required if bool is used correctly everywhere but * you never know */ - -HWY_INLINE HWY_ATTR vec_u8 byte_to_true(vec_u8 v) +#if NPY_HWY +HWY_INLINE HWY_ATTR Vec byte_to_true(Vec v) { - return hn::IfThenZeroElse(hn::Eq(v, hn::Zero(u8)), hn::Set(u8, 1)); + return hn::IfThenZeroElse(hn::Eq(v, Zero()), Set(uint8_t(1))); } + /* * convert mask vector (0xff/0x00) to boolean true. similar to byte_to_true(), * but we've already got a mask and can skip negation. */ -HWY_INLINE HWY_ATTR vec_u8 mask_to_true(vec_u8 v) +HWY_INLINE HWY_ATTR Vec mask_to_true(Vec v) { - const vec_u8 truemask = hn::Set(u8, 1 == 1); - return hn::And(truemask, v); + return hn::IfThenElseZero(hn::Ne(v, Zero()), Set(uint8_t(1))); } + /* * For logical_and, we have to be careful to handle non-bool inputs where * bits of each operand might not overlap. Example: a = 0x01, b = 0x80 * Both evaluate to boolean true, however, a & b is false. Return value * should be consistent with byte_to_true(). */ -HWY_INLINE HWY_ATTR vec_u8 simd_logical_and_u8(vec_u8 a, vec_u8 b) +HWY_INLINE HWY_ATTR Vec simd_logical_and_u8(Vec a, Vec b) { return hn::IfThenZeroElse( - hn::Eq(hn::Zero(u8), hn::Min(a, b)), - hn::Set(u8, 1) + hn::Eq(Zero(), hn::Min(a, b)), + Set(uint8_t(1)) ); } /* * We don't really need the following, but it simplifies the templating code * below since it is paired with simd_logical_and_u8() above. 
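 *
 * Worked example of the And/Min distinction for non-bool bytes: with
 * a = 0x01 and b = 0x80, a & b == 0x00 even though both operands are
 * logically true, while Min(a, b) == 0x01 is nonzero, so the Min-based
 * simd_logical_and_u8() above still yields the canonical true value 1.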
*/ -HWY_INLINE HWY_ATTR vec_u8 simd_logical_or_u8(vec_u8 a, vec_u8 b) +HWY_INLINE HWY_ATTR Vec simd_logical_or_u8(Vec a, Vec b) { - vec_u8 r = hn::Or(a, b); + auto r = hn::Or(a, b); return byte_to_true(r); } -HWY_INLINE HWY_ATTR npy_bool simd_any_u8(vec_u8 v) +HWY_INLINE HWY_ATTR npy_bool simd_any_u8(Vec v) { - return hn::ReduceMax(u8, v) != 0; + return hn::ReduceMax(_Tag(), v) != 0; } -HWY_INLINE HWY_ATTR npy_bool simd_all_u8(vec_u8 v) +HWY_INLINE HWY_ATTR npy_bool simd_all_u8(Vec v) { - return hn::ReduceMin(u8, v) != 0; + return hn::ReduceMin(_Tag(), v) != 0; } +#endif template struct BinaryLogicalTraits; template<> struct BinaryLogicalTraits { - static constexpr bool is_and = false; - static constexpr auto scalar_op = std::logical_or{}; + static constexpr bool is_and = false; + static constexpr auto scalar_op = std::logical_or{}; static constexpr auto scalar_cmp = std::not_equal_to{}; +#if NPY_HWY static constexpr auto anyall = simd_any_u8; - HWY_INLINE HWY_ATTR vec_u8 simd_op(vec_u8 a, vec_u8 b) { - return simd_logical_or_u8(a, b); - } - - HWY_INLINE HWY_ATTR vec_u8 reduce(vec_u8 a, vec_u8 b) { + static HWY_INLINE HWY_ATTR Vec simd_op(Vec a, Vec b) { return simd_logical_or_u8(a, b); } +#endif }; template<> struct BinaryLogicalTraits { - static constexpr bool is_and = true; - static constexpr auto scalar_op = std::logical_and{}; + static constexpr bool is_and = true; + static constexpr auto scalar_op = std::logical_and{}; static constexpr auto scalar_cmp = std::equal_to{}; +#if NPY_HWY static constexpr auto anyall = simd_all_u8; - HWY_INLINE HWY_ATTR vec_u8 simd_op(vec_u8 a, vec_u8 b) { - return simd_logical_and_u8(a, b); - } - - HWY_INLINE HWY_ATTR vec_u8 reduce(vec_u8 a, vec_u8 b) { + static HWY_INLINE HWY_ATTR Vec simd_op(Vec a, Vec b) { return simd_logical_and_u8(a, b); } +#endif }; template @@ -111,52 +108,54 @@ struct UnaryLogicalTraits; template<> struct UnaryLogicalTraits { - static constexpr bool is_not = true; + static constexpr bool is_not = true; static constexpr auto scalar_op = std::equal_to{}; - HWY_INLINE HWY_ATTR vec_u8 simd_op(vec_u8 v) { - const vec_u8 zero = hn::Zero(u8); - return mask_to_true(hn::VecFromMask(u8, hn::Eq(v, zero))); +#if NPY_HWY + static HWY_INLINE HWY_ATTR Vec simd_op(Vec v) { + const auto zero = Zero(); + return mask_to_true(hn::VecFromMask(_Tag(), hn::Eq(v, zero))); } +#endif }; template<> struct UnaryLogicalTraits { - static constexpr bool is_not = false; + static constexpr bool is_not = false; static constexpr auto scalar_op = std::not_equal_to{}; - HWY_INLINE HWY_ATTR vec_u8 simd_op(vec_u8 v) { +#if NPY_HWY + static HWY_INLINE HWY_ATTR Vec simd_op(Vec v) { return byte_to_true(v); } +#endif }; - +#if NPY_HWY template HWY_ATTR SIMD_MSVC_NOINLINE static void simd_binary_logical_BOOL(npy_bool* op, npy_bool* ip1, npy_bool* ip2, npy_intp len) { using Traits = BinaryLogicalTraits; - Traits traits; constexpr int UNROLL = 16; - const int vstep = hn::Lanes(u8); + HWY_LANES_CONSTEXPR int vstep = Lanes(); const int wstep = vstep * UNROLL; // Unrolled vectors loop for (; len >= wstep; len -= wstep, ip1 += wstep, ip2 += wstep, op += wstep) { - for(int i = 0; i < UNROLL; i++) { - vec_u8 a = hn::LoadU(u8, ip1 + vstep * i); - vec_u8 b = hn::LoadU(u8, ip2 + vstep * i); - vec_u8 r = traits.simd_op(a, b); - hn::StoreU(r, u8, op + vstep * i); + auto a = LoadU(ip1 + vstep * i); + auto b = LoadU(ip2 + vstep * i); + auto r = Traits::simd_op(a, b); + StoreU(r, op + vstep * i); } } // Single vectors loop for (; len >= vstep; len -= vstep, ip1 += vstep, ip2 += vstep, op 
+= vstep) { - vec_u8 a = hn::LoadU(u8, ip1); - vec_u8 b = hn::LoadU(u8, ip2); - vec_u8 r = traits.simd_op(a, b); - hn::StoreU(r, u8, op); + auto a = LoadU(ip1); + auto b = LoadU(ip2); + auto r = Traits::simd_op(a, b); + StoreU(r, op); } // Scalar loop to finish off @@ -169,9 +168,8 @@ template HWY_ATTR SIMD_MSVC_NOINLINE static void simd_reduce_logical_BOOL(npy_bool* op, npy_bool* ip, npy_intp len) { using Traits = BinaryLogicalTraits; - Traits traits; constexpr int UNROLL = 8; - const int vstep = hn::Lanes(u8); + HWY_LANES_CONSTEXPR int vstep = Lanes(); const int wstep = vstep * UNROLL; // Unrolled vectors loop @@ -179,24 +177,24 @@ static void simd_reduce_logical_BOOL(npy_bool* op, npy_bool* ip, npy_intp len) { #if defined(NPY_HAVE_SSE2) NPY_PREFETCH(reinterpret_cast(ip + wstep), 0, 3); #endif - vec_u8 v0 = hn::LoadU(u8, ip); - vec_u8 v1 = hn::LoadU(u8, ip + vstep); - vec_u8 v2 = hn::LoadU(u8, ip + vstep * 2); - vec_u8 v3 = hn::LoadU(u8, ip + vstep * 3); - vec_u8 v4 = hn::LoadU(u8, ip + vstep * 4); - vec_u8 v5 = hn::LoadU(u8, ip + vstep * 5); - vec_u8 v6 = hn::LoadU(u8, ip + vstep * 6); - vec_u8 v7 = hn::LoadU(u8, ip + vstep * 7); + auto v0 = LoadU(ip); + auto v1 = LoadU(ip + vstep); + auto v2 = LoadU(ip + vstep * 2); + auto v3 = LoadU(ip + vstep * 3); + auto v4 = LoadU(ip + vstep * 4); + auto v5 = LoadU(ip + vstep * 5); + auto v6 = LoadU(ip + vstep * 6); + auto v7 = LoadU(ip + vstep * 7); - vec_u8 m01 = traits.reduce(v0, v1); - vec_u8 m23 = traits.reduce(v2, v3); - vec_u8 m45 = traits.reduce(v4, v5); - vec_u8 m67 = traits.reduce(v6, v7); + auto m01 = Traits::simd_op(v0, v1); + auto m23 = Traits::simd_op(v2, v3); + auto m45 = Traits::simd_op(v4, v5); + auto m67 = Traits::simd_op(v6, v7); - vec_u8 m0123 = traits.reduce(m01, m23); - vec_u8 m4567 = traits.reduce(m45, m67); + auto m0123 = Traits::simd_op(m01, m23); + auto m4567 = Traits::simd_op(m45, m67); - vec_u8 mv = traits.reduce(m0123, m4567); + auto mv = Traits::simd_op(m0123, m4567); if(Traits::anyall(mv) == !Traits::is_and) { *op = !Traits::is_and; @@ -206,7 +204,7 @@ static void simd_reduce_logical_BOOL(npy_bool* op, npy_bool* ip, npy_intp len) { // Single vectors loop for (; len >= vstep; len -= vstep, ip += vstep) { - vec_u8 v = hn::LoadU(u8, ip); + auto v = LoadU(ip); if(Traits::anyall(v) == !Traits::is_and) { *op = !Traits::is_and; return; @@ -226,25 +224,24 @@ template HWY_ATTR SIMD_MSVC_NOINLINE static void simd_unary_logical_BOOL(npy_bool* op, npy_bool* ip, npy_intp len) { using Traits = UnaryLogicalTraits; - Traits traits; constexpr int UNROLL = 16; - const int vstep = hn::Lanes(u8); + HWY_LANES_CONSTEXPR int vstep = Lanes(); const int wstep = vstep * UNROLL; // Unrolled vectors loop for (; len >= wstep; len -= wstep, ip += wstep, op += wstep) { for(int i = 0; i < UNROLL; i++) { - vec_u8 v = hn::LoadU(u8, ip + vstep * i); - vec_u8 r = traits.simd_op(v); - hn::StoreU(r, u8, op + vstep * i); + auto v = LoadU(ip + vstep * i); + auto r = Traits::simd_op(v); + StoreU(r, op + vstep * i); } } // Single vectors loop for (; len >= vstep; len -= vstep, ip += vstep, op += vstep) { - vec_u8 v = hn::LoadU(u8, ip); - vec_u8 r = traits.simd_op(v); - hn::StoreU(r, u8, op); + auto v = LoadU(ip); + auto r = Traits::simd_op(v); + StoreU(r, op); } // Scalar loop to finish off @@ -253,6 +250,9 @@ static void simd_unary_logical_BOOL(npy_bool* op, npy_bool* ip, npy_intp len) { } } +#endif //NPY_HWY +} // namespace anonymous + /******************************************************************************* ** Defining ufunc inner functions 
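** Each BOOL inner loop below first tries the SIMD kernel through the
** run_*_simd_logical_BOOL helpers (only when NPY_HWY is enabled and the
** strides are blockable) and otherwise falls back to a plain scalar loop.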
******************************************************************************/ @@ -260,12 +260,9 @@ template static NPY_INLINE int run_binary_simd_logical_BOOL( char** args, npy_intp const* dimensions, npy_intp const* steps) { -#if NPY_SIMD - if (sizeof(npy_bool) == 1 && - IS_BLOCKABLE_BINARY(sizeof(npy_bool), NPY_SIMD_WIDTH)) { - simd_binary_logical_BOOL((npy_bool*)args[2], (npy_bool*)args[0], - (npy_bool*)args[1], dimensions[0] - ); +#if NPY_HWY + if (sizeof(npy_bool) == 1 && IS_BLOCKABLE_BINARY(sizeof(npy_bool), kMaxLanes)) { + simd_binary_logical_BOOL((npy_bool*)args[2], (npy_bool*)args[0], (npy_bool*)args[1], dimensions[0]); return 1; } #endif @@ -276,12 +273,9 @@ template static NPY_INLINE int run_reduce_simd_logical_BOOL( char** args, npy_intp const* dimensions, npy_intp const* steps) { -#if NPY_SIMD - if (sizeof(npy_bool) == 1 && - IS_BLOCKABLE_REDUCE(sizeof(npy_bool), NPY_SIMD_WIDTH)) { - simd_reduce_logical_BOOL((npy_bool*)args[0], (npy_bool*)args[1], - dimensions[0] - ); +#if NPY_HWY + if (sizeof(npy_bool) == 1 && IS_BLOCKABLE_REDUCE(sizeof(npy_bool), kMaxLanes)) { + simd_reduce_logical_BOOL((npy_bool*)args[0], (npy_bool*)args[1], dimensions[0]); return 1; } #endif @@ -292,9 +286,8 @@ template static NPY_INLINE int run_unary_simd_logical_BOOL( char** args, npy_intp const* dimensions, npy_intp const* steps) { -#if NPY_SIMD - if (sizeof(npy_bool) == 1 && - IS_BLOCKABLE_UNARY(sizeof(npy_bool), NPY_SIMD_WIDTH)) { +#if NPY_HWY + if (sizeof(npy_bool) == 1 && IS_BLOCKABLE_UNARY(sizeof(npy_bool), kMaxLanes)) { simd_unary_logical_BOOL((npy_bool*)args[1], (npy_bool*)args[0], dimensions[0]); return 1; } @@ -304,24 +297,34 @@ static NPY_INLINE int run_unary_simd_logical_BOOL( template void BOOL_binary_func_wrapper(char** args, npy_intp const* dimensions, npy_intp const* steps) { + char *ip1 = args[0], *ip2 = args[1], *op1 = args[2]; + npy_intp is1 = steps[0], is2 = steps[1], os1 = steps[2]; + npy_intp n = dimensions[0]; using Traits = BinaryLogicalTraits; - + +#if NPY_HWY if (run_binary_simd_logical_BOOL(args, dimensions, steps)) { return; } - else { - BINARY_LOOP { - const npy_bool in1 = *(npy_bool*)ip1; - const npy_bool in2 = *(npy_bool*)ip2; - *((npy_bool*)op1) = Traits::scalar_op(in1, in2); - } +#endif + + for(npy_intp i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += os1) { + const npy_bool in1 = *(npy_bool*)ip1; + const npy_bool in2 = *(npy_bool*)ip2; + *((npy_bool*)op1) = Traits::scalar_op(in1, in2); } } template void BOOL_binary_reduce_wrapper(char** args, npy_intp const* dimensions, npy_intp const* steps) { + char *iop1 = args[0]; + npy_bool io1 = *(npy_bool *)iop1; + char *ip2 = args[1]; + npy_intp is2 = steps[1]; + npy_intp n = dimensions[0]; + npy_intp i; using Traits = BinaryLogicalTraits; -#if NPY_SIMD +#if NPY_HWY if (run_reduce_simd_logical_BOOL(args, dimensions, steps)) { return; } @@ -343,7 +346,6 @@ void BOOL_binary_reduce_wrapper(char** args, npy_intp const* dimensions, npy_int * with glibc >= 2.12 and memchr can only check for equal 1 */ static const npy_bool zero[4096]={0}; /* zero by C standard */ - npy_uintp i, n = dimensions[0]; for (i = 0; !*op && i < n - (n % sizeof(zero)); i += sizeof(zero)) { *op = memcmp(&args[1][i], zero, sizeof(zero)) != 0; @@ -355,14 +357,14 @@ void BOOL_binary_reduce_wrapper(char** args, npy_intp const* dimensions, npy_int return; } #endif - else { - BINARY_REDUCE_LOOP(npy_bool) { - const npy_bool in2 = *(npy_bool*)ip2; - io1 = Traits::scalar_op(io1, in2); - if ((Traits::is_and && !io1) || (!Traits::is_and && io1)) break; - } - 
*((npy_bool*)iop1) = io1; + + for(i = 0; i < n; i++, ip2 += is2) { + const npy_bool in2 = *(npy_bool*)ip2; + io1 = Traits::scalar_op(io1, in2); + if ((Traits::is_and && !io1) || (!Traits::is_and && io1)) + break; } + *((npy_bool*)iop1) = io1; } template @@ -390,15 +392,18 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(BOOL_logical_or)( template void BOOL_func_wrapper(char** args, npy_intp const* dimensions, npy_intp const* steps) { + char *ip1 = args[0], *op1 = args[1]; + npy_intp is1 = steps[0], os1 = steps[1]; + npy_intp n = dimensions[0]; using Traits = UnaryLogicalTraits; + if (run_unary_simd_logical_BOOL(args, dimensions, steps)) { return; } - else { - UNARY_LOOP { - npy_bool in1 = *(npy_bool*)ip1; - *((npy_bool*)op1) = Traits::scalar_op(in1, 0); - } + + for(npy_intp i = 0; i < n; i++, ip1 += is1, op1 += os1) { + npy_bool in1 = *(npy_bool*)ip1; + *((npy_bool*)op1) = Traits::scalar_op(in1, 0); } } From df835f4a25e2ef6dba689198c62f3f4747e2f016 Mon Sep 17 00:00:00 2001 From: Tobias Markus Date: Wed, 3 Sep 2025 15:34:02 +0200 Subject: [PATCH 0385/1018] BLD: Add missing include (#29662) Fixes homebrew builds, see https://github.com/orgs/Homebrew/discussions/6386#discussioncomment-14293523 --- numpy/_core/src/multiarray/unique.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/_core/src/multiarray/unique.cpp b/numpy/_core/src/multiarray/unique.cpp index 636f1ef0137c..d31fed6f6908 100644 --- a/numpy/_core/src/multiarray/unique.cpp +++ b/numpy/_core/src/multiarray/unique.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include From 1b00896c55ad5f2ce9dcab3f6c8af39691c87bdf Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Wed, 3 Sep 2025 15:21:31 -0400 Subject: [PATCH 0386/1018] TST: Replace xunit setup with methods (#29657) * TST: Replace xunit setup with methods * STY: Change var declarations --- numpy/lib/tests/test_function_base.py | 43 +++--- numpy/random/tests/test_generator_mt19937.py | 99 +++++++------ .../test_generator_mt19937_regressions.py | 64 +++++---- numpy/random/tests/test_random.py | 111 +++++++------- numpy/random/tests/test_randomstate.py | 136 +++++++++--------- 5 files changed, 241 insertions(+), 212 deletions(-) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 37f8bbadc31a..ce293ec2f256 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -974,18 +974,20 @@ def test_append(self): class TestDelete: - def setup_method(self): - self.a = np.arange(5) - self.nd_a = np.arange(5).repeat(2).reshape(1, 5, 2) + def _create_arrays(self): + a = np.arange(5) + nd_a = np.arange(5).repeat(2).reshape(1, 5, 2) + return a, nd_a def _check_inverse_of_slicing(self, indices): - a_del = delete(self.a, indices) - nd_a_del = delete(self.nd_a, indices, axis=1) + a, nd_a = self._create_arrays() + a_del = delete(a, indices) + nd_a_del = delete(nd_a, indices, axis=1) msg = f'Delete failed for obj: {indices!r}' - assert_array_equal(setxor1d(a_del, self.a[indices, ]), self.a, + assert_array_equal(setxor1d(a_del, a[indices, ]), a, err_msg=msg) - xor = setxor1d(nd_a_del[0, :, 0], self.nd_a[0, indices, 0]) - assert_array_equal(xor, self.nd_a[0, :, 0], err_msg=msg) + xor = setxor1d(nd_a_del[0, :, 0], nd_a[0, indices, 0]) + assert_array_equal(xor, nd_a[0, :, 0], err_msg=msg) def test_slices(self): lims = [-6, -2, 0, 1, 2, 4, 5] @@ -997,11 +999,12 @@ def test_slices(self): self._check_inverse_of_slicing(s) def test_fancy(self): + a, _ = 
self._create_arrays() self._check_inverse_of_slicing(np.array([[0, 1], [2, 1]])) with pytest.raises(IndexError): - delete(self.a, [100]) + delete(a, [100]) with pytest.raises(IndexError): - delete(self.a, [-100]) + delete(a, [-100]) self._check_inverse_of_slicing([0, -1, 2, 2]) @@ -1009,13 +1012,13 @@ def test_fancy(self): # not legal, indexing with these would change the dimension with pytest.raises(ValueError): - delete(self.a, True) + delete(a, True) with pytest.raises(ValueError): - delete(self.a, False) + delete(a, False) # not enough items with pytest.raises(ValueError): - delete(self.a, [False] * 4) + delete(a, [False] * 4) def test_single(self): self._check_inverse_of_slicing(0) @@ -1031,7 +1034,9 @@ def test_0d(self): def test_subclass(self): class SubClass(np.ndarray): pass - a = self.a.view(SubClass) + + a_orig, _ = self._create_arrays() + a = a_orig.view(SubClass) assert_(isinstance(delete(a, 0), SubClass)) assert_(isinstance(delete(a, []), SubClass)) assert_(isinstance(delete(a, [0, 1]), SubClass)) @@ -1056,12 +1061,13 @@ def test_index_floats(self): @pytest.mark.parametrize("indexer", [np.array([1]), [1]]) def test_single_item_array(self, indexer): - a_del_int = delete(self.a, 1) - a_del = delete(self.a, indexer) + a, nd_a = self._create_arrays() + a_del_int = delete(a, 1) + a_del = delete(a, indexer) assert_equal(a_del_int, a_del) - nd_a_del_int = delete(self.nd_a, 1, axis=1) - nd_a_del = delete(self.nd_a, np.array([1]), axis=1) + nd_a_del_int = delete(nd_a, 1, axis=1) + nd_a_del = delete(nd_a, np.array([1]), axis=1) assert_equal(nd_a_del_int, nd_a_del) def test_single_item_array_non_int(self): @@ -2406,6 +2412,7 @@ def test_float16_underflow(self): # resulting in nan assert_array_equal(sinc(x), np.asarray(1.0)) + class TestUnique: def test_simple(self): diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index a06212efce0d..6594f6008c8e 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -175,8 +175,7 @@ def test_multinomial_pvals_float32(self): class TestMultivariateHypergeometric: - def setup_method(self): - self.seed = 8675309 + seed = 8675309 def test_argument_validation(self): # Error cases... @@ -308,37 +307,40 @@ def test_repeatability3(self): class TestSetState: - def setup_method(self): - self.seed = 1234567890 - self.rg = Generator(MT19937(self.seed)) - self.bit_generator = self.rg.bit_generator - self.state = self.bit_generator.state - self.legacy_state = (self.state['bit_generator'], - self.state['state']['key'], - self.state['state']['pos']) + def _create_rng(self): + seed = 1234567890 + rg = Generator(MT19937(seed)) + bit_generator = rg.bit_generator + state = bit_generator.state + legacy_state = (state['bit_generator'], + state['state']['key'], + state['state']['pos']) + return rg, bit_generator, state def test_gaussian_reset(self): # Make sure the cached every-other-Gaussian is reset. - old = self.rg.standard_normal(size=3) - self.bit_generator.state = self.state - new = self.rg.standard_normal(size=3) + rg, bit_generator, state = self._create_rng() + old = rg.standard_normal(size=3) + bit_generator.state = state + new = rg.standard_normal(size=3) assert_(np.all(old == new)) def test_gaussian_reset_in_media_res(self): # When the state is saved with a cached Gaussian, make sure the # cached Gaussian is restored. 
- - self.rg.standard_normal() - state = self.bit_generator.state - old = self.rg.standard_normal(size=3) - self.bit_generator.state = state - new = self.rg.standard_normal(size=3) + rg, bit_generator, state = self._create_rng() + rg.standard_normal() + state = bit_generator.state + old = rg.standard_normal(size=3) + bit_generator.state = state + new = rg.standard_normal(size=3) assert_(np.all(old == new)) def test_negative_binomial(self): # Ensure that the negative binomial results take floating point # arguments without truncation. - self.rg.negative_binomial(0.5, 0.5) + rg, _, _ = self._create_rng() + rg.negative_binomial(0.5, 0.5) class TestIntegers: @@ -736,9 +738,7 @@ def test_integers_small_dtype_chisquared(self, sample_size, high, class TestRandomDist: # Make sure the random distribution returns the correct value for a # given seed - - def setup_method(self): - self.seed = 1234567890 + seed = 1234567890 def test_integers(self): random = Generator(MT19937(self.seed)) @@ -1899,8 +1899,7 @@ def test_zipf(self): class TestBroadcast: # tests that functions that broadcast behave # correctly when presented with non-scalar arguments - def setup_method(self): - self.seed = 123456789 + seed = 123456789 def test_uniform(self): random = Generator(MT19937(self.seed)) @@ -2512,8 +2511,7 @@ def test_empty_outputs(self): @pytest.mark.skipif(IS_WASM, reason="can't start thread") class TestThread: # make sure each state produces the same sequence even in threads - def setup_method(self): - self.seeds = range(4) + seeds = range(4) def check_function(self, function, sz): from threading import Thread @@ -2558,13 +2556,11 @@ def gen_random(state, out): # See Issue #4263 class TestSingleEltArrayInput: - def setup_method(self): - self.argOne = np.array([2]) - self.argTwo = np.array([3]) - self.argThree = np.array([4]) - self.tgtShape = (1,) + def _create_arrays(self): + return np.array([2]), np.array([3]), np.array([4]), (1,) def test_one_arg_funcs(self): + argOne, _, _, tgtShape = self._create_arrays() funcs = (random.exponential, random.standard_gamma, random.chisquare, random.standard_t, random.pareto, random.weibull, @@ -2579,11 +2575,12 @@ def test_one_arg_funcs(self): out = func(np.array([0.5])) else: - out = func(self.argOne) + out = func(argOne) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) def test_two_arg_funcs(self): + argOne, argTwo, _, tgtShape = self._create_arrays() funcs = (random.uniform, random.normal, random.beta, random.gamma, random.f, random.noncentral_chisquare, @@ -2599,18 +2596,19 @@ def test_two_arg_funcs(self): argTwo = np.array([0.5]) else: - argTwo = self.argTwo + argTwo = argTwo - out = func(self.argOne, argTwo) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo) + assert_equal(out.shape, tgtShape) - out = func(self.argOne[0], argTwo) - assert_equal(out.shape, self.tgtShape) + out = func(argOne[0], argTwo) + assert_equal(out.shape, tgtShape) - out = func(self.argOne, argTwo[0]) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo[0]) + assert_equal(out.shape, tgtShape) def test_integers(self, endpoint): + _, _, _, tgtShape = self._create_arrays() itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64] func = random.integers @@ -2619,27 +2617,28 @@ def test_integers(self, endpoint): for dt in itype: out = func(low, high, endpoint=endpoint, dtype=dt) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) out = func(low[0], high, endpoint=endpoint, 
dtype=dt) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) out = func(low, high[0], endpoint=endpoint, dtype=dt) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) def test_three_arg_funcs(self): + argOne, argTwo, argThree, tgtShape = self._create_arrays() funcs = [random.noncentral_f, random.triangular, random.hypergeometric] for func in funcs: - out = func(self.argOne, self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo, argThree) + assert_equal(out.shape, tgtShape) - out = func(self.argOne[0], self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne[0], argTwo, argThree) + assert_equal(out.shape, tgtShape) - out = func(self.argOne, self.argTwo[0], self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo[0], argThree) + assert_equal(out.shape, tgtShape) @pytest.mark.parametrize("config", JUMP_TEST_DATA) diff --git a/numpy/random/tests/test_generator_mt19937_regressions.py b/numpy/random/tests/test_generator_mt19937_regressions.py index abfacb87dbc5..21093ef73eb6 100644 --- a/numpy/random/tests/test_generator_mt19937_regressions.py +++ b/numpy/random/tests/test_generator_mt19937_regressions.py @@ -6,30 +6,32 @@ class TestRegression: - - def setup_method(self): - self.mt19937 = Generator(MT19937(121263137472525314065)) + def _create_generator(self): + return Generator(MT19937(121263137472525314065)) def test_vonmises_range(self): # Make sure generated random variables are in [-pi, pi]. # Regression test for ticket #986. + mt19937 = self._create_generator() for mu in np.linspace(-7., 7., 5): - r = self.mt19937.vonmises(mu, 1, 50) + r = mt19937.vonmises(mu, 1, 50) assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) def test_hypergeometric_range(self): # Test for ticket #921 - assert_(np.all(self.mt19937.hypergeometric(3, 18, 11, size=10) < 4)) - assert_(np.all(self.mt19937.hypergeometric(18, 3, 11, size=10) > 0)) + mt19937 = self._create_generator() + assert_(np.all(mt19937.hypergeometric(3, 18, 11, size=10) < 4)) + assert_(np.all(mt19937.hypergeometric(18, 3, 11, size=10) > 0)) # Test for ticket #5623 args = (2**20 - 2, 2**20 - 2, 2**20 - 2) # Check for 32-bit systems - assert_(self.mt19937.hypergeometric(*args) > 0) + assert_(mt19937.hypergeometric(*args) > 0) def test_logseries_convergence(self): # Test for ticket #923 + mt19937 = self._create_generator() N = 1000 - rvsn = self.mt19937.logseries(0.8, size=N) + rvsn = mt19937.logseries(0.8, size=N) # these two frequency counts should be close to theoretical # numbers with this large sample # theoretical large N result is 0.49706795 @@ -66,34 +68,39 @@ def test_multivariate_normal_size_types(self): # Test for multivariate_normal issue with 'size' argument. # Check that the multivariate_normal size argument can be a # numpy integer. 
- self.mt19937.multivariate_normal([0], [[0]], size=1) - self.mt19937.multivariate_normal([0], [[0]], size=np.int_(1)) - self.mt19937.multivariate_normal([0], [[0]], size=np.int64(1)) + mt19937 = self._create_generator() + mt19937.multivariate_normal([0], [[0]], size=1) + mt19937.multivariate_normal([0], [[0]], size=np.int_(1)) + mt19937.multivariate_normal([0], [[0]], size=np.int64(1)) def test_beta_small_parameters(self): # Test that beta with small a and b parameters does not produce # NaNs due to roundoff errors causing 0 / 0, gh-5851 - x = self.mt19937.beta(0.0001, 0.0001, size=100) + mt19937 = self._create_generator() + x = mt19937.beta(0.0001, 0.0001, size=100) assert_(not np.any(np.isnan(x)), 'Nans in mt19937.beta') def test_beta_very_small_parameters(self): # gh-24203: beta would hang with very small parameters. - self.mt19937.beta(1e-49, 1e-40) + mt19937 = self._create_generator() + mt19937.beta(1e-49, 1e-40) def test_beta_ridiculously_small_parameters(self): # gh-24266: beta would generate nan when the parameters # were subnormal or a small multiple of the smallest normal. + mt19937 = self._create_generator() tiny = np.finfo(1.0).tiny - x = self.mt19937.beta(tiny / 32, tiny / 40, size=50) + x = mt19937.beta(tiny / 32, tiny / 40, size=50) assert not np.any(np.isnan(x)) def test_beta_expected_zero_frequency(self): # gh-24475: For small a and b (e.g. a=0.0025, b=0.0025), beta # would generate too many zeros. + mt19937 = self._create_generator() a = 0.0025 b = 0.0025 n = 1000000 - x = self.mt19937.beta(a, b, size=n) + x = mt19937.beta(a, b, size=n) nzeros = np.count_nonzero(x == 0) # beta CDF at x = np.finfo(np.double).smallest_subnormal/2 # is p = 0.0776169083131899, e.g, @@ -114,24 +121,26 @@ def test_choice_sum_of_probs_tolerance(self): # The sum of probs should be 1.0 with some tolerance. # For low precision dtypes the tolerance was too tight. # See numpy github issue 6123. + mt19937 = self._create_generator() a = [1, 2, 3] counts = [4, 4, 2] for dt in np.float16, np.float32, np.float64: probs = np.array(counts, dtype=dt) / sum(counts) - c = self.mt19937.choice(a, p=probs) + c = mt19937.choice(a, p=probs) assert_(c in a) with pytest.raises(ValueError): - self.mt19937.choice(a, p=probs * 0.9) + mt19937.choice(a, p=probs * 0.9) def test_shuffle_of_array_of_different_length_strings(self): # Test that permuting an array of different length strings # will not cause a segfault on garbage collection # Tests gh-7710 + mt19937 = self._create_generator() a = np.array(['a', 'a' * 1000]) for _ in range(100): - self.mt19937.shuffle(a) + mt19937.shuffle(a) # Force Garbage Collection - should not segfault. import gc @@ -141,10 +150,11 @@ def test_shuffle_of_array_of_objects(self): # Test that permuting an array of objects will not cause # a segfault on garbage collection. # See gh-7719 + mt19937 = self._create_generator() a = np.array([np.arange(1), np.arange(4)], dtype=object) for _ in range(1000): - self.mt19937.shuffle(a) + mt19937.shuffle(a) # Force Garbage Collection - should not segfault. 
import gc @@ -174,10 +184,11 @@ def __array__(self, dtype=None, copy=None): assert_array_equal(m.__array__(), np.arange(5)) def test_gamma_0(self): - assert self.mt19937.standard_gamma(0.0) == 0.0 - assert_array_equal(self.mt19937.standard_gamma([0.0]), 0.0) + mt19937 = self._create_generator() + assert mt19937.standard_gamma(0.0) == 0.0 + assert_array_equal(mt19937.standard_gamma([0.0]), 0.0) - actual = self.mt19937.standard_gamma([0.0], dtype='float') + actual = mt19937.standard_gamma([0.0], dtype='float') expected = np.array([0.], dtype=np.float32) assert_array_equal(actual, expected) @@ -185,21 +196,24 @@ def test_geometric_tiny_prob(self): # Regression test for gh-17007. # When p = 1e-30, the probability that a sample will exceed 2**63-1 # is 0.9999999999907766, so we expect the result to be all 2**63-1. - assert_array_equal(self.mt19937.geometric(p=1e-30, size=3), + mt19937 = self._create_generator() + assert_array_equal(mt19937.geometric(p=1e-30, size=3), np.iinfo(np.int64).max) def test_zipf_large_parameter(self): # Regression test for part of gh-9829: a call such as rng.zipf(10000) # would hang. + mt19937 = self._create_generator() n = 8 - sample = self.mt19937.zipf(10000, size=n) + sample = mt19937.zipf(10000, size=n) assert_array_equal(sample, np.ones(n, dtype=np.int64)) def test_zipf_a_near_1(self): # Regression test for gh-9829: a call such as rng.zipf(1.0000000000001) # would hang. + mt19937 = self._create_generator() n = 100000 - sample = self.mt19937.zipf(1.0000000000001, size=n) + sample = mt19937.zipf(1.0000000000001, size=n) # Not much of a test, but let's do something more than verify that # it doesn't hang. Certainly for a monotonically decreasing # discrete distribution truncated to signed 64 bit integers, more diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index 4eb455eb77be..b3e69b41956b 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -105,56 +105,62 @@ def test_multidimensional_pvals(self): class TestSetState: - def setup_method(self): - self.seed = 1234567890 - self.prng = random.RandomState(self.seed) - self.state = self.prng.get_state() + def _create_rng(self): + seed = 1234567890 + prng = random.RandomState(seed) + state = prng.get_state() + return prng, state def test_basic(self): - old = self.prng.tomaxint(16) - self.prng.set_state(self.state) - new = self.prng.tomaxint(16) + prng, state = self._create_rng() + old = prng.tomaxint(16) + prng.set_state(state) + new = prng.tomaxint(16) assert_(np.all(old == new)) def test_gaussian_reset(self): # Make sure the cached every-other-Gaussian is reset. - old = self.prng.standard_normal(size=3) - self.prng.set_state(self.state) - new = self.prng.standard_normal(size=3) + prng, state = self._create_rng() + old = prng.standard_normal(size=3) + prng.set_state(state) + new = prng.standard_normal(size=3) assert_(np.all(old == new)) def test_gaussian_reset_in_media_res(self): # When the state is saved with a cached Gaussian, make sure the # cached Gaussian is restored. 
- - self.prng.standard_normal() - state = self.prng.get_state() - old = self.prng.standard_normal(size=3) - self.prng.set_state(state) - new = self.prng.standard_normal(size=3) + prng, state = self._create_rng() + prng.standard_normal() + state = prng.get_state() + old = prng.standard_normal(size=3) + prng.set_state(state) + new = prng.standard_normal(size=3) assert_(np.all(old == new)) def test_backwards_compatibility(self): # Make sure we can accept old state tuples that do not have the # cached Gaussian value. - old_state = self.state[:-2] - x1 = self.prng.standard_normal(size=16) - self.prng.set_state(old_state) - x2 = self.prng.standard_normal(size=16) - self.prng.set_state(self.state) - x3 = self.prng.standard_normal(size=16) + prng, state = self._create_rng() + old_state = state[:-2] + x1 = prng.standard_normal(size=16) + prng.set_state(old_state) + x2 = prng.standard_normal(size=16) + prng.set_state(state) + x3 = prng.standard_normal(size=16) assert_(np.all(x1 == x2)) assert_(np.all(x1 == x3)) def test_negative_binomial(self): # Ensure that the negative binomial results take floating point # arguments without truncation. - self.prng.negative_binomial(0.5, 0.5) + prng, _ = self._create_rng() + prng.negative_binomial(0.5, 0.5) def test_set_invalid_state(self): # gh-25402 + prng, _ = self._create_rng() with pytest.raises(IndexError): - self.prng.set_state(()) + prng.set_state(()) class TestRandint: @@ -299,9 +305,7 @@ def test_respect_dtype_singleton(self): class TestRandomDist: # Make sure the random distribution returns the correct value for a # given seed - - def setup_method(self): - self.seed = 1234567890 + seed = 1234567890 def test_rand(self): np.random.seed(self.seed) @@ -1053,8 +1057,7 @@ def test_zipf(self): class TestBroadcast: # tests that functions that broadcast behave # correctly when presented with non-scalar arguments - def setup_method(self): - self.seed = 123456789 + seed = 123456789 def setSeed(self): np.random.seed(self.seed) @@ -1623,8 +1626,7 @@ def test_logseries(self): @pytest.mark.skipif(IS_WASM, reason="can't start thread") class TestThread: # make sure each state produces the same sequence even in threads - def setup_method(self): - self.seeds = range(4) + seeds = range(4) def check_function(self, function, sz): from threading import Thread @@ -1666,13 +1668,11 @@ def gen_random(state, out): # See Issue #4263 class TestSingleEltArrayInput: - def setup_method(self): - self.argOne = np.array([2]) - self.argTwo = np.array([3]) - self.argThree = np.array([4]) - self.tgtShape = (1,) + def _create_arrays(self): + return np.array([2]), np.array([3]), np.array([4]), (1,) def test_one_arg_funcs(self): + argOne, _, _, tgtShape = self._create_arrays() funcs = (np.random.exponential, np.random.standard_gamma, np.random.chisquare, np.random.standard_t, np.random.pareto, np.random.weibull, @@ -1687,11 +1687,12 @@ def test_one_arg_funcs(self): out = func(np.array([0.5])) else: - out = func(self.argOne) + out = func(argOne) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) def test_two_arg_funcs(self): + argOne, argTwo, _, tgtShape = self._create_arrays() funcs = (np.random.uniform, np.random.normal, np.random.beta, np.random.gamma, np.random.f, np.random.noncentral_chisquare, @@ -1707,18 +1708,19 @@ def test_two_arg_funcs(self): argTwo = np.array([0.5]) else: - argTwo = self.argTwo + argTwo = argTwo - out = func(self.argOne, argTwo) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo) + assert_equal(out.shape, tgtShape) - out = 
func(self.argOne[0], argTwo) - assert_equal(out.shape, self.tgtShape) + out = func(argOne[0], argTwo) + assert_equal(out.shape, tgtShape) - out = func(self.argOne, argTwo[0]) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo[0]) + assert_equal(out.shape, tgtShape) def test_randint(self): + _, _, _, tgtShape = self._create_arrays() itype = [bool, np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64] func = np.random.randint @@ -1727,24 +1729,25 @@ def test_randint(self): for dt in itype: out = func(low, high, dtype=dt) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) out = func(low[0], high, dtype=dt) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) out = func(low, high[0], dtype=dt) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) def test_three_arg_funcs(self): + argOne, argTwo, argThree, tgtShape = self._create_arrays() funcs = [np.random.noncentral_f, np.random.triangular, np.random.hypergeometric] for func in funcs: - out = func(self.argOne, self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo, argThree) + assert_equal(out.shape, tgtShape) - out = func(self.argOne[0], self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne[0], argTwo, argThree) + assert_equal(out.shape, tgtShape) - out = func(self.argOne, self.argTwo[0], self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo[0], argThree) + assert_equal(out.shape, tgtShape) diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py index c56d3c0f186c..ff7daf0496bb 100644 --- a/numpy/random/tests/test_randomstate.py +++ b/numpy/random/tests/test_randomstate.py @@ -191,52 +191,58 @@ def test_multinomial_n_float(self): # Non-index integer types should gracefully truncate floats random.multinomial(100.5, [0.2, 0.8]) + class TestSetState: - def setup_method(self): - self.seed = 1234567890 - self.random_state = random.RandomState(self.seed) - self.state = self.random_state.get_state() + def _create_state(self): + seed = 1234567890 + random_state = random.RandomState(seed) + state = random_state.get_state() + return random_state, state def test_basic(self): - old = self.random_state.tomaxint(16) - self.random_state.set_state(self.state) - new = self.random_state.tomaxint(16) + random_state, state = self._create_state() + old = random_state.tomaxint(16) + random_state.set_state(state) + new = random_state.tomaxint(16) assert_(np.all(old == new)) def test_gaussian_reset(self): # Make sure the cached every-other-Gaussian is reset. - old = self.random_state.standard_normal(size=3) - self.random_state.set_state(self.state) - new = self.random_state.standard_normal(size=3) + random_state, state = self._create_state() + old = random_state.standard_normal(size=3) + random_state.set_state(state) + new = random_state.standard_normal(size=3) assert_(np.all(old == new)) def test_gaussian_reset_in_media_res(self): # When the state is saved with a cached Gaussian, make sure the # cached Gaussian is restored. 
- - self.random_state.standard_normal() - state = self.random_state.get_state() - old = self.random_state.standard_normal(size=3) - self.random_state.set_state(state) - new = self.random_state.standard_normal(size=3) + random_state, state = self._create_state() + random_state.standard_normal() + state = random_state.get_state() + old = random_state.standard_normal(size=3) + random_state.set_state(state) + new = random_state.standard_normal(size=3) assert_(np.all(old == new)) def test_backwards_compatibility(self): # Make sure we can accept old state tuples that do not have the # cached Gaussian value. - old_state = self.state[:-2] - x1 = self.random_state.standard_normal(size=16) - self.random_state.set_state(old_state) - x2 = self.random_state.standard_normal(size=16) - self.random_state.set_state(self.state) - x3 = self.random_state.standard_normal(size=16) + random_state, state = self._create_state() + old_state = state[:-2] + x1 = random_state.standard_normal(size=16) + random_state.set_state(old_state) + x2 = random_state.standard_normal(size=16) + random_state.set_state(state) + x3 = random_state.standard_normal(size=16) assert_(np.all(x1 == x2)) assert_(np.all(x1 == x3)) def test_negative_binomial(self): # Ensure that the negative binomial results take floating point # arguments without truncation. - self.random_state.negative_binomial(0.5, 0.5) + random_state, _ = self._create_state() + random_state.negative_binomial(0.5, 0.5) def test_get_state_warning(self): rs = random.RandomState(PCG64()) @@ -246,34 +252,38 @@ def test_get_state_warning(self): assert state['bit_generator'] == 'PCG64' def test_invalid_legacy_state_setting(self): - state = self.random_state.get_state() + random_state, state = self._create_state() + state = random_state.get_state() new_state = ('Unknown', ) + state[1:] - assert_raises(ValueError, self.random_state.set_state, new_state) - assert_raises(TypeError, self.random_state.set_state, + assert_raises(ValueError, random_state.set_state, new_state) + assert_raises(TypeError, random_state.set_state, np.array(new_state, dtype=object)) - state = self.random_state.get_state(legacy=False) + state = random_state.get_state(legacy=False) del state['bit_generator'] - assert_raises(ValueError, self.random_state.set_state, state) + assert_raises(ValueError, random_state.set_state, state) def test_pickle(self): - self.random_state.seed(0) - self.random_state.random_sample(100) - self.random_state.standard_normal() - pickled = self.random_state.get_state(legacy=False) + random_state, _ = self._create_state() + random_state.seed(0) + random_state.random_sample(100) + random_state.standard_normal() + pickled = random_state.get_state(legacy=False) assert_equal(pickled['has_gauss'], 1) - rs_unpick = pickle.loads(pickle.dumps(self.random_state)) + rs_unpick = pickle.loads(pickle.dumps(random_state)) unpickled = rs_unpick.get_state(legacy=False) assert_mt19937_state_equal(pickled, unpickled) def test_state_setting(self): - attr_state = self.random_state.__getstate__() - self.random_state.standard_normal() - self.random_state.__setstate__(attr_state) - state = self.random_state.get_state(legacy=False) + random_state, state = self._create_state() + attr_state = random_state.__getstate__() + random_state.standard_normal() + random_state.__setstate__(attr_state) + state = random_state.get_state(legacy=False) assert_mt19937_state_equal(attr_state, state) def test_repr(self): - assert repr(self.random_state).startswith('RandomState(MT19937)') + random_state, _ = self._create_state() + 
assert repr(random_state).startswith('RandomState(MT19937)') class TestRandint: @@ -443,9 +453,7 @@ def test_respect_dtype_singleton(self): class TestRandomDist: # Make sure the random distribution returns the correct value for a # given seed - - def setup_method(self): - self.seed = 1234567890 + seed = 1234567890 def test_rand(self): random.seed(self.seed) @@ -1309,8 +1317,7 @@ def test_zipf(self): class TestBroadcast: # tests that functions that broadcast behave # correctly when presented with non-scalar arguments - def setup_method(self): - self.seed = 123456789 + seed = 123456789 def set_seed(self): random.seed(self.seed) @@ -1896,8 +1903,7 @@ def test_logseries(self): @pytest.mark.skipif(IS_WASM, reason="can't start thread") class TestThread: # make sure each state produces the same sequence even in threads - def setup_method(self): - self.seeds = range(4) + seeds = range(4) def check_function(self, function, sz): from threading import Thread @@ -1942,13 +1948,11 @@ def gen_random(state, out): # See Issue #4263 class TestSingleEltArrayInput: - def setup_method(self): - self.argOne = np.array([2]) - self.argTwo = np.array([3]) - self.argThree = np.array([4]) - self.tgtShape = (1,) + def _create_arrays(self): + return np.array([2]), np.array([3]), np.array([4]), (1,) def test_one_arg_funcs(self): + argOne, _, _, tgtShape = self._create_arrays() funcs = (random.exponential, random.standard_gamma, random.chisquare, random.standard_t, random.pareto, random.weibull, @@ -1963,11 +1967,12 @@ def test_one_arg_funcs(self): out = func(np.array([0.5])) else: - out = func(self.argOne) + out = func(argOne) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) def test_two_arg_funcs(self): + argOne, argTwo, _, tgtShape = self._create_arrays() funcs = (random.uniform, random.normal, random.beta, random.gamma, random.f, random.noncentral_chisquare, @@ -1983,30 +1988,31 @@ def test_two_arg_funcs(self): argTwo = np.array([0.5]) else: - argTwo = self.argTwo + argTwo = argTwo - out = func(self.argOne, argTwo) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo) + assert_equal(out.shape, tgtShape) - out = func(self.argOne[0], argTwo) - assert_equal(out.shape, self.tgtShape) + out = func(argOne[0], argTwo) + assert_equal(out.shape, tgtShape) - out = func(self.argOne, argTwo[0]) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo[0]) + assert_equal(out.shape, tgtShape) def test_three_arg_funcs(self): + argOne, argTwo, argThree, tgtShape = self._create_arrays() funcs = [random.noncentral_f, random.triangular, random.hypergeometric] for func in funcs: - out = func(self.argOne, self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo, argThree) + assert_equal(out.shape, tgtShape) - out = func(self.argOne[0], self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne[0], argTwo, argThree) + assert_equal(out.shape, tgtShape) - out = func(self.argOne, self.argTwo[0], self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo[0], argThree) + assert_equal(out.shape, tgtShape) # Ensure returned array dtype is correct for platform From 61e488926c4b8ca54cec36f657172f2726c93180 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 3 Sep 2025 13:15:29 -0600 Subject: [PATCH 0387/1018] BUG: use correct input dtype in flatiter assignment --- numpy/_core/src/multiarray/iterators.c | 2 +- numpy/_core/tests/test_stringdtype.py | 7 +++++++ 2 files changed, 8 insertions(+), 1 
deletion(-) diff --git a/numpy/_core/src/multiarray/iterators.c b/numpy/_core/src/multiarray/iterators.c index 704fd4738589..1b4ed59fbfe0 100644 --- a/numpy/_core/src/multiarray/iterators.c +++ b/numpy/_core/src/multiarray/iterators.c @@ -850,7 +850,7 @@ iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val) /* We can assume the newly allocated array is aligned */ int is_aligned = IsUintAligned(self->ao); if (PyArray_GetDTypeTransferFunction( - is_aligned, dtype_size, dtype_size, dtype, dtype, 0, + is_aligned, dtype_size, dtype_size, PyArray_DESCR(arrval), dtype, 0, &cast_info, &transfer_flags) < 0) { goto finish; } diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 52a225619ccf..5d2364bf8cfc 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -530,6 +530,13 @@ def test_fancy_indexing(string_list): assert_array_equal(sarr[ind], uarr[ind]) +def test_flatiter_indexing(): + # see gh-29659 + arr = np.array(['hello', 'world'], dtype='T') + arr.flat[:] = 9223372036854775 + assert_array_equal(arr, np.array([9223372036854775] * 2, dtype='T')) + + def test_creation_functions(): assert_array_equal(np.zeros(3, dtype="T"), ["", "", ""]) assert_array_equal(np.empty(3, dtype="T"), ["", "", ""]) From c2eeeba0c34200b1a6c4377de662b73b9289c510 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 3 Sep 2025 13:34:34 -0600 Subject: [PATCH 0388/1018] MAINT: Remove NPY_SORT_NANFIRST from docs and code. --- doc/release/upcoming_changes/29642.c_api.rst | 3 +-- doc/source/reference/c-api/array.rst | 9 --------- numpy/_core/include/numpy/ndarraytypes.h | 1 - numpy/_core/src/multiarray/methods.c | 8 -------- 4 files changed, 1 insertion(+), 20 deletions(-) diff --git a/doc/release/upcoming_changes/29642.c_api.rst b/doc/release/upcoming_changes/29642.c_api.rst index 0a28e32e3530..65c804ef829b 100644 --- a/doc/release/upcoming_changes/29642.c_api.rst +++ b/doc/release/upcoming_changes/29642.c_api.rst @@ -8,7 +8,6 @@ needed. The new names of interest are: * NPY_SORT_DEFAULT -- default sort (same value as NPY_QUICKSORT) * NPY_SORT_STABLE -- the sort must be stable (same value as NPY_MERGESORT) * NPY_SORT_DESCENDING -- the sort must be descending -* NPY_SORT_NANFIRST -- NaNs sort to the beginning The semantic change is that NPY_HEAPSORT is mapped to NPY_QUICKSORT when used. -Note that NPY_SORT_DESCENDING and NPY_SORT_NANFIRST have yet to be implemented. +Note that NPY_SORT_DESCENDING is not yet implemented. diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index b5c1a08e2abf..667b9948a3d9 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -4381,15 +4381,6 @@ Enumerated Types This functionality is not yet implemented for any of the NumPy types and cannot yet be set from the Python interface. - .. c:enumerator:: NPY_SORT_NANFIRST - - (Requirement) Specifies that the sort must sort NaNs to - the beginning, e.g. NaNs compare less than any other value for the - type. This is only relevant if the type has NaN equivalents. - This functionality is not yet implemented for any of the NumPy types - and cannot yet be set from the Python interface. It may change in the - future. - .. 
c:enum:: NPY_SCALARKIND A special variable type indicating the number of "kinds" of diff --git a/numpy/_core/include/numpy/ndarraytypes.h b/numpy/_core/include/numpy/ndarraytypes.h index 01fe231927f4..ecd5d9724528 100644 --- a/numpy/_core/include/numpy/ndarraytypes.h +++ b/numpy/_core/include/numpy/ndarraytypes.h @@ -193,7 +193,6 @@ typedef enum { NPY_SORT_DEFAULT = 0, NPY_SORT_STABLE = 2, NPY_SORT_DESCENDING = 4, - NPY_SORT_NANFIRST = 8, } NPY_SORTKIND; #define NPY_NSORTS (NPY_STABLESORT + 1) diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 84f749acce3f..f2150d63c496 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -1255,7 +1255,6 @@ array_sort(PyArrayObject *self, NPY_SORTKIND sortkind = _NPY_SORT_UNDEFINED; int stable = -1; int descending = -1; - int nanfirst = -1; NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("sort", args, len_args, kwnames, @@ -1264,7 +1263,6 @@ array_sort(PyArrayObject *self, "|order", NULL, &order, "$stable", &PyArray_OptionalBoolConverter, &stable, // "$descending", &PyArray_OptionalBoolConverter, &descending, -// "$nanfirst", &PyArray_OptionalBoolConverter, &nanfirst, NULL, NULL, NULL) < 0) { return NULL; } @@ -1274,14 +1272,12 @@ array_sort(PyArrayObject *self, sortkind = 0; sortkind |= (stable > 0)? NPY_SORT_STABLE: 0; sortkind |= (descending > 0)? NPY_SORT_DESCENDING: 0; - sortkind |= (nanfirst > 0)? NPY_SORT_NANFIRST: 0; } else { // Check that no keywords are used int keywords_used = 0; keywords_used |= (stable != -1); keywords_used |= (descending != -1); - keywords_used |= (nanfirst != -1); if (keywords_used) { PyErr_SetString(PyExc_ValueError, "`kind` and keyword parameters can't be provided at " @@ -1421,7 +1417,6 @@ array_argsort(PyArrayObject *self, NPY_SORTKIND sortkind = _NPY_SORT_UNDEFINED; int stable = -1; int descending = -1; - int nanfirst = -1; NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("argsort", args, len_args, kwnames, @@ -1430,7 +1425,6 @@ array_argsort(PyArrayObject *self, "|order", NULL, &order, "$stable", &PyArray_OptionalBoolConverter, &stable, // "$descending", &PyArray_OptionalBoolConverter, &descending, -// "$nanfirst", &PyArray_OptionalBoolConverter, &nanfirst, NULL, NULL, NULL) < 0) { return NULL; } @@ -1440,14 +1434,12 @@ array_argsort(PyArrayObject *self, sortkind = 0; sortkind |= (stable > 0)? NPY_SORT_STABLE: 0; sortkind |= (descending > 0)? NPY_SORT_DESCENDING: 0; - sortkind |= (nanfirst > 0)? NPY_SORT_NANFIRST: 0; } else { // Check that no keywords are used int keywords_used = 0; keywords_used |= (stable != -1); keywords_used |= (descending != -1); - keywords_used |= (nanfirst != -1); if (keywords_used) { PyErr_SetString(PyExc_ValueError, "`kind` and keyword parameters can't be provided at " From 750d653b118962dd6c2c0c5dbaa0caeede972ace Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 4 Sep 2025 04:05:25 +0000 Subject: [PATCH 0389/1018] MAINT: Bump mamba-org/setup-micromamba from 2.0.5 to 2.0.6 Bumps [mamba-org/setup-micromamba](https://github.com/mamba-org/setup-micromamba) from 2.0.5 to 2.0.6. 
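Stepping back to the sort-flag consolidation in PATCH 0388: at the Python level the surviving flags surface as keyword arguments, and the `methods.c` hunks above reject mixing them with an explicit `kind`. A short sketch of that contract (the exception message is paraphrased)::

    import numpy as np

    a = np.array([3, 1, 2])
    a.sort(stable=True)            # maps to NPY_SORT_STABLE internally
    assert a.tolist() == [1, 2, 3]

    try:
        np.array([3, 1, 2]).sort(kind="quicksort", stable=True)
    except ValueError:
        pass  # `kind` and keyword parameters can't be provided together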
- [Release notes](https://github.com/mamba-org/setup-micromamba/releases) - [Commits](https://github.com/mamba-org/setup-micromamba/compare/b09ef9b599704322748535812ca03efb2625677b...7f29b8b80078b1b601dfa018b0f7425c587c63bb) --- updated-dependencies: - dependency-name: mamba-org/setup-micromamba dependency-version: 2.0.6 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/wheels.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 6d041831c22d..1604bd3046f5 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -184,7 +184,7 @@ jobs: path: ./wheelhouse/*.whl - name: install micromamba - uses: mamba-org/setup-micromamba@b09ef9b599704322748535812ca03efb2625677b + uses: mamba-org/setup-micromamba@7f29b8b80078b1b601dfa018b0f7425c587c63bb if: ${{ matrix.buildplat[1] != 'win_arm64' }} # unsupported platform at the moment with: # for installation of anaconda-client, required for upload to From 2235ce10e5efa15d4a8b0455d36022dd72655446 Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Thu, 4 Sep 2025 23:26:08 +0900 Subject: [PATCH 0390/1018] fix: change parameter --- benchmarks/benchmarks/bench_lib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index 37f599f075b3..32626b986817 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -123,7 +123,7 @@ class Unique(Benchmark): param_names = ["array_size", "percent_nans", "percent_unique_values", "dtype"] params = [ # sizes of the 1D arrays - [int(2e3), int(2e6)], + [int(1e3), int(1e6)], # percent of np.nan in arrays [10., 90.], # percent of unique values in arrays From 282cc5939519bb22e9cc4b684edeb7ddae30b9dd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 4 Sep 2025 17:05:44 +0000 Subject: [PATCH 0391/1018] MAINT: Bump actions/github-script from 7.0.1 to 8.0.0 Bumps [actions/github-script](https://github.com/actions/github-script) from 7.0.1 to 8.0.0. - [Release notes](https://github.com/actions/github-script/releases) - [Commits](https://github.com/actions/github-script/compare/60a0d83039c74a4aee543508d2ffcb1c3799cdea...ed597411d8f924073f98dfc5c65a23a2325f34cd) --- updated-dependencies: - dependency-name: actions/github-script dependency-version: 8.0.0 dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/mypy_primer_comment.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/mypy_primer_comment.yml b/.github/workflows/mypy_primer_comment.yml index 338a53da6a13..e62f26edd85f 100644 --- a/.github/workflows/mypy_primer_comment.yml +++ b/.github/workflows/mypy_primer_comment.yml @@ -18,7 +18,7 @@ jobs: if: ${{ github.event.workflow_run.conclusion == 'success' }} steps: - name: Download diffs - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 with: script: | const fs = require('fs'); @@ -42,7 +42,7 @@ jobs: - name: Get PR number id: get-pr-number - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 with: script: | const fs = require('fs'); @@ -58,7 +58,7 @@ jobs: - name: Post comment id: post-comment - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 with: github-token: ${{ secrets.GITHUB_TOKEN }} script: | From 5de7753badd4de682943864c012eaee98c2e927b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 4 Sep 2025 17:05:59 +0000 Subject: [PATCH 0392/1018] MAINT: Bump actions/setup-python from 5.6.0 to 6.0.0 Bumps [actions/setup-python](https://github.com/actions/setup-python) from 5.6.0 to 6.0.0. - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/a26af69be951a213d495a4c3e4e4022e16d87065...e797f83bcb11b83ae66e0230d6156d7c80228e7c) --- updated-dependencies: - dependency-name: actions/setup-python dependency-version: 6.0.0 dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/linux.yml | 18 +++++++++--------- .github/workflows/linux_blas.yml | 12 ++++++------ .github/workflows/linux_simd.yml | 12 ++++++------ .github/workflows/macos.yml | 2 +- .github/workflows/mypy.yml | 2 +- .github/workflows/mypy_primer.yml | 2 +- .github/workflows/wheels.yml | 4 ++-- .github/workflows/windows.yml | 6 +++--- 8 files changed, 29 insertions(+), 29 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 40e372cbf3e4..b55f3dedb67e 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -38,7 +38,7 @@ jobs: submodules: recursive fetch-depth: 0 persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' - name: Install linter requirements @@ -65,7 +65,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: ${{ matrix.version }} - uses: ./.github/meson_actions @@ -80,7 +80,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: 'pypy3.11-v7.3.20' - name: Setup using scipy-openblas @@ -129,7 +129,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' - name: Install build and test dependencies from PyPI @@ -169,7 +169,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' @@ -246,7 +246,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' - name: Install build and benchmarking dependencies @@ -285,7 +285,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' - name: Install gfortran and setup OpenBLAS (sdist build) @@ -329,7 +329,7 @@ jobs: path: 'array-api-tests' persist-credentials: false - name: Set up Python - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' - name: Install build and test dependencies from PyPI @@ -358,7 +358,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' - name: Install build and test dependencies from PyPI diff --git 
a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index 633de1fbb84c..570e90437e1a 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -70,7 +70,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' @@ -199,7 +199,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' @@ -228,7 +228,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' @@ -290,7 +290,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' @@ -354,7 +354,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' @@ -391,7 +391,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index 511e1d2ba125..5bd1eab7f797 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -63,7 +63,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' - uses: ./.github/meson_actions @@ -81,7 +81,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' @@ -127,7 +127,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' - name: Install dependencies @@ -181,7 +181,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: "${{ matrix.BUILD_PROP[2] }}" - uses: ./.github/meson_actions @@ -196,7 +196,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # 
v6.0.0 with: python-version: '3.11' @@ -247,7 +247,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index da0972678b15..97ef55fb0228 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -124,7 +124,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: ${{ matrix.version }} diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index e362a29f628a..e2dcdccf868b 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -56,7 +56,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: ${{ matrix.os_python[1] }} - name: Install dependencies diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml index 2bfc4197e9b8..8dfeac05d4ba 100644 --- a/.github/workflows/mypy_primer.yml +++ b/.github/workflows/mypy_primer.yml @@ -32,7 +32,7 @@ jobs: with: path: numpy_to_test fetch-depth: 0 - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: "3.12" - name: Install dependencies diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 1604bd3046f5..9cf98d481fe3 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -143,7 +143,7 @@ jobs: if: runner.os == 'windows' # Used to push the built wheels - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: "3.x" @@ -250,7 +250,7 @@ jobs: submodules: true persist-credentials: false # Used to push the built wheels - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: # Build sdist on lowest supported Python python-version: "3.11" diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index dba4f74496b2..66f544b51d7d 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -35,7 +35,7 @@ jobs: persist-credentials: false - name: Setup Python - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: ${{ matrix.compiler-pyversion[1] }} @@ -106,7 +106,7 @@ jobs: persist-credentials: false - name: Setup Python - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: ${{ matrix.compiler-pyversion[1] }} architecture: arm64 @@ -180,7 +180,7 @@ jobs: persist-credentials: false - name: Setup Python - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 
with: python-version: '3.11' architecture: ${{ matrix.architecture }} From 398917323d3ee430161d3001384c8b5071f207e0 Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Fri, 5 Sep 2025 05:36:41 -0700 Subject: [PATCH 0393/1018] DOC: Make the image credit author link clickable (#29588) * DOC: Make the image credit author link clickable * DOC: Remove an extra blank [skip azp] [skip cirrus] [skip actions] --- doc/source/user/absolute_beginners.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst index d0d7e70fa284..3ba12b84052e 100644 --- a/doc/source/user/absolute_beginners.rst +++ b/doc/source/user/absolute_beginners.rst @@ -1740,4 +1740,5 @@ For directions regarding installing Matplotlib, see the official ------------------------------------------------------- -*Image credits: Jay Alammar https://jalammar.github.io/* +*Image credits: Jay Alammar* +`https://jalammar.github.io/ <https://jalammar.github.io/>`_ From 5f0b57a109af8baec31265e191971b46ebeaed46 Mon Sep 17 00:00:00 2001 From: Sanjay Kumar Sakamuri Kamalakar Date: Fri, 5 Sep 2025 20:12:59 +0530 Subject: [PATCH 0394/1018] Update regex patterns for VSX3 and VSX4 features --- meson_cpu/ppc64/meson.build | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/meson_cpu/ppc64/meson.build b/meson_cpu/ppc64/meson.build index bad95257ca95..57fe47140429 100644 --- a/meson_cpu/ppc64/meson.build +++ b/meson_cpu/ppc64/meson.build @@ -23,7 +23,7 @@ if host_machine.endian() == 'little' VSX.update(implies: VSX2) endif VSX3 = mod_features.new( - 'VSX3', 3, implies: VSX2, args: {'val': '-mcpu=power9', 'match': '.*[mcpu=|vsx].*'}, + 'VSX3', 3, implies: VSX2, args: {'val': '-mcpu=power9', 'match': '.*(?:mcpu=|vsx).*'}, detect: {'val': 'VSX3', 'match': 'VSX.*'}, test_code: files(source_root + '/numpy/distutils/checks/cpu_vsx3.c')[0], extra_tests: { } ) VSX4 = mod_features.new( - 'VSX4', 4, implies: VSX3, args: {'val': '-mcpu=power10', 'match': '.*[mcpu=|vsx].*'}, + 'VSX4', 4, implies: VSX3, args: {'val': '-mcpu=power10', 'match': '.*(?:mcpu=|vsx).*'}, detect: {'val': 'VSX4', 'match': 'VSX.*'}, test_code: files(source_root + '/numpy/distutils/checks/cpu_vsx4.c')[0], extra_tests: { From 598f2df11abe2ed592db911f199a06ddbff9b657 Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Sat, 6 Sep 2025 00:12:29 +0900 Subject: [PATCH 0395/1018] fix: change parameter --- benchmarks/benchmarks/bench_lib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index 32626b986817..3f7988a2ab7f 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -123,7 +123,7 @@ class Unique(Benchmark): param_names = ["array_size", "percent_nans", "percent_unique_values", "dtype"] params = [ # sizes of the 1D arrays - [int(1e3), int(1e6)], + [200, int(2e5)], # percent of np.nan in arrays [10., 90.], # percent of unique values in arrays From 52989856645d1db14f1e12ad464c0566c91417c1 Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Fri, 5 Sep 2025 11:19:01 -0400 Subject: [PATCH 0396/1018] TST: Replace xunit setup with methods (#29666) * TST: Replace xunit setup with methods * TST: Change random to RandomState * TST: Move vars into test method --- numpy/_core/tests/test_multiarray.py | 360 +++++++++++++++------------ numpy/testing/tests/test_utils.py | 27 +- 2 files changed, 213
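The regex fix in PATCH 0394 is worth spelling out: `[mcpu=|vsx]` is a character class, so it matches any single one of the characters `m`, `c`, `p`, `u`, `=`, `|`, `v`, `s`, `x`, while `(?:mcpu=|vsx)` is a non-capturing group with two literal alternatives. A quick demonstration, using Python's `re` purely for illustration::

    import re

    # Old pattern: false positive on any flag containing one listed
    # character, e.g. the lone 'm' in '-mtune=native'.
    assert re.match(r'.*[mcpu=|vsx].*', '-mtune=native')
    # Fixed pattern: only the intended literal alternatives match.
    assert re.match(r'.*(?:mcpu=|vsx).*', '-mtune=native') is None
    assert re.match(r'.*(?:mcpu=|vsx).*', '-mcpu=power9')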
insertions(+), 174 deletions(-) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 8e50e71c3741..2d8587ad5ac1 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -105,16 +105,14 @@ def _aligned_zeros(shape, dtype=float, order="C", align=None): class TestFlags: - def setup_method(self): - self.a = np.arange(10) - def test_writeable(self): + arr = np.arange(10) mydict = locals() - self.a.flags.writeable = False - assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict) - self.a.flags.writeable = True - self.a[0] = 5 - self.a[0] = 0 + arr.flags.writeable = False + assert_raises(ValueError, runstring, 'arr[0] = 3', mydict) + arr.flags.writeable = True + arr[0] = 5 + arr[0] = 0 def test_writeable_any_base(self): # Ensure that any base being writeable is sufficient to change flag; @@ -252,18 +250,19 @@ class MyArr: assert np.asarray(MyArr()).flags.writeable is writeable def test_otherflags(self): - assert_equal(self.a.flags.carray, True) - assert_equal(self.a.flags['C'], True) - assert_equal(self.a.flags.farray, False) - assert_equal(self.a.flags.behaved, True) - assert_equal(self.a.flags.fnc, False) - assert_equal(self.a.flags.forc, True) - assert_equal(self.a.flags.owndata, True) - assert_equal(self.a.flags.writeable, True) - assert_equal(self.a.flags.aligned, True) - assert_equal(self.a.flags.writebackifcopy, False) - assert_equal(self.a.flags['X'], False) - assert_equal(self.a.flags['WRITEBACKIFCOPY'], False) + arr = np.arange(10) + assert_equal(arr.flags.carray, True) + assert_equal(arr.flags['C'], True) + assert_equal(arr.flags.farray, False) + assert_equal(arr.flags.behaved, True) + assert_equal(arr.flags.fnc, False) + assert_equal(arr.flags.forc, True) + assert_equal(arr.flags.owndata, True) + assert_equal(arr.flags.writeable, True) + assert_equal(arr.flags.aligned, True) + assert_equal(arr.flags.writebackifcopy, False) + assert_equal(arr.flags['X'], False) + assert_equal(arr.flags['WRITEBACKIFCOPY'], False) def test_string_align(self): a = np.zeros(4, dtype=np.dtype('|S4')) @@ -311,41 +310,44 @@ def test_int(self): class TestAttributes: - def setup_method(self): - self.one = np.arange(10) - self.two = np.arange(20).reshape(4, 5) - self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6) + def _create_arrays(self): + one = np.arange(10) + two = np.arange(20).reshape(4, 5) + three = np.arange(60, dtype=np.float64).reshape(2, 5, 6) + return one, two, three def test_attributes(self): - assert_equal(self.one.shape, (10,)) - assert_equal(self.two.shape, (4, 5)) - assert_equal(self.three.shape, (2, 5, 6)) - self.three.shape = (10, 3, 2) - assert_equal(self.three.shape, (10, 3, 2)) - self.three.shape = (2, 5, 6) - assert_equal(self.one.strides, (self.one.itemsize,)) - num = self.two.itemsize - assert_equal(self.two.strides, (5 * num, num)) - num = self.three.itemsize - assert_equal(self.three.strides, (30 * num, 6 * num, num)) - assert_equal(self.one.ndim, 1) - assert_equal(self.two.ndim, 2) - assert_equal(self.three.ndim, 3) - num = self.two.itemsize - assert_equal(self.two.size, 20) - assert_equal(self.two.nbytes, 20 * num) - assert_equal(self.two.itemsize, self.two.dtype.itemsize) - assert_equal(self.two.base, np.arange(20)) + one, two, three = self._create_arrays() + assert_equal(one.shape, (10,)) + assert_equal(two.shape, (4, 5)) + assert_equal(three.shape, (2, 5, 6)) + three.shape = (10, 3, 2) + assert_equal(three.shape, (10, 3, 2)) + three.shape = (2, 5, 6) + assert_equal(one.strides, 
(one.itemsize,)) + num = two.itemsize + assert_equal(two.strides, (5 * num, num)) + num = three.itemsize + assert_equal(three.strides, (30 * num, 6 * num, num)) + assert_equal(one.ndim, 1) + assert_equal(two.ndim, 2) + assert_equal(three.ndim, 3) + num = two.itemsize + assert_equal(two.size, 20) + assert_equal(two.nbytes, 20 * num) + assert_equal(two.itemsize, two.dtype.itemsize) + assert_equal(two.base, np.arange(20)) def test_dtypeattr(self): - assert_equal(self.one.dtype, np.dtype(np.int_)) - assert_equal(self.three.dtype, np.dtype(np.float64)) - assert_equal(self.one.dtype.char, np.dtype(int).char) - assert self.one.dtype.char in "lq" - assert_equal(self.three.dtype.char, 'd') - assert_(self.three.dtype.str[0] in '<>') - assert_equal(self.one.dtype.str[1], 'i') - assert_equal(self.three.dtype.str[1], 'f') + one, _, three = self._create_arrays() + assert_equal(one.dtype, np.dtype(np.int_)) + assert_equal(three.dtype, np.dtype(np.float64)) + assert_equal(one.dtype.char, np.dtype(int).char) + assert one.dtype.char in "lq" + assert_equal(three.dtype.char, 'd') + assert_(three.dtype.str[0] in '<>') + assert_equal(one.dtype.str[1], 'i') + assert_equal(three.dtype.str[1], 'f') def test_int_subclassing(self): # Regression test for https://github.com/numpy/numpy/pull/3526 @@ -356,7 +358,7 @@ def test_int_subclassing(self): assert_(not isinstance(numpy_int, int)) def test_stridesattr(self): - x = self.one + x, _, _ = self._create_arrays() def make_array(size, offset, strides): return np.ndarray(size, buffer=x, dtype=int, @@ -373,7 +375,7 @@ def make_array(size, offset, strides): make_array(0, 0, 10) def test_set_stridesattr(self): - x = self.one + x, _, _ = self._create_arrays() def make_array(size, offset, strides): try: @@ -732,46 +734,46 @@ def test_structured_non_void(self): class TestZeroRank: - def setup_method(self): - self.d = np.array(0), np.array('x', object) + def _create_arrays(self): + return np.array(0), np.array('x', object) def test_ellipsis_subscript(self): - a, b = self.d + a, b = self._create_arrays() assert_equal(a[...], 0) assert_equal(b[...], 'x') assert_(a[...].base is a) # `a[...] is a` in numpy <1.9. assert_(b[...].base is b) # `b[...] is b` in numpy <1.9. def test_empty_subscript(self): - a, b = self.d + a, b = self._create_arrays() assert_equal(a[()], 0) assert_equal(b[()], 'x') assert_(type(a[()]) is a.dtype.type) assert_(type(b[()]) is str) def test_invalid_subscript(self): - a, b = self.d + a, b = self._create_arrays() assert_raises(IndexError, lambda x: x[0], a) assert_raises(IndexError, lambda x: x[0], b) assert_raises(IndexError, lambda x: x[np.array([], int)], a) assert_raises(IndexError, lambda x: x[np.array([], int)], b) def test_ellipsis_subscript_assignment(self): - a, b = self.d + a, b = self._create_arrays() a[...] = 42 assert_equal(a, 42) b[...] 
= '' assert_equal(b.item(), '') def test_empty_subscript_assignment(self): - a, b = self.d + a, b = self._create_arrays() a[()] = 42 assert_equal(a, 42) b[()] = '' assert_equal(b.item(), '') def test_invalid_subscript_assignment(self): - a, b = self.d + a, b = self._create_arrays() def assign(x, i, v): x[i] = v @@ -781,7 +783,7 @@ def assign(x, i, v): assert_raises(ValueError, assign, a, (), '') def test_newaxis(self): - a, b = self.d + a, _ = self._create_arrays() assert_equal(a[np.newaxis].shape, (1,)) assert_equal(a[..., np.newaxis].shape, (1,)) assert_equal(a[np.newaxis, ...].shape, (1,)) @@ -792,7 +794,7 @@ def test_newaxis(self): assert_equal(a[(np.newaxis,) * 10].shape, (1,) * 10) def test_invalid_newaxis(self): - a, b = self.d + a, _ = self._create_arrays() def subscript(x, i): x[i] @@ -836,26 +838,26 @@ def test_real_imag(self): class TestScalarIndexing: - def setup_method(self): - self.d = np.array([0, 1])[0] + def _create_array(self): + return np.array([0, 1])[0] def test_ellipsis_subscript(self): - a = self.d + a = self._create_array() assert_equal(a[...], 0) assert_equal(a[...].shape, ()) def test_empty_subscript(self): - a = self.d + a = self._create_array() assert_equal(a[()], 0) assert_equal(a[()].shape, ()) def test_invalid_subscript(self): - a = self.d + a = self._create_array() assert_raises(IndexError, lambda x: x[0], a) assert_raises(IndexError, lambda x: x[np.array([], int)], a) def test_invalid_subscript_assignment(self): - a = self.d + a = self._create_array() def assign(x, i, v): x[i] = v @@ -863,7 +865,7 @@ def assign(x, i, v): assert_raises(TypeError, assign, a, 0, 42) def test_newaxis(self): - a = self.d + a = self._create_array() assert_equal(a[np.newaxis].shape, (1,)) assert_equal(a[..., np.newaxis].shape, (1,)) assert_equal(a[np.newaxis, ...].shape, (1,)) @@ -874,7 +876,7 @@ def test_newaxis(self): assert_equal(a[(np.newaxis,) * 10].shape, (1,) * 10) def test_invalid_newaxis(self): - a = self.d + a = self._create_array() def subscript(x, i): x[i] @@ -5572,8 +5574,7 @@ def test_invalid_axis(self): # gh-7528 class TestIO: """Test tofile, fromfile, tobytes, and fromstring""" - @pytest.fixture() - def x(self): + def _create_data(self): shape = (2, 4, 3) rand = np.random.random x = rand(shape) + rand(shape).astype(complex) * 1j @@ -5631,7 +5632,8 @@ def test_empty_files_binary(self, tmp_filename): y = np.fromfile(tmp_filename, sep=" ") assert_(y.size == 0, "Array not empty") - def test_roundtrip_file(self, x, tmp_filename): + def test_roundtrip_file(self, tmp_filename): + x = self._create_data() with open(tmp_filename, 'wb') as f: x.tofile(f) # NB. 
doesn't work with flush+seek, due to use of C stdio @@ -5639,18 +5641,21 @@ def test_roundtrip_file(self, x, tmp_filename): y = np.fromfile(f, dtype=x.dtype) assert_array_equal(y, x.flat) - def test_roundtrip(self, x, tmp_filename): + def test_roundtrip(self, tmp_filename): + x = self._create_data() x.tofile(tmp_filename) y = np.fromfile(tmp_filename, dtype=x.dtype) assert_array_equal(y, x.flat) - def test_roundtrip_dump_pathlib(self, x, tmp_filename): + def test_roundtrip_dump_pathlib(self, tmp_filename): + x = self._create_data() p = pathlib.Path(tmp_filename) x.dump(p) y = np.load(p, allow_pickle=True) assert_array_equal(y, x) - def test_roundtrip_binary_str(self, x): + def test_roundtrip_binary_str(self): + x = self._create_data() s = x.tobytes() y = np.frombuffer(s, dtype=x.dtype) assert_array_equal(y, x.flat) @@ -5659,7 +5664,8 @@ def test_roundtrip_binary_str(self, x): y = np.frombuffer(s, dtype=x.dtype) assert_array_equal(y, x.flatten('F')) - def test_roundtrip_str(self, x): + def test_roundtrip_str(self): + x = self._create_data() x = x.real.ravel() s = "@".join(map(str, x)) y = np.fromstring(s, sep="@") @@ -5667,14 +5673,16 @@ def test_roundtrip_str(self, x): assert_array_equal(x[nan_mask], y[nan_mask]) assert_array_equal(x[~nan_mask], y[~nan_mask]) - def test_roundtrip_repr(self, x): + def test_roundtrip_repr(self): + x = self._create_data() x = x.real.ravel() s = "@".join(repr(x)[11:-1] for x in x) y = np.fromstring(s, sep="@") assert_array_equal(x, y) - def test_unseekable_fromfile(self, x, tmp_filename): + def test_unseekable_fromfile(self, tmp_filename): # gh-6246 + x = self._create_data() x.tofile(tmp_filename) def fail(*args, **kwargs): @@ -5685,8 +5693,9 @@ def fail(*args, **kwargs): f.tell = fail assert_raises(OSError, np.fromfile, f, dtype=x.dtype) - def test_io_open_unbuffered_fromfile(self, x, tmp_filename): + def test_io_open_unbuffered_fromfile(self, tmp_filename): # gh-6632 + x = self._create_data() x.tofile(tmp_filename) with open(tmp_filename, 'rb', buffering=0) as f: y = np.fromfile(f, dtype=x.dtype) @@ -5712,8 +5721,9 @@ def test_largish_file(self, tmp_filename): d.tofile(f) assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2) - def test_io_open_buffered_fromfile(self, x, tmp_filename): + def test_io_open_buffered_fromfile(self, tmp_filename): # gh-6632 + x = self._create_data() x.tofile(tmp_filename) with open(tmp_filename, 'rb', buffering=-1) as f: y = np.fromfile(f, dtype=x.dtype) @@ -5777,7 +5787,8 @@ def test_load_object_array_fromfile(self, tmp_filename): assert_raises_regex(ValueError, "Cannot read into object array", np.fromfile, tmp_filename, dtype=object) - def test_fromfile_offset(self, x, tmp_filename): + def test_fromfile_offset(self, tmp_filename): + x = self._create_data() with open(tmp_filename, 'wb') as f: x.tofile(f) @@ -5812,13 +5823,14 @@ def test_fromfile_offset(self, x, tmp_filename): sep=",", offset=1) @pytest.mark.skipif(IS_PYPY, reason="bug in PyPy's PyNumber_AsSsize_t") - def test_fromfile_bad_dup(self, x, tmp_filename): + def test_fromfile_bad_dup(self, tmp_filename): def dup_str(fd): return 'abc' def dup_bigint(fd): return 2**68 + x = self._create_data() old_dup = os.dup try: with open(tmp_filename, 'wb') as f: @@ -6052,39 +6064,43 @@ def test_mmap_close(self): mm.close() class TestFlat: - def setup_method(self): - a0 = np.arange(20.0) - a = a0.reshape(4, 5) - a0 = a0.reshape((4, 5)) + def _create_arrays(self): + a = np.arange(20.0).reshape(4, 5) a.flags.writeable = False - self.a = a - self.b = a[::2, ::2] - self.a0 = a0 - 
self.b0 = a0[::2, ::2] + b = a[::2, ::2] + return a, b def test_contiguous(self): testpassed = False + a, _ = self._create_arrays() try: - self.a.flat[12] = 100.0 + a.flat[12] = 100.0 except ValueError: testpassed = True assert_(testpassed) - assert_(self.a.flat[12] == 12.0) + assert_(a.flat[12] == 12.0) def test_discontiguous(self): testpassed = False + _, b = self._create_arrays() try: - self.b.flat[4] = 100.0 + b.flat[4] = 100.0 except ValueError: testpassed = True assert_(testpassed) - assert_(self.b.flat[4] == 12.0) + assert_(b.flat[4] == 12.0) def test___array__(self): - c = self.a.flat.__array__() - d = self.b.flat.__array__() - e = self.a0.flat.__array__() - f = self.b0.flat.__array__() + a0 = np.arange(20.0) + a = a0.reshape(4, 5) + a0 = a0.reshape((4, 5)) + a.flags.writeable = False + b = a[::2, ::2] + b0 = a0[::2, ::2] + c = a.flat.__array__() + d = b.flat.__array__() + e = a0.flat.__array__() + f = b0.flat.__array__() assert_(c.flags.writeable is False) assert_(d.flags.writeable is False) @@ -6098,14 +6114,15 @@ def test___array__(self): @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") def test_refcount(self): # includes regression test for reference count error gh-13165 - inds = [np.intp(0), np.array([True] * self.a.size), np.array([0]), None] + a, _ = self._create_arrays() + inds = [np.intp(0), np.array([True] * a.size), np.array([0]), None] indtype = np.dtype(np.intp) rc_indtype = sys.getrefcount(indtype) for ind in inds: rc_ind = sys.getrefcount(ind) for _ in range(100): try: - self.a.flat[ind] + a.flat[ind] except IndexError: pass assert_(abs(sys.getrefcount(ind) - rc_ind) < 50) @@ -6381,12 +6398,12 @@ class TestStats: funcs = [_mean, _var, _std] - def setup_method(self): - np.random.seed(range(3)) - self.rmat = np.random.random((4, 5)) - self.cmat = self.rmat + 1j * self.rmat - self.omat = np.array([Decimal(str(r)) for r in self.rmat.flat]) - self.omat = self.omat.reshape(4, 5) + def _create_data(self): + rng = np.random.default_rng(range(3)) + rmat = rng.random((4, 5)) + cmat = rmat + 1j * rmat + omat = np.array([Decimal(str(r)) for r in rmat.flat]).reshape(4, 5) + return rmat, cmat, omat def test_python_type(self): for x in (np.float16(1.), 1, 1., 1 + 0j): @@ -6495,26 +6512,28 @@ def test_dtype_from_dtype(self): assert_(res is tgt) def test_ddof(self): + rmat, _, _ = self._create_data() for f in [_var]: for ddof in range(3): - dim = self.rmat.shape[1] - tgt = f(self.rmat, axis=1) * dim - res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof) + dim = rmat.shape[1] + tgt = f(rmat, axis=1) * dim + res = f(rmat, axis=1, ddof=ddof) * (dim - ddof) for f in [_std]: for ddof in range(3): - dim = self.rmat.shape[1] - tgt = f(self.rmat, axis=1) * np.sqrt(dim) - res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof) + dim = rmat.shape[1] + tgt = f(rmat, axis=1) * np.sqrt(dim) + res = f(rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof) assert_almost_equal(res, tgt) assert_almost_equal(res, tgt) def test_ddof_too_big(self): - dim = self.rmat.shape[1] + rmat, _, _ = self._create_data() + dim = rmat.shape[1] for f in [_var, _std]: for ddof in range(dim, dim + 2): with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') - res = f(self.rmat, axis=1, ddof=ddof) + res = f(rmat, axis=1, ddof=ddof) assert_(not (res < 0).any()) assert_(len(w) > 0) assert_(issubclass(w[0].category, RuntimeWarning)) @@ -6534,7 +6553,8 @@ def test_empty(self): assert_equal(f(A, axis=axis), np.zeros([])) def test_mean_values(self): - for mat in [self.rmat, self.cmat, 
self.omat]: + rmat, cmat, omat = self._create_data() + for mat in [rmat, cmat, omat]: for axis in [0, 1]: tgt = mat.sum(axis=axis) res = _mean(mat, axis=axis) * mat.shape[axis] @@ -6592,7 +6612,8 @@ def test_mean_where(self): assert_equal(np.mean(a, where=False), np.nan) def test_var_values(self): - for mat in [self.rmat, self.cmat, self.omat]: + rmat, cmat, omat = self._create_data() + for mat in [rmat, cmat, omat]: for axis in [0, 1, None]: msqr = _mean(mat * mat.conj(), axis=axis) mean = _mean(mat, axis=axis) @@ -6606,9 +6627,10 @@ def test_var_values(self): ('clongdouble', 7), )) def test_var_complex_values(self, complex_dtype, ndec): + _, cmat, _ = self._create_data() # Test fast-paths for every builtin complex type for axis in [0, 1, None]: - mat = self.cmat.copy().astype(complex_dtype) + mat = cmat.copy().astype(complex_dtype) msqr = _mean(mat * mat.conj(), axis=axis) mean = _mean(mat, axis=axis) tgt = msqr - mean * mean.conjugate() @@ -6618,7 +6640,8 @@ def test_var_complex_values(self, complex_dtype, ndec): def test_var_dimensions(self): # _var paths for complex number introduce additions on views that # increase dimensions. Ensure this generalizes to higher dims - mat = np.stack([self.cmat] * 3) + _, cmat, _ = self._create_data() + mat = np.stack([cmat] * 3) for axis in [0, 1, 2, -1, None]: msqr = _mean(mat * mat.conj(), axis=axis) mean = _mean(mat, axis=axis) @@ -6629,7 +6652,8 @@ def test_var_dimensions(self): def test_var_complex_byteorder(self): # Test that var fast-path does not cause failures for complex arrays # with non-native byteorder - cmat = self.cmat.copy().astype('complex128') + _, cmat, _ = self._create_data() + cmat = cmat.copy().astype('complex128') cmat_swapped = cmat.astype(cmat.dtype.newbyteorder()) assert_almost_equal(cmat.var(), cmat_swapped.var()) @@ -6677,7 +6701,8 @@ def test_var_where(self): assert_equal(np.var(a, where=False), np.nan) def test_std_values(self): - for mat in [self.rmat, self.cmat, self.omat]: + rmat, cmat, omat = self._create_data() + for mat in [rmat, cmat, omat]: for axis in [0, 1, None]: tgt = np.sqrt(_var(mat, axis=axis)) res = _std(mat, axis=axis) @@ -6808,61 +6833,63 @@ def test_vdot_uncontiguous(self): class TestDot: - def setup_method(self): - np.random.seed(128) - self.A = np.random.rand(4, 2) - self.b1 = np.random.rand(2, 1) - self.b2 = np.random.rand(2) - self.b3 = np.random.rand(1, 2) - self.b4 = np.random.rand(4) - self.N = 7 + N = 7 + + def _create_data(self): + rng = np.random.RandomState(128) + A = rng.random((4, 2)) + b1 = rng.random((2, 1)) + b2 = rng.random(2) + b3 = rng.random((1, 2)) + b4 = rng.random(4) + return A, b1, b2, b3, b4 def test_dotmatmat(self): - A = self.A + A, _, _, _, _ = self._create_data() res = np.dot(A.transpose(), A) tgt = np.array([[1.45046013, 0.86323640], [0.86323640, 0.84934569]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotmatvec(self): - A, b1 = self.A, self.b1 + A, b1, _, _, _ = self._create_data() res = np.dot(A, b1) tgt = np.array([[0.32114320], [0.04889721], [0.15696029], [0.33612621]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotmatvec2(self): - A, b2 = self.A, self.b2 + A, _, b2, _, _ = self._create_data() res = np.dot(A, b2) tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecmat(self): - A, b4 = self.A, self.b4 + A, _, _, _, b4 = self._create_data() res = np.dot(b4, A) tgt = np.array([1.23495091, 1.12222648]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecmat2(self): - 
b3, A = self.b3, self.A + A, _, _, b3, _ = self._create_data() res = np.dot(b3, A.transpose()) tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecmat3(self): - A, b4 = self.A, self.b4 + A, _, _, _, b4 = self._create_data() res = np.dot(A.transpose(), b4) tgt = np.array([1.23495091, 1.12222648]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecvecouter(self): - b1, b3 = self.b1, self.b3 + _, b1, _, b3, _ = self._create_data() res = np.dot(b1, b3) tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecvecinner(self): - b1, b3 = self.b1, self.b3 + _, b1, _, b3, _ = self._create_data() res = np.dot(b3, b1) tgt = np.array([[0.23129668]]) assert_almost_equal(res, tgt, decimal=self.N) @@ -6882,17 +6909,17 @@ def test_dotcolumnvect2(self): assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecscalar(self): - np.random.seed(100) - b1 = np.random.rand(1, 1) - b2 = np.random.rand(1, 4) + rng = np.random.RandomState(100) + b1 = rng.random((1, 1)) + b2 = rng.random((1, 4)) res = np.dot(b1, b2) tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecscalar2(self): - np.random.seed(100) - b1 = np.random.rand(4, 1) - b2 = np.random.rand(1, 1) + rng = np.random.RandomState(100) + b1 = rng.random((4, 1)) + b2 = rng.random((1, 1)) res = np.dot(b1, b2) tgt = np.array([[0.00256425], [0.00131359], [0.00200324], [0.00398638]]) assert_almost_equal(res, tgt, decimal=self.N) @@ -7709,23 +7736,27 @@ def test_3d_tensor(self): class TestChoose: - def setup_method(self): - self.x = 2 * np.ones((3,), dtype=int) - self.y = 3 * np.ones((3,), dtype=int) - self.x2 = 2 * np.ones((2, 3), dtype=int) - self.y2 = 3 * np.ones((2, 3), dtype=int) - self.ind = [0, 0, 1] + def _create_data(self): + x = 2 * np.ones((3,), dtype=int) + y = 3 * np.ones((3,), dtype=int) + x2 = 2 * np.ones((2, 3), dtype=int) + y2 = 3 * np.ones((2, 3), dtype=int) + ind = [0, 0, 1] + return x, y, x2, y2, ind def test_basic(self): - A = np.choose(self.ind, (self.x, self.y)) + x, y, _, _, ind = self._create_data() + A = np.choose(ind, (x, y)) assert_equal(A, [2, 2, 3]) def test_broadcast1(self): - A = np.choose(self.ind, (self.x2, self.y2)) + _, _, x2, y2, ind = self._create_data() + A = np.choose(ind, (x2, y2)) assert_equal(A, [[2, 2, 3], [2, 2, 3]]) def test_broadcast2(self): - A = np.choose(self.ind, (self.x, self.y2)) + x, _, _, y2, ind = self._create_data() + A = np.choose(ind, (x, y2)) assert_equal(A, [[2, 2, 3], [2, 2, 3]]) @pytest.mark.parametrize("ops", @@ -7755,38 +7786,43 @@ def test_dimension_and_args_limit(self): class TestRepeat: - def setup_method(self): - self.m = np.array([1, 2, 3, 4, 5, 6]) - self.m_rect = self.m.reshape((2, 3)) + def _create_data(self): + m = np.array([1, 2, 3, 4, 5, 6]) + m_rect = m.reshape((2, 3)) + return m, m_rect def test_basic(self): - A = np.repeat(self.m, [1, 3, 2, 1, 1, 2]) + m, _ = self._create_data() + A = np.repeat(m, [1, 3, 2, 1, 1, 2]) assert_equal(A, [1, 2, 2, 2, 3, 3, 4, 5, 6, 6]) def test_broadcast1(self): - A = np.repeat(self.m, 2) + m, _ = self._create_data() + A = np.repeat(m, 2) assert_equal(A, [1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6]) def test_axis_spec(self): - A = np.repeat(self.m_rect, [2, 1], axis=0) + _, m_rect = self._create_data() + A = np.repeat(m_rect, [2, 1], axis=0) assert_equal(A, [[1, 2, 3], [1, 2, 3], [4, 5, 6]]) - A = np.repeat(self.m_rect, [1, 3, 2], 
axis=1) + A = np.repeat(m_rect, [1, 3, 2], axis=1) assert_equal(A, [[1, 2, 2, 2, 3, 3], [4, 5, 5, 5, 6, 6]]) def test_broadcast2(self): - A = np.repeat(self.m_rect, 2, axis=0) + _, m_rect = self._create_data() + A = np.repeat(m_rect, 2, axis=0) assert_equal(A, [[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6]]) - A = np.repeat(self.m_rect, 2, axis=1) + A = np.repeat(m_rect, 2, axis=1) assert_equal(A, [[1, 1, 2, 2, 3, 3], [4, 4, 5, 5, 6, 6]]) diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index 5e1c625955bb..815f7ac2930a 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -36,6 +36,9 @@ class _GenericTest: + def _assert_func(self, *args, **kwargs): + pass + def _test_equal(self, a, b): self._assert_func(a, b) @@ -82,8 +85,8 @@ def test_array_likes(self): class TestArrayEqual(_GenericTest): - def setup_method(self): - self._assert_func = assert_array_equal + def _assert_func(self, *args, **kwargs): + assert_array_equal(*args, **kwargs) def test_generic_rank1(self): """Test rank 1 array for all dtypes.""" @@ -389,8 +392,8 @@ def test_build_err_msg_custom_precision(self): class TestEqual(TestArrayEqual): - def setup_method(self): - self._assert_func = assert_equal + def _assert_func(self, *args, **kwargs): + assert_equal(*args, **kwargs) def test_nan_items(self): self._assert_func(np.nan, np.nan) @@ -484,8 +487,8 @@ def test_object(self): class TestArrayAlmostEqual(_GenericTest): - def setup_method(self): - self._assert_func = assert_array_almost_equal + def _assert_func(self, *args, **kwargs): + assert_array_almost_equal(*args, **kwargs) def test_closeness(self): # Note that in the course of time we ended up with @@ -700,8 +703,8 @@ def all(self, *args, **kwargs): class TestAlmostEqual(_GenericTest): - def setup_method(self): - self._assert_func = assert_almost_equal + def _assert_func(self, *args, **kwargs): + assert_almost_equal(*args, **kwargs) def test_closeness(self): # Note that in the course of time we ended up with @@ -870,8 +873,8 @@ def all(self, *args, **kwargs): class TestApproxEqual: - def setup_method(self): - self._assert_func = assert_approx_equal + def _assert_func(self, *args, **kwargs): + assert_approx_equal(*args, **kwargs) def test_simple_0d_arrays(self): x = np.array(1234.22) @@ -913,8 +916,8 @@ def test_nan_items(self): class TestArrayAssertLess: - def setup_method(self): - self._assert_func = assert_array_less + def _assert_func(self, *args, **kwargs): + assert_array_less(*args, **kwargs) def test_simple_arrays(self): x = np.array([1.1, 2.2]) From 57f1cf9ebdf3541aff57c9e982dd4d333fc96911 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 5 Sep 2025 17:06:15 +0000 Subject: [PATCH 0397/1018] MAINT: Bump github/codeql-action from 3.30.0 to 3.30.1 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.30.0 to 3.30.1. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d...f1f6e5f6af878fb37288ce1c627459e94dbf7d01) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.30.1 dependency-type: direct:production update-type: version-update:semver-patch ... 
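The test refactor in PATCH 0396 applies one pattern throughout: state that `setup_method` used to stash on `self` becomes either a plain class attribute (when immutable) or a `_create_*` helper called at the top of each test, so no mutable objects are shared between tests. A condensed sketch of the resulting shape, with an illustrative class and data::

    import numpy as np

    class TestExample:
        seed = 1234567890                 # immutable: class attribute

        def _create_data(self):
            rng = np.random.RandomState(self.seed)
            return rng.random(5)          # mutable: built fresh per test

        def test_mean_is_bounded(self):
            data = self._create_data()
            assert 0.0 <= data.mean() <= 1.0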
Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index ecdf8cfeb85e..363ad25f2e50 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v3.30.0 + uses: github/codeql-action/init@f1f6e5f6af878fb37288ce1c627459e94dbf7d01 # v3.30.1 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v3.30.0 + uses: github/codeql-action/autobuild@f1f6e5f6af878fb37288ce1c627459e94dbf7d01 # v3.30.1 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v3.30.0 + uses: github/codeql-action/analyze@f1f6e5f6af878fb37288ce1c627459e94dbf7d01 # v3.30.1 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 19acbd619437..98cab6b712f6 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v2.1.27 + uses: github/codeql-action/upload-sarif@f1f6e5f6af878fb37288ce1c627459e94dbf7d01 # v2.1.27 with: sarif_file: results.sarif From 1a9d7316cac3390e657718f3c2ffa0af0b35eef8 Mon Sep 17 00:00:00 2001 From: Samruddhi Baviskar <79337465+samruddhibaviskar11@users.noreply.github.com> Date: Sat, 6 Sep 2025 02:26:45 +0530 Subject: [PATCH 0398/1018] ENH: Use itertools.product for ndindex to improve performance (#29165) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * PERF: Rewrite ndindex using itertools.product for speed and memory efficiency * Refactor ndindex using itertools.product and add ASV benchmarks and expanded tests * Fix linting issues flagged by Ruff across ndindex implementation, tests, and benchmarks * Add release note and finalize ndindex improvements * BUG: Raise ValueError for negative dimensions in ndindex * MAINT: Expose ndindex in numpy.lib for public usage and benchmarks * MAINT: Fix ndindex __module__ attribute for public API test * Remove numpy.distutils tests after dropping distutils in NumPy 2.0 * Format imports in numpy.lib.__init__.py with Ruff * Set ndindex.__module__ = 'numpy.lib' to match test expectation * BUG: Raise ValueError for negative dimensions in ndindex; MAINT: Remove ndindex from numpy.lib public API; TEST: Update negative dimension test * Revert distutils test deletions as requested * Revert numpy/lib/__init__.py to upstream/main state as requested * lib: ndindex—handle empty shape and fix implementation bugs - Use min(..., default=0) to handle empty shapes safely. - Fix cascade issues in ndindex implementation. - Add regression tests to cover empty shapes and previous bugs. * TST: remove imports to fix lint for test_index_tricks.py * STYLE: Remove ruff formatting and lint fixes * REF: ndindex test cases — added functional tests and applied Ruff fixes * Add benchmark comparing ndindex vs itertools.product * MAINT: remove unused in ndindex * DOC: clarify release note for ndindex performance improvement * trigger CI rebuild --- benchmarks/benchmarks/bench_ndindex.py | 54 ++++++++ .../upcoming_changes/29165.performance.rst | 7 + numpy/lib/_index_tricks_impl.py | 12 +- numpy/lib/tests/test_index_tricks.py | 120 ++++++++++++++++++ 4 files changed, 186 insertions(+), 7 deletions(-) create mode 100644 benchmarks/benchmarks/bench_ndindex.py create mode 100644 doc/release/upcoming_changes/29165.performance.rst diff --git a/benchmarks/benchmarks/bench_ndindex.py b/benchmarks/benchmarks/bench_ndindex.py new file mode 100644 index 000000000000..132d4eeed472 --- /dev/null +++ b/benchmarks/benchmarks/bench_ndindex.py @@ -0,0 +1,54 @@ +from itertools import product + +import numpy as np + +from .common import Benchmark + + +class NdindexBenchmark(Benchmark): + """ + Benchmark comparing numpy.ndindex() and itertools.product() + for different multi-dimensional shapes. + """ + + # Fix: Define each dimension separately, not as tuples + # ASV will pass each parameter list element to setup() + params = [ + [(10, 10), (20, 20), (50, 50), (10, 10, 10), (20, 30, 40), (50, 60, 90)] + ] + param_names = ["shape"] + + def setup(self, shape): + """Setup method called before each benchmark run.""" + # Access ndindex through NumPy's main namespace + self.ndindex = np.ndindex + + def time_ndindex(self, shape): + """ + Measure time taken by np.ndindex. + It creates an iterator that goes over each index. 
+ """ + for _ in self.ndindex(*shape): + pass # Just loop through, no work inside + + def time_itertools_product(self, shape): + """ + Measure time taken by itertools.product. + Same goal: iterate over all index positions. + """ + for _ in product(*(range(s) for s in shape)): + pass + + def peakmem_ndindex(self, shape): + """ + Measure peak memory used when fully consuming + np.ndindex iterator by converting it to a list. + """ + return list(self.ndindex(*shape)) + + def peakmem_itertools_product(self, shape): + """ + Measure peak memory used when fully consuming + itertools.product iterator by converting it to a list. + """ + return list(product(*(range(s) for s in shape))) diff --git a/doc/release/upcoming_changes/29165.performance.rst b/doc/release/upcoming_changes/29165.performance.rst new file mode 100644 index 000000000000..4e1a9a4ecdbc --- /dev/null +++ b/doc/release/upcoming_changes/29165.performance.rst @@ -0,0 +1,7 @@ +Rewrite of `np.ndindex` using `itertools.product` +-------------------------------------------------- +The `numpy.ndindex` function now uses `itertools.product` internally, +providing significant improvements in performance for large iteration spaces, +while maintaining the original behavior and interface. +For example, for an array of shape (50, 60, 90) the NumPy `ndindex` +benchmark improves performance by a factor 5.2. \ No newline at end of file diff --git a/numpy/lib/_index_tricks_impl.py b/numpy/lib/_index_tricks_impl.py index 131bbae5d098..a0d04ad3285f 100644 --- a/numpy/lib/_index_tricks_impl.py +++ b/numpy/lib/_index_tricks_impl.py @@ -2,6 +2,7 @@ import math import sys import warnings +from itertools import product import numpy as np import numpy._core.numeric as _nx @@ -12,7 +13,6 @@ from numpy._core.numerictypes import issubdtype from numpy._utils import set_module from numpy.lib._function_base_impl import diff -from numpy.lib.stride_tricks import as_strided array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') @@ -688,10 +688,9 @@ class ndindex: def __init__(self, *shape): if len(shape) == 1 and isinstance(shape[0], tuple): shape = shape[0] - x = as_strided(_nx.zeros(1), shape=shape, - strides=_nx.zeros_like(shape)) - self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'], - order='C') + if min(shape, default=0) < 0: + raise ValueError("negative dimensions are not allowed") + self._iter = product(*map(range, shape)) def __iter__(self): return self @@ -724,8 +723,7 @@ def __next__(self): iteration. """ - next(self._it) - return self._it.multi_index + return next(self._iter) # You can do all this with slice() plus a few special objects, diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py index ed8709db5238..387fdfec28f1 100644 --- a/numpy/lib/tests/test_index_tricks.py +++ b/numpy/lib/tests/test_index_tricks.py @@ -566,3 +566,123 @@ def test_ndindex(): # Make sure 0-sized ndindex works correctly x = list(ndindex(*[0])) assert_equal(x, []) + + +def test_ndindex_zero_dimensions_explicit(): + """Test ndindex produces empty iterators for explicit + zero-length dimensions.""" + assert list(np.ndindex(0, 3)) == [] + assert list(np.ndindex(3, 0, 2)) == [] + assert list(np.ndindex(0)) == [] + + +@pytest.mark.parametrize("bad_shape", [2.5, "2", [2, 3], (2.0, 3)]) +def test_ndindex_non_integer_dimensions(bad_shape): + """Test that non-integer dimensions raise TypeError.""" + with pytest.raises(TypeError): + # Passing invalid_shape_arg directly to ndindex. 
It will try to use it + # as a dimension and should trigger a TypeError. + list(np.ndindex(bad_shape)) + + +def test_ndindex_stop_iteration_behavior(): + """Test that StopIteration is raised properly after exhaustion.""" + it = np.ndindex(2, 2) + # Exhaust the iterator + list(it) + # Should raise StopIteration on subsequent calls + with pytest.raises(StopIteration): + next(it) + + +def test_ndindex_iterator_independence(): + """Test that each ndindex instance creates independent iterators.""" + shape = (2, 3) + iter1 = np.ndindex(*shape) + iter2 = np.ndindex(*shape) + + next(iter1) + next(iter1) + + assert_equal(next(iter2), (0, 0)) + assert_equal(next(iter1), (0, 2)) + + +def test_ndindex_tuple_vs_args_consistency(): + """Test that ndindex(shape) and ndindex(*shape) produce same results.""" + # Single dimension + assert_equal(list(np.ndindex(5)), list(np.ndindex((5,)))) + + # Multiple dimensions + assert_equal(list(np.ndindex(2, 3)), list(np.ndindex((2, 3)))) + + # Complex shape + shape = (2, 1, 4) + assert_equal(list(np.ndindex(*shape)), list(np.ndindex(shape))) + + +def test_ndindex_against_ndenumerate_compatibility(): + """Test ndindex produces same indices as ndenumerate.""" + for shape in [(1, 2, 3), (3,), (2, 2), ()]: + ndindex_result = list(np.ndindex(shape)) + ndenumerate_indices = [ix for ix, _ in np.ndenumerate(np.zeros(shape))] + assert_array_equal(ndindex_result, ndenumerate_indices) + + +def test_ndindex_multidimensional_correctness(): + """Test ndindex produces correct indices for multidimensional arrays.""" + shape = (2, 1, 3) + result = list(np.ndindex(*shape)) + expected = [ + (0, 0, 0), + (0, 0, 1), + (0, 0, 2), + (1, 0, 0), + (1, 0, 1), + (1, 0, 2), + ] + assert_equal(result, expected) + + +def test_ndindex_large_dimensions_behavior(): + """Test ndindex behaves correctly when initialized with large dimensions.""" + large_shape = (1000, 1000) + iter_obj = np.ndindex(*large_shape) + first_element = next(iter_obj) + assert_equal(first_element, (0, 0)) + + +def test_ndindex_empty_iterator_behavior(): + """Test detailed behavior of empty iterators.""" + empty_iter = np.ndindex(0, 5) + assert_equal(list(empty_iter), []) + + empty_iter2 = np.ndindex(3, 0, 2) + with pytest.raises(StopIteration): + next(empty_iter2) + + +@pytest.mark.parametrize( + "negative_shape_arg", + [ + (-1,), # Single negative dimension + (2, -3, 4), # Negative dimension in the middle + (5, 0, -2), # Mix of valid (0) and invalid (negative) dimensions + ], +) +def test_ndindex_negative_dimensions(negative_shape_arg): + """Test that negative dimensions raise ValueError.""" + with pytest.raises(ValueError): + ndindex(negative_shape_arg) + + +def test_ndindex_empty_shape(): + import numpy as np + # ndindex() and ndindex(()) should return a single empty tuple + assert list(np.ndindex()) == [()] + assert list(np.ndindex(())) == [()] + +def test_ndindex_negative_dim_raises(): + # ndindex(-1) should raise a ValueError + with pytest.raises(ValueError): + list(np.ndindex(-1)) From facd503f42b96e40f6d97989174db5ceec56f6d7 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 5 Sep 2025 23:18:57 +0200 Subject: [PATCH 0399/1018] TYP: fix ``np.bool`` method declarations --- numpy/__init__.pyi | 216 ++++++++++++++++--- numpy/_typing/_callable.pyi | 77 +------ numpy/typing/tests/data/fail/arithmetic.pyi | 2 +- numpy/typing/tests/data/fail/bitwise_ops.pyi | 2 +- 4 files changed, 192 insertions(+), 105 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 8dc1daf7f797..65a60f2b2b9c 100644 --- 
a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -134,12 +134,6 @@ from numpy._typing import ( ) from numpy._typing._callable import ( - _BoolOp, - _BoolBitOp, - _BoolSub, - _BoolTrueDiv, - _BoolMod, - _BoolDivMod, _IntTrueDiv, _UnsignedIntOp, _UnsignedIntBitOp, @@ -803,6 +797,7 @@ _1NShapeT = TypeVar("_1NShapeT", bound=tuple[L[1], *tuple[L[1], ...]]) # (1,) | _ScalarT = TypeVar("_ScalarT", bound=generic) _ScalarT_co = TypeVar("_ScalarT_co", bound=generic, default=Any, covariant=True) _NumberT = TypeVar("_NumberT", bound=number) +_InexactT = TypeVar("_InexactT", bound=inexact) _RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) _FloatingT_co = TypeVar("_FloatingT_co", bound=floating, default=floating, covariant=True) _IntegerT = TypeVar("_IntegerT", bound=integer) @@ -3898,12 +3893,14 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __new__(cls, value: object, /) -> np.bool[builtins.bool]: ... def __bool__(self, /) -> _BoolItemT_co: ... + @overload def __int__(self: np.bool[L[False]], /) -> L[0]: ... @overload def __int__(self: np.bool[L[True]], /) -> L[1]: ... @overload def __int__(self, /) -> L[0, 1]: ... + def __abs__(self) -> Self: ... @overload @@ -3913,23 +3910,191 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __invert__(self, /) -> np.bool: ... - __add__: _BoolOp[np.bool] - __radd__: _BoolOp[np.bool] - __sub__: _BoolSub - __rsub__: _BoolSub - __mul__: _BoolOp[np.bool] - __rmul__: _BoolOp[np.bool] - __truediv__: _BoolTrueDiv - __rtruediv__: _BoolTrueDiv - __floordiv__: _BoolOp[int8] - __rfloordiv__: _BoolOp[int8] - __pow__: _BoolOp[int8] - __rpow__: _BoolOp[int8] - - __lshift__: _BoolBitOp[int8] - __rlshift__: _BoolBitOp[int8] - __rshift__: _BoolBitOp[int8] - __rrshift__: _BoolBitOp[int8] + @overload + def __add__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __add__(self, other: builtins.bool | bool_, /) -> bool_: ... + @overload + def __add__(self, other: int, /) -> int_: ... + @overload + def __add__(self, other: float, /) -> float64: ... + @overload + def __add__(self, other: complex, /) -> complex128: ... + + @overload + def __radd__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __radd__(self, other: builtins.bool, /) -> bool_: ... + @overload + def __radd__(self, other: int, /) -> int_: ... + @overload + def __radd__(self, other: float, /) -> float64: ... + @overload + def __radd__(self, other: complex, /) -> complex128: ... + + @overload + def __sub__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __sub__(self, other: int, /) -> int_: ... + @overload + def __sub__(self, other: float, /) -> float64: ... + @overload + def __sub__(self, other: complex, /) -> complex128: ... + + @overload + def __rsub__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __rsub__(self, other: int, /) -> int_: ... + @overload + def __rsub__(self, other: float, /) -> float64: ... + @overload + def __rsub__(self, other: complex, /) -> complex128: ... + + @overload + def __mul__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __mul__(self, other: builtins.bool | bool_, /) -> bool_: ... + @overload + def __mul__(self, other: int, /) -> int_: ... + @overload + def __mul__(self, other: float, /) -> float64: ... + @overload + def __mul__(self, other: complex, /) -> complex128: ... + + @overload + def __rmul__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __rmul__(self, other: builtins.bool, /) -> bool_: ... 
+ @overload + def __rmul__(self, other: int, /) -> int_: ... + @overload + def __rmul__(self, other: float, /) -> float64: ... + @overload + def __rmul__(self, other: complex, /) -> complex128: ... + + @overload + def __pow__(self, other: _NumberT, mod: None = None, /) -> _NumberT: ... + @overload + def __pow__(self, other: builtins.bool | bool_, mod: None = None, /) -> int8: ... + @overload + def __pow__(self, other: int, mod: None = None, /) -> int_: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... + + @overload + def __rpow__(self, other: _NumberT, mod: None = None, /) -> _NumberT: ... + @overload + def __rpow__(self, other: builtins.bool, mod: None = None, /) -> int8: ... + @overload + def __rpow__(self, other: int, mod: None = None, /) -> int_: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... + + @overload + def __truediv__(self, other: _InexactT, /) -> _InexactT: ... + @overload + def __truediv__(self, other: float | integer | bool_, /) -> float64: ... + @overload + def __truediv__(self, other: complex, /) -> complex128: ... + + @overload + def __rtruediv__(self, other: _InexactT, /) -> _InexactT: ... + @overload + def __rtruediv__(self, other: float | integer, /) -> float64: ... + @overload + def __rtruediv__(self, other: complex, /) -> complex128: ... + + @overload + def __floordiv__(self, other: _RealNumberT, /) -> _RealNumberT: ... + @overload + def __floordiv__(self, other: builtins.bool | bool_, /) -> int8: ... + @overload + def __floordiv__(self, other: int, /) -> int_: ... + @overload + def __floordiv__(self, other: float, /) -> float64: ... + + @overload + def __rfloordiv__(self, other: _RealNumberT, /) -> _RealNumberT: ... + @overload + def __rfloordiv__(self, other: builtins.bool, /) -> int8: ... + @overload + def __rfloordiv__(self, other: int, /) -> int_: ... + @overload + def __rfloordiv__(self, other: float, /) -> float64: ... + + # keep in sync with __floordiv__ + @overload + def __mod__(self, other: _RealNumberT, /) -> _RealNumberT: ... + @overload + def __mod__(self, other: builtins.bool | bool_, /) -> int8: ... + @overload + def __mod__(self, other: int, /) -> int_: ... + @overload + def __mod__(self, other: float, /) -> float64: ... + + # keep in sync with __rfloordiv__ + @overload + def __rmod__(self, other: _RealNumberT, /) -> _RealNumberT: ... + @overload + def __rmod__(self, other: builtins.bool, /) -> int8: ... + @overload + def __rmod__(self, other: int, /) -> int_: ... + @overload + def __rmod__(self, other: float, /) -> float64: ... + + # keep in sync with __mod__ + @overload + def __divmod__(self, other: _RealNumberT, /) -> _2Tuple[_RealNumberT]: ... + @overload + def __divmod__(self, other: builtins.bool | bool_, /) -> _2Tuple[int8]: ... + @overload + def __divmod__(self, other: int, /) -> _2Tuple[int_]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[float64]: ... + + # keep in sync with __rmod__ + @overload + def __rdivmod__(self, other: _RealNumberT, /) -> _2Tuple[_RealNumberT]: ... + @overload + def __rdivmod__(self, other: builtins.bool, /) -> _2Tuple[int8]: ... + @overload + def __rdivmod__(self, other: int, /) -> _2Tuple[int_]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... 
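A minimal runtime sketch of the promotion rules these ``np.bool`` overloads encode, assuming NumPy 2.x scalar behavior (illustrative only, not part of the patch):

    import numpy as np

    b = np.bool_(True)

    # addition/multiplication of two NumPy bools stays boolean
    assert type(b + b) is np.bool_

    # subtraction of two NumPy bools is deliberately undefined
    try:
        b - b
    except TypeError:
        pass

    # true division promotes to float64; power, floor division and
    # modulo promote to int8, matching the overloads above
    assert type(b / b) is np.float64
    assert type(b ** b) is np.int8
    assert type(b // b) is np.int8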
+ + @overload + def __lshift__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __lshift__(self, other: builtins.bool | bool_, /) -> int8: ... + @overload + def __lshift__(self, other: int, /) -> int_: ... + + @overload + def __rlshift__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __rlshift__(self, other: builtins.bool, /) -> int8: ... + @overload + def __rlshift__(self, other: int, /) -> int_: ... + + # keep in sync with __lshift__ + @overload + def __rshift__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __rshift__(self, other: builtins.bool | bool_, /) -> int8: ... + @overload + def __rshift__(self, other: int, /) -> int_: ... + + # keep in sync with __rlshift__ + @overload + def __rrshift__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __rrshift__(self, other: builtins.bool, /) -> int8: ... + @overload + def __rrshift__(self, other: int, /) -> int_: ... @overload def __and__(self: np.bool[L[False]], other: builtins.bool | np.bool, /) -> np.bool[L[False]]: ... @@ -3973,11 +4138,6 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __or__(self, other: int, /) -> np.bool | intp: ... __ror__ = __or__ - __mod__: _BoolMod - __rmod__: _BoolMod - __divmod__: _BoolDivMod - __rdivmod__: _BoolDivMod - __lt__: _ComparisonOpLT[_NumberLike_co, _ArrayLikeNumber_co] __le__: _ComparisonOpLE[_NumberLike_co, _ArrayLikeNumber_co] __gt__: _ComparisonOpGT[_NumberLike_co, _ArrayLikeNumber_co] diff --git a/numpy/_typing/_callable.pyi b/numpy/_typing/_callable.pyi index 1ce7de5bb423..5bb8beb81871 100644 --- a/numpy/_typing/_callable.pyi +++ b/numpy/_typing/_callable.pyi @@ -40,12 +40,11 @@ from ._nbit import _NBitInt from ._nested_sequence import _NestedSequence from ._scalars import _BoolLike_co, _IntLike_co, _NumberLike_co -_T1 = TypeVar("_T1") -_T2 = TypeVar("_T2") +_T = TypeVar("_T") _T1_contra = TypeVar("_T1_contra", contravariant=True) _T2_contra = TypeVar("_T2_contra", contravariant=True) -_2Tuple: TypeAlias = tuple[_T1, _T1] +_2Tuple: TypeAlias = tuple[_T, _T] _NBit1 = TypeVar("_NBit1", bound=NBitBase) _NBit2 = TypeVar("_NBit2", bound=NBitBase) @@ -53,80 +52,8 @@ _NBit2 = TypeVar("_NBit2", bound=NBitBase) _IntType = TypeVar("_IntType", bound=integer) _FloatType = TypeVar("_FloatType", bound=floating) _NumberType = TypeVar("_NumberType", bound=number) -_NumberType_co = TypeVar("_NumberType_co", covariant=True, bound=number) _GenericType_co = TypeVar("_GenericType_co", covariant=True, bound=generic) -@type_check_only -class _BoolOp(Protocol[_GenericType_co]): - @overload - def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ... - @overload # platform dependent - def __call__(self, other: int, /) -> int_: ... - @overload - def __call__(self, other: float, /) -> float64: ... - @overload - def __call__(self, other: complex, /) -> complex128: ... - @overload - def __call__(self, other: _NumberType, /) -> _NumberType: ... - -@type_check_only -class _BoolBitOp(Protocol[_GenericType_co]): - @overload - def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ... - @overload # platform dependent - def __call__(self, other: int, /) -> int_: ... - @overload - def __call__(self, other: _IntType, /) -> _IntType: ... - -@type_check_only -class _BoolSub(Protocol): - # Note that `other: bool` is absent here - @overload - def __call__(self, other: bool, /) -> NoReturn: ... - @overload # platform dependent - def __call__(self, other: int, /) -> int_: ... - @overload - def __call__(self, other: float, /) -> float64: ... 
- @overload - def __call__(self, other: complex, /) -> complex128: ... - @overload - def __call__(self, other: _NumberType, /) -> _NumberType: ... - -@type_check_only -class _BoolTrueDiv(Protocol): - @overload - def __call__(self, other: float | _IntLike_co, /) -> float64: ... - @overload - def __call__(self, other: complex, /) -> complex128: ... - @overload - def __call__(self, other: _NumberType, /) -> _NumberType: ... - -@type_check_only -class _BoolMod(Protocol): - @overload - def __call__(self, other: _BoolLike_co, /) -> int8: ... - @overload # platform dependent - def __call__(self, other: int, /) -> int_: ... - @overload - def __call__(self, other: float, /) -> float64: ... - @overload - def __call__(self, other: _IntType, /) -> _IntType: ... - @overload - def __call__(self, other: _FloatType, /) -> _FloatType: ... - -@type_check_only -class _BoolDivMod(Protocol): - @overload - def __call__(self, other: _BoolLike_co, /) -> _2Tuple[int8]: ... - @overload # platform dependent - def __call__(self, other: int, /) -> _2Tuple[int_]: ... - @overload - def __call__(self, other: float, /) -> _2Tuple[np.float64]: ... - @overload - def __call__(self, other: _IntType, /) -> _2Tuple[_IntType]: ... - @overload - def __call__(self, other: _FloatType, /) -> _2Tuple[_FloatType]: ... - @type_check_only class _IntTrueDiv(Protocol[_NBit1]): @overload diff --git a/numpy/typing/tests/data/fail/arithmetic.pyi b/numpy/typing/tests/data/fail/arithmetic.pyi index d62906ae87d9..a68df2ea53c3 100644 --- a/numpy/typing/tests/data/fail/arithmetic.pyi +++ b/numpy/typing/tests/data/fail/arithmetic.pyi @@ -114,7 +114,7 @@ AR_f **= AR_LIKE_c # type: ignore[arg-type] # Scalars -b_ - b_ # type: ignore[call-overload] +b_ - b_ # type: ignore[operator] dt + dt # type: ignore[operator] td - dt # type: ignore[operator] diff --git a/numpy/typing/tests/data/fail/bitwise_ops.pyi b/numpy/typing/tests/data/fail/bitwise_ops.pyi index 29dfe79287ad..fe51f15adc52 100644 --- a/numpy/typing/tests/data/fail/bitwise_ops.pyi +++ b/numpy/typing/tests/data/fail/bitwise_ops.pyi @@ -8,7 +8,7 @@ i = 0 f8 = np.float64() -b_ >> f8 # type: ignore[call-overload] +b_ >> f8 # type: ignore[operator] i8 << f8 # type: ignore[call-overload] i | f8 # type: ignore[operator] i8 ^ f8 # type: ignore[call-overload] From 79080f1419f3dc4ce154ff5cc4acaee5d6b39dc0 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 6 Sep 2025 06:18:46 +0200 Subject: [PATCH 0400/1018] TYP: appease ruff --- numpy/__init__.pyi | 14 +++++++------- numpy/_typing/_callable.pyi | 14 ++------------ 2 files changed, 9 insertions(+), 19 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 65a60f2b2b9c..1cb5790f3dfb 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3893,7 +3893,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __new__(cls, value: object, /) -> np.bool[builtins.bool]: ... def __bool__(self, /) -> _BoolItemT_co: ... - + @overload def __int__(self: np.bool[L[False]], /) -> L[0]: ... @overload @@ -3924,7 +3924,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __radd__(self, other: _NumberT, /) -> _NumberT: ... @overload - def __radd__(self, other: builtins.bool, /) -> bool_: ... + def __radd__(self, other: builtins.bool, /) -> bool_: ... @overload def __radd__(self, other: int, /) -> int_: ... @overload @@ -3934,7 +3934,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __sub__(self, other: _NumberT, /) -> _NumberT: ... 
- @overload + @overload def __sub__(self, other: int, /) -> int_: ... @overload def __sub__(self, other: float, /) -> float64: ... @@ -3943,7 +3943,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __rsub__(self, other: _NumberT, /) -> _NumberT: ... - @overload + @overload def __rsub__(self, other: int, /) -> int_: ... @overload def __rsub__(self, other: float, /) -> float64: ... @@ -3964,7 +3964,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __rmul__(self, other: _NumberT, /) -> _NumberT: ... @overload - def __rmul__(self, other: builtins.bool, /) -> bool_: ... + def __rmul__(self, other: builtins.bool, /) -> bool_: ... @overload def __rmul__(self, other: int, /) -> int_: ... @overload @@ -3986,7 +3986,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __rpow__(self, other: _NumberT, mod: None = None, /) -> _NumberT: ... @overload - def __rpow__(self, other: builtins.bool, mod: None = None, /) -> int8: ... + def __rpow__(self, other: builtins.bool, mod: None = None, /) -> int8: ... @overload def __rpow__(self, other: int, mod: None = None, /) -> int_: ... @overload @@ -4020,7 +4020,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __rfloordiv__(self, other: _RealNumberT, /) -> _RealNumberT: ... @overload - def __rfloordiv__(self, other: builtins.bool, /) -> int8: ... + def __rfloordiv__(self, other: builtins.bool, /) -> int8: ... @overload def __rfloordiv__(self, other: int, /) -> int_: ... @overload diff --git a/numpy/_typing/_callable.pyi b/numpy/_typing/_callable.pyi index 5bb8beb81871..647d1a5afb56 100644 --- a/numpy/_typing/_callable.pyi +++ b/numpy/_typing/_callable.pyi @@ -8,16 +8,7 @@ See the `Mypy documentation`_ on protocols for more details. """ -from typing import ( - Any, - NoReturn, - Protocol, - TypeAlias, - TypeVar, - final, - overload, - type_check_only, -) +from typing import Any, Protocol, TypeAlias, TypeVar, final, overload, type_check_only import numpy as np from numpy import ( @@ -26,7 +17,6 @@ from numpy import ( float64, floating, generic, - int8, int_, integer, number, @@ -38,7 +28,7 @@ from . 
import NBitBase from ._array_like import NDArray from ._nbit import _NBitInt from ._nested_sequence import _NestedSequence -from ._scalars import _BoolLike_co, _IntLike_co, _NumberLike_co +from ._scalars import _NumberLike_co _T = TypeVar("_T") _T1_contra = TypeVar("_T1_contra", contravariant=True) From 1bcc509d5224cc729ea6c3229d1924b94cc0f2b0 Mon Sep 17 00:00:00 2001 From: Sanjay Kumar Sakamuri Kamalakar Date: Sat, 6 Sep 2025 17:16:34 +0530 Subject: [PATCH 0401/1018] Update VXE and VXE2 detection regex patterns --- meson_cpu/s390x/meson.build | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/meson_cpu/s390x/meson.build b/meson_cpu/s390x/meson.build index a69252d1607c..b7a420c27f0d 100644 --- a/meson_cpu/s390x/meson.build +++ b/meson_cpu/s390x/meson.build @@ -7,12 +7,12 @@ VX = mod_features.new( ) VXE = mod_features.new( 'VXE', 2, implies: VX, args: {'val': '-march=arch12', 'match': '-march=.*'}, - detect: {'val': 'VXE', 'match': 'VX'}, + detect: {'val': 'VXE', 'match': '\\bvxe\\b'}, test_code: files(source_root + '/numpy/distutils/checks/cpu_vxe.c')[0], ) VXE2 = mod_features.new( 'VXE2', 3, implies: VXE, args: {'val': '-march=arch13', 'match': '-march=.*'}, - detect: {'val': 'VXE2', 'match': 'VX.*'}, + detect: {'val': 'VXE2', 'match': '\\bvxe2\\b'}, test_code: files(source_root + '/numpy/distutils/checks/cpu_vxe2.c')[0], ) S390X_FEATURES = {'VX': VX, 'VXE': VXE, 'VXE2': VXE2} From 3e396702d1718b374ee8cddd39d8a56fcf04a3b3 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 7 Sep 2025 01:42:36 +0200 Subject: [PATCH 0402/1018] TYP: fix ``np.number`` method declarations --- numpy/__init__.pyi | 61 +++++++++++++++++++++++++++++++--------------- 1 file changed, 42 insertions(+), 19 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 1cb5790f3dfb..9ae8664da0f1 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -134,7 +134,6 @@ from numpy._typing import ( ) from numpy._typing._callable import ( - _IntTrueDiv, _UnsignedIntOp, _UnsignedIntBitOp, _UnsignedIntMod, @@ -146,11 +145,14 @@ from numpy._typing._callable import ( _FloatOp, _FloatMod, _FloatDivMod, - _NumberOp, _ComparisonOpLT, _ComparisonOpLE, _ComparisonOpGT, _ComparisonOpGE, + _SupportsLT, + _SupportsLE, + _SupportsGT, + _SupportsGE, ) # NOTE: Numpy's mypy plugin is used for removing the types unavailable to the specific platform @@ -211,7 +213,7 @@ from typing import ( # library include `typing_extensions` stubs: # https://github.com/python/typeshed/blob/main/stdlib/typing_extensions.pyi from _typeshed import Incomplete, StrOrBytesPath, SupportsFlush, SupportsLenAndGetItem, SupportsWrite -from typing_extensions import CapsuleType, TypeVar, deprecated +from typing_extensions import CapsuleType, TypeVar, deprecated, override from numpy import ( char, @@ -3855,23 +3857,44 @@ class number(generic[_NumberItemT_co], Generic[_NBit, _NumberItemT_co]): def __pos__(self) -> Self: ... def __abs__(self) -> Self: ... - __add__: _NumberOp - __radd__: _NumberOp - __sub__: _NumberOp - __rsub__: _NumberOp - __mul__: _NumberOp - __rmul__: _NumberOp - __floordiv__: _NumberOp - __rfloordiv__: _NumberOp - __pow__: _NumberOp - __rpow__: _NumberOp - __truediv__: _NumberOp - __rtruediv__: _NumberOp + def __add__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __radd__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __sub__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __rsub__(self, other: _NumberLike_co, /) -> Incomplete: ... 
+ def __mul__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __rmul__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __pow__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __rpow__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __truediv__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __rtruediv__(self, other: _NumberLike_co, /) -> Incomplete: ... - __lt__: _ComparisonOpLT[_NumberLike_co, _ArrayLikeNumber_co] - __le__: _ComparisonOpLE[_NumberLike_co, _ArrayLikeNumber_co] - __gt__: _ComparisonOpGT[_NumberLike_co, _ArrayLikeNumber_co] - __ge__: _ComparisonOpGE[_NumberLike_co, _ArrayLikeNumber_co] + @overload + def __lt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __lt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ... + @overload + def __lt__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __le__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __le__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ... + @overload + def __le__(self, other: _SupportsGE, /) -> bool_: ... + + @overload + def __gt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __gt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ... + @overload + def __gt__(self, other: _SupportsLT, /) -> bool_: ... + + @overload + def __ge__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __ge__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ... + @overload + def __ge__(self, other: _SupportsLE, /) -> bool_: ... class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @property From 5d7e9cc522e2379d7b66f8b9593adb0abc2c16a6 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 7 Sep 2025 01:43:11 +0200 Subject: [PATCH 0403/1018] TYP: fix ``np.integer`` method declarations --- numpy/__init__.pyi | 16 ++++++++++++++-- numpy/_typing/_callable.pyi | 29 ----------------------------- 2 files changed, 14 insertions(+), 31 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 9ae8664da0f1..f0470e94e7e8 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -4204,8 +4204,20 @@ class integer(_IntegralMixin, _RoundMixin, number[_NBit, int]): def __index__(self, /) -> int: ... def __invert__(self, /) -> Self: ... - __truediv__: _IntTrueDiv[_NBit] - __rtruediv__: _IntTrueDiv[_NBit] + @override # type: ignore[override] + @overload + def __truediv__(self, other: float | integer, /) -> float64: ... + @overload + def __truediv__(self, other: complex, /) -> complex128: ... + + @override # type: ignore[override] + @overload + def __rtruediv__(self, other: float | integer, /) -> float64: ... + @overload + def __rtruediv__(self, other: complex, /) -> complex128: ... + + def __floordiv__(self, value: _IntLike_co, /) -> integer: ... + def __rfloordiv__(self, value: _IntLike_co, /) -> integer: ... def __mod__(self, value: _IntLike_co, /) -> integer: ... def __rmod__(self, value: _IntLike_co, /) -> integer: ... # Ensure that objects annotated as `integer` support bit-wise operations diff --git a/numpy/_typing/_callable.pyi b/numpy/_typing/_callable.pyi index 647d1a5afb56..b67ee18ba919 100644 --- a/numpy/_typing/_callable.pyi +++ b/numpy/_typing/_callable.pyi @@ -16,10 +16,8 @@ from numpy import ( complexfloating, float64, floating, - generic, int_, integer, - number, signedinteger, unsignedinteger, ) @@ -28,7 +26,6 @@ from . 
import NBitBase from ._array_like import NDArray from ._nbit import _NBitInt from ._nested_sequence import _NestedSequence -from ._scalars import _NumberLike_co _T = TypeVar("_T") _T1_contra = TypeVar("_T1_contra", contravariant=True) @@ -39,28 +36,6 @@ _2Tuple: TypeAlias = tuple[_T, _T] _NBit1 = TypeVar("_NBit1", bound=NBitBase) _NBit2 = TypeVar("_NBit2", bound=NBitBase) -_IntType = TypeVar("_IntType", bound=integer) -_FloatType = TypeVar("_FloatType", bound=floating) -_NumberType = TypeVar("_NumberType", bound=number) -_GenericType_co = TypeVar("_GenericType_co", covariant=True, bound=generic) - -@type_check_only -class _IntTrueDiv(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> floating[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> floating[_NBit1] | floating[_NBitInt]: ... - @overload - def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... - @overload - def __call__( - self, other: complex, / - ) -> complexfloating[_NBit1, _NBit1] | complex128: ... - @overload - def __call__( - self, other: integer[_NBit2], / - ) -> floating[_NBit1] | floating[_NBit2]: ... - @type_check_only class _UnsignedIntOp(Protocol[_NBit1]): # NOTE: `uint64 + signedinteger -> float64` @@ -206,10 +181,6 @@ class _FloatDivMod(Protocol[_NBit1]): self, other: integer[_NBit2] | floating[_NBit2], / ) -> _2Tuple[floating[_NBit1]] | _2Tuple[floating[_NBit2]]: ... -@type_check_only -class _NumberOp(Protocol): - def __call__(self, other: _NumberLike_co, /) -> Any: ... - @final @type_check_only class _SupportsLT(Protocol): From cad7c071f2072ec34cc77e008f1defc7a65ce735 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 7 Sep 2025 01:43:41 +0200 Subject: [PATCH 0404/1018] TYP: fix ``np.bool`` comparison method declarations --- numpy/__init__.pyi | 31 +++++++++++++++++++++++++++---- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index f0470e94e7e8..e539a16827c2 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -4161,10 +4161,33 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __or__(self, other: int, /) -> np.bool | intp: ... __ror__ = __or__ - __lt__: _ComparisonOpLT[_NumberLike_co, _ArrayLikeNumber_co] - __le__: _ComparisonOpLE[_NumberLike_co, _ArrayLikeNumber_co] - __gt__: _ComparisonOpGT[_NumberLike_co, _ArrayLikeNumber_co] - __ge__: _ComparisonOpGE[_NumberLike_co, _ArrayLikeNumber_co] + @overload + def __lt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __lt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ... + @overload + def __lt__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __le__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __le__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ... + @overload + def __le__(self, other: _SupportsGE, /) -> bool_: ... + + @overload + def __gt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __gt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ... + @overload + def __gt__(self, other: _SupportsLT, /) -> bool_: ... + + @overload + def __ge__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __ge__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ... + @overload + def __ge__(self, other: _SupportsLE, /) -> bool_: ... 
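These comparison overloads distinguish scalar operands, which yield an ``np.bool``, from array-likes, which broadcast to a boolean array; a brief illustration (assuming NumPy 2.x):

    import numpy as np

    b = np.bool_(True)

    # scalar comparison returns a NumPy boolean scalar
    assert type(b < 2) is np.bool_

    # array-like comparison broadcasts to a boolean ndarray
    mask = b < np.array([0, 1, 2])
    assert mask.dtype == np.bool_
    assert mask.tolist() == [False, False, True]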
# NOTE: This should _not_ be `Final` or a `TypeAlias` bool_ = bool From eee1d753849aee953bd782a3aeddba365f9c4375 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 7 Sep 2025 03:42:33 +0200 Subject: [PATCH 0405/1018] TYP: fix ``np.signedinteger`` method declarations --- numpy/__init__.pyi | 224 +++++++++++++++--- numpy/_typing/_callable.pyi | 49 ---- numpy/typing/tests/data/fail/bitwise_ops.pyi | 4 +- numpy/typing/tests/data/reveal/arithmetic.pyi | 18 +- .../typing/tests/data/reveal/bitwise_ops.pyi | 11 +- numpy/typing/tests/data/reveal/mod.pyi | 22 +- 6 files changed, 223 insertions(+), 105 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index e539a16827c2..7ccb52751da5 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -138,10 +138,6 @@ from numpy._typing._callable import ( _UnsignedIntBitOp, _UnsignedIntMod, _UnsignedIntDivMod, - _SignedIntOp, - _SignedIntBitOp, - _SignedIntMod, - _SignedIntDivMod, _FloatOp, _FloatMod, _FloatDivMod, @@ -4243,6 +4239,9 @@ class integer(_IntegralMixin, _RoundMixin, number[_NBit, int]): def __rfloordiv__(self, value: _IntLike_co, /) -> integer: ... def __mod__(self, value: _IntLike_co, /) -> integer: ... def __rmod__(self, value: _IntLike_co, /) -> integer: ... + def __divmod__(self, value: _IntLike_co, /) -> _2Tuple[integer]: ... + def __rdivmod__(self, value: _IntLike_co, /) -> _2Tuple[integer]: ... + # Ensure that objects annotated as `integer` support bit-wise operations def __lshift__(self, other: _IntLike_co, /) -> integer: ... def __rlshift__(self, other: _IntLike_co, /) -> integer: ... @@ -4258,30 +4257,199 @@ class integer(_IntegralMixin, _RoundMixin, number[_NBit, int]): class signedinteger(integer[_NBit1]): def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... - __add__: _SignedIntOp[_NBit1] - __radd__: _SignedIntOp[_NBit1] - __sub__: _SignedIntOp[_NBit1] - __rsub__: _SignedIntOp[_NBit1] - __mul__: _SignedIntOp[_NBit1] - __rmul__: _SignedIntOp[_NBit1] - __floordiv__: _SignedIntOp[_NBit1] - __rfloordiv__: _SignedIntOp[_NBit1] - __pow__: _SignedIntOp[_NBit1] - __rpow__: _SignedIntOp[_NBit1] - __lshift__: _SignedIntBitOp[_NBit1] - __rlshift__: _SignedIntBitOp[_NBit1] - __rshift__: _SignedIntBitOp[_NBit1] - __rrshift__: _SignedIntBitOp[_NBit1] - __and__: _SignedIntBitOp[_NBit1] - __rand__: _SignedIntBitOp[_NBit1] - __xor__: _SignedIntBitOp[_NBit1] - __rxor__: _SignedIntBitOp[_NBit1] - __or__: _SignedIntBitOp[_NBit1] - __ror__: _SignedIntBitOp[_NBit1] - __mod__: _SignedIntMod[_NBit1] - __rmod__: _SignedIntMod[_NBit1] - __divmod__: _SignedIntDivMod[_NBit1] - __rdivmod__: _SignedIntDivMod[_NBit1] + # arithmetic ops + + @override # type: ignore[override] + @overload + def __add__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __add__(self, other: float, /) -> float64: ... + @overload + def __add__(self, other: complex, /) -> complex128: ... + @overload + def __add__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __radd__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __radd__(self, other: float, /) -> float64: ... + @overload + def __radd__(self, other: complex, /) -> complex128: ... + @overload + def __radd__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __sub__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __sub__(self, other: float, /) -> float64: ... + @overload + def __sub__(self, other: complex, /) -> complex128: ... 
+ @overload + def __sub__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rsub__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rsub__(self, other: float, /) -> float64: ... + @overload + def __rsub__(self, other: complex, /) -> complex128: ... + @overload + def __rsub__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __mul__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mul__(self, other: float, /) -> float64: ... + @overload + def __mul__(self, other: complex, /) -> complex128: ... + @overload + def __mul__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rmul__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rmul__(self, other: float, /) -> float64: ... + @overload + def __rmul__(self, other: complex, /) -> complex128: ... + @overload + def __rmul__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __pow__(self, other: int | int8 | bool_ | Self, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __pow__(self, other: integer, mod: None = None, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rpow__(self, other: int | int8 | bool_, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __rpow__(self, other: integer, mod: None = None, /) -> signedinteger: ... + + # integral ops (should be kept in sync) + + @override # type: ignore[override] + @overload + def __floordiv__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __floordiv__(self, other: float, /) -> float64: ... + @overload + def __floordiv__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rfloordiv__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rfloordiv__(self, other: float, /) -> float64: ... + @overload + def __rfloordiv__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __mod__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mod__(self, other: float, /) -> float64: ... + @overload + def __mod__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rmod__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rmod__(self, other: float, /) -> float64: ... + @overload + def __rmod__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __divmod__(self, other: int | int8 | bool_ | Self, /) -> _2Tuple[Self]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __divmod__(self, other: integer, /) -> _2Tuple[signedinteger]: ... + + @override # type: ignore[override] + @overload + def __rdivmod__(self, other: int | int8 | bool_, /) -> _2Tuple[Self]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __rdivmod__(self, other: integer, /) -> _2Tuple[signedinteger]: ... 
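A short value-level sketch of the behavior behind these ``signedinteger`` overloads (illustrative; assumes a 64-bit platform where ``np.int_`` is ``int64``):

    import numpy as np

    i8, i4 = np.int64(7), np.int32(3)

    # same-kind mixing stays signed: the stubs report the common
    # `np.signedinteger` base, and the runtime picks the wider width
    assert isinstance(i8 + i4, np.signedinteger)
    assert type(i8 + i4) is np.int64

    # mixing with a Python float or complex promotes to float64/complex128
    assert type(i8 + 1.0) is np.float64
    assert type(i8 + 1j) is np.complex128

    # divmod follows the same rules pair-wise
    q, r = divmod(i8, i4)
    assert type(q) is np.int64 and type(r) is np.int64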
+ + # bitwise ops + + @override # type: ignore[override] + @overload + def __lshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __lshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rlshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rlshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __rshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rrshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rrshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __and__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __and__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rand__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rand__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __xor__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __xor__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rxor__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rxor__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __or__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __or__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __ror__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __ror__(self, other: integer, /) -> signedinteger: ... int8 = signedinteger[_8Bit] int16 = signedinteger[_16Bit] diff --git a/numpy/_typing/_callable.pyi b/numpy/_typing/_callable.pyi index b67ee18ba919..d1fd8243b427 100644 --- a/numpy/_typing/_callable.pyi +++ b/numpy/_typing/_callable.pyi @@ -16,7 +16,6 @@ from numpy import ( complexfloating, float64, floating, - int_, integer, signedinteger, unsignedinteger, @@ -89,54 +88,6 @@ class _UnsignedIntDivMod(Protocol[_NBit1]): self, other: unsignedinteger[_NBit2], / ) -> _2Tuple[unsignedinteger[_NBit1]] | _2Tuple[unsignedinteger[_NBit2]]: ... -@type_check_only -class _SignedIntOp(Protocol[_NBit1]): - @overload - def __call__(self, other: int, /) -> signedinteger[_NBit1]: ... - @overload - def __call__(self, other: float, /) -> float64: ... - @overload - def __call__(self, other: complex, /) -> complex128: ... - @overload - def __call__(self, other: signedinteger[_NBit2], /) -> signedinteger[_NBit1] | signedinteger[_NBit2]: ... - -@type_check_only -class _SignedIntBitOp(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> signedinteger[_NBit1] | int_: ... - @overload - def __call__( - self, other: signedinteger[_NBit2], / - ) -> signedinteger[_NBit1] | signedinteger[_NBit2]: ... - -@type_check_only -class _SignedIntMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> signedinteger[_NBit1] | int_: ... - @overload - def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... 
- @overload - def __call__( - self, other: signedinteger[_NBit2], / - ) -> signedinteger[_NBit1] | signedinteger[_NBit2]: ... - -@type_check_only -class _SignedIntDivMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ... - @overload - def __call__(self, other: int, /) -> _2Tuple[signedinteger[_NBit1]] | _2Tuple[int_]: ... - @overload - def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1]] | _2Tuple[float64]: ... - @overload - def __call__( - self, other: signedinteger[_NBit2], / - ) -> _2Tuple[signedinteger[_NBit1]] | _2Tuple[signedinteger[_NBit2]]: ... - @type_check_only class _FloatOp(Protocol[_NBit1]): @overload diff --git a/numpy/typing/tests/data/fail/bitwise_ops.pyi b/numpy/typing/tests/data/fail/bitwise_ops.pyi index fe51f15adc52..6876393c9392 100644 --- a/numpy/typing/tests/data/fail/bitwise_ops.pyi +++ b/numpy/typing/tests/data/fail/bitwise_ops.pyi @@ -9,9 +9,9 @@ i = 0 f8 = np.float64() b_ >> f8 # type: ignore[operator] -i8 << f8 # type: ignore[call-overload] +i8 << f8 # type: ignore[operator] i | f8 # type: ignore[operator] -i8 ^ f8 # type: ignore[call-overload] +i8 ^ f8 # type: ignore[operator] u8 & f8 # type: ignore[call-overload] ~f8 # type: ignore[operator] # TODO: Certain mixes like i4 << u8 go to float and thus should fail diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index 81a98a9d96d2..4b17ca2366b2 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -587,9 +587,9 @@ assert_type(AR_f + f4, npt.NDArray[np.float64]) # Int assert_type(i8 + i8, np.int64) -assert_type(i8 + u8, Any) -assert_type(i8 + i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) -assert_type(i8 + u4, Any) +assert_type(i8 + u8, np.signedinteger) +assert_type(i8 + i4, np.signedinteger) +assert_type(i8 + u4, np.signedinteger) assert_type(i8 + b_, np.int64) assert_type(i8 + b, np.int64) assert_type(i8 + c, np.complex128) @@ -607,7 +607,7 @@ assert_type(u8 + AR_f, npt.NDArray[np.float64]) assert_type(i8 + i8, np.int64) assert_type(u8 + i8, Any) -assert_type(i4 + i8, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) +assert_type(i4 + i8, np.signedinteger) assert_type(u4 + i8, Any) assert_type(b_ + i8, np.int64) assert_type(b + i8, np.int64) @@ -616,7 +616,7 @@ assert_type(f + i8, np.float64) assert_type(AR_f + i8, npt.NDArray[np.float64]) assert_type(u8 + u8, np.uint64) -assert_type(i4 + u8, Any) +assert_type(i4 + u8, np.signedinteger) assert_type(u4 + u8, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) assert_type(b_ + u8, np.uint64) assert_type(b + u8, np.uint64) @@ -624,7 +624,7 @@ assert_type(c + u8, np.complex128) assert_type(f + u8, np.float64) assert_type(AR_f + u8, npt.NDArray[np.float64]) -assert_type(i4 + i8, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) +assert_type(i4 + i8, np.signedinteger) assert_type(i4 + i4, np.int32) assert_type(i4 + b_, np.int32) assert_type(i4 + b, np.int32) @@ -638,14 +638,14 @@ assert_type(u4 + b_, np.uint32) assert_type(u4 + b, np.uint32) assert_type(u4 + AR_f, npt.NDArray[np.float64]) -assert_type(i8 + i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) +assert_type(i8 + i4, np.signedinteger) assert_type(i4 + i4, np.int32) assert_type(b_ + i4, np.int32) assert_type(b + i4, np.int32) assert_type(AR_f + i4, npt.NDArray[np.float64]) -assert_type(i8 + u4, Any) -assert_type(i4 + u4, Any) +assert_type(i8 + u4, np.signedinteger) +assert_type(i4 + u4, 
np.signedinteger) assert_type(u8 + u4, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) assert_type(u4 + u4, np.uint32) assert_type(b_ + u4, np.uint32) diff --git a/numpy/typing/tests/data/reveal/bitwise_ops.pyi b/numpy/typing/tests/data/reveal/bitwise_ops.pyi index bef7c6739605..66adcf3dfb30 100644 --- a/numpy/typing/tests/data/reveal/bitwise_ops.pyi +++ b/numpy/typing/tests/data/reveal/bitwise_ops.pyi @@ -2,7 +2,6 @@ from typing import Literal as L, TypeAlias, assert_type import numpy as np import numpy.typing as npt -from numpy._typing import _32Bit, _64Bit FalseType: TypeAlias = L[False] TrueType: TypeAlias = L[True] @@ -43,11 +42,11 @@ assert_type(i4 | i4, np.int32) assert_type(i4 ^ i4, np.int32) assert_type(i4 & i4, np.int32) -assert_type(i8 << i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) -assert_type(i8 >> i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) -assert_type(i8 | i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) -assert_type(i8 ^ i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) -assert_type(i8 & i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) +assert_type(i8 << i4, np.signedinteger) +assert_type(i8 >> i4, np.signedinteger) +assert_type(i8 | i4, np.signedinteger) +assert_type(i8 ^ i4, np.signedinteger) +assert_type(i8 & i4, np.signedinteger) assert_type(i8 << b_, np.int64) assert_type(i8 >> b_, np.int64) diff --git a/numpy/typing/tests/data/reveal/mod.pyi b/numpy/typing/tests/data/reveal/mod.pyi index 9bbbd5c52d7f..338a9a1ff729 100644 --- a/numpy/typing/tests/data/reveal/mod.pyi +++ b/numpy/typing/tests/data/reveal/mod.pyi @@ -108,40 +108,40 @@ assert_type(divmod(AR_b, b_), tuple[npt.NDArray[np.int8], npt.NDArray[np.int8]]) assert_type(i8 % b, np.int64) assert_type(i8 % i8, np.int64) -assert_type(i8 % f, np.float64 | np.floating[_64Bit]) -assert_type(i8 % f8, np.float64 | np.floating[_64Bit]) -assert_type(i4 % i8, np.int64 | np.int32) -assert_type(i4 % f8, np.float64 | np.float32) +assert_type(i8 % f, np.float64) +assert_type(i8 % f8, np.float64) +assert_type(i4 % i8, np.signedinteger) +assert_type(i4 % f8, np.float64) assert_type(i4 % i4, np.int32) assert_type(i4 % f4, np.float32) assert_type(i8 % AR_b, npt.NDArray[np.int64]) assert_type(divmod(i8, b), tuple[np.int64, np.int64]) -assert_type(divmod(i8, i4), tuple[np.int64, np.int64] | tuple[np.int32, np.int32]) +assert_type(divmod(i8, i4), tuple[np.signedinteger, np.signedinteger]) assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) # workarounds for https://github.com/microsoft/pyright/issues/9663 -assert_type(i8.__divmod__(f), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float64, np.float64]) -assert_type(i8.__divmod__(f8), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float64, np.float64]) +assert_type(i8.__divmod__(f), tuple[np.float64, np.float64]) +assert_type(i8.__divmod__(f8), tuple[np.float64, np.float64]) assert_type(divmod(i8, f4), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float32, np.float32]) assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) assert_type(divmod(i4, f4), tuple[np.float32, np.float32]) assert_type(divmod(i8, AR_b), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) assert_type(b % i8, np.int64) -assert_type(f % i8, np.float64 | np.floating[_64Bit]) +assert_type(f % i8, np.float64) assert_type(i8 % i8, np.int64) assert_type(f8 % i8, np.float64) -assert_type(i8 % i4, np.int64 | np.int32) +assert_type(i8 % i4, np.signedinteger) assert_type(f8 % i4, np.float64) assert_type(i4 % i4, np.int32) 
assert_type(f4 % i4, np.float32) assert_type(AR_b % i8, npt.NDArray[np.int64]) assert_type(divmod(b, i8), tuple[np.int64, np.int64]) -assert_type(divmod(f, i8), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float64, np.float64]) +assert_type(divmod(f, i8), tuple[np.float64, np.float64]) assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) assert_type(divmod(f8, i8), tuple[np.float64, np.float64]) -assert_type(divmod(i4, i8), tuple[np.int64, np.int64] | tuple[np.int32, np.int32]) +assert_type(divmod(i4, i8), tuple[np.signedinteger, np.signedinteger]) assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) # workarounds for https://github.com/microsoft/pyright/issues/9663 assert_type(f4.__divmod__(i8), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float32, np.float32]) From 20575c2cfdb1e22e9b4951ef0c34486c68e36e05 Mon Sep 17 00:00:00 2001 From: Shirong_Wang Date: Sat, 16 Aug 2025 20:35:43 +0800 Subject: [PATCH 0406/1018] ENH: MAINT: add object copy for f2py meson backend --- numpy/f2py/_backends/_meson.py | 13 +++++++++++++ numpy/f2py/_backends/meson.build.template | 3 +++ 2 files changed, 16 insertions(+) diff --git a/numpy/f2py/_backends/_meson.py b/numpy/f2py/_backends/_meson.py index cbd9b0e32729..4c498bab2f25 100644 --- a/numpy/f2py/_backends/_meson.py +++ b/numpy/f2py/_backends/_meson.py @@ -50,6 +50,7 @@ def __init__( self.pipeline = [ self.initialize_template, self.sources_substitution, + self.objects_substitution, self.deps_substitution, self.include_substitution, self.libraries_substitution, @@ -79,6 +80,11 @@ def sources_substitution(self) -> None: [f"{self.indent}'''{source}'''," for source in self.sources] ) + def objects_substitution(self) -> None: + self.substitutions["obj_list"] = ",\n".join( + [f"{self.indent}'''{obj}'''," for obj in self.objects] + ) + def deps_substitution(self) -> None: self.substitutions["dep_list"] = f",\n{self.indent}".join( [f"{self.indent}dependency('{dep}')," for dep in self.deps] @@ -186,6 +192,7 @@ def run_meson(self, build_dir: Path): def compile(self) -> None: self.sources = _prepare_sources(self.modulename, self.sources, self.build_dir) + _prepare_objects(self.modulename, self.extra_objects, self.build_dir) self.write_meson_build(self.build_dir) self.run_meson(self.build_dir) self._move_exec_to_root(self.build_dir) @@ -216,6 +223,12 @@ def _prepare_sources(mname, sources, bdir): ] return extended_sources +def _prepare_objects(mname, objects, bdir): + Path(bdir).mkdir(parents=True, exist_ok=True) + # Copy objects + for obj in objects: + if Path(obj).exists() and Path(obj).is_file(): + shutil.copy(obj, bdir) def _get_flags(fc_flags): flag_values = [] diff --git a/numpy/f2py/_backends/meson.build.template b/numpy/f2py/_backends/meson.build.template index fdcc1b17ce21..58c6758cc503 100644 --- a/numpy/f2py/_backends/meson.build.template +++ b/numpy/f2py/_backends/meson.build.template @@ -43,6 +43,9 @@ ${source_list}, include_directories: [ inc_np, ${inc_list} + ], + objects: [ +${obj_list} ], dependencies : [ py_dep, From b78c703b3a62ae0d10056803b07a922d33768b6d Mon Sep 17 00:00:00 2001 From: Shirong_Wang Date: Wed, 20 Aug 2025 19:02:02 +0800 Subject: [PATCH 0407/1018] TST: add test for f2py meson with object file --- numpy/f2py/tests/test_f2py2e.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/numpy/f2py/tests/test_f2py2e.py b/numpy/f2py/tests/test_f2py2e.py index 2f91eb77c4bd..959e1527c482 100644 --- a/numpy/f2py/tests/test_f2py2e.py +++ b/numpy/f2py/tests/test_f2py2e.py @@ -673,6 +673,25 @@ def 
test_inclheader(capfd, hello_world_f90, monkeypatch): assert "#include " in ocmr assert "#include " in ocmr +@pytest.mark.skipif((platform.system() != 'Linux'), reason='Compiler required') +def test_cli_obj(capfd, hello_world_f90, monkeypatch): + """Ensures that the extra object can be specified when using meson backend + """ + ipath = Path(hello_world_f90) + mname = "blah" + odir = "tttmp" + obj = "extra.o" + monkeypatch.setattr(sys, "argv", + f'f2py --backend meson --build-dir {odir} -m {mname} -c {obj} {ipath}'.split()) + + with util.switchdir(ipath.parent): + Path(obj).touch() + compiler_check_f2pycli() + with Path(f"{odir}/meson.build").open() as mesonbuild: + mbld = mesonbuild.read() + assert "objects:" in mbld + assert f"'''{obj}'''" in mbld + def test_inclpath(): """Add to the include directories From 627fc3ae57713baf11e3a850c8021f99802ba3fb Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 7 Sep 2025 20:12:43 +0200 Subject: [PATCH 0408/1018] TYP: fix ``np.unsignedinteger`` method declarations --- numpy/__init__.pyi | 330 +++++++++++++++--- numpy/_typing/_callable.pyi | 63 +--- numpy/typing/tests/data/fail/bitwise_ops.pyi | 2 +- numpy/typing/tests/data/reveal/arithmetic.pyi | 18 +- .../typing/tests/data/reveal/bitwise_ops.pyi | 10 +- 5 files changed, 301 insertions(+), 122 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 7ccb52751da5..ed8fa9a93eda 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -134,10 +134,6 @@ from numpy._typing import ( ) from numpy._typing._callable import ( - _UnsignedIntOp, - _UnsignedIntBitOp, - _UnsignedIntMod, - _UnsignedIntDivMod, _FloatOp, _FloatMod, _FloatDivMod, @@ -4254,7 +4250,7 @@ class integer(_IntegralMixin, _RoundMixin, number[_NBit, int]): def __xor__(self, other: _IntLike_co, /) -> integer: ... def __rxor__(self, other: _IntLike_co, /) -> integer: ... -class signedinteger(integer[_NBit1]): +class signedinteger(integer[_NBit]): def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... # arithmetic ops @@ -4267,7 +4263,9 @@ class signedinteger(integer[_NBit1]): @overload def __add__(self, other: complex, /) -> complex128: ... @overload - def __add__(self, other: integer, /) -> signedinteger: ... + def __add__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __add__(self, other: integer, /) -> Incomplete: ... @override # type: ignore[override] @overload @@ -4277,7 +4275,9 @@ class signedinteger(integer[_NBit1]): @overload def __radd__(self, other: complex, /) -> complex128: ... @overload - def __radd__(self, other: integer, /) -> signedinteger: ... + def __radd__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __radd__(self, other: integer, /) -> Incomplete: ... @override # type: ignore[override] @overload @@ -4287,7 +4287,9 @@ class signedinteger(integer[_NBit1]): @overload def __sub__(self, other: complex, /) -> complex128: ... @overload - def __sub__(self, other: integer, /) -> signedinteger: ... + def __sub__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __sub__(self, other: integer, /) -> Incomplete: ... @override # type: ignore[override] @overload @@ -4297,7 +4299,9 @@ class signedinteger(integer[_NBit1]): @overload def __rsub__(self, other: complex, /) -> complex128: ... @overload - def __rsub__(self, other: integer, /) -> signedinteger: ... + def __rsub__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rsub__(self, other: integer, /) -> Incomplete: ... 
@override # type: ignore[override] @overload @@ -4307,7 +4311,9 @@ class signedinteger(integer[_NBit1]): @overload def __mul__(self, other: complex, /) -> complex128: ... @overload - def __mul__(self, other: integer, /) -> signedinteger: ... + def __mul__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __mul__(self, other: integer, /) -> Incomplete: ... @override # type: ignore[override] @overload @@ -4317,7 +4323,9 @@ class signedinteger(integer[_NBit1]): @overload def __rmul__(self, other: complex, /) -> complex128: ... @overload - def __rmul__(self, other: integer, /) -> signedinteger: ... + def __rmul__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rmul__(self, other: integer, /) -> Incomplete: ... @override # type: ignore[override] @overload @@ -4327,7 +4335,9 @@ class signedinteger(integer[_NBit1]): @overload def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... @overload - def __pow__(self, other: integer, mod: None = None, /) -> signedinteger: ... + def __pow__(self, other: signedinteger, mod: None = None, /) -> signedinteger: ... + @overload + def __pow__(self, other: integer, mod: None = None, /) -> Incomplete: ... @override # type: ignore[override] @overload @@ -4337,9 +4347,11 @@ class signedinteger(integer[_NBit1]): @overload def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... @overload - def __rpow__(self, other: integer, mod: None = None, /) -> signedinteger: ... + def __rpow__(self, other: signedinteger, mod: None = None, /) -> signedinteger: ... + @overload + def __rpow__(self, other: integer, mod: None = None, /) -> Incomplete: ... - # integral ops (should be kept in sync) + # modular division ops @override # type: ignore[override] @overload @@ -4347,7 +4359,9 @@ class signedinteger(integer[_NBit1]): @overload def __floordiv__(self, other: float, /) -> float64: ... @overload - def __floordiv__(self, other: integer, /) -> signedinteger: ... + def __floordiv__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __floordiv__(self, other: integer, /) -> Incomplete: ... @override # type: ignore[override] @overload @@ -4355,7 +4369,9 @@ class signedinteger(integer[_NBit1]): @overload def __rfloordiv__(self, other: float, /) -> float64: ... @overload - def __rfloordiv__(self, other: integer, /) -> signedinteger: ... + def __rfloordiv__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rfloordiv__(self, other: integer, /) -> Incomplete: ... @override # type: ignore[override] @overload @@ -4363,7 +4379,9 @@ class signedinteger(integer[_NBit1]): @overload def __mod__(self, other: float, /) -> float64: ... @overload - def __mod__(self, other: integer, /) -> signedinteger: ... + def __mod__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __mod__(self, other: integer, /) -> Incomplete: ... @override # type: ignore[override] @overload @@ -4371,7 +4389,9 @@ class signedinteger(integer[_NBit1]): @overload def __rmod__(self, other: float, /) -> float64: ... @overload - def __rmod__(self, other: integer, /) -> signedinteger: ... + def __rmod__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rmod__(self, other: integer, /) -> Incomplete: ... @override # type: ignore[override] @overload @@ -4379,7 +4399,9 @@ class signedinteger(integer[_NBit1]): @overload def __divmod__(self, other: float, /) -> _2Tuple[float64]: ... @overload - def __divmod__(self, other: integer, /) -> _2Tuple[signedinteger]: ... 
+ def __divmod__(self, other: signedinteger, /) -> _2Tuple[signedinteger]: ... + @overload + def __divmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... @override # type: ignore[override] @overload @@ -4387,7 +4409,9 @@ class signedinteger(integer[_NBit1]): @overload def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... @overload - def __rdivmod__(self, other: integer, /) -> _2Tuple[signedinteger]: ... + def __rdivmod__(self, other: signedinteger, /) -> _2Tuple[signedinteger]: ... + @overload + def __rdivmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... # bitwise ops @@ -4467,31 +4491,247 @@ longlong = signedinteger[_NBitLongLong] class unsignedinteger(integer[_NBit1]): def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... - # NOTE: `uint64 + signedinteger -> float64` - __add__: _UnsignedIntOp[_NBit1] - __radd__: _UnsignedIntOp[_NBit1] - __sub__: _UnsignedIntOp[_NBit1] - __rsub__: _UnsignedIntOp[_NBit1] - __mul__: _UnsignedIntOp[_NBit1] - __rmul__: _UnsignedIntOp[_NBit1] - __floordiv__: _UnsignedIntOp[_NBit1] - __rfloordiv__: _UnsignedIntOp[_NBit1] - __pow__: _UnsignedIntOp[_NBit1] - __rpow__: _UnsignedIntOp[_NBit1] - __lshift__: _UnsignedIntBitOp[_NBit1] - __rlshift__: _UnsignedIntBitOp[_NBit1] - __rshift__: _UnsignedIntBitOp[_NBit1] - __rrshift__: _UnsignedIntBitOp[_NBit1] - __and__: _UnsignedIntBitOp[_NBit1] - __rand__: _UnsignedIntBitOp[_NBit1] - __xor__: _UnsignedIntBitOp[_NBit1] - __rxor__: _UnsignedIntBitOp[_NBit1] - __or__: _UnsignedIntBitOp[_NBit1] - __ror__: _UnsignedIntBitOp[_NBit1] - __mod__: _UnsignedIntMod[_NBit1] - __rmod__: _UnsignedIntMod[_NBit1] - __divmod__: _UnsignedIntDivMod[_NBit1] - __rdivmod__: _UnsignedIntDivMod[_NBit1] + # arithmetic ops + + @override # type: ignore[override] + @overload + def __add__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __add__(self, other: float, /) -> float64: ... + @overload + def __add__(self, other: complex, /) -> complex128: ... + @overload + def __add__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __add__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __radd__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __radd__(self, other: float, /) -> float64: ... + @overload + def __radd__(self, other: complex, /) -> complex128: ... + @overload + def __radd__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __radd__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __sub__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __sub__(self, other: float, /) -> float64: ... + @overload + def __sub__(self, other: complex, /) -> complex128: ... + @overload + def __sub__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __sub__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rsub__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rsub__(self, other: float, /) -> float64: ... + @overload + def __rsub__(self, other: complex, /) -> complex128: ... + @overload + def __rsub__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rsub__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __mul__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __mul__(self, other: float, /) -> float64: ... 
+ @overload + def __mul__(self, other: complex, /) -> complex128: ... + @overload + def __mul__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __mul__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rmul__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rmul__(self, other: float, /) -> float64: ... + @overload + def __rmul__(self, other: complex, /) -> complex128: ... + @overload + def __rmul__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rmul__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __pow__(self, other: int | uint8 | bool_ | Self, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __pow__(self, other: unsignedinteger, mod: None = None, /) -> unsignedinteger: ... + @overload + def __pow__(self, other: integer, mod: None = None, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rpow__(self, other: int | uint8 | bool_, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __rpow__(self, other: unsignedinteger, mod: None = None, /) -> unsignedinteger: ... + @overload + def __rpow__(self, other: integer, mod: None = None, /) -> Incomplete: ... + + # modular division ops + + @override # type: ignore[override] + @overload + def __floordiv__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __floordiv__(self, other: float, /) -> float64: ... + @overload + def __floordiv__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __floordiv__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rfloordiv__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rfloordiv__(self, other: float, /) -> float64: ... + @overload + def __rfloordiv__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rfloordiv__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __mod__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __mod__(self, other: float, /) -> float64: ... + @overload + def __mod__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __mod__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rmod__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rmod__(self, other: float, /) -> float64: ... + @overload + def __rmod__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rmod__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __divmod__(self, other: int | uint8 | bool_ | Self, /) -> _2Tuple[Self]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __divmod__(self, other: unsignedinteger, /) -> _2Tuple[unsignedinteger]: ... + @overload + def __divmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... 
+ + @override # type: ignore[override] + @overload + def __rdivmod__(self, other: int | uint8 | bool_, /) -> _2Tuple[Self]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __rdivmod__(self, other: unsignedinteger, /) -> _2Tuple[unsignedinteger]: ... + @overload + def __rdivmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... + + # bitwise ops + + @override # type: ignore[override] + @overload + def __lshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __lshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __lshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rlshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rlshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rlshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __rshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rrshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rrshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rrshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __and__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __and__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __and__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rand__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rand__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rand__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __xor__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __xor__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __xor__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rxor__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rxor__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rxor__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __or__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __or__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __or__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __ror__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __ror__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __ror__(self, other: signedinteger, /) -> signedinteger: ... 
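Taken together, the rewritten `unsignedinteger` overloads encode the promotion rules that the reveal tests further down in this patch assert. A minimal sketch of what a type checker such as mypy or pyright now infers, assuming Python 3.11+ for `typing.assert_type` (the checks are static; nothing is verified at runtime):

    from typing import Any, assert_type

    import numpy as np

    u4, u8, i8 = np.uint32(1), np.uint64(1), np.int64(1)

    assert_type(u4 + u4, np.uint32)            # same type stays concrete
    assert_type(u4 + u8, np.unsignedinteger)   # mixed widths widen within the kind
    assert_type(i8 + u8, Any)                  # mixed signedness has no safe scalar type
    assert_type(u4 << 2, np.uint32)            # bit ops with a builtin int keep the type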
uint8: TypeAlias = unsignedinteger[_8Bit] uint16: TypeAlias = unsignedinteger[_16Bit] diff --git a/numpy/_typing/_callable.pyi b/numpy/_typing/_callable.pyi index d1fd8243b427..96fa4000889f 100644 --- a/numpy/_typing/_callable.pyi +++ b/numpy/_typing/_callable.pyi @@ -11,15 +11,7 @@ See the `Mypy documentation`_ on protocols for more details. from typing import Any, Protocol, TypeAlias, TypeVar, final, overload, type_check_only import numpy as np -from numpy import ( - complex128, - complexfloating, - float64, - floating, - integer, - signedinteger, - unsignedinteger, -) +from numpy import complex128, complexfloating, float64, floating, integer from . import NBitBase from ._array_like import NDArray @@ -35,59 +27,6 @@ _2Tuple: TypeAlias = tuple[_T, _T] _NBit1 = TypeVar("_NBit1", bound=NBitBase) _NBit2 = TypeVar("_NBit2", bound=NBitBase) -@type_check_only -class _UnsignedIntOp(Protocol[_NBit1]): - # NOTE: `uint64 + signedinteger -> float64` - @overload - def __call__(self, other: int, /) -> unsignedinteger[_NBit1]: ... - @overload - def __call__(self, other: float, /) -> float64: ... - @overload - def __call__(self, other: complex, /) -> complex128: ... - @overload - def __call__(self, other: unsignedinteger[_NBit2], /) -> unsignedinteger[_NBit1] | unsignedinteger[_NBit2]: ... - @overload - def __call__(self, other: signedinteger, /) -> Any: ... - -@type_check_only -class _UnsignedIntBitOp(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> signedinteger: ... - @overload - def __call__(self, other: signedinteger, /) -> signedinteger: ... - @overload - def __call__( - self, other: unsignedinteger[_NBit2], / - ) -> unsignedinteger[_NBit1] | unsignedinteger[_NBit2]: ... - -@type_check_only -class _UnsignedIntMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ... - @overload - def __call__(self, other: int | signedinteger, /) -> Any: ... - @overload - def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... - @overload - def __call__( - self, other: unsignedinteger[_NBit2], / - ) -> unsignedinteger[_NBit1] | unsignedinteger[_NBit2]: ... - -@type_check_only -class _UnsignedIntDivMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ... - @overload - def __call__(self, other: int | signedinteger, /) -> _2Tuple[Any]: ... - @overload - def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1]] | _2Tuple[float64]: ... - @overload - def __call__( - self, other: unsignedinteger[_NBit2], / - ) -> _2Tuple[unsignedinteger[_NBit1]] | _2Tuple[unsignedinteger[_NBit2]]: ... 
- @type_check_only class _FloatOp(Protocol[_NBit1]): @overload diff --git a/numpy/typing/tests/data/fail/bitwise_ops.pyi b/numpy/typing/tests/data/fail/bitwise_ops.pyi index 6876393c9392..f4de2928ff54 100644 --- a/numpy/typing/tests/data/fail/bitwise_ops.pyi +++ b/numpy/typing/tests/data/fail/bitwise_ops.pyi @@ -12,6 +12,6 @@ b_ >> f8 # type: ignore[operator] i8 << f8 # type: ignore[operator] i | f8 # type: ignore[operator] i8 ^ f8 # type: ignore[operator] -u8 & f8 # type: ignore[call-overload] +u8 & f8 # type: ignore[operator] ~f8 # type: ignore[operator] # TODO: Certain mixes like i4 << u8 go to float and thus should fail diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index 4b17ca2366b2..1c564df5de70 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -587,9 +587,9 @@ assert_type(AR_f + f4, npt.NDArray[np.float64]) # Int assert_type(i8 + i8, np.int64) -assert_type(i8 + u8, np.signedinteger) +assert_type(i8 + u8, Any) assert_type(i8 + i4, np.signedinteger) -assert_type(i8 + u4, np.signedinteger) +assert_type(i8 + u4, Any) assert_type(i8 + b_, np.int64) assert_type(i8 + b, np.int64) assert_type(i8 + c, np.complex128) @@ -598,7 +598,7 @@ assert_type(i8 + AR_f, npt.NDArray[np.float64]) assert_type(u8 + u8, np.uint64) assert_type(u8 + i4, Any) -assert_type(u8 + u4, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) +assert_type(u8 + u4, np.unsignedinteger) assert_type(u8 + b_, np.uint64) assert_type(u8 + b, np.uint64) assert_type(u8 + c, np.complex128) @@ -616,8 +616,8 @@ assert_type(f + i8, np.float64) assert_type(AR_f + i8, npt.NDArray[np.float64]) assert_type(u8 + u8, np.uint64) -assert_type(i4 + u8, np.signedinteger) -assert_type(u4 + u8, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) +assert_type(i4 + u8, Any) +assert_type(u4 + u8, np.unsignedinteger) assert_type(b_ + u8, np.uint64) assert_type(b + u8, np.uint64) assert_type(c + u8, np.complex128) @@ -632,7 +632,7 @@ assert_type(i4 + AR_f, npt.NDArray[np.float64]) assert_type(u4 + i8, Any) assert_type(u4 + i4, Any) -assert_type(u4 + u8, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) +assert_type(u4 + u8, np.unsignedinteger) assert_type(u4 + u4, np.uint32) assert_type(u4 + b_, np.uint32) assert_type(u4 + b, np.uint32) @@ -644,9 +644,9 @@ assert_type(b_ + i4, np.int32) assert_type(b + i4, np.int32) assert_type(AR_f + i4, npt.NDArray[np.float64]) -assert_type(i8 + u4, np.signedinteger) -assert_type(i4 + u4, np.signedinteger) -assert_type(u8 + u4, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) +assert_type(i8 + u4, Any) +assert_type(i4 + u4, Any) +assert_type(u8 + u4, np.unsignedinteger) assert_type(u4 + u4, np.uint32) assert_type(b_ + u4, np.uint32) assert_type(b + u4, np.uint32) diff --git a/numpy/typing/tests/data/reveal/bitwise_ops.pyi b/numpy/typing/tests/data/reveal/bitwise_ops.pyi index 66adcf3dfb30..49986bd5d12c 100644 --- a/numpy/typing/tests/data/reveal/bitwise_ops.pyi +++ b/numpy/typing/tests/data/reveal/bitwise_ops.pyi @@ -84,11 +84,11 @@ assert_type(u4 | i4, np.signedinteger) assert_type(u4 ^ i4, np.signedinteger) assert_type(u4 & i4, np.signedinteger) -assert_type(u4 << i, np.signedinteger) -assert_type(u4 >> i, np.signedinteger) -assert_type(u4 | i, np.signedinteger) -assert_type(u4 ^ i, np.signedinteger) -assert_type(u4 & i, np.signedinteger) +assert_type(u4 << i, np.uint32) +assert_type(u4 >> i, np.uint32) +assert_type(u4 | i, np.uint32) +assert_type(u4 ^ i, np.uint32) 
+assert_type(u4 & i, np.uint32) assert_type(u8 << b_, np.uint64) assert_type(u8 >> b_, np.uint64) From e1a3ad7b0c42d4ba716f9c923ba57c3afbcc521f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Moritz=20Gro=C3=9F?= Date: Mon, 8 Sep 2025 01:40:32 +0200 Subject: [PATCH 0409/1018] replace atry_collapse() with afound_new_run(). change atimsort_() --- numpy/_core/src/npysort/timsort.cpp | 68 +++++++---------------------- 1 file changed, 15 insertions(+), 53 deletions(-) diff --git a/numpy/_core/src/npysort/timsort.cpp b/numpy/_core/src/npysort/timsort.cpp index 9ecf88c0aeb9..0dfb4d32f64a 100644 --- a/numpy/_core/src/npysort/timsort.cpp +++ b/numpy/_core/src/npysort/timsort.cpp @@ -782,59 +782,24 @@ amerge_at_(type *arr, npy_intp *tosort, const run *stack, const npy_intp at, template static int -atry_collapse_(type *arr, npy_intp *tosort, run *stack, npy_intp *stack_ptr, - buffer_intp *buffer) +afound_new_run_(type *arr, npy_intp *tosort, run *stack, npy_intp *stack_ptr, npy_intp n2, + npy_intp num, buffer_intp *buffer) { int ret; - npy_intp A, B, C, top; - top = *stack_ptr; - - while (1 < top) { - B = stack[top - 2].l; - C = stack[top - 1].l; - - if ((2 < top && stack[top - 3].l <= B + C) || - (3 < top && stack[top - 4].l <= stack[top - 3].l + B)) { - A = stack[top - 3].l; - - if (A <= C) { - ret = amerge_at_(arr, tosort, stack, top - 3, buffer); - - if (NPY_UNLIKELY(ret < 0)) { - return ret; - } - - stack[top - 3].l += B; - stack[top - 2] = stack[top - 1]; - --top; - } - else { - ret = amerge_at_(arr, tosort, stack, top - 2, buffer); - - if (NPY_UNLIKELY(ret < 0)) { - return ret; - } - - stack[top - 2].l += C; - --top; - } - } - else if (1 < top && B <= C) { - ret = amerge_at_(arr, tosort, stack, top - 2, buffer); - + if (*stack_ptr > 0) { + npy_intp s1 = stack[*stack_ptr - 1].s; + npy_intp n1 = stack[*stack_ptr - 1].l; + int power = powerloop(s1, n1, n2, num); + while (*stack_ptr > 1 && stack[*stack_ptr - 2].power > power) { + ret = amerge_at_(arr, tosort, stack, *stack_ptr - 2, buffer); if (NPY_UNLIKELY(ret < 0)) { return ret; } - - stack[top - 2].l += C; - --top; - } - else { - break; + stack[*stack_ptr - 2].l += stack[*stack_ptr - 1].l; + --(*stack_ptr); } + stack[*stack_ptr - 1].power = power; } - - *stack_ptr = top; return 0; } @@ -897,16 +862,13 @@ atimsort_(void *v, npy_intp *tosort, npy_intp num) for (l = 0; l < num;) { n = acount_run_((type *)v, tosort, l, num, minrun); - stack[stack_ptr].s = l; - stack[stack_ptr].l = n; - ++stack_ptr; - ret = atry_collapse_((type *)v, tosort, stack, &stack_ptr, - &buffer); - + ret = afound_new_run_((type*)v, tosort, stack, &stack_ptr, n, num, &buffer); if (NPY_UNLIKELY(ret < 0)) { goto cleanup; } - + stack[stack_ptr].s = l; + stack[stack_ptr].l = n; + ++stack_ptr; l += n; } From eeab643fbdd39f3eb2c1613ab91d4a91b91c0987 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 8 Sep 2025 09:54:37 -0600 Subject: [PATCH 0410/1018] MAINT: update spin to 0.14 in requirements files --- requirements/build_requirements.txt | 2 +- requirements/ci32_requirements.txt | 2 +- requirements/ci_requirements.txt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements/build_requirements.txt b/requirements/build_requirements.txt index a51143a780e7..8bd2521933d7 100644 --- a/requirements/build_requirements.txt +++ b/requirements/build_requirements.txt @@ -1,5 +1,5 @@ meson-python>=0.13.1 Cython>=3.0.6 ninja -spin==0.13 +spin==0.14 build diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index 87831586e01e..10ac5f7ecc9f 
100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,3 @@ -spin==0.13 +spin==0.14 # Keep this in sync with ci_requirements.txt scipy-openblas32==0.3.30.0.1 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index dd16787923e3..bd2f75ab8c1d 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ -spin==0.13 +spin==0.14 # Keep this in sync with ci32_requirements.txt scipy-openblas32==0.3.30.0.1 scipy-openblas64==0.3.30.0.1 From ba128dd0f051be8355721096c3f62301daaf5c96 Mon Sep 17 00:00:00 2001 From: Kelvin Li Date: Mon, 8 Sep 2025 15:58:13 -0400 Subject: [PATCH 0411/1018] TST: update test_regression::test_gph25784 (#29714) - restrict Windows only - use options that are supported on all compilers --- numpy/f2py/tests/test_regression.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py index 93eb29e8e723..c4636a764914 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -158,7 +158,7 @@ def test_gh26623(): @pytest.mark.slow -@pytest.mark.skipif(platform.system() not in ['Linux', 'Darwin'], reason='Unsupported on this platform for now') +@pytest.mark.skipif(platform.system() == "Windows", reason='Unsupported on this platform for now') def test_gh25784(): # Compile dubious file using passed flags try: @@ -167,7 +167,7 @@ def test_gh25784(): options=[ # Meson will collect and dedup these to pass to fortran_args: "--f77flags='-ffixed-form -O2'", - "--f90flags=\"-ffixed-form -Og\"", + "--f90flags=\"-ffixed-form -g\"", ], module_name="Blah", ) From e10df5274b9209ff329ecf1403f4bfe491dfb6be Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Mon, 8 Sep 2025 16:00:47 -0400 Subject: [PATCH 0412/1018] TST: Replace test_deprecations setup/teardown with context manager (#29692) * WIP: Replace xunit setup with methods * WIP: Revert "WIP: Replace xunit setup with methods" This reverts commit 87c374fd36d668d0ff2e14c6cea9340667fcb4cd. * TST: Change setup to context manager * TST: Remove thread_unsafe marker --- numpy/_core/tests/test_deprecations.py | 56 +++++++++++--------------- 1 file changed, 23 insertions(+), 33 deletions(-) diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 252189ef321a..3c6ed420dbfa 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -20,22 +20,20 @@ class _DeprecationTestCase: message = '' warning_cls = DeprecationWarning - def setup_method(self): - self.warn_ctx = warnings.catch_warnings(record=True) - self.log = self.warn_ctx.__enter__() - - # Do *not* ignore other DeprecationWarnings. Ignoring warnings - # can give very confusing results because of - # https://bugs.python.org/issue4180 and it is probably simplest to - # try to keep the tests cleanly giving only the right warning type. - # (While checking them set to "error" those are ignored anyway) - # We still have them show up, because otherwise they would be raised - warnings.filterwarnings("always", category=self.warning_cls) - warnings.filterwarnings("always", message=self.message, - category=self.warning_cls) - - def teardown_method(self): - self.warn_ctx.__exit__() + @contextlib.contextmanager + def filter_warnings(self): + with warnings.catch_warnings(record=True) as w: + # Do *not* ignore other DeprecationWarnings. 
Ignoring warnings + # can give very confusing results because of + # https://bugs.python.org/issue4180 and it is probably simplest to + # try to keep the tests cleanly giving only the right warning type. + # (While checking them set to "error" those are ignored anyway) + # We still have them show up, because otherwise they would be raised + warnings.filterwarnings("always", category=self.warning_cls) + warnings.filterwarnings("always", message=self.message, + category=self.warning_cls) + yield w + return def assert_deprecated(self, function, num=1, ignore_others=False, function_fails=False, @@ -72,9 +70,6 @@ def assert_deprecated(self, function, num=1, ignore_others=False, """ __tracebackhide__ = True # Hide traceback for py.test - # reset the log - self.log[:] = [] - if exceptions is np._NoValue: exceptions = (self.warning_cls,) @@ -83,11 +78,12 @@ def assert_deprecated(self, function, num=1, ignore_others=False, else: context_manager = contextlib.nullcontext() with context_manager: - function(*args, **kwargs) + with self.filter_warnings() as w_context: + function(*args, **kwargs) # just in case, clear the registry num_found = 0 - for warning in self.log: + for warning in w_context: if warning.category is self.warning_cls: num_found += 1 elif not ignore_others: @@ -95,8 +91,8 @@ def assert_deprecated(self, function, num=1, ignore_others=False, "expected %s but got: %s" % (self.warning_cls.__name__, warning.category)) if num is not None and num_found != num: - msg = f"{len(self.log)} warnings found but {num} expected." - lst = [str(w) for w in self.log] + msg = f"{len(w_context)} warnings found but {num} expected." + lst = [str(w) for w in w_context] raise AssertionError("\n".join([msg] + lst)) with warnings.catch_warnings(): @@ -131,7 +127,6 @@ class _VisibleDeprecationTestCase(_DeprecationTestCase): class TestTestDeprecated: def test_assert_deprecated(self): test_case_instance = _DeprecationTestCase() - test_case_instance.setup_method() assert_raises(AssertionError, test_case_instance.assert_deprecated, lambda: None) @@ -140,7 +135,6 @@ def foo(): warnings.warn("foo", category=DeprecationWarning, stacklevel=2) test_case_instance.assert_deprecated(foo) - test_case_instance.teardown_method() class TestBincount(_DeprecationTestCase): @@ -242,9 +236,7 @@ def test_deprecated(self, func): @pytest.mark.parametrize("func", [np.percentile, np.quantile, np.nanpercentile, np.nanquantile]) def test_both_passed(self, func): - with warnings.catch_warnings(): - # catch the DeprecationWarning so that it does not raise: - warnings.simplefilter("always", DeprecationWarning) + with pytest.warns(DeprecationWarning): with pytest.raises(TypeError): func([0., 1.], 0., interpolation="nearest", method="nearest") @@ -365,12 +357,10 @@ def test_lib_functions_deprecation_call(self): class TestDeprecatedDTypeAliases(_DeprecationTestCase): def _check_for_warning(self, func): - with warnings.catch_warnings(record=True) as caught_warnings: + with pytest.warns(DeprecationWarning, + match="alias 'a' was deprecated in NumPy 2.0") as w: func() - assert len(caught_warnings) == 1 - w = caught_warnings[0] - assert w.category is DeprecationWarning - assert "alias 'a' was deprecated in NumPy 2.0" in str(w.message) + assert len(w) == 1 def test_a_dtype_alias(self): for dtype in ["a", "a10"]: From c26d2ef099d527106aa3ee413a7200e2ece43f86 Mon Sep 17 00:00:00 2001 From: Timmy Date: Mon, 8 Sep 2025 21:26:44 +0100 Subject: [PATCH 0413/1018] fix BUG: standardize 'Mean of empty slice' inconsistent message --- numpy/_core/_methods.py | 
2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/_methods.py b/numpy/_core/_methods.py index a3d5b02a2a14..b013fd952beb 100644 --- a/numpy/_core/_methods.py +++ b/numpy/_core/_methods.py @@ -119,7 +119,7 @@ def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where) if rcount == 0 if where is True else umr_any(rcount == 0, axis=None): - warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2) + warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2) # Cast bool, unsigned int, and int to float64 by default if dtype is None: From 3ed58a635dd9bcb6ab367a1e22f15f384b743703 Mon Sep 17 00:00:00 2001 From: Nicholas Bidler Date: Mon, 8 Sep 2025 13:47:25 -0700 Subject: [PATCH 0414/1018] DOC: clarify numpy.asarray, numpy.asanyarray, numpy.asarray_chkfinite parameter 'order' (#29655) * DOC: clarified numpy.asarray parameter 'order' Changed description to make explicit that setting 'A' requires user to set 'copy' to 'True' to get the described behavior. Made explicit that 'A' has two cases where it acts as equivalent to setting 'F'. See 28247. * DOC: clarified numpy.asarray parameter 'order' See 28247. Changed description of 'order' to make the behavior of 'A' more explicit and how to cause it to preserve non-contiguous Fortran-order. Changed description of 'C' and 'F' to clarify that output will be contiguous. * DOC: clarified numpy.asarray() parameter 'order' See 28247. To differentiate between 'A' and 'K', made explicit that 'A' will not ensure the output is contiguous. 'K' has more complex behavior related to flags and stride detection, but will try to keep the output as close as possible to the input. * DOC: clarify numpy.asarray parameter 'order' See 28247. Made explicit that 'C' and 'F' can copy input to output, 'A' does not ensure contiguous output. * DOC numpy.asarray parameter 'order' description added line break * DOC: Additional request to change identical text blocks. * DOC: fixed linebreak position --- numpy/_core/_add_newdocs.py | 27 +++++++++++++++------------ numpy/lib/_function_base_impl.py | 15 +++++++++------ 2 files changed, 24 insertions(+), 18 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index e3009a490bd3..ad7029deaa0f 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -967,12 +967,13 @@ dtype : data-type, optional By default, the data-type is inferred from the input data. order : {'C', 'F', 'A', 'K'}, optional - Memory layout. 'A' and 'K' depend on the order of input array a. - 'C' row-major (C-style), - 'F' column-major (Fortran-style) memory representation. - 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise - 'K' (keep) preserve input order - Defaults to 'K'. + The memory layout of the output. + 'C' gives a row-major layout (C-style), + 'F' gives a column-major layout (Fortran-style). + 'C' and 'F' will copy if needed to ensure the output format. + 'A' (any) is equivalent to 'F' if input a is non-contiguous or Fortran-contiguous, otherwise, it is equivalent to 'C'. + Unlike 'C' or 'F', 'A' does not ensure that the result is contiguous. + 'K' (keep) is the default and preserves the input order for the output. device : str, optional The device on which to place the created array. Default: ``None``. For Array-API interoperability only, so must be ``"cpu"`` if passed. 
@@ -1059,12 +1060,14 @@ dtype : data-type, optional By default, the data-type is inferred from the input data. order : {'C', 'F', 'A', 'K'}, optional - Memory layout. 'A' and 'K' depend on the order of input array a. - 'C' row-major (C-style), - 'F' column-major (Fortran-style) memory representation. - 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise - 'K' (keep) preserve input order - Defaults to 'C'. + The memory layout of the output. + 'C' gives a row-major layout (C-style), + 'F' gives a column-major layout (Fortran-style). + 'C' and 'F' will copy if needed to ensure the output format. + 'A' (any) is equivalent to 'F' if input a is non-contiguous or Fortran-contiguous, otherwise, it is equivalent to 'C'. + Unlike 'C' or 'F', 'A' does not ensure that the result is contiguous. + 'K' (keep) preserves the input order for the output. + 'C' is the default. device : str, optional The device on which to place the created array. Default: ``None``. For Array-API interoperability only, so must be ``"cpu"`` if passed. diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 416b18f387c3..421dee5578ab 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -613,12 +613,15 @@ def asarray_chkfinite(a, dtype=None, order=None): dtype : data-type, optional By default, the data-type is inferred from the input data. order : {'C', 'F', 'A', 'K'}, optional - Memory layout. 'A' and 'K' depend on the order of input array a. - 'C' row-major (C-style), - 'F' column-major (Fortran-style) memory representation. - 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise - 'K' (keep) preserve input order - Defaults to 'C'. + The memory layout of the output. + 'C' gives a row-major layout (C-style), + 'F' gives a column-major layout (Fortran-style). + 'C' and 'F' will copy if needed to ensure the output format. + 'A' (any) is equivalent to 'F' if input a is non-contiguous or + Fortran-contiguous, otherwise, it is equivalent to 'C'. + Unlike 'C' or 'F', 'A' does not ensure that the result is contiguous. + 'K' (keep) preserves the input order for the output. + 'C' is the default. Returns ------- From 6fb592f15e13f0d1c2853d3ce453508db81b1831 Mon Sep 17 00:00:00 2001 From: Kelvin Li Date: Mon, 8 Sep 2025 17:59:09 -0400 Subject: [PATCH 0415/1018] TST: not to include the LONGDOUBLE test on AIX --- numpy/f2py/tests/test_array_from_pyobj.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py index a8f952752cf4..15383e9431cc 100644 --- a/numpy/f2py/tests/test_array_from_pyobj.py +++ b/numpy/f2py/tests/test_array_from_pyobj.py @@ -147,9 +147,9 @@ def is_intent_exact(self, *names): # and several tests fail as the alignment flag can be randomly true or false # when numpy gains an aligned allocator the tests could be enabled again # -# Furthermore, on macOS ARM64, LONGDOUBLE is an alias for DOUBLE. +# Furthermore, on macOS ARM64 and AIX, LONGDOUBLE is an alias for DOUBLE. 
if ((np.intp().dtype.itemsize != 4 or np.clongdouble().dtype.alignment <= 8) - and sys.platform != "win32" + and sys.platform not in ["win32", "aix"] and (platform.system(), platform.processor()) != ("Darwin", "arm")): _type_names.extend(["LONGDOUBLE", "CDOUBLE", "CLONGDOUBLE"]) _cast_dict["LONGDOUBLE"] = _cast_dict["LONG"] + [ From c4734a679e338feb59dc5ab074f138e14cbad682 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Mon, 11 Aug 2025 11:40:48 +0200 Subject: [PATCH 0416/1018] CI: run some wheel build jobs by default, remove the rest This accounts for the actual release builds moving to the separate `numpy-release` repo. We want to keep a decent subset of wheel builds in the main repo and always run them, just like other CI jobs that test some config that matters. --- .github/workflows/wheels.yml | 215 +------ tools/wheels/LICENSE_linux.txt | 902 ------------------------------ tools/wheels/LICENSE_osx.txt | 902 ------------------------------ tools/wheels/LICENSE_win32.txt | 881 ----------------------------- tools/wheels/check_license.py | 58 -- tools/wheels/cibw_before_build.sh | 13 - tools/wheels/cibw_test_command.sh | 3 +- tools/wheels/upload_wheels.sh | 54 -- 8 files changed, 9 insertions(+), 3019 deletions(-) delete mode 100644 tools/wheels/LICENSE_linux.txt delete mode 100644 tools/wheels/LICENSE_osx.txt delete mode 100644 tools/wheels/LICENSE_win32.txt delete mode 100644 tools/wheels/check_license.py delete mode 100644 tools/wheels/upload_wheels.sh diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 9cf98d481fe3..1885577f7a12 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -1,34 +1,14 @@ -# Workflow to build and test wheels. -# To work on the wheel building infrastructure on a fork, comment out: +# Workflow to build and test wheels, similarly to numpy/numpy-release. +# To work on these jobs in a fork, comment out: # -# if: github.repository == 'numpy/numpy' -# -# in the get_commit_message job. Be sure to include [wheel build] in your commit -# message to trigger the build. All files related to wheel building are located -# at tools/wheels/ -# Alternatively, you can add labels to the pull request in order to trigger wheel -# builds. -# The labels that trigger builds are: -# 36 - Build(for changes to the building process, -# 14 - Release(ensure wheels build before release) +# if: github.repository == 'numpy/numpy' name: Wheel builder on: - schedule: - # ┌───────────── minute (0 - 59) - # │ ┌───────────── hour (0 - 23) - # │ │ ┌───────────── day of the month (1 - 31) - # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC) - # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT) - # │ │ │ │ │ - - cron: "42 2 * * SUN,WED" pull_request: branches: - main - maintenance/** - push: - tags: - - v* workflow_dispatch: concurrency: @@ -39,42 +19,12 @@ permissions: contents: read # to fetch code (actions/checkout) jobs: - get_commit_message: - name: Get commit message - runs-on: ubuntu-latest - # Only workflow_dispatch is enabled on forks. 
- # To enable this job and subsequent jobs on a fork for other events, comment out: - if: github.repository == 'numpy/numpy' || github.event_name == 'workflow_dispatch' - outputs: - message: ${{ steps.commit_message.outputs.message }} - steps: - - name: Checkout numpy - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - # Gets the correct commit message for pull request - with: - ref: ${{ github.event.pull_request.head.sha }} - persist-credentials: false - - name: Get commit message - id: commit_message - env: - HEAD: ${{ github.ref }} - run: | - set -xe - COMMIT_MSG=$(git log --no-merges -1 --oneline) - echo "message=$COMMIT_MSG" >> $GITHUB_OUTPUT - echo github.ref "$HEAD" - build_wheels: name: Build wheel ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} - needs: get_commit_message - if: >- - contains(needs.get_commit_message.outputs.message, '[wheel build]') || - github.event_name == 'schedule' || - github.event_name == 'workflow_dispatch' || - (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') && ( ! endsWith(github.ref, 'dev0'))) + # To enable this job on a fork, comment out: + if: github.repository == 'numpy/numpy' runs-on: ${{ matrix.buildplat[0] }} strategy: - # Ensure that a wheel builder finishes even if another fails fail-fast: false matrix: # Github Actions doesn't support pairing matrix values together, let's improvise @@ -85,34 +35,13 @@ jobs: - [ubuntu-22.04-arm, manylinux_aarch64, ""] - [ubuntu-22.04-arm, musllinux_aarch64, ""] - [macos-13, macosx_x86_64, openblas] - - [macos-13, macosx_x86_64, accelerate] - [macos-14, macosx_arm64, openblas] - - [macos-14, macosx_arm64, accelerate] - [windows-2022, win_amd64, ""] - - [windows-2022, win32, ""] - [windows-11-arm, win_arm64, ""] - python: ["cp311", "cp312", "cp313", "cp313t", "cp314", "cp314t", "pp311"] - exclude: - # Don't build PyPy 32-bit windows - - buildplat: [windows-2022, win32, ""] - python: "pp311" - # Don't build PyPy arm64 windows - - buildplat: [windows-11-arm, win_arm64, ""] - python: "pp311" - # No PyPy on musllinux images - - buildplat: [ ubuntu-22.04, musllinux_x86_64, "" ] - python: "pp311" - - buildplat: [ ubuntu-22.04-arm, musllinux_aarch64, "" ] - python: "pp311" - - buildplat: [ macos13, macosx_x86_64, openblas ] - python: "cp313t" - - buildplat: [ macos13, macosx_x86_64, openblas ] - python: "cp314t" + python: ["cp311"] env: - IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} - IS_PUSH: ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') }} - IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }} + IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} # used in cibw_test_command.sh steps: - name: Checkout numpy uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 @@ -131,6 +60,7 @@ jobs: uses: ./.github/windows_arm64_steps - name: pkg-config-for-win + if: runner.os == 'windows' run: | choco install -y --no-progress --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite $CIBW = "${{ github.workspace }}/.openblas" @@ -140,12 +70,6 @@ jobs: # passed through, so convert it to '/' $CIBW = $CIBW.replace("\","/") echo "CIBW_ENVIRONMENT_WINDOWS=PKG_CONFIG_PATH=$CIBW" >> $env:GITHUB_ENV - if: runner.os == 'windows' - - # Used to push the built wheels - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 - with: - python-version: "3.x" - name: Setup macOS if: matrix.buildplat[0] == 'macos-13' || 
matrix.buildplat[0] == 'macos-14' @@ -182,126 +106,3 @@ jobs: with: name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} path: ./wheelhouse/*.whl - - - name: install micromamba - uses: mamba-org/setup-micromamba@7f29b8b80078b1b601dfa018b0f7425c587c63bb - if: ${{ matrix.buildplat[1] != 'win_arm64' }} # unsupported platform at the moment - with: - # for installation of anaconda-client, required for upload to - # anaconda.org - # Note that this step is *after* specific pythons have been used to - # build and test the wheel - # for installation of anaconda-client, for upload to anaconda.org - # environment will be activated after creation, and in future bash steps - init-shell: bash - environment-name: upload-env - create-args: >- - anaconda-client - - - name: win-arm64 install anaconda client - if: ${{ matrix.buildplat[1] == 'win_arm64' }} - run: | - # Rust installation needed for rpds-py. - Invoke-WebRequest https://static.rust-lang.org/rustup/dist/aarch64-pc-windows-msvc/rustup-init.exe -UseBasicParsing -Outfile rustup-init.exe - .\rustup-init.exe -y - $env:PATH="$env:PATH;$env:USERPROFILE\.cargo\bin" - pip install anaconda-client - - - - name: Upload wheels - if: success() && github.repository == 'numpy/numpy' - shell: bash -el {0} - # see https://github.com/marketplace/actions/setup-miniconda for why - # `-el {0}` is required. - env: - NUMPY_STAGING_UPLOAD_TOKEN: ${{ secrets.NUMPY_STAGING_UPLOAD_TOKEN }} - NUMPY_NIGHTLY_UPLOAD_TOKEN: ${{ secrets.NUMPY_NIGHTLY_UPLOAD_TOKEN }} - run: | - source tools/wheels/upload_wheels.sh - set_upload_vars - # trigger an upload to - # https://anaconda.org/scientific-python-nightly-wheels/numpy - # for cron jobs or "Run workflow" (restricted to main branch). - # Tags will upload to - # https://anaconda.org/multibuild-wheels-staging/numpy - # The tokens were originally generated at anaconda.org - upload_wheels - - build_sdist: - name: Build sdist - needs: get_commit_message - if: >- - contains(needs.get_commit_message.outputs.message, '[wheel build]') || - github.event_name == 'schedule' || - github.event_name == 'workflow_dispatch' || - (github.event_name == 'pull_request' && - (contains(github.event.pull_request.labels.*.name, '36 - Build') || - contains(github.event.pull_request.labels.*.name, '14 - Release'))) || - (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') && ( ! endsWith(github.ref, 'dev0'))) - runs-on: ubuntu-latest - env: - IS_PUSH: ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') }} - # commented out so the sdist doesn't upload to nightly - # IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }} - steps: - - name: Checkout numpy - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - with: - submodules: true - persist-credentials: false - # Used to push the built wheels - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 - with: - # Build sdist on lowest supported Python - python-version: "3.11" - - name: Build sdist - run: | - python -m pip install -U pip build - python -m build --sdist -Csetup-args=-Dallow-noblas=true - - name: Test the sdist - run: | - # TODO: Don't run test suite, and instead build wheels from sdist - # Depends on pypa/cibuildwheel#1020 - python -m pip install dist/*.gz -Csetup-args=-Dallow-noblas=true - pip install -r requirements/test_requirements.txt - cd .. 
# Can't import numpy within numpy src directory - python -c "import numpy, sys; print(numpy.__version__); sys.exit(numpy.test() is False)" - - - name: Check README rendering for PyPI - run: | - python -mpip install twine - twine check dist/* - - - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - with: - name: sdist - path: ./dist/* - - - uses: conda-incubator/setup-miniconda@835234971496cad1653abb28a638a281cf32541f # v3.2.0 - with: - # for installation of anaconda-client, required for upload to - # anaconda.org - # default (and activated) environment name is test - # Note that this step is *after* specific pythons have been used to - # build and test - auto-update-conda: true - python-version: "3.11" - - - name: Upload sdist - if: success() && github.repository == 'numpy/numpy' - shell: bash -el {0} - env: - NUMPY_STAGING_UPLOAD_TOKEN: ${{ secrets.NUMPY_STAGING_UPLOAD_TOKEN }} - # commented out so the sdist doesn't upload to nightly - # NUMPY_NIGHTLY_UPLOAD_TOKEN: ${{ secrets.NUMPY_NIGHTLY_UPLOAD_TOKEN }} - run: | - conda install -y anaconda-client - source tools/wheels/upload_wheels.sh - set_upload_vars - # trigger an upload to - # https://anaconda.org/scientific-python-nightly-wheels/numpy - # for cron jobs or "Run workflow" (restricted to main branch). - # Tags will upload to - # https://anaconda.org/multibuild-wheels-staging/numpy - # The tokens were originally generated at anaconda.org - upload_wheels diff --git a/tools/wheels/LICENSE_linux.txt b/tools/wheels/LICENSE_linux.txt deleted file mode 100644 index db488c6cff47..000000000000 --- a/tools/wheels/LICENSE_linux.txt +++ /dev/null @@ -1,902 +0,0 @@ - ----- - -This binary distribution of NumPy also bundles the following software: - - -Name: OpenBLAS -Files: numpy.libs/libscipy_openblas*.so -Description: bundled as a dynamically linked library -Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause - Copyright (c) 2011-2014, The OpenBLAS Project - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - 3. Neither the name of the OpenBLAS project nor the names of - its contributors may be used to endorse or promote products - derived from this software without specific prior written - permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE - USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - -Name: LAPACK -Files: numpy.libs/libscipy_openblas*.so -Description: bundled in OpenBLAS -Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause-Open-MPI - Copyright (c) 1992-2013 The University of Tennessee and The University - of Tennessee Research Foundation. All rights - reserved. - Copyright (c) 2000-2013 The University of California Berkeley. All - rights reserved. - Copyright (c) 2006-2013 The University of Colorado Denver. All rights - reserved. - - $COPYRIGHT$ - - Additional copyrights may follow - - $HEADER$ - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer listed - in this license in the documentation and/or other materials - provided with the distribution. - - - Neither the name of the copyright holders nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - The copyright holders provide no reassurances that the source code - provided does not infringe any patent, copyright, or any other - intellectual property rights of third parties. The copyright holders - disclaim any liability to any recipient for claims brought against - recipient by any third party for infringement of that parties - intellectual property rights. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -Name: GCC runtime library -Files: numpy.libs/libgfortran*.so -Description: dynamically linked to files compiled with gcc -Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran -License: GPL-3.0-or-later WITH GCC-exception-3.1 - Copyright (C) 2002-2017 Free Software Foundation, Inc. - - Libgfortran is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgfortran is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. 
- - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . - ----- - -Full text of license texts referred to above follows (that they are -listed below does not necessarily imply the conditions apply to the -present binary release): - ----- - -GCC RUNTIME LIBRARY EXCEPTION - -Version 3.1, 31 March 2009 - -Copyright (C) 2009 Free Software Foundation, Inc. - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -This GCC Runtime Library Exception ("Exception") is an additional -permission under section 7 of the GNU General Public License, version -3 ("GPLv3"). It applies to a given file (the "Runtime Library") that -bears a notice placed by the copyright holder of the file stating that -the file is governed by GPLv3 along with this Exception. - -When you use GCC to compile a program, GCC may combine portions of -certain GCC header files and runtime libraries with the compiled -program. The purpose of this Exception is to allow compilation of -non-GPL (including proprietary) programs to use, in this way, the -header files and runtime libraries covered by this Exception. - -0. Definitions. - -A file is an "Independent Module" if it either requires the Runtime -Library for execution after a Compilation Process, or makes use of an -interface provided by the Runtime Library, but is not otherwise based -on the Runtime Library. - -"GCC" means a version of the GNU Compiler Collection, with or without -modifications, governed by version 3 (or a specified later version) of -the GNU General Public License (GPL) with the option of using any -subsequent versions published by the FSF. - -"GPL-compatible Software" is software whose conditions of propagation, -modification and use would permit combination with GCC in accord with -the license of GCC. - -"Target Code" refers to output from any compiler for a real or virtual -target processor architecture, in executable form or suitable for -input to an assembler, loader, linker and/or execution -phase. Notwithstanding that, Target Code does not include data in any -format that is used as a compiler intermediate representation, or used -for producing a compiler intermediate representation. - -The "Compilation Process" transforms code entirely represented in -non-intermediate languages designed for human-written code, and/or in -Java Virtual Machine byte code, into Target Code. Thus, for example, -use of source code generators and preprocessors need not be considered -part of the Compilation Process, since the Compilation Process can be -understood as starting with the output of the generators or -preprocessors. - -A Compilation Process is "Eligible" if it is done using GCC, alone or -with other GPL-compatible software, or if it is done without using any -work based on GCC. For example, using non-GPL-compatible Software to -optimize any GCC intermediate representations would not qualify as an -Eligible Compilation Process. - -1. Grant of Additional Permission. - -You have permission to propagate a work of Target Code formed by -combining the Runtime Library with Independent Modules, even if such -propagation would otherwise violate the terms of GPLv3, provided that -all Target Code was generated by Eligible Compilation Processes. 
You -may then convey such a combination under terms of your choice, -consistent with the licensing of the Independent Modules. - -2. No Weakening of GCC Copyleft. - -The availability of this Exception does not imply any general -presumption that third-party software is unaffected by the copyleft -requirements of the license of GCC. - ----- - - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. 
- - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. 
- - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. 
- - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. 
- - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. 
- - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. 
If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). 
To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. 
- - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. 
- - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - <one line to give the program's name and a brief idea of what it does.> - Copyright (C) <year> <name of author> - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see <http://www.gnu.org/licenses/>. - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - <program> Copyright (C) <year> <name of author> - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -<http://www.gnu.org/licenses/>. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -<http://www.gnu.org/philosophy/why-not-lgpl.html>. - -Name: libquadmath -Files: numpy.libs/libquadmath*.so -Description: dynamically linked to files compiled with gcc -Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath -License: LGPL-2.1-or-later - - GCC Quad-Precision Math Library - Copyright (C) 2010-2019 Free Software Foundation, Inc. - Written by Francois-Xavier Coudert <fxcoudert@acm.org> - - This file is part of the libquadmath library. - Libquadmath is free software; you can redistribute it and/or - modify it under the terms of the GNU Library General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - Libquadmath is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details.
- https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html diff --git a/tools/wheels/LICENSE_osx.txt b/tools/wheels/LICENSE_osx.txt deleted file mode 100644 index 5cea18441b35..000000000000 --- a/tools/wheels/LICENSE_osx.txt +++ /dev/null @@ -1,902 +0,0 @@ - ----- - -This binary distribution of NumPy also bundles the following software: - - -Name: OpenBLAS -Files: numpy/.dylibs/libscipy_openblas*.so -Description: bundled as a dynamically linked library -Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause - Copyright (c) 2011-2014, The OpenBLAS Project - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - 3. Neither the name of the OpenBLAS project nor the names of - its contributors may be used to endorse or promote products - derived from this software without specific prior written - permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE - USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -Name: LAPACK -Files: numpy/.dylibs/libscipy_openblas*.so -Description: bundled in OpenBLAS -Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause-Open-MPI - Copyright (c) 1992-2013 The University of Tennessee and The University - of Tennessee Research Foundation. All rights - reserved. - Copyright (c) 2000-2013 The University of California Berkeley. All - rights reserved. - Copyright (c) 2006-2013 The University of Colorado Denver. All rights - reserved. - - $COPYRIGHT$ - - Additional copyrights may follow - - $HEADER$ - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer listed - in this license in the documentation and/or other materials - provided with the distribution. - - - Neither the name of the copyright holders nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - The copyright holders provide no reassurances that the source code - provided does not infringe any patent, copyright, or any other - intellectual property rights of third parties. 
The copyright holders - disclaim any liability to any recipient for claims brought against - recipient by any third party for infringement of that parties - intellectual property rights. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -Name: GCC runtime library -Files: numpy/.dylibs/libgfortran*, numpy/.dylibs/libgcc* -Description: dynamically linked to files compiled with gcc -Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran -License: GPL-3.0-or-later WITH GCC-exception-3.1 - Copyright (C) 2002-2017 Free Software Foundation, Inc. - - Libgfortran is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgfortran is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - <http://www.gnu.org/licenses/>. - ----- - -Full text of license texts referred to above follows (that they are -listed below does not necessarily imply the conditions apply to the -present binary release): - ----- - -GCC RUNTIME LIBRARY EXCEPTION - -Version 3.1, 31 March 2009 - -Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/> - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -This GCC Runtime Library Exception ("Exception") is an additional -permission under section 7 of the GNU General Public License, version -3 ("GPLv3"). It applies to a given file (the "Runtime Library") that -bears a notice placed by the copyright holder of the file stating that -the file is governed by GPLv3 along with this Exception. - -When you use GCC to compile a program, GCC may combine portions of -certain GCC header files and runtime libraries with the compiled -program. The purpose of this Exception is to allow compilation of -non-GPL (including proprietary) programs to use, in this way, the -header files and runtime libraries covered by this Exception. - -0. Definitions. - -A file is an "Independent Module" if it either requires the Runtime -Library for execution after a Compilation Process, or makes use of an -interface provided by the Runtime Library, but is not otherwise based -on the Runtime Library.
- -"GCC" means a version of the GNU Compiler Collection, with or without -modifications, governed by version 3 (or a specified later version) of -the GNU General Public License (GPL) with the option of using any -subsequent versions published by the FSF. - -"GPL-compatible Software" is software whose conditions of propagation, -modification and use would permit combination with GCC in accord with -the license of GCC. - -"Target Code" refers to output from any compiler for a real or virtual -target processor architecture, in executable form or suitable for -input to an assembler, loader, linker and/or execution -phase. Notwithstanding that, Target Code does not include data in any -format that is used as a compiler intermediate representation, or used -for producing a compiler intermediate representation. - -The "Compilation Process" transforms code entirely represented in -non-intermediate languages designed for human-written code, and/or in -Java Virtual Machine byte code, into Target Code. Thus, for example, -use of source code generators and preprocessors need not be considered -part of the Compilation Process, since the Compilation Process can be -understood as starting with the output of the generators or -preprocessors. - -A Compilation Process is "Eligible" if it is done using GCC, alone or -with other GPL-compatible software, or if it is done without using any -work based on GCC. For example, using non-GPL-compatible Software to -optimize any GCC intermediate representations would not qualify as an -Eligible Compilation Process. - -1. Grant of Additional Permission. - -You have permission to propagate a work of Target Code formed by -combining the Runtime Library with Independent Modules, even if such -propagation would otherwise violate the terms of GPLv3, provided that -all Target Code was generated by Eligible Compilation Processes. You -may then convey such a combination under terms of your choice, -consistent with the licensing of the Independent Modules. - -2. No Weakening of GCC Copyleft. - -The availability of this Exception does not imply any general -presumption that third-party software is unaffected by the copyleft -requirements of the license of GCC. - ----- - - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. 
- - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. 
- - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. 
- - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. 
- - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. 
- - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. 
(Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. 
- - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. 
- - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. 
If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. 
Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. - -Name: libquadmath -Files: numpy/.dylibs/libquadmath*.so -Description: dynamically linked to files compiled with gcc -Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath -License: LGPL-2.1-or-later - - GCC Quad-Precision Math Library - Copyright (C) 2010-2019 Free Software Foundation, Inc. - Written by Francois-Xavier Coudert - - This file is part of the libquadmath library. - Libquadmath is free software; you can redistribute it and/or - modify it under the terms of the GNU Library General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. 
- - Libquadmath is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html diff --git a/tools/wheels/LICENSE_win32.txt b/tools/wheels/LICENSE_win32.txt deleted file mode 100644 index aed96845583b..000000000000 --- a/tools/wheels/LICENSE_win32.txt +++ /dev/null @@ -1,881 +0,0 @@ - ----- - -This binary distribution of NumPy also bundles the following software: - - -Name: OpenBLAS -Files: numpy.libs\libscipy_openblas*.dll -Description: bundled as a dynamically linked library -Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause - Copyright (c) 2011-2014, The OpenBLAS Project - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - 3. Neither the name of the OpenBLAS project nor the names of - its contributors may be used to endorse or promote products - derived from this software without specific prior written - permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE - USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -Name: LAPACK -Files: numpy.libs\libscipy_openblas*.dll -Description: bundled in OpenBLAS -Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause-Open-MPI - Copyright (c) 1992-2013 The University of Tennessee and The University - of Tennessee Research Foundation. All rights - reserved. - Copyright (c) 2000-2013 The University of California Berkeley. All - rights reserved. - Copyright (c) 2006-2013 The University of Colorado Denver. All rights - reserved. - - $COPYRIGHT$ - - Additional copyrights may follow - - $HEADER$ - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer listed - in this license in the documentation and/or other materials - provided with the distribution. 
- - - Neither the name of the copyright holders nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - The copyright holders provide no reassurances that the source code - provided does not infringe any patent, copyright, or any other - intellectual property rights of third parties. The copyright holders - disclaim any liability to any recipient for claims brought against - recipient by any third party for infringement of that parties - intellectual property rights. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -Name: GCC runtime library -Files: numpy.libs\libscipy_openblas*.dll -Description: statically linked to files compiled with gcc -Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran -License: GPL-3.0-or-later WITH GCC-exception-3.1 - Copyright (C) 2002-2017 Free Software Foundation, Inc. - - Libgfortran is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgfortran is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . - ----- - -Full text of license texts referred to above follows (that they are -listed below does not necessarily imply the conditions apply to the -present binary release): - ----- - -GCC RUNTIME LIBRARY EXCEPTION - -Version 3.1, 31 March 2009 - -Copyright (C) 2009 Free Software Foundation, Inc. - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -This GCC Runtime Library Exception ("Exception") is an additional -permission under section 7 of the GNU General Public License, version -3 ("GPLv3"). It applies to a given file (the "Runtime Library") that -bears a notice placed by the copyright holder of the file stating that -the file is governed by GPLv3 along with this Exception. - -When you use GCC to compile a program, GCC may combine portions of -certain GCC header files and runtime libraries with the compiled -program. 
The purpose of this Exception is to allow compilation of -non-GPL (including proprietary) programs to use, in this way, the -header files and runtime libraries covered by this Exception. - -0. Definitions. - -A file is an "Independent Module" if it either requires the Runtime -Library for execution after a Compilation Process, or makes use of an -interface provided by the Runtime Library, but is not otherwise based -on the Runtime Library. - -"GCC" means a version of the GNU Compiler Collection, with or without -modifications, governed by version 3 (or a specified later version) of -the GNU General Public License (GPL) with the option of using any -subsequent versions published by the FSF. - -"GPL-compatible Software" is software whose conditions of propagation, -modification and use would permit combination with GCC in accord with -the license of GCC. - -"Target Code" refers to output from any compiler for a real or virtual -target processor architecture, in executable form or suitable for -input to an assembler, loader, linker and/or execution -phase. Notwithstanding that, Target Code does not include data in any -format that is used as a compiler intermediate representation, or used -for producing a compiler intermediate representation. - -The "Compilation Process" transforms code entirely represented in -non-intermediate languages designed for human-written code, and/or in -Java Virtual Machine byte code, into Target Code. Thus, for example, -use of source code generators and preprocessors need not be considered -part of the Compilation Process, since the Compilation Process can be -understood as starting with the output of the generators or -preprocessors. - -A Compilation Process is "Eligible" if it is done using GCC, alone or -with other GPL-compatible software, or if it is done without using any -work based on GCC. For example, using non-GPL-compatible Software to -optimize any GCC intermediate representations would not qualify as an -Eligible Compilation Process. - -1. Grant of Additional Permission. - -You have permission to propagate a work of Target Code formed by -combining the Runtime Library with Independent Modules, even if such -propagation would otherwise violate the terms of GPLv3, provided that -all Target Code was generated by Eligible Compilation Processes. You -may then convey such a combination under terms of your choice, -consistent with the licensing of the Independent Modules. - -2. No Weakening of GCC Copyleft. - -The availability of this Exception does not imply any general -presumption that third-party software is unaffected by the copyleft -requirements of the license of GCC. - ----- - - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. 
- - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. 
- - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. 
This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. 
This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. 
- - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. 
- - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. 
- - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. 
- - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. 
If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. 
Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. - diff --git a/tools/wheels/check_license.py b/tools/wheels/check_license.py deleted file mode 100644 index 9aa50015d00b..000000000000 --- a/tools/wheels/check_license.py +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env python -""" -check_license.py [MODULE] - -Check the presence of a LICENSE.txt in the installed module directory, -and that it appears to contain text prevalent for a NumPy binary -distribution. 
-
-"""
-import argparse
-import pathlib
-import re
-import sys
-
-
-def check_text(text):
-    ok = "Copyright (c)" in text and re.search(
-        r"This binary distribution of \w+ also bundles the following software",
-        text,
-    )
-    return ok
-
-
-def main():
-    p = argparse.ArgumentParser(usage=__doc__.rstrip())
-    p.add_argument("module", nargs="?", default="numpy")
-    args = p.parse_args()
-
-    # Drop '' from sys.path
-    sys.path.pop(0)
-
-    # Find module path
-    __import__(args.module)
-    mod = sys.modules[args.module]
-
-    # LICENSE.txt is installed in the .dist-info directory, so find it there
-    sitepkgs = pathlib.Path(mod.__file__).parent.parent
-    distinfo_path = next(iter(sitepkgs.glob("numpy-*.dist-info")))
-
-    # Check license text
-    license_txt = distinfo_path / "licenses" / "LICENSE.txt"
-    with open(license_txt, encoding="utf-8") as f:
-        text = f.read()
-
-    ok = check_text(text)
-    if not ok:
-        print(
-            f"ERROR: License text {license_txt} does not contain expected "
-            "text fragments\n"
-        )
-        print(text)
-        sys.exit(1)
-
-    sys.exit(0)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/tools/wheels/cibw_before_build.sh b/tools/wheels/cibw_before_build.sh
index ed2640471fed..f5da8968258a 100644
--- a/tools/wheels/cibw_before_build.sh
+++ b/tools/wheels/cibw_before_build.sh
@@ -2,22 +2,9 @@
 set -xe
 
 PROJECT_DIR="${1:-$PWD}"
-
 # remove any cruft from a previous run
 rm -rf build
 
-# Update license
-echo "" >> $PROJECT_DIR/LICENSE.txt
-echo "----" >> $PROJECT_DIR/LICENSE.txt
-echo "" >> $PROJECT_DIR/LICENSE.txt
-if [[ $RUNNER_OS == "Linux" ]] ; then
-    cat $PROJECT_DIR/tools/wheels/LICENSE_linux.txt >> $PROJECT_DIR/LICENSE.txt
-elif [[ $RUNNER_OS == "macOS" ]]; then
-    cat $PROJECT_DIR/tools/wheels/LICENSE_osx.txt >> $PROJECT_DIR/LICENSE.txt
-elif [[ $RUNNER_OS == "Windows" ]]; then
-    cat $PROJECT_DIR/tools/wheels/LICENSE_win32.txt >> $PROJECT_DIR/LICENSE.txt
-fi
-
 if [[ $(python -c"import sys; print(sys.maxsize)") < $(python -c"import sys; print(2**33)") ]]; then
     echo "No BLAS used for 32-bit wheels"
     export INSTALL_OPENBLAS=false
diff --git a/tools/wheels/cibw_test_command.sh b/tools/wheels/cibw_test_command.sh
index 60e90ef5beb6..57f97f5ed706 100644
--- a/tools/wheels/cibw_test_command.sh
+++ b/tools/wheels/cibw_test_command.sh
@@ -1,5 +1,5 @@
 # This script is used by .github/workflows/wheels.yml to run the full test
-# suite, checks for license inclusion and that the openblas version is correct.
+# suite, and to check that the openblas version is correct.
set -xe PROJECT_DIR="$1" @@ -45,4 +45,3 @@ fi # durations for the 10 slowests tests to help with debugging slow or hanging # tests python -c "import sys; import numpy; sys.exit(not numpy.test(label='full', extra_argv=['-n=auto', '--timeout=1800', '--durations=10']))" -python $PROJECT_DIR/tools/wheels/check_license.py diff --git a/tools/wheels/upload_wheels.sh b/tools/wheels/upload_wheels.sh deleted file mode 100644 index ccd713c907a2..000000000000 --- a/tools/wheels/upload_wheels.sh +++ /dev/null @@ -1,54 +0,0 @@ -set_travis_vars() { - # Set env vars - echo "TRAVIS_EVENT_TYPE is $TRAVIS_EVENT_TYPE" - echo "TRAVIS_TAG is $TRAVIS_TAG" - if [[ "$TRAVIS_EVENT_TYPE" == "push" && "$TRAVIS_TAG" == v* ]]; then - IS_PUSH="true" - else - IS_PUSH="false" - fi - if [[ "$TRAVIS_EVENT_TYPE" == "cron" ]]; then - IS_SCHEDULE_DISPATCH="true" - else - IS_SCHEDULE_DISPATCH="false" - fi -} -set_upload_vars() { - echo "IS_PUSH is $IS_PUSH" - echo "IS_SCHEDULE_DISPATCH is $IS_SCHEDULE_DISPATCH" - if [[ "$IS_PUSH" == "true" ]]; then - echo push and tag event - export ANACONDA_ORG="multibuild-wheels-staging" - export TOKEN="$NUMPY_STAGING_UPLOAD_TOKEN" - export ANACONDA_UPLOAD="true" - elif [[ "$IS_SCHEDULE_DISPATCH" == "true" ]]; then - echo scheduled or dispatched event - export ANACONDA_ORG="scientific-python-nightly-wheels" - export TOKEN="$NUMPY_NIGHTLY_UPLOAD_TOKEN" - export ANACONDA_UPLOAD="true" - else - echo non-dispatch event - export ANACONDA_UPLOAD="false" - fi -} -upload_wheels() { - echo ${PWD} - if [[ ${ANACONDA_UPLOAD} == true ]]; then - if [[ -z ${TOKEN} ]]; then - echo no token set, not uploading - else - # sdists are located under dist folder when built through setup.py - if compgen -G "./dist/*.gz"; then - echo "Found sdist" - anaconda -q -t ${TOKEN} upload --force -u ${ANACONDA_ORG} ./dist/*.gz - elif compgen -G "./wheelhouse/*.whl"; then - echo "Found wheel" - anaconda -q -t ${TOKEN} upload --force -u ${ANACONDA_ORG} ./wheelhouse/*.whl - else - echo "Files do not exist" - return 1 - fi - echo "PyPI-style index: https://pypi.anaconda.org/$ANACONDA_ORG/simple" - fi - fi -} From 6a16e90eb42e9d584436ece5322ca04c8f03c0dc Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Mon, 11 Aug 2025 21:39:46 +0200 Subject: [PATCH 0417/1018] CI: remove redundant Windows, musllinux jobs These configs are covered by `wheels.yml`, which is now part of the regular test suite rather than off-by-default on PRs. 
--- .github/workflows/linux.yml | 35 ----------- .github/workflows/linux_musl.yml | 69 --------------------- .github/workflows/windows.yml | 102 +------------------------------ 3 files changed, 3 insertions(+), 203 deletions(-) delete mode 100644 .github/workflows/linux_musl.yml diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index b55f3dedb67e..5a453521f2dd 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -157,41 +157,6 @@ jobs: env: PYTHONOPTIMIZE: 2 - - aarch64_test: - needs: [smoke_test] - if: github.repository == 'numpy/numpy' - runs-on: ubuntu-22.04-arm - steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - with: - submodules: recursive - fetch-tags: true - persist-credentials: false - - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 - with: - python-version: '3.11' - - - name: Install Python dependencies - run: | - python -m pip install -r requirements/build_requirements.txt - python -m pip install -r requirements/test_requirements.txt - python -m pip install -r requirements/ci32_requirements.txt - mkdir -p ./.openblas - python -c"import scipy_openblas32 as ob32; print(ob32.get_pkg_config())" > ./.openblas/scipy-openblas.pc - - - name: Build - env: - PKG_CONFIG_PATH: ${{ github.workspace }}/.openblas - run: | - spin build - - - name: Test - run: | - spin test -j2 -m full -- --timeout=600 --durations=10 - - armhf_test: # Tests NumPy on 32-bit ARM hard-float (armhf) via compatibility mode # running on aarch64 (ARM 64-bit) GitHub runners. diff --git a/.github/workflows/linux_musl.yml b/.github/workflows/linux_musl.yml deleted file mode 100644 index 547c031bc84b..000000000000 --- a/.github/workflows/linux_musl.yml +++ /dev/null @@ -1,69 +0,0 @@ -name: Test musllinux_x86_64 - -on: - pull_request: - branches: - - main - - maintenance/** - - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - - -permissions: - contents: read # to fetch code (actions/checkout) - - -jobs: - musllinux_x86_64: - runs-on: ubuntu-latest - # To enable this workflow on a fork, comment out: - if: github.repository == 'numpy/numpy' - container: - # Use container used for building musllinux wheels - # it has git installed, all the pythons, etc - image: quay.io/pypa/musllinux_1_2_x86_64 - - steps: - - name: setup - run: | - apk update --quiet - - # using git commands to clone because versioneer doesn't work when - # actions/checkout is used for the clone step in a container - - git config --global --add safe.directory $PWD - - if [ $GITHUB_EVENT_NAME != pull_request ]; then - git clone --recursive --branch=$GITHUB_REF_NAME https://github.com/${GITHUB_REPOSITORY}.git $GITHUB_WORKSPACE - git reset --hard $GITHUB_SHA - else - git clone --recursive https://github.com/${GITHUB_REPOSITORY}.git $GITHUB_WORKSPACE - git fetch origin $GITHUB_REF:my_ref_name - git checkout $GITHUB_BASE_REF - git -c user.email="you@example.com" merge --no-commit my_ref_name - fi - git submodule update --init - - ln -s /usr/local/bin/python3.11 /usr/local/bin/python - - - name: test-musllinux_x86_64 - env: - PKG_CONFIG_PATH: ${{ github.workspace }}/.openblas - run: | - python -m venv test_env - source test_env/bin/activate - - pip install -r requirements/ci_requirements.txt - pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt - - # use meson to build and test - spin build --with-scipy-openblas=64 - spin test -j auto -- --timeout=600 
--durations=10 - - - name: Meson Log - shell: bash - run: | - cat build/meson-logs/meson-log.txt diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 66f544b51d7d..2cfb98e67aba 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -14,18 +14,11 @@ permissions: contents: read # to fetch code (actions/checkout) jobs: - python64bit_openblas: + clangcl_python64bit_openblas32: name: x86-64, LP64 OpenBLAS runs-on: windows-2022 # To enable this job on a fork, comment out: if: github.repository == 'numpy/numpy' - strategy: - fail-fast: false - matrix: - compiler-pyversion: - - ["MSVC", "3.11"] - - ["Clang-cl", "3.14t-dev"] - steps: - name: Checkout uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 @@ -37,7 +30,7 @@ jobs: - name: Setup Python uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: - python-version: ${{ matrix.compiler-pyversion[1] }} + python-version: "3.14t-dev" - name: Install build dependencies from PyPI run: | @@ -48,23 +41,7 @@ jobs: choco install -y --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite echo "PKG_CONFIG_PATH=${{ github.workspace }}/.openblas" >> $env:GITHUB_ENV - - - name: Install Clang-cl - if: matrix.compiler-pyversion[0] == 'Clang-cl' - run: | - # llvm is preinstalled, but leave - # this here in case we need to pin the - # version at some point. - #choco install llvm -y - - - name: Install NumPy (MSVC) - if: matrix.compiler-pyversion[0] == 'MSVC' - run: | - pip install -r requirements/ci_requirements.txt - spin build --with-scipy-openblas=32 -j2 -- --vsenv - - name: Install NumPy (Clang-cl) - if: matrix.compiler-pyversion[0] == 'Clang-cl' run: | "[binaries]","c = 'clang-cl'","cpp = 'clang-cl'","ar = 'llvm-lib'","c_ld = 'lld-link'","cpp_ld = 'lld-link'" | Out-File $PWD/clang-cl-build.ini -Encoding ascii pip install -r requirements/ci_requirements.txt @@ -85,80 +62,7 @@ jobs: run: | spin test -- --timeout=600 --durations=10 - python64bit_openblas_winarm64: - name: arm64, LPARM64 OpenBLAS - runs-on: windows-11-arm - # To enable this job on a fork, comment out: - if: github.repository == 'numpy/numpy' - strategy: - fail-fast: false - matrix: - compiler-pyversion: - - ["MSVC", "3.11"] - - ["Clang-cl", "3.14t-dev"] - - steps: - - name: Checkout - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - with: - submodules: recursive - fetch-tags: true - persist-credentials: false - - - name: Setup Python - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 - with: - python-version: ${{ matrix.compiler-pyversion[1] }} - architecture: arm64 - - - name: Setup MSVC - if: matrix.compiler-pyversion[0] == 'MSVC' - uses: bus1/cabuild/action/msdevshell@e22aba57d6e74891d059d66501b6b5aed8123c4d # v1 - with: - architecture: arm64 - - - name: Install build dependencies from PyPI - run: | - pip install -r requirements/build_requirements.txt - - - name: Install pkg-config - run: | - choco install -y --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite - echo "PKG_CONFIG_PATH=${{ github.workspace }}/.openblas" >> $env:GITHUB_ENV - - - name: Install Clang-cl - if: matrix.compiler-pyversion[0] == 'Clang-cl' - uses: ./.github/windows_arm64_steps - - - name: Install NumPy (MSVC) - if: matrix.compiler-pyversion[0] == 'MSVC' - run: | - pip install -r requirements/ci_requirements.txt - spin build --with-scipy-openblas=32 -j2 -- --vsenv - - - name: Install NumPy (Clang-cl) - if: 
matrix.compiler-pyversion[0] == 'Clang-cl' - run: | - "[binaries]","c = 'clang-cl'","cpp = 'clang-cl'","ar = 'llvm-lib'","c_ld = 'lld-link'","cpp_ld = 'lld-link'" | Out-File $PWD/clang-cl-arm64.ini -Encoding ascii - pip install -r requirements/ci_requirements.txt - spin build --with-scipy-openblas=32 -j2 -- --vsenv --native-file=$PWD/clang-cl-arm64.ini - - - name: Meson Log - shell: bash - if: ${{ failure() }} - run: | - cat build/meson-logs/meson-log.txt - - - name: Install test dependencies - run: | - python -m pip install -r requirements/test_requirements.txt - python -m pip install threadpoolctl - - - name: Run test suite - run: | - spin test -- --timeout=600 --durations=10 - - msvc_python_no_openblas: + msvc_python32bit_no_openblas: name: MSVC, ${{ matrix.architecture }} Python , no BLAS runs-on: ${{ matrix.os }} strategy: From c9cae4d6d5f7d550146cbd850d776f830cb9d088 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 9 Sep 2025 17:08:13 +0000 Subject: [PATCH 0418/1018] MAINT: Bump github/codeql-action from 3.30.1 to 3.30.2 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.30.1 to 3.30.2. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/f1f6e5f6af878fb37288ce1c627459e94dbf7d01...d3678e237b9c32a6c9bffb3315c335f976f3549f) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.30.2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 363ad25f2e50..13619ca762c3 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@f1f6e5f6af878fb37288ce1c627459e94dbf7d01 # v3.30.1 + uses: github/codeql-action/init@d3678e237b9c32a6c9bffb3315c335f976f3549f # v3.30.2 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@f1f6e5f6af878fb37288ce1c627459e94dbf7d01 # v3.30.1 + uses: github/codeql-action/autobuild@d3678e237b9c32a6c9bffb3315c335f976f3549f # v3.30.2 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@f1f6e5f6af878fb37288ce1c627459e94dbf7d01 # v3.30.1 + uses: github/codeql-action/analyze@d3678e237b9c32a6c9bffb3315c335f976f3549f # v3.30.2 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 98cab6b712f6..9eb76bdc0525 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@f1f6e5f6af878fb37288ce1c627459e94dbf7d01 # v2.1.27 + uses: github/codeql-action/upload-sarif@d3678e237b9c32a6c9bffb3315c335f976f3549f # v2.1.27 with: sarif_file: results.sarif From 788889237029e2f3403d5b21b513be99dafa163e Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 9 Sep 2025 16:17:53 +0200 Subject: [PATCH 0419/1018] CI: sync wheel build cleanups back from `numpy-release` repo --- pyproject.toml | 43 ++++++++++------------------- tools/wheels/cibw_before_build.sh | 46 +++++++++++++++---------------- tools/wheels/cibw_test_command.sh | 28 ++++--------------- 3 files changed, 44 insertions(+), 73 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index b678be83a486..5ffbd9ea0247 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -182,9 +182,8 @@ tracker = "https://github.com/numpy/numpy/issues" [tool.cibuildwheel] # Note: the below skip command doesn't do much currently, the platforms to -# build wheels for in CI are controlled in `.github/workflows/wheels.yml` and -# `tools/ci/cirrus_wheels.yml`. -build-frontend = "build" +# build wheels for in CI are controlled in `.github/workflows/wheels.yml`. 
+# universal2 wheels are not supported (see gh-21233), use `delocate-fuse` if you need them skip = ["*_i686", "*_ppc64le", "*_s390x", "*_universal2"] before-build = "bash {project}/tools/wheels/cibw_before_build.sh {project}" before-test = "pip install -r {project}/requirements/test_requirements.txt" @@ -202,45 +201,33 @@ manylinux-aarch64-image = "manylinux_2_28" musllinux-x86_64-image = "musllinux_1_2" musllinux-aarch64-image = "musllinux_1_2" -[tool.cibuildwheel.pyodide] -before-test = "pip install -r {project}/requirements/emscripten_test_requirements.txt" -# Pyodide ensures that the wheels are already repaired by auditwheel-emscripten -repair-wheel-command = "" -test-command = "python -m pytest --pyargs numpy -m 'not slow'" - -[tool.cibuildwheel.pyodide.config-settings] -build-dir = "build" -setup-args = ["--cross-file=$PWD/tools/ci/emscripten/emscripten.meson.cross", "-Dblas=none", "-Dlapack=none"] - [tool.cibuildwheel.linux.environment] -# RUNNER_OS is a GitHub Actions specific env var; define it here so it works on Cirrus CI too +# RUNNER_OS is a GitHub Actions specific env var; define it here so it's +# defined when running cibuildwheel locally RUNNER_OS="Linux" # /project will be the $PWD equivalent inside the docker used to build the wheel PKG_CONFIG_PATH="/project/.openblas" -LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/project/.openblas/lib" - -[tool.cibuildwheel.macos] -# universal2 wheels are not supported (see gh-21233), use `delocate-fuse` if you need them -# note that universal2 wheels are not built, they're listed in the tool.cibuildwheel.skip -# section -# Not clear why the DYLD_LIBRARY_PATH is not passed through from the environment -repair-wheel-command = [ - "export DYLD_LIBRARY_PATH=$PWD/.openblas/lib", - "echo DYLD_LIBRARY_PATH $DYLD_LIBRARY_PATH", - "delocate-wheel --require-archs {delocate_archs} -w {dest_dir} -v {wheel}", -] [tool.cibuildwheel.windows] config-settings = {setup-args = ["--vsenv", "-Dallow-noblas=false"], build-dir="build"} repair-wheel-command = "bash -el ./tools/wheels/repair_windows.sh {wheel} {dest_dir}" -# This does not work, use CIBW_ENVIRONMENT_WINDOWS -environment = {PKG_CONFIG_PATH="./.openblas"} [[tool.cibuildwheel.overrides]] select = ["*-win32"] config-settings = {setup-args = ["--vsenv", "-Dallow-noblas=true"], build-dir="build"} repair-wheel-command = "" +[tool.cibuildwheel.pyodide] +before-test = "pip install -r {project}/requirements/emscripten_test_requirements.txt" +# Pyodide ensures that the wheels are already repaired by auditwheel-emscripten +repair-wheel-command = "" +test-command = "python -m pytest --pyargs numpy -m 'not slow'" + +[tool.cibuildwheel.pyodide.config-settings] +build-dir = "build" +setup-args = ["--cross-file=$PWD/tools/ci/emscripten/emscripten.meson.cross", "-Dblas=none", "-Dlapack=none"] + + [tool.meson-python] meson = 'vendored-meson/meson/meson.py' diff --git a/tools/wheels/cibw_before_build.sh b/tools/wheels/cibw_before_build.sh index f5da8968258a..381c329a5372 100644 --- a/tools/wheels/cibw_before_build.sh +++ b/tools/wheels/cibw_before_build.sh @@ -13,40 +13,40 @@ elif [ -z $INSTALL_OPENBLAS ]; then export INSTALL_OPENBLAS=true fi -# Install Openblas from scipy-openblas64 +# Install OpenBLAS from scipy-openblas32|64 if [[ "$INSTALL_OPENBLAS" = "true" ]] ; then - # by default, use scipy-openblas64 + # By default, use scipy-openblas64 + # On 32-bit platforms and on win-arm64, use scipy-openblas32 OPENBLAS=openblas64 - # Possible values for RUNNER_ARCH in github are - # X86, X64, ARM, or ARM64 - # TODO: should we 
detect a missing RUNNER_ARCH and use platform.machine() - # when wheel build is run outside github? - # On 32-bit platforms, use scipy_openblas32 - # On win-arm64 use scipy_openblas32 + # Possible values for RUNNER_ARCH in GitHub Actions are: X86, X64, ARM, or ARM64 if [[ $RUNNER_ARCH == "X86" || $RUNNER_ARCH == "ARM" ]] ; then OPENBLAS=openblas32 elif [[ $RUNNER_ARCH == "ARM64" && $RUNNER_OS == "Windows" ]] ; then OPENBLAS=openblas32 fi - echo PKG_CONFIG_PATH is $PKG_CONFIG_PATH, OPENBLAS is ${OPENBLAS} - PKG_CONFIG_PATH=$PROJECT_DIR/.openblas - rm -rf $PKG_CONFIG_PATH - mkdir -p $PKG_CONFIG_PATH - python -m pip install -r requirements/ci_requirements.txt - python -c "import scipy_${OPENBLAS}; print(scipy_${OPENBLAS}.get_pkg_config())" > $PKG_CONFIG_PATH/scipy-openblas.pc - # Copy the shared objects to a path under $PKG_CONFIG_PATH, the build - # will point $LD_LIBRARY_PATH there and then auditwheel/delocate-wheel will - # pull these into the wheel. Use python to avoid windows/posix problems - python < $pkgconf_path/scipy-openblas.pc + + # Copy scipy-openblas DLL's to a fixed location so we can point delvewheel + # at it in `repair_windows.sh` (needed only on Windows because of the lack + # of RPATH support). + if [[ $RUNNER_OS == "Windows" ]]; then + python < Date: Tue, 9 Sep 2025 14:21:46 -0400 Subject: [PATCH 0420/1018] TST: xfail test_kind::test_quad_precision on AIX/PPC (#29693) * TST: xfail test_kind::test_quad_precision on AIX/PPC * Change to use platform.system() instead --- numpy/f2py/tests/test_kind.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/f2py/tests/test_kind.py b/numpy/f2py/tests/test_kind.py index ecf884fb4999..c219cc8bfd09 100644 --- a/numpy/f2py/tests/test_kind.py +++ b/numpy/f2py/tests/test_kind.py @@ -10,6 +10,7 @@ from . import util +IS_PPC_OR_AIX = platform.machine().lower().startswith("ppc") or platform.system() == 'AIX' class TestKind(util.F2PyTest): sources = [util.getpath("tests", "src", "kind", "foo.f90")] @@ -37,7 +38,7 @@ def test_real(self): i ), f"selectedrealkind({i}): expected {selected_real_kind(i)!r} but got {selectedrealkind(i)!r}" - @pytest.mark.xfail(platform.machine().lower().startswith("ppc"), + @pytest.mark.xfail(IS_PPC_OR_AIX, reason="Some PowerPC may not support full IEEE 754 precision") def test_quad_precision(self): """ From ed275f33ce4b6a4d207e0226bc724816e9ae59ef Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 9 Sep 2025 12:32:15 -0600 Subject: [PATCH 0421/1018] MAINT: Update main after 2.3.3 release. - Forward port 2.3.3-notes.rst - Forward port 2.3.3-changelog.rst - Update release.rst --- doc/changelog/2.3.3-changelog.rst | 50 +++++++++++++++++++++++++ doc/source/release.rst | 1 + doc/source/release/2.3.3-notes.rst | 59 ++++++++++++++++++++++++++++++ 3 files changed, 110 insertions(+) create mode 100644 doc/changelog/2.3.3-changelog.rst create mode 100644 doc/source/release/2.3.3-notes.rst diff --git a/doc/changelog/2.3.3-changelog.rst b/doc/changelog/2.3.3-changelog.rst new file mode 100644 index 000000000000..0398b30072af --- /dev/null +++ b/doc/changelog/2.3.3-changelog.rst @@ -0,0 +1,50 @@ + +Contributors +============ + +A total of 13 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Aleksandr A. 
Voyt + +* Bernard Roesler + +* Charles Harris +* Hunter Hogan + +* Joren Hammudoglu +* Maanas Arora +* Matti Picus +* Nathan Goldbaum +* Raghuveer Devulapalli +* Sanjay Kumar Sakamuri Kamalakar + +* Tobias Markus + +* Warren Weckesser +* Zebreus + + +Pull requests merged +==================== + +A total of 23 pull requests were merged for this release. + +* `#29440 `__: MAINT: Prepare 2.3.x for further development. +* `#29446 `__: BUG: Fix test_configtool_pkgconfigdir to resolve PKG_CONFIG_DIR... +* `#29447 `__: BLD: allow targeting webassembly without emscripten +* `#29460 `__: MAINT: Backport write_release.py +* `#29473 `__: MAINT: Bump pypa/cibuildwheel from 3.1.0 to 3.1.2 +* `#29500 `__: BUG: Always return a real dtype from linalg.cond (gh-18304) (#29333) +* `#29501 `__: MAINT: Add .file entry to all .s SVML files +* `#29556 `__: BUG: Casting from one timedelta64 to another didn't handle NAT. +* `#29562 `__: BLD: update vendored Meson to 1.8.3 [wheel build] +* `#29563 `__: BUG: Fix metadata not roundtripping when pickling datetime (#29555) +* `#29587 `__: TST: update link and version for Intel SDE download +* `#29593 `__: TYP: add ``sorted`` kwarg to ``unique`` +* `#29672 `__: MAINT: Update pythoncapi-compat from main. +* `#29673 `__: MAINT: Update cibuildwheel. +* `#29674 `__: MAINT: Fix typo in wheels.yml +* `#29683 `__: BUG, BLD: Correct regex for ppc64 VSX3/VSX4 feature detection +* `#29684 `__: TYP: ndarray.fill() takes no keyword arguments +* `#29685 `__: BUG: avoid thread-unsafe refcount check in temp elision +* `#29687 `__: CI: replace comment-hider action in mypy_primer workflow +* `#29689 `__: BLD: Add missing include +* `#29691 `__: BUG: use correct input dtype in flatiter assignment +* `#29700 `__: TYP: fix np.bool method declarations +* `#29701 `__: BUG: Correct ambiguous logic for s390x CPU feature detection + diff --git a/doc/source/release.rst b/doc/source/release.rst index 3af644a57562..72ddab818a77 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -6,6 +6,7 @@ Release notes :maxdepth: 2 2.4.0 + 2.3.3 2.3.2 2.3.1 2.3.0 diff --git a/doc/source/release/2.3.3-notes.rst b/doc/source/release/2.3.3-notes.rst new file mode 100644 index 000000000000..3c293c3db322 --- /dev/null +++ b/doc/source/release/2.3.3-notes.rst @@ -0,0 +1,59 @@ +.. currentmodule:: numpy + +========================= +NumPy 2.3.3 Release Notes +========================= + +The NumPy 2.3.3 release is a patch release split between a number of maintenance +updates and bug fixes. This release supports Python versions 3.11-3.14. Note +that the 3.14.0 final is currently expected in Oct, 2025. This release is based +on 3.14.0rc2. + +Contributors +============ + +A total of 13 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Aleksandr A. Voyt + +* Bernard Roesler + +* Charles Harris +* Hunter Hogan + +* Joren Hammudoglu +* Maanas Arora +* Matti Picus +* Nathan Goldbaum +* Raghuveer Devulapalli +* Sanjay Kumar Sakamuri Kamalakar + +* Tobias Markus + +* Warren Weckesser +* Zebreus + + +Pull requests merged +==================== + +A total of 23 pull requests were merged for this release. + +* `#29440 `__: MAINT: Prepare 2.3.x for further development. +* `#29446 `__: BUG: Fix test_configtool_pkgconfigdir to resolve PKG_CONFIG_DIR... 
+* `#29447 `__: BLD: allow targeting webassembly without emscripten +* `#29460 `__: MAINT: Backport write_release.py +* `#29473 `__: MAINT: Bump pypa/cibuildwheel from 3.1.0 to 3.1.2 +* `#29500 `__: BUG: Always return a real dtype from linalg.cond (gh-18304) (#29333) +* `#29501 `__: MAINT: Add .file entry to all .s SVML files +* `#29556 `__: BUG: Casting from one timedelta64 to another didn't handle NAT. +* `#29562 `__: BLD: update vendored Meson to 1.8.3 [wheel build] +* `#29563 `__: BUG: Fix metadata not roundtripping when pickling datetime (#29555) +* `#29587 `__: TST: update link and version for Intel SDE download +* `#29593 `__: TYP: add ``sorted`` kwarg to ``unique`` +* `#29672 `__: MAINT: Update pythoncapi-compat from main. +* `#29673 `__: MAINT: Update cibuildwheel. +* `#29674 `__: MAINT: Fix typo in wheels.yml +* `#29683 `__: BUG, BLD: Correct regex for ppc64 VSX3/VSX4 feature detection +* `#29684 `__: TYP: ndarray.fill() takes no keyword arguments +* `#29685 `__: BUG: avoid thread-unsafe refcount check in temp elision +* `#29687 `__: CI: replace comment-hider action in mypy_primer workflow +* `#29689 `__: BLD: Add missing include +* `#29691 `__: BUG: use correct input dtype in flatiter assignment +* `#29700 `__: TYP: fix np.bool method declarations +* `#29701 `__: BUG: Correct ambiguous logic for s390x CPU feature detection From 83402f7a43363e8041b8f297fdd4f76a7fa9fea1 Mon Sep 17 00:00:00 2001 From: Sandeep Gupta Date: Wed, 10 Sep 2025 01:09:26 +0530 Subject: [PATCH 0422/1018] CI: Add native ``ppc64le`` CI job using GitHub Actions (#29212) --- .github/workflows/linux-ppc64le.yml | 52 +++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 .github/workflows/linux-ppc64le.yml diff --git a/.github/workflows/linux-ppc64le.yml b/.github/workflows/linux-ppc64le.yml new file mode 100644 index 000000000000..223da69a7b8d --- /dev/null +++ b/.github/workflows/linux-ppc64le.yml @@ -0,0 +1,52 @@ +name: Native ppc64le Linux Test + +on: + pull_request: + branches: + - main + - maintenance/** + workflow_dispatch: + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + native_ppc64le: + # This job runs only in the main NumPy repository. + # It requires a native ppc64le GHA runner, which is not available on forks. 
+ # For more details, see: https://github.com/numpy/numpy/issues/29125 + if: github.repository == 'numpy/numpy' + runs-on: ubuntu-24.04-ppc64le + name: "Native PPC64LE" + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + + - name: Install dependencies + run: | + sudo apt update + sudo apt install -y python3 python3-pip python3-dev ninja-build gfortran \ + build-essential libopenblas-dev liblapack-dev pkg-config + pip install --upgrade pip + pip install -r requirements/build_requirements.txt + pip install pytest pytest-xdist hypothesis typing_extensions pytest-timeout spin + echo "/home/runner/.local/bin" >> $GITHUB_PATH + + - name: Meson Build + run: | + spin build -- -Dallow-noblas=false + + - name: Meson Log + if: always() + run: cat build/meson-logs/meson-log.txt + + - name: Run Tests + run: | + spin test -- --timeout=60 --durations=10 \ No newline at end of file From dc3e1e776e68cdd9445237ce19b14379bd49b2e4 Mon Sep 17 00:00:00 2001 From: Raghuveer Date: Tue, 9 Sep 2025 21:51:20 -0700 Subject: [PATCH 0423/1018] BUG: enable x86-simd-sort to build on KNL with -mavx512f --- numpy/_core/src/npysort/x86-simd-sort | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/npysort/x86-simd-sort b/numpy/_core/src/npysort/x86-simd-sort index c306ac581a59..fe2b5bf62275 160000 --- a/numpy/_core/src/npysort/x86-simd-sort +++ b/numpy/_core/src/npysort/x86-simd-sort @@ -1 +1 @@ -Subproject commit c306ac581a59f89585d778254c4ed7197e64ba2d +Subproject commit fe2b5bf62275ba6173c53b6c5b747c362b641d5e From 65343f756c29cadef77ad9ffb2180eca9dd8f7b8 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 10 Sep 2025 13:27:51 +0200 Subject: [PATCH 0424/1018] DOC: update `doc/BRANCH_WALKTHROUGH.rst` for release process changes `pavement.py` was recently removed, and the replacement no longer needs a manual change. The git version no longer depends on `.dev0` tags, so remove that note. However, keep adding those tags, because why not - it's useful to navigate to the start of the development for a release series. --- doc/BRANCH_WALKTHROUGH.rst | 39 ++++++++++++++++---------------------- 1 file changed, 16 insertions(+), 23 deletions(-) diff --git a/doc/BRANCH_WALKTHROUGH.rst b/doc/BRANCH_WALKTHROUGH.rst index 5767fb6e6a10..3f9db71a0282 100644 --- a/doc/BRANCH_WALKTHROUGH.rst +++ b/doc/BRANCH_WALKTHROUGH.rst @@ -1,6 +1,6 @@ -This guide contains a walkthrough of branching NumPy 1.21.x on Linux. The -commands can be copied into the command line, but be sure to replace 1.21 and -1.22 by the correct versions. It is good practice to make ``.mailmap`` as +This guide contains a walkthrough of branching NumPy 2.3.x on Linux. The +commands can be copied into the command line, but be sure to replace 2.3 and +2.4 by the correct versions. It is good practice to make ``.mailmap`` as current as possible before making the branch, that may take several weeks. This should be read together with the @@ -12,14 +12,13 @@ Branching Make the branch --------------- -This is only needed when starting a new maintenance branch. Because -NumPy now depends on tags to determine the version, the start of a new -development cycle in the main branch needs an annotated tag. That is done +This is only needed when starting a new maintenance branch. The start of a new +development cycle in the main branch should get an annotated tag. 
That is done as follows:: $ git checkout main $ git pull upstream main - $ git commit --allow-empty -m'REL: Begin NumPy 1.22.0 development' + $ git commit --allow-empty -m'REL: Begin NumPy 2.4.0 development' $ git push upstream HEAD If the push fails because new PRs have been merged, do:: @@ -28,20 +27,20 @@ If the push fails because new PRs have been merged, do:: and repeat the push. Once the push succeeds, tag it:: - $ git tag -a -s v1.22.0.dev0 -m'Begin NumPy 1.22.0 development' - $ git push upstream v1.22.0.dev0 + $ git tag -a -s v2.4.0.dev0 -m'Begin NumPy 2.4.0 development' + $ git push upstream v2.4.0.dev0 then make the new branch and push it:: - $ git branch maintenance/1.21.x HEAD^ - $ git push upstream maintenance/1.21.x + $ git branch maintenance/2.3.x HEAD^ + $ git push upstream maintenance/2.3.x Prepare the main branch for further development ----------------------------------------------- -Make a PR branch to prepare main for further development:: +Make a PR branch to prepare ``main`` for further development:: - $ git checkout -b 'prepare-main-for-1.22.0-development' v1.22.0.dev0 + $ git checkout -b 'prepare-main-for-2.4.0-development' v2.4.0.dev0 Delete the release note fragments:: @@ -49,18 +48,12 @@ Delete the release note fragments:: Create the new release notes skeleton and add to index:: - $ cp doc/source/release/template.rst doc/source/release/1.22.0-notes.rst - $ gvim doc/source/release/1.22.0-notes.rst # put the correct version - $ git add doc/source/release/1.22.0-notes.rst + $ cp doc/source/release/template.rst doc/source/release/2.4.0-notes.rst + $ gvim doc/source/release/2.4.0-notes.rst # put the correct version + $ git add doc/source/release/2.4.0-notes.rst $ gvim doc/source/release.rst # add new notes to notes index $ git add doc/source/release.rst -Update ``pavement.py`` and update the ``RELEASE_NOTES`` variable to point to -the new notes:: - - $ gvim pavement.py - $ git add pavement.py - Update ``cversions.txt`` to add current release. There should be no new hash to worry about at this early point, just add a comment following previous practice:: @@ -71,7 +64,7 @@ practice:: Check your work, commit it, and push:: $ git status # check work - $ git commit -m'REL: Prepare main for NumPy 1.22.0 development' + $ git commit -m'REL: Prepare main for NumPy 2.4.0 development' $ git push origin HEAD Now make a pull request. From ed2eab6f408009cbf4282842711a376c435ccd77 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 10 Sep 2025 14:24:49 +0200 Subject: [PATCH 0425/1018] DOC: update `doc/HOWTO_RELEASE.rst` for release process changes --- doc/HOWTO_RELEASE.rst | 188 ++++++++++++++++-------------------------- 1 file changed, 70 insertions(+), 118 deletions(-) diff --git a/doc/HOWTO_RELEASE.rst b/doc/HOWTO_RELEASE.rst index 53c3904703a4..d756a75a6bce 100644 --- a/doc/HOWTO_RELEASE.rst +++ b/doc/HOWTO_RELEASE.rst @@ -4,142 +4,113 @@ releases for NumPy. 
Current build and release info ============================== -Useful info can be found in the following locations: +Useful info can be found in `building-from-source` in the docs as well as in +these three files: -* **Source tree** - - - `INSTALL.rst `_ - - `pavement.py `_ - -* **NumPy docs** - - - `HOWTO_RELEASE.rst `_ - - `RELEASE_WALKTHROUGH.rst `_ - - `BRANCH_WALKTHROUGH.rst `_ +- `HOWTO_RELEASE.rst `_ +- `RELEASE_WALKTHROUGH.rst `_ +- `BRANCH_WALKTHROUGH.rst `_ Supported platforms and versions ================================ -:ref:`NEP 29 ` outlines which Python versions -are supported; For the first half of 2020, this will be Python >= 3.6. We test -NumPy against all these versions every time we merge code to main. Binary -installers may be available for a subset of these versions (see below). +:ref:`NEP 29 ` outlines which Python versions are supported *at a +minimum*. We usually decide to keep support for a given Python version slightly +longer than that minimum, to avoid giving other projects issues - this is at +the discretion of the release manager. -* **OS X** +* **macOS** - OS X versions >= 10.9 are supported, for Python version support see - :ref:`NEP 29 `. We build binary wheels for OSX that are compatible with - Python.org Python, system Python, homebrew and macports - see this - `OSX wheel building summary `_ - for details. + We aim to support the same set of macOS versions as are supported by + Python.org and `cibuildwheel`_ for any given Python version. + We build binary wheels for macOS that are compatible with common Python + installation methods, e.g., from python.org, ``python-build-standalone`` (the + ones ``uv`` installs), system Python, conda-forge, Homebrew and MacPorts. * **Windows** We build 32- and 64-bit wheels on Windows. Windows 7, 8 and 10 are supported. - We build NumPy using the `mingw-w64 toolchain`_, `cibuildwheels`_ and GitHub - actions. + We build NumPy using the most convenient compilers, which are (as of Aug + 2025) MSVC for x86/x86-64 and Clang-cl for arm64, `cibuildwheel`_ and GitHub + Actions. -.. _cibuildwheels: https://cibuildwheel.readthedocs.io/en/stable/ +.. _cibuildwheel: https://cibuildwheel.readthedocs.io/en/stable/ * **Linux** - We build and ship `manylinux_2_28 `_ - wheels for NumPy. Many Linux distributions include their own binary builds - of NumPy. + We build and ship ``manylinux`` and ``musllinux`` wheels for x86-64 and + aarch64 platforms on PyPI. Wheels for 32-bit platforms are not currently + provided. We aim to support the lowest non-EOL versions, and upgrade roughly + in sync with `cibuildwheel`_. See + `pypa/manylinux `__ and + `this distro compatibility table `__ + for more details. -* **BSD / Solaris** +* **BSD / Solaris / AIX** - No binaries are provided, but successful builds on Solaris and BSD have been - reported. + No binary wheels are provided on PyPI, however we expect building from source + on these platforms to work fine. -Tool chain +Toolchains ========== -We build all our wheels on cloud infrastructure - so this list of compilers is -for information and debugging builds locally. See the ``.travis.yml`` script -in the `numpy wheels`_ repo for an outdated source of the build recipes using -multibuild. - -.. _numpy wheels : https://github.com/MacPython/numpy-wheels - -Compilers ---------- -The same gcc version is used as the one with which Python itself is built on -each platform. At the moment this means: - -- OS X builds on travis currently use `clang`. 
It appears that binary wheels - for OSX >= 10.6 can be safely built from the travis-ci OSX 10.9 VMs - when building against the Python from the Python.org installers; -- Windows builds use the `mingw-w64 toolchain`_; -- Manylinux2014 wheels use the gcc provided on the Manylinux docker images. - -You will need Cython for building the binaries. Cython compiles the ``.pyx`` -files in the NumPy distribution to ``.c`` files. - -.. _mingw-w64 toolchain : https://mingwpy.github.io +For building wheels, we use the following toolchains: + +- Linux: we use the default compilers in the ``manylinux``/``musllinux`` Docker + images, which is usually a relatively recent GCC version. +- macOS: we use the Apple Clang compilers and XCode version installed on the + GitHub Actions runner image. +- Windows: for x86 and x86-64 we use the default MSVC and Visual Studio + toolchain installed on the relevant GitHub actions runner image. Note that in + the past it has sometimes been necessary to use an older toolchain to avoid + causing problems through the static ``libnpymath`` library for SciPy - please + inspect the `numpy/numpy-release `__ + code and CI logs in case the exact version numbers need to be determined. + +For building from source, minimum compiler versions are tracked in the top-level +``meson.build`` file. OpenBLAS -------- -All the wheels link to a version of OpenBLAS_ supplied via the openblas-libs_ repo. -The shared object (or DLL) is shipped with in the wheel, renamed to prevent name +Most wheels link to a version of OpenBLAS_ supplied via the openblas-libs_ repo. +The shared object (or DLL) is shipped within the wheel, renamed to prevent name collisions with other OpenBLAS shared objects that may exist in the filesystem. -.. _OpenBLAS: https://github.com/xianyi/OpenBLAS +.. _OpenBLAS: https://github.com/OpenMathLib/OpenBLAS .. _openblas-libs: https://github.com/MacPython/openblas-libs - -Building source archives and wheels ------------------------------------ -The NumPy wheels and sdist are now built using cibuildwheel with -github actions. - - Building docs ------------- -We are no longer building ``PDF`` files. All that will be needed is - -- virtualenv (pip). - -The other requirements will be filled automatically during the documentation -build process. - +We are no longer building ``pdf`` files. The requirements for building the +``html`` docs are no different than for regular development. See the README of +the `numpy/doc `__ repository and the step by +step instructions in ``doc/RELEASE_WALKTHROUGH.rst`` for more details. Uploading to PyPI ----------------- -The only application needed for uploading is - -- twine (pip). - -You will also need a PyPI token, which is best kept on a keyring. See the -twine keyring_ documentation for how to do that. - -.. _keyring: https://twine.readthedocs.io/en/stable/#keyring-support - +Creating a release on PyPI and uploading wheels and sdist is automated in CI +and uses `PyPI's trusted publishing `__. +See the README in the `numpy/numpy-release `__ +repository and the step by step instructions in ``doc/RELEASE_WALKTHROUGH.rst`` +for more details. Generating author/PR lists -------------------------- You will need a personal access token ``_ -so that scripts can access the github NumPy repository. - -- gitpython (pip) -- pygithub (pip) +so that scripts can access the GitHub NumPy repository. With that token, the +author/PR changelog content can be generated by running ``spin changelog``. 
It +may require a few extra packages, like ``gitpython`` and ``pygithub``. What is released ================ -* **Wheels** - We currently support Python 3.10-3.13 on Windows, OSX, and Linux. - - * Windows: 32-bit and 64-bit wheels built using Github actions; - * OSX: x64_86 and arm64 OSX wheels built using Github actions; - * Linux: x64_86 and aarch64 Manylinux2014 wheels built using Github actions. - -* **Other** - Release notes and changelog +On PyPI we release wheels for a number of platforms (as discussed higher up), +and an sdist. -* **Source distribution** - We build source releases in the .tar.gz format. +On GitHub Releases we release the same sdist (because the source archives which +are autogenerated by GitHub itself aren't complete), as well as the release +notes and changelog. Release process @@ -147,30 +118,11 @@ Release process Agree on a release schedule --------------------------- -A typical release schedule is one beta, two release candidates and a final -release. It's best to discuss the timing on the mailing list first, in order -for people to get their commits in on time, get doc wiki edits merged, etc. -After a date is set, create a new maintenance/x.y.z branch, add new empty -release notes for the next version in the main branch and update the Trac -Milestones. - - -Make sure current branch builds a package correctly ---------------------------------------------------- -The CI builds wheels when a PR header begins with ``REL``. Your last -PR before releasing should be so marked and all the tests should pass. -You can also do:: - - git clean -fxdq - python setup.py bdist_wheel - python setup.py sdist - -For details of the build process itself, it is best to read the -Step-by-Step Directions below. - -.. note:: The following steps are repeated for the beta(s), release - candidates(s) and the final release. - +A typical release schedule for a feature release is two release candidates and +a final release. It's best to discuss the timing on the mailing list first, in +order for people to get their commits in on time. After a date is set, create a +new ``maintenance/x.y.z`` branch, add new empty release notes for the next version +in the main branch and update the Milestones on the issue tracker. Check deprecations ------------------ From 60f8c87482518e5facab0dee3499ce5456a8e3bd Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 10 Sep 2025 15:44:44 +0200 Subject: [PATCH 0426/1018] DOC: update `doc/RELEASE_WALKTHROUGH.rst` for release process changes --- doc/RELEASE_WALKTHROUGH.rst | 228 +++++++++++++++--------------------- 1 file changed, 95 insertions(+), 133 deletions(-) diff --git a/doc/RELEASE_WALKTHROUGH.rst b/doc/RELEASE_WALKTHROUGH.rst index a3787ae95ee5..82f8d2c9e9f0 100644 --- a/doc/RELEASE_WALKTHROUGH.rst +++ b/doc/RELEASE_WALKTHROUGH.rst @@ -1,8 +1,9 @@ -This is a walkthrough of the NumPy 2.1.0 release on Linux, modified for -building with GitHub Actions and cibuildwheels and uploading to the -`anaconda.org staging repository for NumPy `_. -The commands can be copied into the command line, but be sure to replace 2.1.0 -by the correct version. This should be read together with the +This is a walkthrough of the NumPy 2.4.0 release on Linux, which will be the +first feature release using the `numpy/numpy-release +`__ repository. + +The commands can be copied into the command line, but be sure to replace 2.4.0 +with the correct version. This should be read together with the :ref:`general release guide `. 
Facility preparation @@ -26,29 +27,24 @@ Prior to release Add/drop Python versions ------------------------ -When adding or dropping Python versions, two files need to be edited: - -- .github/workflows/wheels.yml # for github cibuildwheel -- pyproject.toml # for classifier and minimum version check. - +When adding or dropping Python versions, multiple config and CI files need to +be edited in addition to changing the minimum version in ``pyproject.toml``. Make these changes in an ordinary PR against main and backport if necessary. -Add ``[wheel build]`` at the end of the title line of the commit summary so -that wheel builds will be run to test the changes. We currently release wheels -for new Python versions after the first Python rc once manylinux and -cibuildwheel support it. +We currently release wheels for new Python versions after the first Python RC +once manylinux and cibuildwheel support that new Python version. Backport pull requests ---------------------- Changes that have been marked for this release must be backported to the -maintenance/2.1.x branch. +maintenance/2.4.x branch. -Update 2.1.0 milestones +Update 2.4.0 milestones ----------------------- -Look at the issues/prs with 2.1.0 milestones and either push them off to a +Look at the issues/prs with 2.4.0 milestones and either push them off to a later version, or maybe remove the milestone. You may need to add a milestone. @@ -63,14 +59,13 @@ Four documents usually need to be updated or created for the release PR: - The ``pyproject.toml`` file These changes should be made in an ordinary PR against the maintenance branch. -The commit heading should contain a ``[wheel build]`` directive to test if the -wheels build. Other small, miscellaneous fixes may be part of this PR. The -commit message might be something like:: +Other small, miscellaneous fixes may be part of this PR. The commit message +might be something like:: - REL: Prepare for the NumPy 2.1.0 release [wheel build] + REL: Prepare for the NumPy 2.4.0 release - - Create 2.1.0-changelog.rst. - - Update 2.1.0-notes.rst. + - Create 2.4.0-changelog.rst. + - Update 2.4.0-notes.rst. - Update .mailmap. - Update pyproject.toml @@ -97,12 +92,12 @@ Generate the changelog The changelog is generated using the changelog tool:: - $ spin changelog $GITHUB v2.0.0..maintenance/2.1.x > doc/changelog/2.1.0-changelog.rst + $ spin changelog $GITHUB v2.3.0..maintenance/2.4.x > doc/changelog/2.4.0-changelog.rst where ``GITHUB`` contains your GitHub access token. The text will need to be checked for non-standard contributor names. It is also a good idea to remove any links that may be present in the PR titles as they don't translate well to -markdown, replace them with monospaced text. The non-standard contributor names +Markdown, replace them with monospaced text. The non-standard contributor names should be fixed by updating the ``.mailmap`` file, which is a lot of work. It is best to make several trial runs before reaching this point and ping the malefactors using a GitHub issue to get the needed information. @@ -116,7 +111,7 @@ run ``spin notes``, which will incorporate the snippets into the ``doc/source/release/notes-towncrier.rst`` file and delete the snippets:: $ spin notes - $ gvim doc/source/release/notes-towncrier.rst doc/source/release/2.1.0-notes.rst + $ gvim doc/source/release/notes-towncrier.rst doc/source/release/2.4.0-notes.rst Once the ``notes-towncrier`` contents has been incorporated into release note the ``.. include:: notes-towncrier.rst`` directive can be removed. 
The notes
@@ -142,8 +137,8 @@ isn't already present.

 Checkout the branch for the release, make sure it is up to date, and clean the
 repository::

-    $ git checkout maintenance/2.1.x
-    $ git pull upstream maintenance/2.1.x
+    $ git checkout maintenance/2.4.x
+    $ git pull upstream maintenance/2.4.x
     $ git submodule update
     $ git clean -xdfq

@@ -154,100 +149,71 @@ Sanity check::

 Tag the release and push the tag. This requires write permission for the
 numpy repository::

-    $ git tag -a -s v2.1.0 -m"NumPy 2.1.0 release"
-    $ git push upstream v2.1.0
+    $ git tag -a -s v2.4.0 -m"NumPy 2.4.0 release"
+    $ git push upstream v2.4.0

 If you need to delete the tag due to error::

-    $ git tag -d v2.1.0
-    $ git push --delete upstream v2.1.0
-
-
-2. Build wheels
----------------
-
-Tagging the build at the beginning of this process will trigger a wheel build
-via cibuildwheel and upload wheels and an sdist to the staging repo. All wheels
-are currently built on GitHub actions and take about 1 1/4 hours to build.
-
-If you wish to manually trigger a wheel build, you can do so:
-
-- On GitHub actions -> `Wheel builder`_ there is a "Run workflow" button, click
-  on it and choose the tag to build
-
-If some wheel builds fail for unrelated reasons, you can re-run them:
-
-- On GitHub actions select `Wheel builder`_ click on the task that contains
-  the build you want to re-run, it will have the tag as the branch. On the
-  upper right will be a re-run button, hit it and select "re-run failed"
-
-If some wheels fail to upload to anaconda, you can select those builds in the
-`Wheel builder`_ and manually download the build artifact. This is a temporary
-workaround, but sometimes the quickest way to get a release out.
-
-.. _`staging repository`: https://anaconda.org/multibuild-wheels-staging/numpy/files
-.. _`Wheel builder`: https://github.com/numpy/numpy/actions/workflows/wheels.yml
-
-
-3. Download wheels
-------------------
+    $ git tag -d v2.4.0
+    $ git push --delete upstream v2.4.0

-When the wheels have all been successfully built and staged, download them from the
-Anaconda staging directory using the ``tools/download-wheels.py`` script::

-    $ cd ../numpy
-    $ mkdir -p release/installers
-    $ python3 tools/download-wheels.py 2.1.0
-
-
-4. Generate the README files
-----------------------------
-
-This needs to be done after all installers are downloaded, but before the pavement
-file is updated for continued development::
-
-    $ python write_release 2.1.0
+2. Build wheels and sdist
+-------------------------

+Create a ``maintenance/2.4.x`` branch in the ``numpy-release`` repository,
+and open a PR changing the ``SOURCE_REF_TO_BUILD`` identifier at the top of
+``.github/workflows/wheels.yml`` to ``v2.4.0``. That will do a full set of
+wheel builds on the PR; if everything looks good, merge the PR.

-5. Upload to PyPI
------------------
+All wheels are currently built in that repository on GitHub Actions; they take
+about 1 hour to build.

-Upload to PyPI using ``twine``::
+If you wish to manually trigger a wheel build, you can do so: in your browser,
+go to `numpy-release/actions/workflows/wheels.yml `__
+and click on the "Run workflow" button, then choose the tag to build. If some
+wheel builds fail for unrelated reasons, you can re-run them as normal
+in the GitHub Actions UI with "re-run failed".

-    $ cd ../numpy
-    $ twine upload release/installers/*.whl
-    $ twine upload release/installers/*.gz  # Upload last.
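The ``SOURCE_REF_TO_BUILD`` edit described at the start of this step is a
one-line change. A minimal sketch of preparing that branch and PR from the
command line (the commit message is illustrative; only the file path and the
identifier name come from this walkthrough)::

    $ cd ../numpy-release
    $ git checkout -b maintenance/2.4.x
    $ gvim .github/workflows/wheels.yml  # set SOURCE_REF_TO_BUILD to v2.4.0
    $ git commit -am "REL: build wheels for v2.4.0"
    $ git push origin HEAD  # then open the PR on GitHub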
+Once you are ready to publish a release to PyPI, use that same "Run workflow"
+button and choose ``pypi`` in the *environment* dropdown. All wheels and the
+sdist will build and be ready to release to PyPI after manual inspection that
+everything passed: e.g., the number of artifacts is correct, and the wheel
+filenames and sizes look as expected. If desired, you can also download an
+artifact for local unzipping and inspection. You will also get an email
+notification with a "Review pending deployments" link. Once you're ready,
+press the button to start the uploads to PyPI, which will complete the release.

-The source file should be uploaded last to avoid synchronization problems that
-might occur if pip users access the files while this is in process, causing pip
-to build from source rather than downloading a binary wheel. PyPI only allows a
-single source distribution, here we have chosen the gz version. If the
-uploading breaks because of network related reasons, you can try re-running the
-commands, possibly after a fix. Twine will now handle the error generated by
-PyPI when the same file is uploaded twice.

-6. Upload files to GitHub
--------------------------
+3. Upload files to GitHub Releases
+----------------------------------

-Go to ``_, there should be a ``v2.1.0
+Go to ``_, there should be a ``v2.4.0
 tag``, click on it and hit the edit button for that tag and update the title to
-'v2.1.0 (). There are two ways to add files, using an editable text
-window and as binary uploads. Start by editing the ``release/README.md`` that
-is translated from the rst version using pandoc. Things that will need fixing:
-PR lines from the changelog, if included, are wrapped and need unwrapping,
-links should be changed to monospaced text. Then copy the contents to the
-clipboard and paste them into the text window. It may take several tries to get
-it look right. Then
-
-- Upload ``release/installers/numpy-2.1.0.tar.gz`` as a binary file.
+"v2.4.0 ()". There are two ways to add files: using an editable text
+window and as binary uploads.
+
+Start by running ``spin notes 2.4.0`` and then edit the ``release/README.md``
+that is translated from the rst version using pandoc. Things that will need
+fixing: PR lines from the changelog, if included, are wrapped and need
+unwrapping; links should be changed to monospaced text. Then copy the contents
+to the clipboard and paste them into the text window. It may take several tries
+to get it to look right. Then
+
+- Download the sdist (``numpy-2.4.0.tar.gz``) from PyPI and upload it to GitHub
+  as a binary file.
 - Upload ``release/README.rst`` as a binary file.
-- Upload ``doc/changelog/2.1.0-changelog.rst`` as a binary file.
+- Upload ``doc/changelog/2.4.0-changelog.rst`` as a binary file.
 - Check the pre-release button if this is a pre-release.
-- Hit the ``{Publish,Update} release`` button at the bottom.
+- Hit the ``Publish release`` button at the bottom.
+
+.. note::
+    Please ensure that all 3 uploaded files are present and the
+    release text is complete. Releases are configured to be immutable, so
+    mistakes can't (easily) be fixed anymore.


-7. Upload documents to numpy.org (skip for prereleases)
+4. Upload documents to numpy.org (skip for prereleases)
 -------------------------------------------------------

 .. note:: You will need a GitHub personal access token to push the update.

@@ -257,7 +223,7 @@ and most patch releases.
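Before moving on to the documentation upload below, it can be worth
smoke-testing what just landed on PyPI. This is not part of the official
checklist; a minimal sketch using a throwaway virtual environment (the path is
arbitrary)::

    $ python -m venv /tmp/numpy-smoke
    $ /tmp/numpy-smoke/bin/python -m pip install numpy==2.4.0
    $ /tmp/numpy-smoke/bin/python -c "import numpy; print(numpy.__version__)"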
``make merge-doc`` clones the ``numpy/doc`` repo into ``doc/build/merge`` and
updates it with the new documentation::

     $ git clean -xdfq
-    $ git co v2.1.0
+    $ git co v2.4.0
     $ rm -rf doc/build  # want version to be current
     $ python -m spin docs merge-doc --build
     $ pushd doc/build/merge

@@ -284,45 +250,41 @@ from ``numpy.org``::

 Update the stable link and update::

-    $ ln -sfn 2.1 stable
+    $ ln -sfn 2.4 stable
     $ ls -l  # check the link

 Once everything seems satisfactory, update, commit and upload the changes::

-    $ git commit -a -m"Add documentation for v2.1.0"
+    $ git commit -a -m"Add documentation for v2.4.0"
     $ git push git@github.com:numpy/doc
     $ popd

-8. Reset the maintenance branch into a development state (skip for prereleases)
+5. Reset the maintenance branch into a development state (skip for prereleases)
 -------------------------------------------------------------------------------

 Create release notes for the next release and edit them to set the version.
 These notes will be a skeleton and have little content::

-    $ git checkout -b begin-2.1.1 maintenance/2.1.x
-    $ cp doc/source/release/template.rst doc/source/release/2.1.1-notes.rst
-    $ gvim doc/source/release/2.1.1-notes.rst
-    $ git add doc/source/release/2.1.1-notes.rst
-
-Add new release notes to the documentation release list and update the
-``RELEASE_NOTES`` variable in ``pavement.py``::
+    $ git checkout -b begin-2.4.1 maintenance/2.4.x
+    $ cp doc/source/release/template.rst doc/source/release/2.4.1-notes.rst
+    $ gvim doc/source/release/2.4.1-notes.rst
+    $ git add doc/source/release/2.4.1-notes.rst

-    $ gvim doc/source/release.rst pavement.py
-
-Update the ``version`` in ``pyproject.toml``::
+Add new release notes to the documentation release list. Then update the
+``version`` in ``pyproject.toml``::

     $ gvim pyproject.toml

 Commit the result::

-    $ git commit -a -m"MAINT: Prepare 2.1.x for further development"
+    $ git commit -a -m"MAINT: Prepare 2.4.x for further development"
     $ git push origin HEAD

 Go to GitHub and make a PR. It should be merged quickly.

-9. Announce the release on numpy.org (skip for prereleases)
+6. Announce the release on numpy.org (skip for prereleases)
 -----------------------------------------------------------

 This assumes that you have forked ``_::

     $ cd ../numpy.org
     $ git checkout main
     $ git pull upstream main
-    $ git checkout -b announce-numpy-2.1.0
+    $ git checkout -b announce-numpy-2.4.0
     $ gvim content/en/news.md

 - For all releases, go to the bottom of the page and add a one-line link. Look

@@ -342,35 +304,35 @@ This assumes that you have forked ``_::

 commit and push::

-    $ git commit -a -m"announce the NumPy 2.1.0 release"
+    $ git commit -a -m"announce the NumPy 2.4.0 release"
     $ git push origin HEAD

 Go to GitHub and make a PR.

-10. Announce to mailing lists
------------------------------
+7. Announce to mailing lists
+----------------------------

-The release should be announced on the numpy-discussion, scipy-devel, and
+The release should be announced on the numpy-discussion and
 python-announce-list mailing lists. Look at previous announcements for the
 basic template. The contributor and PR lists are the same as generated for the
 release notes above. If you crosspost, make sure that python-announce-list is
 BCC so that replies will not be sent to that list.

-11. Post-release update main (skip for prereleases)
----------------------------------------------------
+8. 
Post-release update main (skip for prereleases) +-------------------------------------------------- Checkout main and forward port the documentation changes. You may also want to update these notes if procedures have changed or improved:: - $ git checkout -b post-2.1.0-release-update main - $ git checkout maintenance/2.1.x doc/source/release/2.1.0-notes.rst - $ git checkout maintenance/2.1.x doc/changelog/2.1.0-changelog.rst - $ git checkout maintenance/2.1.x .mailmap # only if updated for release. + $ git checkout -b post-2.4.0-release-update main + $ git checkout maintenance/2.4.x doc/source/release/2.4.0-notes.rst + $ git checkout maintenance/2.4.x doc/changelog/2.4.0-changelog.rst + $ git checkout maintenance/2.4.x .mailmap # only if updated for release. $ gvim doc/source/release.rst # Add link to new notes $ git status # check status before commit - $ git commit -a -m"MAINT: Update main after 2.1.0 release." + $ git commit -a -m"MAINT: Update main after 2.4.0 release." $ git push origin HEAD Go to GitHub and make a PR. From 8b5500e5ee9ff31cd3960b0c4c72875017ff0bee Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 10 Sep 2025 18:04:03 +0200 Subject: [PATCH 0427/1018] TYP: fix ``np.floating`` method declarations --- numpy/__init__.pyi | 164 ++++++++++++++++-- numpy/_typing/_callable.pyi | 55 +----- numpy/typing/tests/data/reveal/arithmetic.pyi | 38 ++-- numpy/typing/tests/data/reveal/mod.pyi | 15 +- 4 files changed, 172 insertions(+), 100 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index ed8fa9a93eda..be21a68a4892 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -134,9 +134,6 @@ from numpy._typing import ( ) from numpy._typing._callable import ( - _FloatOp, - _FloatMod, - _FloatDivMod, _ComparisonOpLT, _ComparisonOpLE, _ComparisonOpGT, @@ -4753,22 +4750,151 @@ class inexact(number[_NBit, _InexactItemT_co], Generic[_NBit, _InexactItemT_co]) class floating(_RealMixin, _RoundMixin, inexact[_NBit1, float]): def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... - __add__: _FloatOp[_NBit1] - __radd__: _FloatOp[_NBit1] - __sub__: _FloatOp[_NBit1] - __rsub__: _FloatOp[_NBit1] - __mul__: _FloatOp[_NBit1] - __rmul__: _FloatOp[_NBit1] - __truediv__: _FloatOp[_NBit1] - __rtruediv__: _FloatOp[_NBit1] - __floordiv__: _FloatOp[_NBit1] - __rfloordiv__: _FloatOp[_NBit1] - __pow__: _FloatOp[_NBit1] - __rpow__: _FloatOp[_NBit1] - __mod__: _FloatMod[_NBit1] - __rmod__: _FloatMod[_NBit1] - __divmod__: _FloatDivMod[_NBit1] - __rdivmod__: _FloatDivMod[_NBit1] + # arithmetic ops + + @override # type: ignore[override] + @overload + def __add__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __add__(self, other: integer | floating, /) -> floating: ... + @overload + def __add__(self, other: float, /) -> Self: ... + @overload + def __add__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __radd__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __radd__(self, other: integer | floating, /) -> floating: ... + @overload + def __radd__(self, other: float, /) -> Self: ... + @overload + def __radd__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __sub__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __sub__(self, other: integer | floating, /) -> floating: ... + @overload + def __sub__(self, other: float, /) -> Self: ... 
+ @overload + def __sub__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rsub__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rsub__(self, other: integer | floating, /) -> floating: ... + @overload + def __rsub__(self, other: float, /) -> Self: ... + @overload + def __rsub__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __mul__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mul__(self, other: integer | floating, /) -> floating: ... + @overload + def __mul__(self, other: float, /) -> Self: ... + @overload + def __mul__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rmul__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rmul__(self, other: integer | floating, /) -> floating: ... + @overload + def __rmul__(self, other: float, /) -> Self: ... + @overload + def __rmul__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __pow__(self, other: int | float16 | uint8 | int8 | bool_ | Self, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: integer | floating, mod: None = None, /) -> floating: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rpow__(self, other: int | float16 | uint8 | int8 | bool_, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: integer | floating, mod: None = None, /) -> floating: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __truediv__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __truediv__(self, other: integer | floating, /) -> floating: ... + @overload + def __truediv__(self, other: float, /) -> Self: ... + @overload + def __truediv__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rtruediv__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rtruediv__(self, other: integer | floating, /) -> floating: ... + @overload + def __rtruediv__(self, other: float, /) -> Self: ... + @overload + def __rtruediv__(self, other: complex, /) -> complexfloating: ... + + # modular division ops + + @overload + def __floordiv__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __floordiv__(self, other: integer | floating, /) -> floating: ... + @overload + def __floordiv__(self, other: float, /) -> Self: ... + + @overload + def __rfloordiv__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rfloordiv__(self, other: integer | floating, /) -> floating: ... + @overload + def __rfloordiv__(self, other: float, /) -> Self: ... + + @overload + def __mod__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mod__(self, other: integer | floating, /) -> floating: ... + @overload + def __mod__(self, other: float, /) -> Self: ... 
+ + @overload + def __rmod__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rmod__(self, other: integer | floating, /) -> floating: ... + @overload + def __rmod__(self, other: float, /) -> Self: ... + + @overload + def __divmod__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> _2Tuple[Self]: ... + @overload + def __divmod__(self, other: integer | floating, /) -> _2Tuple[floating]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[Self]: ... + + @overload + def __rdivmod__(self, other: int | float16 | uint8 | int8 | bool_, /) -> _2Tuple[Self]: ... + @overload + def __rdivmod__(self, other: integer | floating, /) -> _2Tuple[floating]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[Self]: ... # NOTE: `is_integer` and `as_integer_ratio` are technically defined in the concrete subtypes def is_integer(self, /) -> builtins.bool: ... diff --git a/numpy/_typing/_callable.pyi b/numpy/_typing/_callable.pyi index 96fa4000889f..a4d4f3452a12 100644 --- a/numpy/_typing/_callable.pyi +++ b/numpy/_typing/_callable.pyi @@ -8,69 +8,16 @@ See the `Mypy documentation`_ on protocols for more details. """ -from typing import Any, Protocol, TypeAlias, TypeVar, final, overload, type_check_only +from typing import Any, Protocol, TypeVar, final, overload, type_check_only import numpy as np -from numpy import complex128, complexfloating, float64, floating, integer -from . import NBitBase from ._array_like import NDArray -from ._nbit import _NBitInt from ._nested_sequence import _NestedSequence -_T = TypeVar("_T") _T1_contra = TypeVar("_T1_contra", contravariant=True) _T2_contra = TypeVar("_T2_contra", contravariant=True) -_2Tuple: TypeAlias = tuple[_T, _T] - -_NBit1 = TypeVar("_NBit1", bound=NBitBase) -_NBit2 = TypeVar("_NBit2", bound=NBitBase) - -@type_check_only -class _FloatOp(Protocol[_NBit1]): - @overload - def __call__(self, other: int, /) -> floating[_NBit1]: ... - @overload - def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... - @overload - def __call__( - self, other: complex, / - ) -> complexfloating[_NBit1, _NBit1] | complex128: ... - @overload - def __call__( - self, other: integer[_NBit2] | floating[_NBit2], / - ) -> floating[_NBit1] | floating[_NBit2]: ... - -@type_check_only -class _FloatMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> floating[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> floating[_NBit1] | floating[_NBitInt]: ... - @overload - def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... - @overload - def __call__( - self, other: integer[_NBit2] | floating[_NBit2], / - ) -> floating[_NBit1] | floating[_NBit2]: ... - -class _FloatDivMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> _2Tuple[floating[_NBit1]]: ... - @overload - def __call__( - self, other: int, / - ) -> _2Tuple[floating[_NBit1]] | _2Tuple[floating[_NBitInt]]: ... - @overload - def __call__( - self, other: float, / - ) -> _2Tuple[floating[_NBit1]] | _2Tuple[float64]: ... - @overload - def __call__( - self, other: integer[_NBit2] | floating[_NBit2], / - ) -> _2Tuple[floating[_NBit1]] | _2Tuple[floating[_NBit2]]: ... 
- @final @type_check_only class _SupportsLT(Protocol): diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index 1c564df5de70..ac4114abadd4 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -486,7 +486,7 @@ assert_type(c8 / b_, np.complex64) # Complex -assert_type(c16 + f16, np.complex128 | np.complexfloating[_128Bit, _128Bit]) +assert_type(c16 + f16, np.complexfloating) assert_type(c16 + c16, np.complex128) assert_type(c16 + f8, np.complex128) assert_type(c16 + i8, np.complex128) @@ -499,12 +499,12 @@ assert_type(c16 + c, np.complex128) assert_type(c16 + f, np.complex128) assert_type(c16 + AR_f, npt.NDArray[np.complex128]) -assert_type(f16 + c16, np.complex128 | np.complexfloating[_128Bit, _128Bit]) +assert_type(f16 + c16, np.complexfloating) assert_type(c16 + c16, np.complex128) assert_type(f8 + c16, np.complex128) assert_type(i8 + c16, np.complex128) assert_type(c8 + c16, np.complex128 | np.complex64) -assert_type(f4 + c16, np.complex128 | np.complex64) +assert_type(f4 + c16, np.complexfloating) assert_type(i4 + c16, np.complex128) assert_type(b_ + c16, np.complex128) assert_type(b + c16, np.complex128) @@ -512,10 +512,10 @@ assert_type(c + c16, np.complex128) assert_type(f + c16, np.complex128) assert_type(AR_f + c16, npt.NDArray[np.complex128]) -assert_type(c8 + f16, np.complexfloating[_32Bit, _32Bit] | np.complexfloating[_128Bit, _128Bit]) +assert_type(c8 + f16, np.complex64 | np.complexfloating[_128Bit, _128Bit]) assert_type(c8 + c16, np.complex64 | np.complex128) assert_type(c8 + f8, np.complex64 | np.complex128) -assert_type(c8 + i8, np.complexfloating[_32Bit, _32Bit] | np.complexfloating[_64Bit, _64Bit]) +assert_type(c8 + i8, np.complex64 | np.complexfloating[_64Bit, _64Bit]) assert_type(c8 + c8, np.complex64) assert_type(c8 + f4, np.complex64) assert_type(c8 + i4, np.complex64) @@ -540,7 +540,7 @@ assert_type(AR_f + c8, npt.NDArray[np.complexfloating]) # Float -assert_type(f8 + f16, np.float64 | np.floating[_128Bit]) +assert_type(f8 + f16, np.floating) assert_type(f8 + f8, np.float64) assert_type(f8 + i8, np.float64) assert_type(f8 + f4, np.float64) @@ -551,10 +551,10 @@ assert_type(f8 + c, np.float64 | np.complex128) assert_type(f8 + f, np.float64) assert_type(f8 + AR_f, npt.NDArray[np.float64]) -assert_type(f16 + f8, np.floating[_128Bit] | np.float64) +assert_type(f16 + f8, np.floating) assert_type(f8 + f8, np.float64) assert_type(i8 + f8, np.float64) -assert_type(f4 + f8, np.float32 | np.float64) +assert_type(f4 + f8, np.floating) assert_type(i4 + f8, np.float64) assert_type(b_ + f8, np.float64) assert_type(b + f8, np.float64) @@ -562,26 +562,26 @@ assert_type(c + f8, np.complex128 | np.float64) assert_type(f + f8, np.float64) assert_type(AR_f + f8, npt.NDArray[np.float64]) -assert_type(f4 + f16, np.float32 | np.floating[_128Bit]) -assert_type(f4 + f8, np.float32 | np.float64) -assert_type(f4 + i8, np.float32 | np.floating[_64Bit]) +assert_type(f4 + f16, np.floating) +assert_type(f4 + f8, np.floating) +assert_type(f4 + i8, np.floating) assert_type(f4 + f4, np.float32) -assert_type(f4 + i4, np.float32) +assert_type(f4 + i4, np.floating) assert_type(f4 + b_, np.float32) assert_type(f4 + b, np.float32) -assert_type(f4 + c, np.complex64 | np.complex128) -assert_type(f4 + f, np.float32 | np.float64) +assert_type(f4 + c, np.complexfloating) +assert_type(f4 + f, np.float32) assert_type(f4 + AR_f, npt.NDArray[np.float64]) -assert_type(f16 + f4, np.floating[_128Bit] | 
np.float32) +assert_type(f16 + f4, np.floating) assert_type(f8 + f4, np.float64) -assert_type(i8 + f4, np.floating[_32Bit] | np.floating[_64Bit]) +assert_type(i8 + f4, np.floating) assert_type(f4 + f4, np.float32) -assert_type(i4 + f4, np.float32) +assert_type(i4 + f4, np.floating) assert_type(b_ + f4, np.float32) assert_type(b + f4, np.float32) -assert_type(c + f4, np.complex64 | np.complex128) -assert_type(f + f4, np.float64 | np.float32) +assert_type(c + f4, np.complexfloating) +assert_type(f + f4, np.float32) assert_type(AR_f + f4, npt.NDArray[np.float64]) # Int diff --git a/numpy/typing/tests/data/reveal/mod.pyi b/numpy/typing/tests/data/reveal/mod.pyi index 338a9a1ff729..ef07dc0c8c8a 100644 --- a/numpy/typing/tests/data/reveal/mod.pyi +++ b/numpy/typing/tests/data/reveal/mod.pyi @@ -3,7 +3,6 @@ from typing import Literal as L, assert_type import numpy as np import numpy.typing as npt -from numpy._typing import _64Bit f8: np.float64 i8: np.int64 @@ -113,7 +112,7 @@ assert_type(i8 % f8, np.float64) assert_type(i4 % i8, np.signedinteger) assert_type(i4 % f8, np.float64) assert_type(i4 % i4, np.int32) -assert_type(i4 % f4, np.float32) +assert_type(i4 % f4, np.floating) assert_type(i8 % AR_b, npt.NDArray[np.int64]) assert_type(divmod(i8, b), tuple[np.int64, np.int64]) @@ -122,9 +121,9 @@ assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) # workarounds for https://github.com/microsoft/pyright/issues/9663 assert_type(i8.__divmod__(f), tuple[np.float64, np.float64]) assert_type(i8.__divmod__(f8), tuple[np.float64, np.float64]) -assert_type(divmod(i8, f4), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float32, np.float32]) +assert_type(divmod(i8, f4), tuple[np.floating, np.floating]) assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) -assert_type(divmod(i4, f4), tuple[np.float32, np.float32]) +assert_type(divmod(i4, f4), tuple[np.floating, np.floating]) assert_type(divmod(i8, AR_b), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) assert_type(b % i8, np.int64) @@ -134,7 +133,7 @@ assert_type(f8 % i8, np.float64) assert_type(i8 % i4, np.signedinteger) assert_type(f8 % i4, np.float64) assert_type(i4 % i4, np.int32) -assert_type(f4 % i4, np.float32) +assert_type(f4 % i4, np.floating) assert_type(AR_b % i8, npt.NDArray[np.int64]) assert_type(divmod(b, i8), tuple[np.int64, np.int64]) @@ -144,15 +143,15 @@ assert_type(divmod(f8, i8), tuple[np.float64, np.float64]) assert_type(divmod(i4, i8), tuple[np.signedinteger, np.signedinteger]) assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) # workarounds for https://github.com/microsoft/pyright/issues/9663 -assert_type(f4.__divmod__(i8), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float32, np.float32]) -assert_type(f4.__divmod__(i4), tuple[np.float32, np.float32]) +assert_type(f4.__divmod__(i8), tuple[np.floating, np.floating]) +assert_type(f4.__divmod__(i4), tuple[np.floating, np.floating]) assert_type(AR_b.__divmod__(i8), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) # float assert_type(f8 % b, np.float64) assert_type(f8 % f, np.float64) -assert_type(i8 % f4, np.floating[_64Bit] | np.float32) +assert_type(i8 % f4, np.floating) assert_type(f4 % f4, np.float32) assert_type(f8 % AR_b, npt.NDArray[np.float64]) From 44c7433a5ae5f1091edb18b2f74bccece9347bea Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 10 Sep 2025 18:52:31 +0200 Subject: [PATCH 0428/1018] TYP: fix ``timedelta64`` and ``datetime64`` method declarations --- numpy/__init__.pyi | 95 +++++++++++++++---- numpy/_typing/_callable.pyi | 87 
-----------------
 numpy/typing/tests/data/reveal/arithmetic.pyi |  2 +-
 3 files changed, 77 insertions(+), 107 deletions(-)
 delete mode 100644 numpy/_typing/_callable.pyi

diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index be21a68a4892..a19cd8475c10 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -133,17 +133,6 @@ from numpy._typing import (
     _GUFunc_Nin2_Nout1,
 )

-from numpy._typing._callable import (
-    _ComparisonOpLT,
-    _ComparisonOpLE,
-    _ComparisonOpGT,
-    _ComparisonOpGE,
-    _SupportsLT,
-    _SupportsLE,
-    _SupportsGT,
-    _SupportsGE,
-)
-
 # NOTE: Numpy's mypy plugin is used for removing the types unavailable to the specific platform
 from numpy._typing._extended_precision import (
     float96,
@@ -1042,6 +1031,26 @@ class _FormerAttrsDict(TypedDict):

 ### Protocols (for internal use only)

+@final
+@type_check_only
+class _SupportsLT(Protocol):
+    def __lt__(self, other: Any, /) -> Any: ...
+
+@final
+@type_check_only
+class _SupportsLE(Protocol):
+    def __le__(self, other: Any, /) -> Any: ...
+
+@final
+@type_check_only
+class _SupportsGT(Protocol):
+    def __gt__(self, other: Any, /) -> Any: ...
+
+@final
+@type_check_only
+class _SupportsGE(Protocol):
+    def __ge__(self, other: Any, /) -> Any: ...
+
 @type_check_only
 class _SupportsFileMethods(SupportsFlush, Protocol):
     # Protocol for representing file-like-objects accepted by `ndarray.tofile` and `fromfile`
@@ -5365,10 +5374,35 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co]
     @overload
     def __rfloordiv__(self, a: timedelta64, /) -> int64: ...

-    __lt__: _ComparisonOpLT[_TD64Like_co, _ArrayLikeTD64_co]
-    __le__: _ComparisonOpLE[_TD64Like_co, _ArrayLikeTD64_co]
-    __gt__: _ComparisonOpGT[_TD64Like_co, _ArrayLikeTD64_co]
-    __ge__: _ComparisonOpGE[_TD64Like_co, _ArrayLikeTD64_co]
+    # comparison ops
+
+    @overload
+    def __lt__(self, other: _TD64Like_co, /) -> bool_: ...
+    @overload
+    def __lt__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ...
+    @overload
+    def __lt__(self, other: _SupportsGT, /) -> bool_: ...
+
+    @overload
+    def __le__(self, other: _TD64Like_co, /) -> bool_: ...
+    @overload
+    def __le__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ...
+    @overload
+    def __le__(self, other: _SupportsGE, /) -> bool_: ...
+
+    @overload
+    def __gt__(self, other: _TD64Like_co, /) -> bool_: ...
+    @overload
+    def __gt__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ...
+    @overload
+    def __gt__(self, other: _SupportsLT, /) -> bool_: ...
+
+    @overload
+    def __ge__(self, other: _TD64Like_co, /) -> bool_: ...
+    @overload
+    def __ge__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ...
+    @overload
+    def __ge__(self, other: _SupportsLE, /) -> bool_: ...

 class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]):
     @property
@@ -5465,10 +5499,33 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]):
     @overload
     def __rsub__(self, x: datetime64, /) -> timedelta64: ...

-    __lt__: _ComparisonOpLT[datetime64, _ArrayLikeDT64_co]
-    __le__: _ComparisonOpLE[datetime64, _ArrayLikeDT64_co]
-    __gt__: _ComparisonOpGT[datetime64, _ArrayLikeDT64_co]
-    __ge__: _ComparisonOpGE[datetime64, _ArrayLikeDT64_co]
+    @overload
+    def __lt__(self, other: datetime64, /) -> bool_: ...
+    @overload
+    def __lt__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ...
+    @overload
+    def __lt__(self, other: _SupportsGT, /) -> bool_: ...
+
+    @overload
+    def __le__(self, other: datetime64, /) -> bool_: ...
+    @overload
+    def __le__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ...
+    @overload
+    def __le__(self, other: _SupportsGE, /) -> bool_: ...
+
+    @overload
+    def __gt__(self, other: datetime64, /) -> bool_: ...
+    @overload
+    def __gt__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ...
+    @overload
+    def __gt__(self, other: _SupportsLT, /) -> bool_: ...
+
+    @overload
+    def __ge__(self, other: datetime64, /) -> bool_: ...
+    @overload
+    def __ge__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ...
+    @overload
+    def __ge__(self, other: _SupportsLE, /) -> bool_: ...

 class flexible(_RealMixin, generic[_FlexibleItemT_co], Generic[_FlexibleItemT_co]):
     @abstractmethod
diff --git a/numpy/_typing/_callable.pyi b/numpy/_typing/_callable.pyi
deleted file mode 100644
index a4d4f3452a12..000000000000
--- a/numpy/_typing/_callable.pyi
+++ /dev/null
@@ -1,87 +0,0 @@
-"""
-A module with various ``typing.Protocol`` subclasses that implement
-the ``__call__`` magic method.
-
-See the `Mypy documentation`_ on protocols for more details.
-
-.. _`Mypy documentation`: https://mypy.readthedocs.io/en/stable/protocols.html#callback-protocols
-
-"""
-
-from typing import Any, Protocol, TypeVar, final, overload, type_check_only
-
-import numpy as np
-
-from ._array_like import NDArray
-from ._nested_sequence import _NestedSequence
-
-_T1_contra = TypeVar("_T1_contra", contravariant=True)
-_T2_contra = TypeVar("_T2_contra", contravariant=True)
-
-@final
-@type_check_only
-class _SupportsLT(Protocol):
-    def __lt__(self, other: Any, /) -> Any: ...
-
-@final
-@type_check_only
-class _SupportsLE(Protocol):
-    def __le__(self, other: Any, /) -> Any: ...
-
-@final
-@type_check_only
-class _SupportsGT(Protocol):
-    def __gt__(self, other: Any, /) -> Any: ...
-
-@final
-@type_check_only
-class _SupportsGE(Protocol):
-    def __ge__(self, other: Any, /) -> Any: ...
-
-@final
-@type_check_only
-class _ComparisonOpLT(Protocol[_T1_contra, _T2_contra]):
-    @overload
-    def __call__(self, other: _T1_contra, /) -> np.bool: ...
-    @overload
-    def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ...
-    @overload
-    def __call__(self, other: _NestedSequence[_SupportsGT], /) -> NDArray[np.bool]: ...
-    @overload
-    def __call__(self, other: _SupportsGT, /) -> np.bool: ...
-
-@final
-@type_check_only
-class _ComparisonOpLE(Protocol[_T1_contra, _T2_contra]):
-    @overload
-    def __call__(self, other: _T1_contra, /) -> np.bool: ...
-    @overload
-    def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ...
-    @overload
-    def __call__(self, other: _NestedSequence[_SupportsGE], /) -> NDArray[np.bool]: ...
-    @overload
-    def __call__(self, other: _SupportsGE, /) -> np.bool: ...
-
-@final
-@type_check_only
-class _ComparisonOpGT(Protocol[_T1_contra, _T2_contra]):
-    @overload
-    def __call__(self, other: _T1_contra, /) -> np.bool: ...
-    @overload
-    def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ...
-    @overload
-    def __call__(self, other: _NestedSequence[_SupportsLT], /) -> NDArray[np.bool]: ...
-    @overload
-    def __call__(self, other: _SupportsLT, /) -> np.bool: ...
-
-@final
-@type_check_only
-class _ComparisonOpGE(Protocol[_T1_contra, _T2_contra]):
-    @overload
-    def __call__(self, other: _T1_contra, /) -> np.bool: ...
- @overload - def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... - @overload - def __call__(self, other: _NestedSequence[_SupportsGT], /) -> NDArray[np.bool]: ... - @overload - def __call__(self, other: _SupportsGT, /) -> np.bool: ... diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index ac4114abadd4..491bce43fdae 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -3,7 +3,7 @@ from typing import Any, assert_type import numpy as np import numpy.typing as npt -from numpy._typing import _32Bit, _64Bit, _128Bit +from numpy._typing import _64Bit, _128Bit b: bool c: complex From 31a84e11810fb8b4b7fea3e512e8e23e8e9d5fca Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 10 Sep 2025 19:01:02 +0200 Subject: [PATCH 0429/1018] TYP: fix ``ndarray.strides`` decorator order --- numpy/__init__.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index ed8fa9a93eda..21693f4fd662 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2162,8 +2162,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def shape(self, value: _ShapeLike) -> None: ... @property def strides(self) -> _Shape: ... - @deprecated("Setting the strides on a NumPy array has been deprecated in NumPy 2.4") @strides.setter + @deprecated("Setting the strides on a NumPy array has been deprecated in NumPy 2.4") def strides(self, value: _ShapeLike) -> None: ... def byteswap(self, inplace: builtins.bool = ...) -> Self: ... def fill(self, value: Any, /) -> None: ... From dcb8abd45dab5c166ab7d9c5113e3d2f55ef5043 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Sep 2025 17:08:55 +0000 Subject: [PATCH 0430/1018] MAINT: Bump actions/checkout from 4.2.2 to 5.0.0 Bumps [actions/checkout](https://github.com/actions/checkout) from 4.2.2 to 5.0.0. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v4.2.2...08c6903cd8c0fde910a37f88322edcfb5dd907a8) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: 5.0.0 dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/linux-ppc64le.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linux-ppc64le.yml b/.github/workflows/linux-ppc64le.yml index 223da69a7b8d..c561c3be4611 100644 --- a/.github/workflows/linux-ppc64le.yml +++ b/.github/workflows/linux-ppc64le.yml @@ -23,7 +23,7 @@ jobs: runs-on: ubuntu-24.04-ppc64le name: "Native PPC64LE" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true From ccbfe25034310d4d48598ae5b0d2e2f89c8e14bd Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 10 Sep 2025 19:11:21 +0200 Subject: [PATCH 0431/1018] TYP: remove unused ``# type: ignore`` comments in ``__init__.pyi`` --- numpy/__init__.pyi | 58 +++++++++++++++++++++++----------------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index ed8fa9a93eda..f1194e0855ff 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2343,7 +2343,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def dot(self, b: _ScalarLike_co, out: None = ...) -> NDArray[Any]: ... @overload - def dot(self, b: ArrayLike, out: None = ...) -> Any: ... # type: ignore[misc] + def dot(self, b: ArrayLike, out: None = ...) -> Any: ... @overload def dot(self, b: ArrayLike, out: _ArrayT) -> _ArrayT: ... @@ -2355,7 +2355,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def put(self, /, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... @overload - def searchsorted( # type: ignore[misc] + def searchsorted( self, # >= 1D array v: _ScalarLike_co, # 0D array-like side: _SortSide = ..., @@ -2409,7 +2409,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): ) -> _ArrayT: ... @overload - def take( # type: ignore[misc] + def take( self: NDArray[_ScalarT], indices: _IntLike_co, axis: SupportsIndex | None = ..., @@ -2417,7 +2417,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): mode: _ModeKind = ..., ) -> _ScalarT: ... @overload - def take( # type: ignore[misc] + def take( self, indices: _ArrayLikeInt_co, axis: SupportsIndex | None = ..., @@ -3045,7 +3045,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] @overload - def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload def __mul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload @@ -3087,7 +3087,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] @overload - def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload def __rmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... 
@overload @@ -3215,7 +3215,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] @overload - def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload def __rfloordiv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[int64]: ... @overload @@ -3294,9 +3294,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... @overload - def __lshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __lshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3305,9 +3305,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __lshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rlshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __rlshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3316,9 +3316,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rlshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __rshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3327,9 +3327,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rrshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __rrshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... 
# type: ignore[overload-overlap] @overload def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3338,9 +3338,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rrshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __and__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __and__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3349,9 +3349,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __and__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __rand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3360,9 +3360,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rand__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __xor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __xor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3371,9 +3371,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __xor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rxor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __rxor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3382,9 +3382,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rxor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __or__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __or__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... 
@overload - def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3393,9 +3393,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __or__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __ror__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __ror__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3651,7 +3651,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): ) -> Any: ... @overload - def take( # type: ignore[misc] + def take( self, indices: _IntLike_co, axis: SupportsIndex | None = ..., @@ -3659,7 +3659,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): mode: _ModeKind = ..., ) -> Self: ... @overload - def take( # type: ignore[misc] + def take( self, indices: _ArrayLikeInt_co, axis: SupportsIndex | None = ..., From 84a88828a34581857ce5fb7fb0844fa4604ff348 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 10 Sep 2025 19:15:38 +0200 Subject: [PATCH 0432/1018] TYP: ignore ``deprecated`` mypy errors in ``__init__.pyi`` --- numpy/__init__.pyi | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index f1194e0855ff..700798001863 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -16,7 +16,7 @@ from numpy.__config__ import show as show_config from numpy._pytesttester import PytestTester from numpy._core._internal import _ctypes -from numpy._typing import ( +from numpy._typing import ( # type: ignore[deprecated] # Arrays ArrayLike, NDArray, @@ -431,7 +431,7 @@ from numpy.lib._arraypad_impl import ( pad, ) -from numpy.lib._arraysetops_impl import ( +from numpy.lib._arraysetops_impl import ( # type: ignore[deprecated] ediff1d, in1d, intersect1d, @@ -446,7 +446,7 @@ from numpy.lib._arraysetops_impl import ( unique_values, ) -from numpy.lib._function_base_impl import ( +from numpy.lib._function_base_impl import ( # type: ignore[deprecated] select, piecewise, trim_zeros, @@ -549,7 +549,7 @@ from numpy.lib._polynomial_impl import ( polyfit, ) -from numpy.lib._shape_base_impl import ( +from numpy.lib._shape_base_impl import ( # type: ignore[deprecated] column_stack, row_stack, dstack, From 7017d097a6785876934e163b42ed6e95d5f24029 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 10 Sep 2025 19:18:57 +0200 Subject: [PATCH 0433/1018] TYP: ignore LSP errors for ``flatiter.__hash__ = None`` --- numpy/__init__.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 700798001863..c04f12d86db7 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1605,7 +1605,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @final class flatiter(Generic[_ArrayT_co]): - __hash__: ClassVar[None] + __hash__: ClassVar[None] # type: ignore[assignment] # 
pyright: ignore[reportIncompatibleMethodOverride] @property def base(self) -> _ArrayT_co: ... @property From b3a680080099158e48eea0e35d09bcfbfe6d0da8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 11 Sep 2025 17:07:21 +0000 Subject: [PATCH 0434/1018] MAINT: Bump github/codeql-action from 3.30.2 to 3.30.3 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.30.2 to 3.30.3. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/d3678e237b9c32a6c9bffb3315c335f976f3549f...192325c86100d080feab897ff886c34abd4c83a3) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.30.3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 13619ca762c3..6ea330ea2637 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@d3678e237b9c32a6c9bffb3315c335f976f3549f # v3.30.2 + uses: github/codeql-action/init@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@d3678e237b9c32a6c9bffb3315c335f976f3549f # v3.30.2 + uses: github/codeql-action/autobuild@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@d3678e237b9c32a6c9bffb3315c335f976f3549f # v3.30.2 + uses: github/codeql-action/analyze@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 9eb76bdc0525..5d8f1b91cbae 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@d3678e237b9c32a6c9bffb3315c335f976f3549f # v2.1.27 + uses: github/codeql-action/upload-sarif@192325c86100d080feab897ff886c34abd4c83a3 # v2.1.27 with: sarif_file: results.sarif From 9f7642d6ac96800366cbb62dff11ef02c069fc08 Mon Sep 17 00:00:00 2001 From: Raghuveer Date: Thu, 11 Sep 2025 10:40:41 -0700 Subject: [PATCH 0435/1018] Mark some functions [[maybe_unused]] --- numpy/_core/src/npysort/x86-simd-sort | 2 +- numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp | 8 -------- 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/numpy/_core/src/npysort/x86-simd-sort b/numpy/_core/src/npysort/x86-simd-sort index fe2b5bf62275..8f80b71c1326 160000 --- a/numpy/_core/src/npysort/x86-simd-sort +++ b/numpy/_core/src/npysort/x86-simd-sort @@ -1 +1 @@ -Subproject commit fe2b5bf62275ba6173c53b6c5b747c362b641d5e +Subproject commit 8f80b71c1326b76ce72faf9d86b399e2b81f5bd8 diff --git a/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp b/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp index 063e713c5256..11509e79c68b 100644 --- a/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp +++ b/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp @@ -17,11 +17,7 @@ namespace np { namespace qsort_simd { */ template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(Half *arr, npy_intp num, npy_intp kth) { -#if defined(NPY_HAVE_AVX512_SPR) x86simdsortStatic::qselect(reinterpret_cast<_Float16*>(arr), kth, num, true); -#else - avx512_qselect_fp16(reinterpret_cast(arr), kth, num, true, false); -#endif } template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(uint16_t *arr, npy_intp num, npy_intp kth) @@ -39,11 +35,7 @@ template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(int16_t *arr, npy_intp num, npy_ */ template<> void NPY_CPU_DISPATCH_CURFX(QSort)(Half *arr, npy_intp size) { -#if defined(NPY_HAVE_AVX512_SPR) x86simdsortStatic::qsort(reinterpret_cast<_Float16*>(arr), size, true); -#else - avx512_qsort_fp16(reinterpret_cast(arr), size, true, false); -#endif } template<> void NPY_CPU_DISPATCH_CURFX(QSort)(uint16_t *arr, npy_intp size) { From cb207265fd03b32ffa5e004d00cb2358d6504113 Mon Sep 17 00:00:00 2001 From: Raghuveer Date: Thu, 11 Sep 2025 12:13:35 -0700 Subject: [PATCH 0436/1018] Revert "Mark some functions [[maybe_unused]]" This reverts commit 9f7642d6ac96800366cbb62dff11ef02c069fc08. 
--- numpy/_core/src/npysort/x86-simd-sort | 2 +- numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/npysort/x86-simd-sort b/numpy/_core/src/npysort/x86-simd-sort index 8f80b71c1326..fe2b5bf62275 160000 --- a/numpy/_core/src/npysort/x86-simd-sort +++ b/numpy/_core/src/npysort/x86-simd-sort @@ -1 +1 @@ -Subproject commit 8f80b71c1326b76ce72faf9d86b399e2b81f5bd8 +Subproject commit fe2b5bf62275ba6173c53b6c5b747c362b641d5e diff --git a/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp b/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp index 11509e79c68b..063e713c5256 100644 --- a/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp +++ b/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp @@ -17,7 +17,11 @@ namespace np { namespace qsort_simd { */ template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(Half *arr, npy_intp num, npy_intp kth) { +#if defined(NPY_HAVE_AVX512_SPR) x86simdsortStatic::qselect(reinterpret_cast<_Float16*>(arr), kth, num, true); +#else + avx512_qselect_fp16(reinterpret_cast(arr), kth, num, true, false); +#endif } template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(uint16_t *arr, npy_intp num, npy_intp kth) @@ -35,7 +39,11 @@ template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(int16_t *arr, npy_intp num, npy_ */ template<> void NPY_CPU_DISPATCH_CURFX(QSort)(Half *arr, npy_intp size) { +#if defined(NPY_HAVE_AVX512_SPR) x86simdsortStatic::qsort(reinterpret_cast<_Float16*>(arr), size, true); +#else + avx512_qsort_fp16(reinterpret_cast(arr), size, true, false); +#endif } template<> void NPY_CPU_DISPATCH_CURFX(QSort)(uint16_t *arr, npy_intp size) { From f9583306b74a11b09fc7cb016b5cb9429692a43a Mon Sep 17 00:00:00 2001 From: Raghuveer Date: Thu, 11 Sep 2025 12:14:28 -0700 Subject: [PATCH 0437/1018] x86-simd-sort: Mark some functions [[maybe_unused]] --- numpy/_core/src/npysort/x86-simd-sort | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/npysort/x86-simd-sort b/numpy/_core/src/npysort/x86-simd-sort index fe2b5bf62275..6a7a01da4b0d 160000 --- a/numpy/_core/src/npysort/x86-simd-sort +++ b/numpy/_core/src/npysort/x86-simd-sort @@ -1 +1 @@ -Subproject commit fe2b5bf62275ba6173c53b6c5b747c362b641d5e +Subproject commit 6a7a01da4b0dfde108aa626a2364c954e2c50fe1 From b84208a68567d38f8605dfe396f25ba5a72a2039 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 11 Sep 2025 13:23:44 -0600 Subject: [PATCH 0438/1018] TST: Simplify fixture that is equivalent to a function call --- numpy/_core/tests/string_testing.py | 10 ++++++++++ numpy/_core/tests/test_multithreading.py | 14 +++++--------- numpy/_core/tests/test_stringdtype.py | 8 +++++--- numpy/conftest.py | 8 -------- 4 files changed, 20 insertions(+), 20 deletions(-) create mode 100644 numpy/_core/tests/string_testing.py diff --git a/numpy/_core/tests/string_testing.py b/numpy/_core/tests/string_testing.py new file mode 100644 index 000000000000..9022b33bc41a --- /dev/null +++ b/numpy/_core/tests/string_testing.py @@ -0,0 +1,10 @@ +import string +import numpy as np + +def random_string_list(): + chars = list(string.ascii_letters + string.digits) + chars = np.array(chars, dtype="U1") + ret = np.random.choice(chars, size=100 * 10, replace=True) + return ret.view("U100") + + diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index b81c989ae5c7..17199ec75f6e 100644 --- a/numpy/_core/tests/test_multithreading.py +++ 
b/numpy/_core/tests/test_multithreading.py @@ -1,11 +1,11 @@ import concurrent.futures -import string import threading import pytest import numpy as np from numpy._core import _rational_tests +from numpy._core.tests.string_testing import random_string_list from numpy.testing import IS_64BIT, IS_WASM from numpy.testing._private.utils import run_threaded @@ -218,16 +218,12 @@ def func(arr): assert arr.dtype is dt -def test_stringdtype_multithreaded_access_and_mutation( - dtype, random_string_list): +def test_stringdtype_multithreaded_access_and_mutation(dtype): # this test uses an RNG and may crash or cause deadlocks if there is a # threading bug rng = np.random.default_rng(0x4D3D3D3) - chars = list(string.ascii_letters + string.digits) - chars = np.array(chars, dtype="U1") - ret = rng.choice(chars, size=100 * 10, replace=True) - random_string_list = ret.view("U100") + string_list = random_string_list() def func(arr): rnd = rng.random() @@ -247,10 +243,10 @@ def func(arr): else: np.multiply(arr, np.int64(2), out=arr) else: - arr[:] = random_string_list + arr[:] = string_list with concurrent.futures.ThreadPoolExecutor(max_workers=8) as tpe: - arr = np.array(random_string_list, dtype=dtype) + arr = np.array(string_list, dtype=dtype) futures = [tpe.submit(func, arr) for _ in range(500)] for f in futures: diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 5d2364bf8cfc..817c7d5846b6 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -9,6 +9,7 @@ import numpy as np from numpy._core.tests._natype import get_stringdtype_dtype as get_dtype, pd_NA +from numpy._core.tests.string_testing import random_string_list from numpy.dtypes import StringDType from numpy.testing import IS_PYPY, assert_array_equal @@ -266,12 +267,13 @@ def test_bytes_casts(self, dtype, strings): sarr.astype("S20") -def test_additional_unicode_cast(random_string_list, dtype): - arr = np.array(random_string_list, dtype=dtype) +def test_additional_unicode_cast(dtype): + string_list = random_string_list() + arr = np.array(string_list, dtype=dtype) # test that this short-circuits correctly assert_array_equal(arr, arr.astype(arr.dtype)) # tests the casts via the comparison promoter - assert_array_equal(arr, arr.astype(random_string_list.dtype)) + assert_array_equal(arr, arr.astype(string_list.dtype)) def test_insert_scalar(dtype, string_list): diff --git a/numpy/conftest.py b/numpy/conftest.py index f411588748e0..5df9ac73baf4 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -230,14 +230,6 @@ def warnings_errors_and_rng(test=None): ] -@pytest.fixture -def random_string_list(): - chars = list(string.ascii_letters + string.digits) - chars = np.array(chars, dtype="U1") - ret = np.random.choice(chars, size=100 * 10, replace=True) - return ret.view("U100") - - @pytest.fixture(params=[True, False]) def coerce(request): return request.param From c416b8ee4219b481eadd40b76c9c5915c4c5c849 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 11 Sep 2025 13:27:21 -0600 Subject: [PATCH 0439/1018] MAINT: delete unused variables in unary logical dispatch --- numpy/_core/src/umath/loops_logical.dispatch.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/numpy/_core/src/umath/loops_logical.dispatch.cpp b/numpy/_core/src/umath/loops_logical.dispatch.cpp index a6f174218bd1..c05584f467aa 100644 --- a/numpy/_core/src/umath/loops_logical.dispatch.cpp +++ b/numpy/_core/src/umath/loops_logical.dispatch.cpp @@ -108,7 +108,6 @@ struct UnaryLogicalTraits; 
template<> struct UnaryLogicalTraits { - static constexpr bool is_not = true; static constexpr auto scalar_op = std::equal_to{}; #if NPY_HWY @@ -121,7 +120,6 @@ struct UnaryLogicalTraits { template<> struct UnaryLogicalTraits { - static constexpr bool is_not = false; static constexpr auto scalar_op = std::not_equal_to{}; #if NPY_HWY From 965a8ccb9b14e74822cb2f21c846743c4d7cef33 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 11 Sep 2025 15:07:47 -0600 Subject: [PATCH 0440/1018] TST: move fixtures used by stringdtype tests to test_stringdtype.py --- numpy/_core/tests/_natype.py | 8 ---- numpy/_core/tests/string_testing.py | 10 ----- numpy/_core/tests/test_multithreading.py | 8 ++-- numpy/_core/tests/test_stringdtype.py | 48 ++++++++++++++++++++++-- numpy/conftest.py | 21 ----------- 5 files changed, 48 insertions(+), 47 deletions(-) delete mode 100644 numpy/_core/tests/string_testing.py diff --git a/numpy/_core/tests/_natype.py b/numpy/_core/tests/_natype.py index 1c2175b35933..767d4d3832ab 100644 --- a/numpy/_core/tests/_natype.py +++ b/numpy/_core/tests/_natype.py @@ -195,11 +195,3 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): pd_NA = NAType() - - -def get_stringdtype_dtype(na_object, coerce=True): - # explicit is check for pd_NA because != with pd_NA returns pd_NA - if na_object is pd_NA or na_object != "unset": - return np.dtypes.StringDType(na_object=na_object, coerce=coerce) - else: - return np.dtypes.StringDType(coerce=coerce) diff --git a/numpy/_core/tests/string_testing.py b/numpy/_core/tests/string_testing.py deleted file mode 100644 index 9022b33bc41a..000000000000 --- a/numpy/_core/tests/string_testing.py +++ /dev/null @@ -1,10 +0,0 @@ -import string -import numpy as np - -def random_string_list(): - chars = list(string.ascii_letters + string.digits) - chars = np.array(chars, dtype="U1") - ret = np.random.choice(chars, size=100 * 10, replace=True) - return ret.view("U100") - - diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 17199ec75f6e..6563abde8ff4 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -5,7 +5,7 @@ import numpy as np from numpy._core import _rational_tests -from numpy._core.tests.string_testing import random_string_list +from numpy._core.tests.test_stringdtype import random_unicode_string_list from numpy.testing import IS_64BIT, IS_WASM from numpy.testing._private.utils import run_threaded @@ -218,12 +218,12 @@ def func(arr): assert arr.dtype is dt -def test_stringdtype_multithreaded_access_and_mutation(dtype): +def test_stringdtype_multithreaded_access_and_mutation(): # this test uses an RNG and may crash or cause deadlocks if there is a # threading bug rng = np.random.default_rng(0x4D3D3D3) - string_list = random_string_list() + string_list = random_unicode_string_list() def func(arr): rnd = rng.random() @@ -246,7 +246,7 @@ def func(arr): arr[:] = string_list with concurrent.futures.ThreadPoolExecutor(max_workers=8) as tpe: - arr = np.array(string_list, dtype=dtype) + arr = np.array(string_list, dtype="T") futures = [tpe.submit(func, arr) for _ in range(500)] for f in futures: diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 817c7d5846b6..196931f0ef10 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -3,25 +3,63 @@ import os import pickle import sys +import string import tempfile import pytest import numpy as np -from 
numpy._core.tests._natype import get_stringdtype_dtype as get_dtype, pd_NA
-from numpy._core.tests.string_testing import random_string_list
+from numpy._core.tests._natype import pd_NA
 from numpy.dtypes import StringDType
 from numpy.testing import IS_PYPY, assert_array_equal
 
 
+def random_unicode_string_list():
+    """Returns an array of 10 100-character strings containing random text"""
+    chars = list(string.ascii_letters + string.digits)
+    chars = np.array(chars, dtype="U1")
+    ret = np.random.choice(chars, size=100 * 10, replace=True)
+    return ret.view("U100")
+
+
+def get_dtype(na_object, coerce=True):
+    """Helper to work around pd_NA boolean behavior"""
+    # explicit is check for pd_NA because != with pd_NA returns pd_NA
+    if na_object is pd_NA or na_object != "unset":
+        return np.dtypes.StringDType(na_object=na_object, coerce=coerce)
+    else:
+        return np.dtypes.StringDType(coerce=coerce)
+
+
+@pytest.fixture(params=[True, False])
+def coerce(request):
+    """Coerce input to strings or raise an error for non-string input"""
+    return request.param
+
+
+@pytest.fixture(
+    params=["unset", None, pd_NA, np.nan, float("nan"), "__nan__"],
+    ids=["unset", "None", "pandas.NA", "np.nan", "float('nan')", "string nan"],
+)
+def na_object(request):
+    """Possible values for the missing data sentinel"""
+    return request.param
+
+
+@pytest.fixture()
+def dtype(na_object, coerce):
+    """Cartesian product of missing data sentinel and string coercion options"""
+    return get_dtype(na_object, coerce)
+
 
 @pytest.fixture
 def string_list():
+    """Mix of short and long strings, some with unicode, some without"""
     return ["abc", "def", "ghi" * 10, "A¢☃€ 😊" * 100, "Abc" * 1000, "DEF"]
 
 
-# second copy for cast tests to do a cartesian product over dtypes
 @pytest.fixture(params=[True, False])
 def coerce2(request):
+    """Second copy of the coerce fixture for tests that need two instances"""
     return request.param
 
 
@@ -30,11 +68,13 @@ def coerce2(request):
     ids=["unset", "None", "pandas.NA", "np.nan", "float('nan')", "string nan"],
 )
 def na_object2(request):
+    """Second copy of the na_object fixture for tests that need two instances"""
     return request.param
 
 
 @pytest.fixture()
 def dtype2(na_object2, coerce2):
+    """Second copy of the dtype fixture for tests that need two instances"""
     # explicit is check for pd_NA because != with pd_NA returns pd_NA
     if na_object2 is pd_NA or na_object2 != "unset":
         return StringDType(na_object=na_object2, coerce=coerce2)
@@ -268,12 +308,7 @@ def test_bytes_casts(self, dtype, strings):
         sarr.astype("S20")
 
 
 def test_additional_unicode_cast(dtype):
-    string_list = random_string_list()
+    string_list = random_unicode_string_list()
     arr = np.array(string_list, dtype=dtype)
     # test that this short-circuits correctly
     assert_array_equal(arr, arr.astype(arr.dtype))
diff --git a/numpy/conftest.py b/numpy/conftest.py
index 5df9ac73baf4..71ece01cc3b5 100644
--- a/numpy/conftest.py
+++ b/numpy/conftest.py
@@ -2,7 +2,6 @@
 Pytest configuration and fixtures for the Numpy test suite.
""" import os -import string import sys import tempfile import warnings @@ -12,9 +11,7 @@ import pytest import numpy -import numpy as np from numpy._core._multiarray_tests import get_fpu_mode -from numpy._core.tests._natype import get_stringdtype_dtype, pd_NA from numpy.testing._private.utils import NOGIL_BUILD try: @@ -228,21 +225,3 @@ def warnings_errors_and_rng(test=None): 'numpy/random/_examples', 'numpy/f2py/_backends/_distutils.py', ] - - -@pytest.fixture(params=[True, False]) -def coerce(request): - return request.param - - -@pytest.fixture( - params=["unset", None, pd_NA, np.nan, float("nan"), "__nan__"], - ids=["unset", "None", "pandas.NA", "np.nan", "float('nan')", "string nan"], -) -def na_object(request): - return request.param - - -@pytest.fixture() -def dtype(na_object, coerce): - return get_stringdtype_dtype(na_object, coerce) From 7445147c273deaff9d82746ab413c05b90b10552 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 11 Sep 2025 15:13:57 -0600 Subject: [PATCH 0441/1018] TST: simplify NAType code adapted from Pandas --- numpy/_core/tests/_natype.py | 55 +----------------------------------- 1 file changed, 1 insertion(+), 54 deletions(-) diff --git a/numpy/_core/tests/_natype.py b/numpy/_core/tests/_natype.py index 767d4d3832ab..539dfd2b36e1 100644 --- a/numpy/_core/tests/_natype.py +++ b/numpy/_core/tests/_natype.py @@ -74,8 +74,7 @@ def __bool__(self): raise TypeError("boolean value of NA is ambiguous") def __hash__(self): - exponent = 31 if is_32bit else 61 - return 2**exponent - 1 + return 2**61 - 1 def __reduce__(self): return "pd_NA" @@ -114,33 +113,6 @@ def __reduce__(self): __abs__ = _create_unary_propagating_op("__abs__") __invert__ = _create_unary_propagating_op("__invert__") - # pow has special - def __pow__(self, other): - if other is pd_NA: - return pd_NA - elif isinstance(other, (numbers.Number, np.bool)): - if other == 0: - # returning positive is correct for +/- 0. 
- return type(other)(1) - else: - return pd_NA - elif util.is_array(other): - return np.where(other == 0, other.dtype.type(1), pd_NA) - - return NotImplemented - - def __rpow__(self, other): - if other is pd_NA: - return pd_NA - elif isinstance(other, (numbers.Number, np.bool)): - if other == 1: - return other - else: - return pd_NA - elif util.is_array(other): - return np.where(other == 1, other, pd_NA) - return NotImplemented - # Logical ops using Kleene logic def __and__(self, other): @@ -168,30 +140,5 @@ def __xor__(self, other): __rxor__ = __xor__ - __array_priority__ = 1000 - _HANDLED_TYPES = (np.ndarray, numbers.Number, str, np.bool) - - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - types = self._HANDLED_TYPES + (NAType,) - for x in inputs: - if not isinstance(x, types): - return NotImplemented - - if method != "__call__": - raise ValueError(f"ufunc method '{method}' not supported for NA") - result = maybe_dispatch_ufunc_to_dunder_op( - self, ufunc, method, *inputs, **kwargs - ) - if result is NotImplemented: - # For a NumPy ufunc that's not a binop, like np.logaddexp - index = next(i for i, x in enumerate(inputs) if x is pd_NA) - result = np.broadcast_arrays(*inputs)[index] - if result.ndim == 0: - result = result.item() - if ufunc.nout > 1: - result = (pd_NA,) * ufunc.nout - - return result - pd_NA = NAType() From bb914390981a524fa2485c33d56e2b69d28ceae8 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 11 Sep 2025 15:14:57 -0600 Subject: [PATCH 0442/1018] MAINT: run linter --- numpy/_core/tests/test_stringdtype.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 196931f0ef10..7a253c981c4d 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -2,8 +2,8 @@ import itertools import os import pickle -import sys import string +import sys import tempfile import pytest From ef9fcc952905ba0c7636295ff413ee118bf5a0ee Mon Sep 17 00:00:00 2001 From: kibitzing Date: Fri, 12 Sep 2025 12:06:13 +0900 Subject: [PATCH 0443/1018] BUG: remove object dtype check from max_depth validation in PyArray_FromAny_int --- numpy/_core/src/multiarray/ctors.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 38da6f314848..5bcf1cdf7ba3 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -1593,7 +1593,7 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, npy_free_coercion_cache(cache); goto cleanup; } - if (ndim > max_depth && (in_DType == NULL || in_DType->type_num != NPY_OBJECT)) { + if (ndim > max_depth) { PyErr_SetString(PyExc_ValueError, "object too deep for desired array"); npy_free_coercion_cache(cache); From eef7de6705144bbd271c4afb2455478433a97734 Mon Sep 17 00:00:00 2001 From: kibitzing Date: Fri, 12 Sep 2025 12:12:56 +0900 Subject: [PATCH 0444/1018] TST: add test for "too deep" error when ndmax < ndim --- numpy/_core/tests/test_multiarray.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 2d8587ad5ac1..943e9642ec3c 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -1354,6 +1354,12 @@ def test_ndmax_greather_than_NPY_MAXDIMS(self): with pytest.raises(ValueError, match="ndmax must be in the range"): np.array(data, ndmax=65) + def test_ndmax_less_than_ndim(self): + 
# np.array input bypasses recursive inference, allowing ndim > ndmax validation + data = np.array([[1, 2, 3], [4, 5, 6]]) + with pytest.raises(ValueError, match="object too deep for desired array"): + np.array(data, ndmax=1, dtype=object) + class TestStructured: def test_subarray_field_access(self): From d82354a9cdb941163cc0320c913e66288d8d4720 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 13 Sep 2025 02:09:00 +0200 Subject: [PATCH 0445/1018] MAINT: bump `mypy` to `1.18.1` --- environment.yml | 2 +- requirements/test_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index dd7fa1a83e5b..61eed13bfd67 100644 --- a/environment.yml +++ b/environment.yml @@ -25,7 +25,7 @@ dependencies: - hypothesis # For type annotations - typing_extensions>=4.5.0 - - mypy=1.17.1 + - mypy=1.18.1 - orjson # makes mypy faster # For building docs - sphinx>=4.5.0 diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 44f73a844591..95396afaf5a0 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -12,7 +12,7 @@ pytest-timeout # For testing types. Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # NOTE: Keep mypy in sync with environment.yml -mypy==1.17.1; platform_python_implementation != "PyPy" +mypy==1.18.1; platform_python_implementation != "PyPy" typing_extensions>=4.5.0 # for optional f2py encoding detection charset-normalizer From 1ccd4805b8502ee52cc3c94fc9567b0cc7418909 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 13 Sep 2025 02:12:50 +0200 Subject: [PATCH 0446/1018] TYP: fix mypy errors --- numpy/typing/tests/data/reveal/array_constructors.pyi | 8 ++++---- numpy/typing/tests/data/reveal/testing.pyi | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index e95e85093f6a..e86d96c884c0 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -227,19 +227,19 @@ assert_type(np.atleast_3d(A), npt.NDArray[np.float64]) assert_type(np.atleast_3d(A, A), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) assert_type(np.atleast_3d(A, A, A), tuple[npt.NDArray[np.float64], ...]) -assert_type(np.vstack([A, A]), np.ndarray[Any, Any]) # pyright correctly infers this as NDArray[float64] +assert_type(np.vstack([A, A]), npt.NDArray[np.float64]) assert_type(np.vstack([A, A], dtype=np.float32), npt.NDArray[np.float32]) assert_type(np.vstack([A, C]), npt.NDArray[Any]) assert_type(np.vstack([C, C]), npt.NDArray[Any]) -assert_type(np.hstack([A, A]), np.ndarray[Any, Any]) # pyright correctly infers this as NDArray[float64] +assert_type(np.hstack([A, A]), npt.NDArray[np.float64]) assert_type(np.hstack([A, A], dtype=np.float32), npt.NDArray[np.float32]) -assert_type(np.stack([A, A]), np.ndarray[Any, Any]) # pyright correctly infers this as NDArray[float64] +assert_type(np.stack([A, A]), npt.NDArray[np.float64]) assert_type(np.stack([A, A], dtype=np.float32), npt.NDArray[np.float32]) assert_type(np.stack([A, C]), npt.NDArray[Any]) assert_type(np.stack([C, C]), npt.NDArray[Any]) -assert_type(np.stack([A, A], axis=0), np.ndarray[Any, Any]) # pyright correctly infers this as NDArray[float64] +assert_type(np.stack([A, A], axis=0), npt.NDArray[np.float64]) assert_type(np.stack([A, A], out=B), SubClass[np.float64]) assert_type(np.block([[A, A], [A, A]]), 
npt.NDArray[Any]) # pyright correctly infers this as NDArray[float64]

diff --git a/numpy/typing/tests/data/reveal/testing.pyi b/numpy/typing/tests/data/reveal/testing.pyi
index 1533cd27955b..34fbc5feeb41 100644
--- a/numpy/typing/tests/data/reveal/testing.pyi
+++ b/numpy/typing/tests/data/reveal/testing.pyi
@@ -173,7 +173,7 @@ assert_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, maxulp=2), npt.NDArray
 assert_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, dtype=np.float32), npt.NDArray[Any])
 
 assert_type(np.testing.assert_warns(RuntimeWarning), contextlib._GeneratorContextManager[None])  # type: ignore[deprecated]  # pyright: ignore[reportDeprecated]
-assert_type(np.testing.assert_warns(RuntimeWarning, func3, 5), bool)
+assert_type(np.testing.assert_warns(RuntimeWarning, func3, 5), bool)  # type: ignore[deprecated]  # pyright: ignore[reportDeprecated]
 
 def func4(a: int, b: str) -> bool: ...

From eb737242ead791065d400f124d60ad1d67bdcacb Mon Sep 17 00:00:00 2001
From: jorenham
Date: Sat, 13 Sep 2025 02:19:32 +0200
Subject: [PATCH 0447/1018] STY: obey `ruff`-sama

---
 numpy/typing/tests/data/reveal/array_constructors.pyi | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi
index e86d96c884c0..2d8b5fa3fd17 100644
--- a/numpy/typing/tests/data/reveal/array_constructors.pyi
+++ b/numpy/typing/tests/data/reveal/array_constructors.pyi
@@ -235,7 +235,7 @@ assert_type(np.vstack([C, C]), npt.NDArray[Any])
 assert_type(np.hstack([A, A]), npt.NDArray[np.float64])
 assert_type(np.hstack([A, A], dtype=np.float32), npt.NDArray[np.float32])
 
-assert_type(np.stack([A, A]), npt.NDArray[np.float64]) 
+assert_type(np.stack([A, A]), npt.NDArray[np.float64])
 assert_type(np.stack([A, A], dtype=np.float32), npt.NDArray[np.float32])
 assert_type(np.stack([A, C]), npt.NDArray[Any])
 assert_type(np.stack([C, C]), npt.NDArray[Any])

From 822665c9c3633e97fa599311b9b4dcc50bae2527 Mon Sep 17 00:00:00 2001
From: Sayed Adel
Date: Mon, 12 May 2025 14:38:03 +0300
Subject: [PATCH 0448/1018] ENH: Modulate dispatched x86 CPU features

**IMPORTANT NOTE**: The default setting for `cpu-baseline` on x86 has been
raised to the `x86-64-v2` microarchitecture. This can be changed to
`cpu-baseline=none` at build time to support older CPUs, though manual
SIMD optimizations for pre-2009 processors are no longer supported.

This patch reorganizes CPU build options by replacing individual x86
features with microarchitecture levels. This change aligns with the Google
Highway requirements and common Linux distribution practices.

This patch:

- Removes all individual x86 features and replaces them with three
  microarchitecture levels (`X86_V2`, `X86_V3`, `X86_V4`) commonly used by
  Linux distributions
- Raises the baseline to microarchitecture level 2 (replacing `SSE3`) since
  all known x86 CPUs since 2009 support it. This improves performance and
  reduces binary size
- Updates documentation to reflect these changes and to fit the current
  meson build system.
- Corrects the behavior of the `-` operator, which now excludes successor features that imply the excluded feature - Adds redirection via meson for removed feature names to avoid breaking builds - Removes compiler compatibility workarounds, so features like AVX512 without full mask operations will be considered unsupported rather than providing fallbacks Detailed CPU features changes: - Removes individual features (`SSE`, `SSE2`, `SSE3`, `SSSE3`, `SSE4_1`, `SSE4_2`, `POPCNT`) which are now part of the new group `X86_V2` - Removes AMD legacy features (`XOP`, `FMA4`) - Removes Xeon Phi support (`AVX512_KNL`, `AVX512_KNM`) which Intel has discontinued - Removes individual features (`AVX`, `AVX2`, `FMA3`, `F16C`) which are now part of the new group `X86_V3` - Removes individual features `AVX512F`, `AVX512CD` as a result of dropping Xeon Phi support - Renames group `AVX512_SKX` to `x86_v4` to align with microarchitecture level naming - Removes groups `AVX512_CLX` and `AVX512_CNL` (features available via `AVX512_ICL`) - Updates `AVX512_ICL` to include features (`VAES`, `GFNI`, `VPCLMULQDQ`) for alignment with Highway New Feature Group Hierarchy: ``` Name | Implies | Includes --------------|-------------|----------------------------------------------------------- X86_V2 | | SSE SSE2 SSE3 SSSE3 SSE4_1 SSE4_2 POPCNT CX16 LAHF X86_V3 | X86_V2 | AVX AVX2 FMA3 BMI BMI2 LZCNT F16C MOVBE X86_V4 | X86_V3 | AVX512F AVX512CD AVX512VL AVX512BW AVX512DQ AVX512_ICL | X86_V4 | AVX512VBMI AVX512VBMI2 AVX512VNNI AVX512BITALG | | AVX512VPOPCNTDQ AVX512IFMA VAES GFNI VPCLMULQDQ AVX512_SPR | AVX512_ICL | AVX512FP16 ``` These groups correspond to CPU generations: - `X86_V2`: x86-64-v2 microarchitectures (CPUs since 2009) - `X86_V3`: x86-64-v3 microarchitectures (CPUs since 2015) - `X86_V4`: x86-64-v4 microarchitectures (AVX-512 capable CPUs) - `AVX512_ICL`: Intel Ice Lake and similar CPUs - `AVX512_SPR`: Intel Sapphire Rapids and newer CPUs Note: On 32-bit x86, `cx16` is excluded from `X86_V2`. 
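Illustrative usage with the new groups (a sketch only; the commands pass the
group names introduced above through the existing meson-python setup args,
and the final feature set still depends on compiler support):

```
# dispatch everything the compiler supports except AVX-512;
# excluding X86_V4 also drops its successors AVX512_ICL/AVX512_SPR
python -m build --wheel -Csetup-args=-Dcpu-dispatch="max -X86_V4"

# raise the baseline for CPUs from 2015 onwards
python -m build --wheel -Csetup-args=-Dcpu-baseline="min+X86_V3"
```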
---
 .github/workflows/linux_simd.yml | 6 +-
 doc/release/upcoming_changes/28896.change.rst | 56 ++
 doc/source/reference/simd/build-options.rst | 602 ++++++++++--------
 doc/source/reference/simd/gen_features.py | 196 ------
 .../simd/generated_tables/compilers-diff.inc | 35 -
 .../simd/generated_tables/cpu_features.inc | 109 ----
 doc/source/reference/simd/log_example.txt | 141 ++--
 meson.options | 7 +-
 meson_cpu/meson.build | 50 +-
 meson_cpu/x86/meson.build | 262 ++------
 meson_cpu/x86/test_x86_v2.c | 69 ++
 meson_cpu/x86/test_x86_v3.c | 66 ++
 meson_cpu/x86/test_x86_v4.c | 88 +++
 numpy/_core/meson.build | 40 +-
 numpy/_core/src/common/npy_cpu_features.c | 169 +++--
 numpy/_core/src/common/npy_cpu_features.h | 21 +-
 numpy/_core/tests/test_cpu_dispatcher.py | 2 +-
 numpy/_core/tests/test_cpu_features.py | 41 +-
 18 files changed, 977 insertions(+), 983 deletions(-)
 create mode 100644 doc/release/upcoming_changes/28896.change.rst
 delete mode 100644 doc/source/reference/simd/gen_features.py
 delete mode 100644 doc/source/reference/simd/generated_tables/compilers-diff.inc
 delete mode 100644 doc/source/reference/simd/generated_tables/cpu_features.inc
 create mode 100644 meson_cpu/x86/test_x86_v2.c
 create mode 100644 meson_cpu/x86/test_x86_v3.c
 create mode 100644 meson_cpu/x86/test_x86_v4.c

diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml
index 5bd1eab7f797..c35093d63a14 100644
--- a/.github/workflows/linux_simd.yml
+++ b/.github/workflows/linux_simd.yml
@@ -162,12 +162,12 @@ jobs:
         ]
         - [
           "without avx512",
-          "-Dallow-noblas=true -Dcpu-dispatch=SSSE3,SSE41,POPCNT,SSE42,AVX,F16C,AVX2,FMA3",
+          "-Dallow-noblas=true -Dcpu-dispatch=max-x86_v4",
           "3.11"
         ]
         - [
           "without avx512/avx2/fma3",
-          "-Dallow-noblas=true -Dcpu-dispatch=SSSE3,SSE41,POPCNT,SSE42,AVX,F16C",
+          "-Dallow-noblas=true -Dcpu-dispatch=max-x86_v3",
           "3.11"
         ]

@@ -212,7 +212,7 @@ jobs:
         python -m pip install pytest pytest-xdist hypothesis typing_extensions

       - name: Build
-        run: CC=gcc-13 CXX=g++-13 spin build -- -Denable-openmp=true -Dallow-noblas=true -Dcpu-baseline=avx512_skx -Dtest-simd='BASELINE,AVX512_KNL,AVX512_KNM,AVX512_SKX,AVX512_CLX,AVX512_CNL,AVX512_ICL,AVX512_SPR'
+        run: CC=gcc-13 CXX=g++-13 spin build -- -Denable-openmp=true -Dallow-noblas=true -Dcpu-baseline=X86_V4 -Dtest-simd='BASELINE,AVX512_ICL,AVX512_SPR'

       - name: Meson Log
         if: always()
diff --git a/doc/release/upcoming_changes/28896.change.rst b/doc/release/upcoming_changes/28896.change.rst
new file mode 100644
index 000000000000..47538b7b22b2
--- /dev/null
+++ b/doc/release/upcoming_changes/28896.change.rst
@@ -0,0 +1,56 @@
+Modulate dispatched x86 CPU features
+------------------------------------
+
+**IMPORTANT**: The default setting for ``cpu-baseline`` on x86 has been raised to the ``x86-64-v2`` microarchitecture.
+This can be changed to ``none`` at build time to support older CPUs,
+though SIMD optimizations for pre-2009 processors are no longer maintained.
+
+NumPy has reorganized x86 CPU features into microarchitecture-based groups instead of individual features,
+aligning with Linux distribution standards and Google Highway requirements.
+ +Key changes: +* Replaced individual x86 features with microarchitecture levels: ``X86_V2``, ``X86_V3``, and ``X86_V4`` +* Raised the baseline to ``X86_V2`` +* Improved ``-`` operator behavior to properly exclude successor features that imply the excluded feature +* Added meson redirections for removed feature names to maintain backward compatibility +* Removed compiler compatibility workarounds for partial feature support (e.g., AVX512 without mask operations) +* Removed legacy AMD features (XOP, FMA4) and discontinued Intel Xeon Phi support + +New Feature Group Hierarchy: + +.. list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + - Includes + * - ``X86_V2`` + - + - ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE4_1`` ``SSE4_2`` ``POPCNT`` ``CX16`` ``LAHF`` + * - ``X86_V3`` + - ``X86_V2`` + - ``AVX`` ``AVX2`` ``FMA3`` ``BMI`` ``BMI2`` ``LZCNT`` ``F16C`` ``MOVBE`` + * - ``X86_V4`` + - ``X86_V3`` + - ``AVX512F`` ``AVX512CD`` ``AVX512VL`` ``AVX512BW`` ``AVX512DQ`` + * - ``AVX512_ICL`` + - ``X86_V4`` + - ``AVX512VBMI`` ``AVX512VBMI2`` ``AVX512VNNI`` ``AVX512BITALG`` ``AVX512VPOPCNTDQ`` ``AVX512IFMA`` ``VAES`` ``GFNI`` ``VPCLMULQDQ`` + * - ``AVX512_SPR`` + - ``AVX512_ICL`` + - ``AVX512FP16`` + + +These groups correspond to CPU generations: + +- ``X86_V2``: x86-64-v2 microarchitectures (CPUs since 2009) +- ``X86_V3``: x86-64-v3 microarchitectures (CPUs since 2015) +- ``X86_V4``: x86-64-v4 microarchitectures (AVX-512 capable CPUs) +- ``AVX512_ICL``: Intel Ice Lake and similar CPUs +- ``AVX512_SPR``: Intel Sapphire Rapids and newer CPUs + +.. note:: + On 32-bit x86, ``cx16`` is excluded from ``X86_V2``. + +Documentation has been updated with details on using these new feature groups with the current meson build system. diff --git a/doc/source/reference/simd/build-options.rst b/doc/source/reference/simd/build-options.rst index 1f105e27077d..4bb925b05532 100644 --- a/doc/source/reference/simd/build-options.rst +++ b/doc/source/reference/simd/build-options.rst @@ -1,227 +1,387 @@ ***************** -CPU build options +CPU Build Options ***************** -Description ------------ +Overview +-------- -The following options are mainly used to change the default behavior of optimizations -that target certain CPU features: +NumPy provides configuration options to optimize performance based on CPU capabilities. +These options allow you to specify which CPU features to support, balancing performance, compatibility, and binary size. +This document explains how to use these options effectively across various CPU architectures. -- ``cpu-baseline``: minimal set of required CPU features. - Default value is ``min`` which provides the minimum CPU features that can - safely run on a wide range of platforms within the processor family. +Key Configuration Options +------------------------- - .. note:: +NumPy uses several build options to control CPU optimizations: - During the runtime, NumPy modules will fail to load if any of specified features - are not supported by the target CPU (raises Python runtime error). +- ``cpu-baseline``: The minimum set of CPU features required to run the compiled NumPy. + + * Default: ``min`` (provides compatibility across a wide range of platforms) + * If your target CPU doesn't support all specified baseline features, NumPy will fail to load with a Python runtime error - ``cpu-baseline-detect``: controls detection of CPU baseline based on compiler flags. Default value is ``auto`` that enables detection if ``-march=`` or a similar compiler flag is used. 
The other possible values are ``enabled`` and ``disabled`` to
  respectively enable or disable it unconditionally.

-- ``cpu-dispatch``: dispatched set of additional CPU features.
-  Default value is ``max -xop -fma4`` which enables all CPU
-  features, except for AMD legacy features (in case of X86).
+- ``cpu-dispatch``: Additional CPU features for which optimized code paths will be generated.
+
+  * Default: ``max`` (enables all available optimizations)
+  * At runtime, NumPy will automatically select the fastest available code path based on your CPU's capabilities

-  .. note::
+- ``disable-optimization``: Completely disables all CPU optimizations.
+
+  * Default: ``false`` (optimizations are enabled)
+  * When set to ``true``, disables all CPU optimized code including dispatch, SIMD, and loop unrolling
+  * Useful for debugging, testing, or in environments where optimization causes issues

-    During the runtime, NumPy modules will skip any specified features
-    that are not available in the target CPU.
+These options are specified at build time via meson-python arguments::

-These options are accessible at build time by passing setup arguments to meson-python
-via the build frontend (e.g., ``pip`` or ``build``).
-They accept a set of :ref:`CPU features <opt-supported-features>`
-or groups of features that gather several features or
-:ref:`special options <opt-special-options>` that
-perform a series of procedures.
+    pip install . -Csetup-args=-Dcpu-baseline="min" -Csetup-args=-Dcpu-dispatch="max"
+    # or through spin
+    spin build -- -Dcpu-baseline="min" -Dcpu-dispatch="max"

-To customize CPU/build options::
+``cpu-baseline`` and ``cpu-dispatch`` can be set to specific :ref:`CPU groups, features <opt-supported-features>`, or :ref:`special options <opt-special-options>`
+that perform specific actions. The following sections describe these options in detail.

-    pip install . -Csetup-args=-Dcpu-baseline="avx2 fma3" -Csetup-args=-Dcpu-dispatch="max"
+Common Usage Scenarios
+----------------------

-Quick start
------------
+Building for Local Use Only
+~~~~~~~~~~~~~~~~~~~~~~~~~~~

-In general, the default settings tend to not impose certain CPU features that
-may not be available on some older processors. Raising the ceiling of the
-baseline features will often improve performance and may also reduce
-binary size.
+When building for your machine only and not planning to distribute::
+
+    python -m build --wheel -Csetup-args=-Dcpu-baseline="native" -Csetup-args=-Dcpu-dispatch="none"

+This automatically detects and uses all CPU features available on your machine.

-The following are the most common scenarios that may require changing
-the default settings:
+.. note::
+   A fatal error will be raised if ``native`` isn't supported by the host platform.

+Excluding Specific Features
+~~~~~~~~~~~~~~~~~~~~~~~~~~~

-I am building NumPy for my local use
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+You may want to exclude certain CPU features from the dispatched features::

-And I do not intend to export the build to other users or target a
-different CPU than what the host has.
+    # For x86-64: exclude all AVX-512 features
+    python -m build --wheel -Csetup-args=-Dcpu-dispatch="max -X86_V4"

-Set ``native`` for baseline, or manually specify the CPU features in case of option
-``native`` isn't supported by your platform::
+    # For ARM64: exclude SVE
+    python -m build --wheel -Csetup-args=-Dcpu-dispatch="max -SVE"

-    python -m build --wheel -Csetup-args=-Dcpu-baseline="native"
+.. 
note::
+    Excluding a feature will also exclude any successor features that are
+    implied by the excluded feature. For example, excluding ``X86_V4`` will
+    exclude ``AVX512_ICL`` and ``AVX512_SPR`` as well.

-I do not want to support the old processors of the x86 architecture
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Targeting Older CPUs
+~~~~~~~~~~~~~~~~~~~~

-Since most of the CPUs nowadays support at least ``AVX``, ``F16C`` features, you can use::
+On ``x86-64``, by default the baseline is set to ``min`` which maps to ``X86_V2``.
+This is unsuitable for older CPUs (before 2009) or old virtual machines.
+To address this, set the baseline to ``none``::

-    python -m build --wheel -Csetup-args=-Dcpu-baseline="avx f16c"
+    python -m build --wheel -Csetup-args=-Dcpu-baseline="none"

+This will create a build that is compatible with all x86 CPUs, but
+without any manual optimizations or SIMD code paths for the baseline.
+The build will rely only on dispatched code paths for optimization.

-.. note::
+Targeting Newer CPUs
+~~~~~~~~~~~~~~~~~~~~

-   ``cpu-baseline`` force combine all implied features, so there's no need
-   to add SSE features.
+Raising the baseline improves performance for two main reasons:
+
+1. Dispatched kernels don't cover all code paths
+2. A higher baseline leads to smaller binary size as the compiler won't generate code paths for excluded dispatched features
+
+For CPUs from 2015 and newer, setting the baseline to ``X86_V3`` may be suitable::
+
+    python -m build --wheel -Csetup-args=-Dcpu-baseline="min+X86_V3"
+
+.. _opt-supported-features:
+
+Supported CPU Features By Architecture
+--------------------------------------
+
+NumPy supports optimized code paths for multiple CPU architectures. Below are the supported feature groups for each architecture.
+The name of the feature group can be used in the build options ``cpu-baseline`` and ``cpu-dispatch``.
+
+X86
+~~~
+.. list-table::
+   :header-rows: 1
+   :align: left
+
+   * - Name
+     - Implies
+     - Includes
+   * - ``X86_V2``
+     -
+     - ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE4_1`` ``SSE4_2`` ``POPCNT`` ``CX16`` ``LAHF``
+   * - ``X86_V3``
+     - ``X86_V2``
+     - ``AVX`` ``AVX2`` ``FMA3`` ``BMI`` ``BMI2`` ``LZCNT`` ``F16C`` ``MOVBE``
+   * - ``X86_V4``
+     - ``X86_V3``
+     - ``AVX512F`` ``AVX512CD`` ``AVX512VL`` ``AVX512BW`` ``AVX512DQ``
+   * - ``AVX512_ICL``
+     - ``X86_V4``
+     - ``AVX512VBMI`` ``AVX512VBMI2`` ``AVX512VNNI`` ``AVX512BITALG`` ``AVX512VPOPCNTDQ`` ``AVX512IFMA`` ``VAES`` ``GFNI`` ``VPCLMULQDQ``
+   * - ``AVX512_SPR``
+     - ``AVX512_ICL``
+     - ``AVX512FP16``
+
+These groups correspond to CPU generations:
+
+- ``X86_V2``: x86-64-v2 microarchitectures (CPUs since 2009)
+- ``X86_V3``: x86-64-v3 microarchitectures (CPUs since 2015)
+- ``X86_V4``: x86-64-v4 microarchitectures (AVX-512 capable CPUs)
+- ``AVX512_ICL``: Intel Ice Lake and similar CPUs
+- ``AVX512_SPR``: Intel Sapphire Rapids and newer CPUs
+
+.. note::
+   On 32-bit x86, ``cx16`` is excluded from ``X86_V2``.
+
+On IBM/POWER big-endian
+~~~~~~~~~~~~~~~~~~~~~~~
+.. list-table::
+   :header-rows: 1
+   :align: left
+
+   * - Name
+     - Implies
+   * - ``VSX``
+     -
+   * - ``VSX2``
+     - ``VSX``
+   * - ``VSX3``
+     - ``VSX`` ``VSX2``
+   * - ``VSX4``
+     - ``VSX`` ``VSX2`` ``VSX3``
+
+On IBM/POWER little-endian
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. 
list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + * - ``VSX`` + - ``VSX2`` + * - ``VSX2`` + - ``VSX`` + * - ``VSX3`` + - ``VSX`` ``VSX2`` + * - ``VSX4`` + - ``VSX`` ``VSX2`` ``VSX3`` + +On ARMv7/A32 +~~~~~~~~~~~~ +.. list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + * - ``NEON`` + - + * - ``NEON_FP16`` + - ``NEON`` + * - ``NEON_VFPV4`` + - ``NEON`` ``NEON_FP16`` + * - ``ASIMD`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` + * - ``ASIMDHP`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` + * - ``ASIMDDP`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` + * - ``ASIMDFHM`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` ``ASIMDHP`` + +On ARMv8/A64 +~~~~~~~~~~~~ +.. list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + * - ``NEON`` + - ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` + * - ``NEON_FP16`` + - ``NEON`` ``NEON_VFPV4`` ``ASIMD`` + * - ``NEON_VFPV4`` + - ``NEON`` ``NEON_FP16`` ``ASIMD`` + * - ``ASIMD`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` + * - ``ASIMDHP`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` + * - ``ASIMDDP`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` + * - ``ASIMDFHM`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` ``ASIMDHP`` + +On IBM/ZSYSTEM(S390X) +~~~~~~~~~~~~~~~~~~~~~ +.. list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + * - ``VX`` + - + * - ``VXE`` + - ``VX`` + * - ``VXE2`` + - ``VX`` ``VXE`` - ``cpu-baseline`` force combine all implied features, so there's no need - to add SSE features. +.. _opt-special-options: +Special Options +--------------- -I'm facing the same case above but with ppc64 architecture -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Beyond specific feature names, you can use these special values: -Then raise the ceiling of the baseline features to Power8:: +``NONE`` +~~~~~~~~ - python -m build --wheel -Csetup-args=-Dcpu-baseline="vsx2" +Enables no features (equivalent to an empty string). -Having issues with AVX512 features? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +``NATIVE`` +~~~~~~~~~~ -You may have some reservations about including of ``AVX512`` or -any other CPU feature and you want to exclude from the dispatched features:: +Enables all features supported by the host CPU. - python -m build --wheel -Csetup-args=-Dcpu-dispatch="max -avx512f -avx512cd \ - -avx512_knl -avx512_knm -avx512_skx -avx512_clx -avx512_cnl -avx512_icl -avx512_spr" +``DETECT`` +~~~~~~~~~~ -.. _opt-supported-features: +Detects the features enabled by the compiler. This option is appended by default +to ``cpu-baseline`` if ``-march``, ``-mcpu``, ``-xhost``, or ``/QxHost`` is set in +the environment variable ``CFLAGS``. -Supported features ------------------- +``MIN`` +~~~~~~~ -The names of the features can express one feature or a group of features, -as shown in the following tables supported depend on the lowest interest: +Enables the minimum CPU features for each architecture: -.. note:: +.. list-table:: + :header-rows: 1 + :align: left - The following features may not be supported by all compilers, - also some compilers may produce different set of implied features - when it comes to features like ``AVX512``, ``AVX2``, and ``FMA3``. - See :ref:`opt-platform-differences` for more details. 
+   * - For Arch
+     - Implies
+   * - x86 (32-bit)
+     - ``X86_V2``
+   * - x86-64
+     - ``X86_V2``
+   * - IBM/POWER (big-endian)
+     - ``NONE``
+   * - IBM/POWER (little-endian)
+     - ``VSX`` ``VSX2``
+   * - ARMv7/ARMHF
+     - ``NONE``
+   * - ARMv8/AArch64
+     - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD``
+   * - IBM/ZSYSTEM(S390X)
+     - ``NONE``

-.. include:: generated_tables/cpu_features.inc
-.. _opt-special-options:
+``MAX``
+~~~~~~~

-Special options
----------------
+Enables all features supported by the compiler and platform.

-- ``NONE``: enable no features.
+Operators (``-``/``+``)
+~~~~~~~~~~~~~~~~~~~~~~~~

-- ``NATIVE``: Enables all CPU features that supported by the host CPU,
-  this operation is based on the compiler flags (``-march=native``, ``-xHost``, ``/QxHost``)
+Remove or add specific features, useful with ``MAX``, ``MIN``, and ``NATIVE``:

-- ``MIN``: Enables the minimum CPU features that can safely run on a wide range of platforms:
+- Adding a feature (``+``) includes all implied features
+- Removing a feature (``-``) excludes all successor features that imply the removed feature

-  .. table::
-      :align: left
+Examples::

-    ====================================== =======================================
-    For Arch                               Implies
-    ====================================== =======================================
-    x86 (32-bit mode)                      ``SSE`` ``SSE2``
-    x86_64                                 ``SSE`` ``SSE2`` ``SSE3``
-    IBM/POWER (big-endian mode)            ``NONE``
-    IBM/POWER (little-endian mode)         ``VSX`` ``VSX2``
-    ARMHF                                  ``NONE``
-    ARM64 A.K. AARCH64                     ``NEON`` ``NEON_FP16`` ``NEON_VFPV4``
-                                           ``ASIMD``
-    IBM/ZSYSTEM(S390X)                     ``NONE``
-    ====================================== =======================================
+    python -m build --wheel -Csetup-args=-Dcpu-dispatch="max-X86_V4"
+    python -m build --wheel -Csetup-args=-Dcpu-baseline="min+X86_V4"

-- ``MAX``: Enables all supported CPU features by the compiler and platform.

-- ``Operators-/+``: remove or add features, useful with options ``MAX``, ``MIN`` and ``NATIVE``.
+Case Insensitivity +~~~~~~~~~~~~~~~~~~ -Behaviors ---------- +CPU features and options are case-insensitive:: -- CPU features and other options are case-insensitive, for example:: + python -m build --wheel -Csetup-args=-Dcpu-dispatch="X86_v4" - python -m build --wheel -Csetup-args=-Dcpu-dispatch="SSE41 avx2 FMA3" +Mixing Features across Architectures +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- The order of the requested optimizations doesn't matter:: +You can mix features from different architectures:: - python -m build --wheel -Csetup-args=-Dcpu-dispatch="SSE41 AVX2 FMA3" - # equivalent to - python -m build --wheel -Csetup-args=-Dcpu-dispatch="FMA3 AVX2 SSE41" + python -m build --wheel -Csetup-args=-Dcpu-baseline="X86_V4 VSX4 SVE" -- Either commas or spaces or '+' can be used as a separator, - for example:: +Order Independence +~~~~~~~~~~~~~~~~~~ - python -m build --wheel -Csetup-args=-Dcpu-dispatch="avx2 avx512f" - # or - python -m build --wheel -Csetup-args=-Dcpu-dispatch=avx2,avx512f - # or - python -m build --wheel -Csetup-args=-Dcpu-dispatch="avx2+avx512f" +The order of specified features doesn't matter:: - all works but arguments should be enclosed in quotes or escaped - by backslash if any spaces are used. + python -m build --wheel -Csetup-args=-Dcpu-dispatch="SVE X86_V4 x86_v3" -- ``cpu-baseline`` combines all implied CPU features, for example:: +Separators +~~~~~~~~~~ - python -m build --wheel -Csetup-args=-Dcpu-baseline=sse42 - # equivalent to - python -m build --wheel -Csetup-args=-Dcpu-baseline="sse sse2 sse3 ssse3 sse41 popcnt sse42" +You can use spaces or commas as separators:: -- ``cpu-baseline`` will be treated as "native" if compiler native flag - ``-march=native`` or ``-xHost`` or ``/QxHost`` is enabled through environment variable - ``CFLAGS`` and ``cpu-baseline-detect`` is not ``disabled``:: + # All of these are equivalent + python -m build --wheel -Csetup-args=-Dcpu-dispatch="X86_V2 X86_V4" + python -m build --wheel -Csetup-args=-Dcpu-dispatch=X86_V2,X86_V4 + +Feature Combination +~~~~~~~~~~~~~~~~~~~ + +Features specified in options are automatically combined with all implied features:: + + python -m build --wheel -Csetup-args=-Dcpu-baseline=X86_V4 - export CFLAGS="-march=native" - pip install . - # is equivalent to - pip install . -Csetup-args=-Dcpu-baseline=native +Equivalent to:: -- ``cpu-baseline`` escapes any specified features that aren't supported - by the target platform or compiler rather than raising fatal errors. + python -m build --wheel -Csetup-args=-Dcpu-baseline="X86_V2 X86_V3 X86_V4" - .. note:: +Baseline Overlapping +~~~~~~~~~~~~~~~~~~~~ - Since ``cpu-baseline`` combines all implied features, the maximum - supported of implied features will be enabled rather than escape all of them. - For example:: +Features specified in ``cpu-baseline`` will be excluded from the ``cpu-dispatch`` features, +along with their implied features, but without excluding successor features that imply them. - # Requesting `AVX2,FMA3` but the compiler only support **SSE** features - python -m build --wheel -Csetup-args=-Dcpu-baseline="avx2 fma3" - # is equivalent to - python -m build --wheel -Csetup-args=-Dcpu-baseline="sse sse2 sse3 ssse3 sse41 popcnt sse42" +For instance, if you specify ``cpu-baseline="X86_V4"``, it will exclude ``X86_V4`` and its +implied features ``X86_V2`` and ``X86_V3`` from the ``cpu-dispatch`` features. 
-- ``cpu-dispatch`` does not combine any of implied CPU features, - so you must add them unless you want to disable one or all of them:: +Compile-time Detection +~~~~~~~~~~~~~~~~~~~~~~ - # Only dispatches AVX2 and FMA3 - python -m build --wheel -Csetup-args=-Dcpu-dispatch=avx2,fma3 - # Dispatches AVX and SSE features - python -m build --wheel -Csetup-args=-Dcpu-dispatch=ssse3,sse41,sse42,avx,avx2,fma3 +Specifying features to ``cpu-dispatch`` or ``cpu-baseline`` doesn't explicitly enable them. +Features are detected at compile time, and the maximum available features based on your +specified options will be enabled according to toolchain and platform support. -- ``cpu-dispatch`` escapes any specified baseline features and also escapes - any features not supported by the target platform or compiler without raising - fatal errors. +This detection occurs by testing feature availability in the compiler through compile-time +source files containing common intrinsics for the specified features. If both the compiler +and assembler support the feature, it will be enabled. + +For example, if you specify ``cpu-dispatch="AVX512_ICL"`` but your compiler doesn't support it, +the feature will be excluded from the build. However, any implied features will still be +enabled if they're supported. -Eventually, you should always check the final report through the build log -to verify the enabled features. See :ref:`opt-build-report` for more details. .. _opt-platform-differences: @@ -256,43 +416,6 @@ For example:: Please take a deep look at :ref:`opt-supported-features`, in order to determine the features that imply one another. -**Compilation compatibility** - -Some compilers don't provide independent support for all CPU features. For instance -**Intel**'s compiler doesn't provide separated flags for ``AVX2`` and ``FMA3``, -it makes sense since all Intel CPUs that comes with ``AVX2`` also support ``FMA3``, -but this approach is incompatible with other **x86** CPUs from **AMD** or **VIA**. - -For example:: - - # Specify AVX2 will force enables FMA3 on Intel compilers - python -m build --wheel -Csetup-args=-Dcpu-baseline=avx2 - # which is equivalent to - python -m build --wheel -Csetup-args=-Dcpu-baseline="avx2 fma3" - - -The following tables only show the differences imposed by some compilers from the -general context that been shown in the :ref:`opt-supported-features` tables: - -.. note:: - - Features names with strikeout represent the unsupported CPU features. - -.. raw:: html - - - -.. role:: enabled - :class: enabled-feature - -.. role:: disabled - :class: disabled-feature - -.. include:: generated_tables/compilers-diff.inc - .. _opt-build-report: Build report @@ -305,7 +428,7 @@ expected CPU features by the compiler. So we strongly recommend checking the final report log, to be aware of what kind of CPU features are enabled and what are not. -You can find the final report of CPU optimizations at the end of the build log, +You can find the final report of CPU optimizations by tracing meson build log, and here is how it looks on x86_64/gcc: .. raw:: html @@ -315,94 +438,63 @@ and here is how it looks on x86_64/gcc: .. literalinclude:: log_example.txt :language: bash -There is a separate report for each of ``build_ext`` and ``build_clib`` -that includes several sections, and each section has several values, representing the following: - -**Platform**: - -- :enabled:`Architecture`: The architecture name of target CPU. 
It should be one of - ``x86``, ``x64``, ``ppc64``, ``ppc64le``, ``armhf``, ``aarch64``, ``s390x`` or ``unknown``. - -- :enabled:`Compiler`: The compiler name. It should be one of - gcc, clang, msvc, icc, iccw or unix-like. - -**CPU baseline**: - -- :enabled:`Requested`: The specific features and options to ``cpu-baseline`` as-is. -- :enabled:`Enabled`: The final set of enabled CPU features. -- :enabled:`Flags`: The compiler flags that were used to all NumPy C/C++ sources - during the compilation except for temporary sources that have been used for generating - the binary objects of dispatched features. -- :enabled:`Extra checks`: list of internal checks that activate certain functionality - or intrinsics related to the enabled features, useful for debugging when it comes - to developing SIMD kernels. - -**CPU dispatch**: - -- :enabled:`Requested`: The specific features and options to ``cpu-dispatch`` as-is. -- :enabled:`Enabled`: The final set of enabled CPU features. -- :enabled:`Generated`: At the beginning of the next row of this property, - the features for which optimizations have been generated are shown in the - form of several sections with similar properties explained as follows: - - - :enabled:`One or multiple dispatched feature`: The implied CPU features. - - :enabled:`Flags`: The compiler flags that been used for these features. - - :enabled:`Extra checks`: Similar to the baseline but for these dispatched features. - - :enabled:`Detect`: Set of CPU features that need be detected in runtime in order to - execute the generated optimizations. - - The lines that come after the above property and end with a ':' on a separate line, - represent the paths of c/c++ sources that define the generated optimizations. .. _runtime-simd-dispatch: -Runtime dispatch +Runtime Dispatch ---------------- + Importing NumPy triggers a scan of the available CPU features from the set -of dispatchable features. This can be further restricted by setting the +of dispatchable features. You can restrict this scan by setting the environment variable ``NPY_DISABLE_CPU_FEATURES`` to a comma-, tab-, or -space-separated list of features to disable. This will raise an error if -parsing fails or if the feature was not enabled. For instance, on ``x86_64`` -this will disable ``AVX2`` and ``FMA3``:: +space-separated list of features to disable. + +For instance, on ``x86_64`` this will disable ``X86_V4``:: - NPY_DISABLE_CPU_FEATURES="AVX2,FMA3" + NPY_DISABLE_CPU_FEATURES="X86_V4" -If the feature is not available, a warning will be emitted. +This will raise an error if parsing fails or if the feature was not enabled through the ``cpu-dispatch`` build option. +If the feature is supported by the build but not available on the current CPU, a warning will be emitted instead. -Tracking dispatched functions +Tracking Dispatched Functions ----------------------------- -Discovering which CPU targets are enabled for different optimized functions is achievable -through the Python function ``numpy.lib.introspect.opt_func_info``. -This function offers the flexibility of applying filters using two optional arguments: -one for refining function names and the other for specifying data types in the signatures. + +You can discover which CPU targets are enabled for different optimized functions using +the Python function ``numpy.lib.introspect.opt_func_info``. + +This function offers two optional arguments for filtering results: + +1. ``func_name`` - For refining function names +2. 
``signature`` - For specifying data types in the signatures For example:: >> func_info = numpy.lib.introspect.opt_func_info(func_name='add|abs', signature='float64|complex64') >> print(json.dumps(func_info, indent=2)) { - "absolute": { - "dd": { - "current": "SSE41", - "available": "SSE41 baseline(SSE SSE2 SSE3)" - }, - "Ff": { - "current": "FMA3__AVX2", - "available": "AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)" - }, - "Dd": { - "current": "FMA3__AVX2", - "available": "AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)" - } - }, - "add": { - "ddd": { - "current": "FMA3__AVX2", - "available": "FMA3__AVX2 baseline(SSE SSE2 SSE3)" - }, - "FFF": { - "current": "FMA3__AVX2", - "available": "FMA3__AVX2 baseline(SSE SSE2 SSE3)" - } + "absolute": { + "dd": { + "current": "baseline(X86_V2)", + "available": "baseline(X86_V2)" + }, + "Ff": { + "current": "X86_V3", + "available": "X86_V3 baseline(X86_V2)" + }, + "Dd": { + "current": "X86_V3", + "available": "X86_V3 baseline(X86_V2)" + } + }, + "add": { + "ddd": { + "current": "X86_V3", + "available": "X86_V3 baseline(X86_V2)" + }, + "FFF": { + "current": "X86_V3", + "available": "X86_V3 baseline(X86_V2)" + } } } diff --git a/doc/source/reference/simd/gen_features.py b/doc/source/reference/simd/gen_features.py deleted file mode 100644 index 3394f67f23ef..000000000000 --- a/doc/source/reference/simd/gen_features.py +++ /dev/null @@ -1,196 +0,0 @@ -""" -Generate CPU features tables from CCompilerOpt -""" -from os import path - -from numpy.distutils.ccompiler_opt import CCompilerOpt - - -class FakeCCompilerOpt(CCompilerOpt): - # disable caching no need for it - conf_nocache = True - - def __init__(self, arch, cc, *args, **kwargs): - self.fake_info = (arch, cc, '') - CCompilerOpt.__init__(self, None, **kwargs) - - def dist_compile(self, sources, flags, **kwargs): - return sources - - def dist_info(self): - return self.fake_info - - @staticmethod - def dist_log(*args, stderr=False): - # avoid printing - pass - - def feature_test(self, name, force_flags=None, macros=[]): - # To speed up - return True - -class Features: - def __init__(self, arch, cc): - self.copt = FakeCCompilerOpt(arch, cc, cpu_baseline="max") - - def names(self): - return self.copt.cpu_baseline_names() - - def serialize(self, features_names): - result = [] - for f in self.copt.feature_sorted(features_names): - gather = self.copt.feature_supported.get(f, {}).get("group", []) - implies = self.copt.feature_sorted(self.copt.feature_implies(f)) - result.append((f, implies, gather)) - return result - - def table(self, **kwargs): - return self.gen_table(self.serialize(self.names()), **kwargs) - - def table_diff(self, vs, **kwargs): - fnames = set(self.names()) - fnames_vs = set(vs.names()) - common = fnames.intersection(fnames_vs) - extra = fnames.difference(fnames_vs) - notavl = fnames_vs.difference(fnames) - iextra = {} - inotavl = {} - idiff = set() - for f in common: - implies = self.copt.feature_implies(f) - implies_vs = vs.copt.feature_implies(f) - e = implies.difference(implies_vs) - i = implies_vs.difference(implies) - if not i and not e: - continue - if e: - iextra[f] = e - if i: - inotavl[f] = e - idiff.add(f) - - def fbold(f): - if f in extra: - return f':enabled:`{f}`' - if f in notavl: - return f':disabled:`{f}`' - return f - - def fbold_implies(f, i): - if i in iextra.get(f, {}): - return f':enabled:`{i}`' - if f in notavl or i in inotavl.get(f, {}): - return f':disabled:`{i}`' - return i - - diff_all = self.serialize(idiff.union(extra)) - diff_all += vs.serialize(notavl) - content = 
self.gen_table( - diff_all, fstyle=fbold, fstyle_implies=fbold_implies, **kwargs - ) - return content - - def gen_table(self, serialized_features, fstyle=None, fstyle_implies=None, - **kwargs): - - if fstyle is None: - fstyle = lambda ft: f'``{ft}``' - if fstyle_implies is None: - fstyle_implies = lambda origin, ft: fstyle(ft) - - rows = [] - have_gather = False - for f, implies, gather in serialized_features: - if gather: - have_gather = True - name = fstyle(f) - implies = ' '.join([fstyle_implies(f, i) for i in implies]) - gather = ' '.join([fstyle_implies(f, i) for i in gather]) - rows.append((name, implies, gather)) - if not rows: - return '' - fields = ["Name", "Implies", "Gathers"] - if not have_gather: - del fields[2] - rows = [(name, implies) for name, implies, _ in rows] - return self.gen_rst_table(fields, rows, **kwargs) - - def gen_rst_table(self, field_names, rows, tab_size=4): - assert not rows or len(field_names) == len(rows[0]) - rows.append(field_names) - fld_len = len(field_names) - cls_len = [max(len(c[i]) for c in rows) for i in range(fld_len)] - del rows[-1] - cformat = ' '.join('{:<%d}' % i for i in cls_len) - border = cformat.format(*['=' * i for i in cls_len]) - - rows = [cformat.format(*row) for row in rows] - # header - rows = [border, cformat.format(*field_names), border] + rows - # footer - rows += [border] - # add left margin - rows = [(' ' * tab_size) + r for r in rows] - return '\n'.join(rows) - -def wrapper_section(title, content, tab_size=4): - tab = ' ' * tab_size - if content: - return ( - f"{title}\n{'~' * len(title)}" - f"\n.. table::\n{tab}:align: left\n\n" - f"{content}\n\n" - ) - return '' - -def wrapper_tab(title, table, tab_size=4): - tab = ' ' * tab_size - if table: - ('\n' + tab).join(( - '.. tab:: ' + title, - tab + '.. table::', - tab + 'align: left', - table + '\n\n' - )) - return '' - - -if __name__ == '__main__': - - pretty_names = { - "PPC64": "IBM/POWER big-endian", - "PPC64LE": "IBM/POWER little-endian", - "S390X": "IBM/ZSYSTEM(S390X)", - "ARMHF": "ARMv7/A32", - "AARCH64": "ARMv8/A64", - "ICC": "Intel Compiler", - # "ICCW": "Intel Compiler msvc-like", - "MSVC": "Microsoft Visual C/C++" - } - gen_path = path.join( - path.dirname(path.realpath(__file__)), "generated_tables" - ) - with open(path.join(gen_path, 'cpu_features.inc'), 'w') as fd: - fd.write(f'.. generated via {__file__}\n\n') - for arch in ( - ("x86", "PPC64", "PPC64LE", "ARMHF", "AARCH64", "S390X") - ): - title = "On " + pretty_names.get(arch, arch) - table = Features(arch, 'gcc').table() - fd.write(wrapper_section(title, table)) - - with open(path.join(gen_path, 'compilers-diff.inc'), 'w') as fd: - fd.write(f'.. generated via {__file__}\n\n') - for arch, cc_names in ( - ("x86", ("clang", "ICC", "MSVC")), - ("PPC64", ("clang",)), - ("PPC64LE", ("clang",)), - ("ARMHF", ("clang",)), - ("AARCH64", ("clang",)), - ("S390X", ("clang",)) - ): - arch_pname = pretty_names.get(arch, arch) - for cc in cc_names: - title = f"On {arch_pname}::{pretty_names.get(cc, cc)}" - table = Features(arch, cc).table_diff(Features(arch, "gcc")) - fd.write(wrapper_section(title, table)) diff --git a/doc/source/reference/simd/generated_tables/compilers-diff.inc b/doc/source/reference/simd/generated_tables/compilers-diff.inc deleted file mode 100644 index d5a87da3c617..000000000000 --- a/doc/source/reference/simd/generated_tables/compilers-diff.inc +++ /dev/null @@ -1,35 +0,0 @@ -.. generated via /numpy/numpy/./doc/source/reference/simd/gen_features.py - -On x86::Intel Compiler -~~~~~~~~~~~~~~~~~~~~~~ -.. 
table:: - :align: left - - ====================== ================================================================================================================================================================================================================================================================================================================================== ====================== - Name Implies Gathers - ====================== ================================================================================================================================================================================================================================================================================================================================== ====================== - FMA3 SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C :enabled:`AVX2` - AVX2 SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C :enabled:`FMA3` - AVX512F SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 :enabled:`AVX512CD` - :disabled:`XOP` :disabled:`SSE` :disabled:`SSE2` :disabled:`SSE3` :disabled:`SSSE3` :disabled:`SSE41` :disabled:`POPCNT` :disabled:`SSE42` :disabled:`AVX` - :disabled:`FMA4` :disabled:`SSE` :disabled:`SSE2` :disabled:`SSE3` :disabled:`SSSE3` :disabled:`SSE41` :disabled:`POPCNT` :disabled:`SSE42` :disabled:`AVX` - :disabled:`AVX512_SPR` :disabled:`SSE` :disabled:`SSE2` :disabled:`SSE3` :disabled:`SSSE3` :disabled:`SSE41` :disabled:`POPCNT` :disabled:`SSE42` :disabled:`AVX` :disabled:`F16C` :disabled:`FMA3` :disabled:`AVX2` :disabled:`AVX512F` :disabled:`AVX512CD` :disabled:`AVX512_SKX` :disabled:`AVX512_CLX` :disabled:`AVX512_CNL` :disabled:`AVX512_ICL` :disabled:`AVX512FP16` - ====================== ================================================================================================================================================================================================================================================================================================================================== ====================== - -On x86::Microsoft Visual C/C++ -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. 
table:: - :align: left - - ====================== ================================================================================================================================================================================================================================================================================================================================== ============================================================================= - Name Implies Gathers - ====================== ================================================================================================================================================================================================================================================================================================================================== ============================================================================= - FMA3 SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C :enabled:`AVX2` - AVX2 SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C :enabled:`FMA3` - AVX512F SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 :enabled:`AVX512CD` :enabled:`AVX512_SKX` - AVX512CD SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 AVX512F :enabled:`AVX512_SKX` - :disabled:`AVX512_KNL` :disabled:`SSE` :disabled:`SSE2` :disabled:`SSE3` :disabled:`SSSE3` :disabled:`SSE41` :disabled:`POPCNT` :disabled:`SSE42` :disabled:`AVX` :disabled:`F16C` :disabled:`FMA3` :disabled:`AVX2` :disabled:`AVX512F` :disabled:`AVX512CD` :disabled:`AVX512ER` :disabled:`AVX512PF` - :disabled:`AVX512_KNM` :disabled:`SSE` :disabled:`SSE2` :disabled:`SSE3` :disabled:`SSSE3` :disabled:`SSE41` :disabled:`POPCNT` :disabled:`SSE42` :disabled:`AVX` :disabled:`F16C` :disabled:`FMA3` :disabled:`AVX2` :disabled:`AVX512F` :disabled:`AVX512CD` :disabled:`AVX512_KNL` :disabled:`AVX5124FMAPS` :disabled:`AVX5124VNNIW` :disabled:`AVX512VPOPCNTDQ` - :disabled:`AVX512_SPR` :disabled:`SSE` :disabled:`SSE2` :disabled:`SSE3` :disabled:`SSSE3` :disabled:`SSE41` :disabled:`POPCNT` :disabled:`SSE42` :disabled:`AVX` :disabled:`F16C` :disabled:`FMA3` :disabled:`AVX2` :disabled:`AVX512F` :disabled:`AVX512CD` :disabled:`AVX512_SKX` :disabled:`AVX512_CLX` :disabled:`AVX512_CNL` :disabled:`AVX512_ICL` :disabled:`AVX512FP16` - ====================== ================================================================================================================================================================================================================================================================================================================================== ============================================================================= - diff --git a/doc/source/reference/simd/generated_tables/cpu_features.inc b/doc/source/reference/simd/generated_tables/cpu_features.inc deleted file mode 100644 index 603370e21545..000000000000 --- a/doc/source/reference/simd/generated_tables/cpu_features.inc +++ /dev/null @@ -1,109 +0,0 @@ -.. generated via /numpy/numpy/./doc/source/reference/simd/gen_features.py - -On x86 -~~~~~~ -.. 
table:: - :align: left - - ============== ========================================================================================================================================================================================== ===================================================== - Name Implies Gathers - ============== ========================================================================================================================================================================================== ===================================================== - ``SSE`` ``SSE2`` - ``SSE2`` ``SSE`` - ``SSE3`` ``SSE`` ``SSE2`` - ``SSSE3`` ``SSE`` ``SSE2`` ``SSE3`` - ``SSE41`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` - ``POPCNT`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` - ``SSE42`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` - ``AVX`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` - ``XOP`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` - ``FMA4`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` - ``F16C`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` - ``FMA3`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` - ``AVX2`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` - ``AVX512F`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` - ``AVX512CD`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` - ``AVX512_KNL`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` ``AVX512CD`` ``AVX512ER`` ``AVX512PF`` - ``AVX512_KNM`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` ``AVX512CD`` ``AVX512_KNL`` ``AVX5124FMAPS`` ``AVX5124VNNIW`` ``AVX512VPOPCNTDQ`` - ``AVX512_SKX`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` ``AVX512CD`` ``AVX512VL`` ``AVX512BW`` ``AVX512DQ`` - ``AVX512_CLX`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` ``AVX512CD`` ``AVX512_SKX`` ``AVX512VNNI`` - ``AVX512_CNL`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` ``AVX512CD`` ``AVX512_SKX`` ``AVX512IFMA`` ``AVX512VBMI`` - ``AVX512_ICL`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` ``AVX512CD`` ``AVX512_SKX`` ``AVX512_CLX`` ``AVX512_CNL`` ``AVX512VBMI2`` ``AVX512BITALG`` ``AVX512VPOPCNTDQ`` - ``AVX512_SPR`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` ``AVX512CD`` ``AVX512_SKX`` ``AVX512_CLX`` ``AVX512_CNL`` ``AVX512_ICL`` ``AVX512FP16`` - ============== ========================================================================================================================================================================================== ===================================================== - -On IBM/POWER big-endian -~~~~~~~~~~~~~~~~~~~~~~~ -.. 
table:: - :align: left - - ======== ========================= - Name Implies - ======== ========================= - ``VSX`` - ``VSX2`` ``VSX`` - ``VSX3`` ``VSX`` ``VSX2`` - ``VSX4`` ``VSX`` ``VSX2`` ``VSX3`` - ======== ========================= - -On IBM/POWER little-endian -~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. table:: - :align: left - - ======== ========================= - Name Implies - ======== ========================= - ``VSX`` ``VSX2`` - ``VSX2`` ``VSX`` - ``VSX3`` ``VSX`` ``VSX2`` - ``VSX4`` ``VSX`` ``VSX2`` ``VSX3`` - ======== ========================= - -On ARMv7/A32 -~~~~~~~~~~~~ -.. table:: - :align: left - - ============== =========================================================== - Name Implies - ============== =========================================================== - ``NEON`` - ``NEON_FP16`` ``NEON`` - ``NEON_VFPV4`` ``NEON`` ``NEON_FP16`` - ``ASIMD`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` - ``ASIMDHP`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` - ``ASIMDDP`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` - ``ASIMDFHM`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` ``ASIMDHP`` - ============== =========================================================== - -On ARMv8/A64 -~~~~~~~~~~~~ -.. table:: - :align: left - - ============== =========================================================== - Name Implies - ============== =========================================================== - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` - ``NEON_FP16`` ``NEON`` ``NEON_VFPV4`` ``ASIMD`` - ``NEON_VFPV4`` ``NEON`` ``NEON_FP16`` ``ASIMD`` - ``ASIMD`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` - ``ASIMDHP`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` - ``ASIMDDP`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` - ``ASIMDFHM`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` ``ASIMDHP`` - ============== =========================================================== - -On IBM/ZSYSTEM(S390X) -~~~~~~~~~~~~~~~~~~~~~ -.. 
table:: - :align: left - - ======== ============== - Name Implies - ======== ============== - ``VX`` - ``VXE`` ``VX`` - ``VXE2`` ``VX`` ``VXE`` - ======== ============== - diff --git a/doc/source/reference/simd/log_example.txt b/doc/source/reference/simd/log_example.txt index 79c5c6c253ca..c71306d42aae 100644 --- a/doc/source/reference/simd/log_example.txt +++ b/doc/source/reference/simd/log_example.txt @@ -1,79 +1,64 @@ -########### EXT COMPILER OPTIMIZATION ########### -Platform : - Architecture: x64 - Compiler : gcc +Test features "X86_V2" : Supported +Test features "X86_V3" : Supported +Test features "X86_V4" : Supported +Test features "AVX512_ICL" : Supported +Test features "AVX512_SPR" : Supported +Configuring npy_cpu_dispatch_config.h using configuration +Message: +CPU Optimization Options + baseline: + Requested : min + Enabled : X86_V2 + dispatch: + Requested : max + Enabled : X86_V3 X86_V4 AVX512_ICL AVX512_SPR -CPU baseline : - Requested : 'min' - Enabled : SSE SSE2 SSE3 - Flags : -msse -msse2 -msse3 - Extra checks: none - -CPU dispatch : - Requested : 'max -xop -fma4' - Enabled : SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 AVX512F AVX512CD AVX512_KNL AVX512_KNM AVX512_SKX AVX512_CLX AVX512_CNL AVX512_ICL - Generated : - : - SSE41 : SSE SSE2 SSE3 SSSE3 - Flags : -msse -msse2 -msse3 -mssse3 -msse4.1 - Extra checks: none - Detect : SSE SSE2 SSE3 SSSE3 SSE41 - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_arithmetic.dispatch.c - : numpy/_core/src/umath/_umath_tests.dispatch.c - : - SSE42 : SSE SSE2 SSE3 SSSE3 SSE41 POPCNT - Flags : -msse -msse2 -msse3 -mssse3 -msse4.1 -mpopcnt -msse4.2 - Extra checks: none - Detect : SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 - : build/src.linux-x86_64-3.9/numpy/_core/src/_simd/_simd.dispatch.c - : - AVX2 : SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C - Flags : -msse -msse2 -msse3 -mssse3 -msse4.1 -mpopcnt -msse4.2 -mavx -mf16c -mavx2 - Extra checks: none - Detect : AVX F16C AVX2 - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_arithm_fp.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_arithmetic.dispatch.c - : numpy/_core/src/umath/_umath_tests.dispatch.c - : - (FMA3 AVX2) : SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C - Flags : -msse -msse2 -msse3 -mssse3 -msse4.1 -mpopcnt -msse4.2 -mavx -mf16c -mfma -mavx2 - Extra checks: none - Detect : AVX F16C FMA3 AVX2 - : build/src.linux-x86_64-3.9/numpy/_core/src/_simd/_simd.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_exponent_log.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_trigonometric.dispatch.c - : - AVX512F : SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 - Flags : -msse -msse2 -msse3 -mssse3 -msse4.1 -mpopcnt -msse4.2 -mavx -mf16c -mfma -mavx2 -mavx512f - Extra checks: AVX512F_REDUCE - Detect : AVX512F - : build/src.linux-x86_64-3.9/numpy/_core/src/_simd/_simd.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_arithm_fp.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_arithmetic.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_exponent_log.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_trigonometric.dispatch.c - : - AVX512_SKX : SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 AVX512F AVX512CD - Flags : -msse -msse2 -msse3 -mssse3 -msse4.1 -mpopcnt -msse4.2 -mavx -mf16c -mfma -mavx2 -mavx512f -mavx512cd -mavx512vl -mavx512bw -mavx512dq - Extra checks: AVX512BW_MASK AVX512DQ_MASK - Detect : AVX512_SKX - : 
build/src.linux-x86_64-3.9/numpy/_core/src/_simd/_simd.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_arithmetic.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_exponent_log.dispatch.c -CCompilerOpt.cache_flush[804] : write cache to path -> /home/seiko/work/repos/numpy/build/temp.linux-x86_64-3.9/ccompiler_opt_cache_ext.py - -########### CLIB COMPILER OPTIMIZATION ########### -Platform : - Architecture: x64 - Compiler : gcc - -CPU baseline : - Requested : 'min' - Enabled : SSE SSE2 SSE3 - Flags : -msse -msse2 -msse3 - Extra checks: none - -CPU dispatch : - Requested : 'max -xop -fma4' - Enabled : SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 AVX512F AVX512CD AVX512_KNL AVX512_KNM AVX512_SKX AVX512_CLX AVX512_CNL AVX512_ICL - Generated : none +Generating multi-targets for "_umath_tests.dispatch.h" + Enabled targets: X86_V3, baseline +Generating multi-targets for "argfunc.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "x86_simd_argsort.dispatch.h" + Enabled targets: X86_V4, X86_V3 +Generating multi-targets for "x86_simd_qsort.dispatch.h" + Enabled targets: X86_V4, X86_V3 +Generating multi-targets for "x86_simd_qsort_16bit.dispatch.h" + Enabled targets: AVX512_SPR, AVX512_ICL +Generating multi-targets for "highway_qsort.dispatch.h" + Enabled targets: +Generating multi-targets for "highway_qsort_16bit.dispatch.h" + Enabled targets: +Generating multi-targets for "loops_arithm_fp.dispatch.h" + Enabled targets: X86_V3, baseline +Generating multi-targets for "loops_arithmetic.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "loops_comparison.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "loops_exponent_log.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "loops_hyperbolic.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "loops_logical.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "loops_minmax.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "loops_modulo.dispatch.h" + Enabled targets: baseline +Generating multi-targets for "loops_trigonometric.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "loops_umath_fp.dispatch.h" + Enabled targets: X86_V4, baseline +Generating multi-targets for "loops_unary.dispatch.h" + Enabled targets: X86_V4, baseline +Generating multi-targets for "loops_unary_fp.dispatch.h" + Enabled targets: baseline +Generating multi-targets for "loops_unary_fp_le.dispatch.h" + Enabled targets: baseline +Generating multi-targets for "loops_unary_complex.dispatch.h" + Enabled targets: X86_V3, baseline +Generating multi-targets for "loops_autovec.dispatch.h" + Enabled targets: X86_V3, baseline +Generating multi-targets for "loops_half.dispatch.h" + Enabled targets: AVX512_SPR, X86_V4, baseline +WARNING: Project targets '>=1.5.2' but uses feature deprecated since '1.3.0': Source file src/umath/svml/linux/avx512/svml_z0_acos_d_la.s in the 'objects' kwarg is not an object.. 
+Generating multi-targets for "_simd.dispatch.h"
+  Enabled targets: X86_V3, X86_V4, baseline
diff --git a/meson.options b/meson.options
index f17f9901664a..236f44af6d6c 100644
--- a/meson.options
+++ b/meson.options
@@ -28,14 +28,13 @@ option('disable-optimization', type: 'boolean', value: false,
        description: 'Disable CPU optimized code (dispatch,simd,unroll...)')
 option('cpu-baseline', type: 'string', value: 'min',
        description: 'Minimal set of required CPU features')
 option('cpu-baseline-detect', type: 'feature', value: 'auto',
        description: 'Detect CPU baseline from the compiler flags')
-option('cpu-dispatch', type: 'string', value: 'max -xop -fma4',
+option('cpu-dispatch', type: 'string', value: 'max',
        description: 'Dispatched set of additional CPU features')
 option('test-simd', type: 'array',
        value: [
-       'BASELINE', 'SSE2', 'SSE42', 'XOP', 'FMA4',
-       'AVX2', 'FMA3', 'AVX2,FMA3', 'AVX512F', 'AVX512_SKX',
+       'BASELINE', 'X86_V2', 'X86_V3', 'X86_V4',
        'VSX', 'VSX2', 'VSX3', 'VSX4',
        'NEON', 'ASIMD',
        'VX', 'VXE', 'VXE2',
diff --git a/meson_cpu/meson.build b/meson_cpu/meson.build
index 1c4c6eecb308..02bbe5f7618e 100644
--- a/meson_cpu/meson.build
+++ b/meson_cpu/meson.build
@@ -88,13 +88,16 @@ CPU_FEATURES += S390X_FEATURES
 CPU_FEATURES += RV64_FEATURES
 CPU_FEATURES += LOONGARCH64_FEATURES
 
+CPU_FEATURES_REDIRECT = {}
+CPU_FEATURES_REDIRECT += X86_REDIRECT
+
 # Parse the requested baseline (CPU_CONF_BASELINE) and dispatch features
 # (CPU_CONF_DISPATCH).
 cpu_family = host_machine.cpu_family()
 # Used by build option 'min'
 min_features = {
-  'x86': [SSE2],
-  'x86_64': [SSE3],
+  'x86': [X86_V2],
+  'x86_64': [X86_V2],
   'ppc64': [],
   's390x': [],
   'arm': [],
@@ -191,15 +194,31 @@ foreach opt_name, conf : parse_options
       accumulate = min_features
     elif tok == 'MAX'
       accumulate = max_features
-    elif tok in CPU_FEATURES
-      tokobj = CPU_FEATURES[tok]
-      if tokobj not in max_features
-        ignored += tok
-        continue
-      endif
-      accumulate = [tokobj]
     else
-      error('Invalid token "'+tok+'" within option --'+opt_name)
+      if tok in CPU_FEATURES_REDIRECT
+        ntok = CPU_FEATURES_REDIRECT[tok]
+        if ntok == ''
+          warning('Ignoring CPU feature "@0@" in --@1@ option - feature is no longer supported.'.format(tok, opt_name))
+        else
+          warning('CPU feature "@0@" is no longer explicitly supported, redirecting to "@1@".'.format(tok, ntok))
+        endif
+        warning('Please check the latest documentation for build options.')
+        if ntok == '' or not append # redirected features are not safe to be excluded
+          continue
+        endif
+        tok = ntok
+      endif
+      if tok not in CPU_FEATURES
+        error('Invalid token "'+tok+'" within option --'+opt_name)
+      endif
+      if tok in CPU_FEATURES
+        tokobj = CPU_FEATURES[tok]
+        if tokobj not in max_features
+          ignored += tok
+          continue
+        endif
+        accumulate = [tokobj]
+      endif
     endif
     if append
       foreach fet : accumulate
@@ -209,8 +228,17 @@ foreach opt_name, conf : parse_options
       endforeach
     else
       filterd = []
+      # filter out any feature that is in the accumulate list,
+      # including any of its successor features
      foreach fet : result
-        if fet not in accumulate
+        escape = false
+        foreach fet2 : accumulate
+          if fet2 in mod_features.implicit_c(fet)
+            escape = true
+            break
+          endif
+        endforeach
+        if not escape
           filterd += fet
         endif
       endforeach
diff --git a/meson_cpu/x86/meson.build b/meson_cpu/x86/meson.build
index 1276e922ff2a..bd26c615624c 100644
--- a/meson_cpu/x86/meson.build
+++ b/meson_cpu/x86/meson.build
@@ -1,230 +1,98 @@
 source_root = meson.project_source_root()
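+# NOTE: the X86_V2/V3/V4 features defined below follow the x86-64 psABI
+# microarchitecture levels, i.e. the same levels GCC/Clang target with
+# -march=x86-64-v{2,3,4}:
+#   x86-64-v2: SSE3..SSE4.2, POPCNT, CX16, LAHF/SAHF      (Nehalem and newer)
+#   x86-64-v3: AVX, AVX2, FMA, BMI1/2, LZCNT, F16C, MOVBE (Haswell and newer)
+#   x86-64-v4: AVX512F/CD/VL/BW/DQ                        (Skylake-AVX512 and newer)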
+current_dir = meson.current_source_dir() +cpu_family = host_machine.cpu_family() mod_features = import('features') -SSE = mod_features.new( - 'SSE', 1, args: '-msse', - test_code: files(source_root + '/numpy/distutils/checks/cpu_sse.c')[0] -) -SSE2 = mod_features.new( - 'SSE2', 2, implies: SSE, - args: '-msse2', - test_code: files(source_root + '/numpy/distutils/checks/cpu_sse2.c')[0] -) -# enabling SSE without SSE2 is useless also it's non-optional for x86_64 -SSE.update(implies: SSE2) -SSE3 = mod_features.new( - 'SSE3', 3, implies: SSE2, - args: '-msse3', - test_code: files(source_root + '/numpy/distutils/checks/cpu_sse3.c')[0] -) -SSSE3 = mod_features.new( - 'SSSE3', 4, implies: SSE3, - args: '-mssse3', - test_code: files(source_root + '/numpy/distutils/checks/cpu_ssse3.c')[0] -) -SSE41 = mod_features.new( - 'SSE41', 5, implies: SSSE3, - args: '-msse4.1', - test_code: files(source_root + '/numpy/distutils/checks/cpu_sse41.c')[0] -) -POPCNT = mod_features.new( - 'POPCNT', 6, implies: SSE41, - args: '-mpopcnt', - test_code: files(source_root + '/numpy/distutils/checks/cpu_popcnt.c')[0] -) -SSE42 = mod_features.new( - 'SSE42', 7, implies: POPCNT, args: '-msse4.2', - test_code: files(source_root + '/numpy/distutils/checks/cpu_sse42.c')[0] -) -# 7-20 left as margin for any extra features -AVX = mod_features.new( - 'AVX', 20, implies: SSE42, args: '-mavx', - detect: {'val': 'AVX', 'match': '.*SSE.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx.c')[0] -) -XOP = mod_features.new( - 'XOP', 21, implies: AVX, args: '-mxop', - test_code: files(source_root + '/numpy/distutils/checks/cpu_xop.c')[0] -) -FMA4 = mod_features.new( - 'FMA4', 22, implies: AVX, args: '-mfma4', - test_code: files(source_root + '/numpy/distutils/checks/cpu_fma4.c')[0] -) -# x86 half-precision -F16C = mod_features.new( - 'F16C', 23, implies: AVX, args: '-mf16c', - test_code: files(source_root + '/numpy/distutils/checks/cpu_f16c.c')[0] -) -FMA3 = mod_features.new( - 'FMA3', 24, implies: F16C, args: '-mfma', - test_code: files(source_root + '/numpy/distutils/checks/cpu_fma3.c')[0] -) -# match this to HWY_AVX2 -AVX2 = mod_features.new( - 'AVX2', 25, implies: FMA3, args: ['-mavx2', '-maes', '-mpclmul', '-mbmi', '-mbmi2'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx2.c')[0] -) -# 25-40 left as margin for any extra features -AVX512F = mod_features.new( - 'AVX512F', 40, implies: [AVX2], - # Disables mmx because of stack corruption that may happen during mask - # conversions. 
- # TODO (seiko2plus): provide more clarification - args: ['-mno-mmx', '-mavx512f'], - detect: {'val': 'AVX512F', 'match': '.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512f.c')[0], - extra_tests: { - 'AVX512F_REDUCE': files(source_root + '/numpy/distutils/checks/extra_avx512f_reduce.c')[0] - } -) -AVX512CD = mod_features.new( - 'AVX512CD', 41, implies: AVX512F, args: '-mavx512cd', - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512cd.c')[0] -) -AVX512_KNL = mod_features.new( - 'AVX512_KNL', 42, implies: AVX512CD, args: ['-mavx512er', '-mavx512pf'], - group: ['AVX512ER', 'AVX512PF'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_knl.c')[0] -) -AVX512_KNM = mod_features.new( - 'AVX512_KNM', 43, implies: AVX512_KNL, - args: ['-mavx5124fmaps', '-mavx5124vnniw', '-mavx512vpopcntdq'], - group: ['AVX5124FMAPS', 'AVX5124VNNIW', 'AVX512VPOPCNTDQ'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_knm.c')[0] -) -AVX512_SKX = mod_features.new( - 'AVX512_SKX', 50, implies: AVX512CD, - args: ['-mavx512vl', '-mavx512bw', '-mavx512dq'], - group: ['AVX512VL', 'AVX512BW', 'AVX512DQ'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_skx.c')[0], - extra_tests: { - 'AVX512BW_MASK': files(source_root + '/numpy/distutils/checks/extra_avx512bw_mask.c')[0], - 'AVX512DQ_MASK': files(source_root + '/numpy/distutils/checks/extra_avx512dq_mask.c')[0] - } -) -AVX512_CLX = mod_features.new( - 'AVX512_CLX', 51, implies: AVX512_SKX, args: '-mavx512vnni', - group: ['AVX512VNNI'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_clx.c')[0] -) -AVX512_CNL = mod_features.new( - 'AVX512_CNL', 52, implies: AVX512_SKX, - args: ['-mavx512ifma', '-mavx512vbmi'], - group: ['AVX512IFMA', 'AVX512VBMI'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_cnl.c')[0] -) +HWY_SSE4_FLAGS = ['-DHWY_WANT_SSE4', '-DHWY_DISABLE_PCLMUL_AES'] +X86_64_V2_FLAGS = cpu_family == 'x86'? [] : ['-mcx16'] +X86_64_V2_NAMES = cpu_family == 'x86'? 
[] : ['CX16'] +X86_V2 = mod_features.new( + 'X86_V2', 1, args: ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', + '-mpopcnt', '-msahf'] + X86_64_V2_FLAGS + HWY_SSE4_FLAGS, + # Adds compiler definitions `NPY_HAVE_SSE*` + group: ['SSE', 'SSE2', 'SSE3', 'SSSE3', 'SSE41', 'SSE42', 'POPCNT', 'LAHF'] + X86_64_V2_NAMES, + detect: 'X86_V2', + test_code: files(current_dir + '/test_x86_v2.c')[0], +) +X86_V3 = mod_features.new( + 'X86_V3', 10, implies: X86_V2, + args: ['-mavx', '-mavx2', '-mfma', '-mbmi', '-mbmi2', '-mlzcnt', '-mf16c', '-mmovbe'], + group: ['AVX', 'AVX2', 'FMA3', 'BMI', 'BMI2', 'LZCNT', 'F16C', 'MOVBE'], + detect: 'X86_V3', + test_code: files(current_dir + '/test_x86_v3.c')[0], +) +X86_V4 = mod_features.new( + 'X86_V4', 20, implies: X86_V3, + args: ['-mavx512f', '-mavx512cd', '-mavx512vl', '-mavx512bw', '-mavx512dq'], + group: ['AVX512F', 'AVX512CD', 'AVX512VL', 'AVX512BW', 'AVX512DQ', 'AVX512_SKX', + 'AVX512F_REDUCE', 'AVX512BW_MASK', 'AVX512DQ_MASK'], + detect: 'X86_V4', + test_code: files(current_dir + '/test_x86_v4.c')[0], +) +if cpu_family == 'x86' + X86_V4.update(disable: 'not supported on x86-32') +endif AVX512_ICL = mod_features.new( - 'AVX512_ICL', 53, implies: [AVX512_CLX, AVX512_CNL], - args: ['-mavx512vbmi2', '-mavx512bitalg', '-mavx512vpopcntdq'], - group: ['AVX512VBMI2', 'AVX512BITALG', 'AVX512VPOPCNTDQ'], + 'AVX512_ICL', 30, implies: X86_V4, + args: ['-mavx512vbmi', '-mavx512vbmi2', '-mavx512vnni', '-mavx512bitalg', + '-mavx512vpopcntdq', '-mavx512ifma', '-mvaes', '-mgfni', '-mvpclmulqdq'], + group: ['AVX512VBMI', 'AVX512VBMI2', 'AVX512VNNI', 'AVX512BITALG', 'AVX512VPOPCNTDQ', + 'AVX512IFMA', 'VAES', 'GFNI', 'VPCLMULQDQ'], + detect: 'AVX512_ICL', test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_icl.c')[0] ) -# TODO add support for zen4 AVX512_SPR = mod_features.new( - 'AVX512_SPR', 55, implies: AVX512_ICL, + 'AVX512_SPR', 35, implies: AVX512_ICL, args: ['-mavx512fp16'], group: ['AVX512FP16'], + detect: 'AVX512_SPR', test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_spr.c')[0] ) # Specializations for non unix-like compilers # ------------------------------------------- -cpu_family = host_machine.cpu_family() -compiler_id = meson.get_compiler('c').get_id() +cc = meson.get_compiler('c') +compiler_id = cc.get_id() if compiler_id not in ['gcc', 'clang'] AVX512_SPR.update(disable: compiler_id + ' compiler does not support it') endif -# Common specializations between both Intel compilers (unix-like and msvc-like) -if compiler_id in ['intel', 'intel-cl'] - # POPCNT, and F16C don't own private FLAGS however the compiler still - # provides ISA capability for them. 
- POPCNT.update(args: '') - F16C.update(args: '') - # Intel compilers don't support the following features independently - FMA3.update(implies: [F16C, AVX2]) - AVX2.update(implies: [F16C, FMA3]) - AVX512F.update(implies: [AVX2, AVX512CD]) - AVX512CD.update(implies: [AVX512F]) - XOP.update(disable: 'Intel Compiler does not support it') - FMA4.update(disable: 'Intel Compiler does not support it') -endif - if compiler_id == 'intel-cl' - foreach fet : [SSE, SSE2, SSE3, SSSE3, AVX] - fet.update(args: {'val': '/arch:' + fet.get('name'), 'match': '/arch:.*'}) - endforeach - SSE41.update(args: {'val': '/arch:SSE4.1', 'match': '/arch:.*'}) - SSE42.update(args: {'val': '/arch:SSE4.2', 'match': '/arch:.*'}) - FMA3.update(args: {'val': '/arch:CORE-AVX2', 'match': '/arch:.*'}) - AVX2.update(args: {'val': '/arch:CORE-AVX2', 'match': '/arch:.*'}) - AVX512F.update(args: {'val': '/Qx:COMMON-AVX512', 'match': '/arch:.*'}) - AVX512CD.update(args: {'val': '/Qx:COMMON-AVX512', 'match': '/arch:.*'}) - AVX512_KNL.update(args: {'val': '/Qx:KNL', 'match': '/[arch|Qx]:.*'}) - AVX512_KNM.update(args: {'val': '/Qx:KNM', 'match': '/[arch|Qx]:.*'}) - AVX512_SKX.update(args: {'val': '/Qx:SKYLAKE-AVX512', 'match': '/[arch|Qx]:.*'}) - AVX512_CLX.update(args: {'val': '/Qx:CASCADELAKE', 'match': '/[arch|Qx]:.*'}) - AVX512_CNL.update(args: {'val': '/Qx:CANNONLAKE', 'match': '/[arch|Qx]:.*'}) + X86_V2.update(args: [{'val': '/arch:SSE4.2', 'match': '/arch:.*'}] + HWY_SSE4_FLAGS) + X86_V3.update(args: {'val': '/arch:CORE-AVX2', 'match': '/arch:.*'}) + X86_V4.update(args: {'val': '/Qx:SKYLAKE-AVX512', 'match': '/[arch|Qx]:.*'}) AVX512_ICL.update(args: {'val': '/Qx:ICELAKE-CLIENT', 'match': '/[arch|Qx]:.*'}) endif if compiler_id == 'intel' - clear_m = '^(-mcpu=|-march=)' clear_any = '^(-mcpu=|-march=|-x[A-Z0-9\-])' - FMA3.update(args: {'val': '-xCORE-AVX2', 'match': clear_m}) - AVX2.update(args: {'val': '-xCORE-AVX2', 'match': clear_m}) - AVX512F.update(args: {'val': '-xCOMMON-AVX512', 'match': clear_m}) - AVX512CD.update(args: {'val': '-xCOMMON-AVX512', 'match': clear_m}) - AVX512_KNL.update(args: {'val': '-xKNL', 'match': clear_any}) - AVX512_KNM.update(args: {'val': '-xKNM', 'match': clear_any}) - AVX512_SKX.update(args: {'val': '-xSKYLAKE-AVX512', 'match': clear_any}) - AVX512_CLX.update(args: {'val': '-xCASCADELAKE', 'match': clear_any}) - AVX512_CNL.update(args: {'val': '-xCANNONLAKE', 'match': clear_any}) + X86_V2.update(args: [{'val': '-xSSE4.2', 'match': clear_any}] + HWY_SSE4_FLAGS) + X86_V3.update(args: {'val': '-xCORE-AVX2', 'match': clear_any}) + X86_V4.update(args: {'val': '-xSKYLAKE-AVX512', 'match': clear_any}) AVX512_ICL.update(args: {'val': '-xICELAKE-CLIENT', 'match': clear_any}) endif if compiler_id == 'msvc' - # MSVC compiler doesn't support the following features - foreach fet : [AVX512_KNL, AVX512_KNM] - fet.update(disable: compiler_id + ' compiler does not support it') - endforeach - # The following features don't own private FLAGS, however the compiler still - # provides ISA capability for them. - foreach fet : [ - SSE3, SSSE3, SSE41, POPCNT, SSE42, AVX, F16C, XOP, FMA4, - AVX512F, AVX512CD, AVX512_CLX, AVX512_CNL, - AVX512_ICL - ] - fet.update(args: '') - endforeach - # MSVC compiler doesn't support the following features independently - FMA3.update(implies: [F16C, AVX2]) - AVX2.update(implies: [F16C, FMA3]) - AVX512F.update(implies: [AVX2, AVX512CD, AVX512_SKX]) - AVX512CD.update(implies: [AVX512F, AVX512_SKX]) + MSVC_SSE4 = cpu_family == 'x86' ? 
['/arch:SSE2'] : [] + MSVC_SSE4 = cc.version().version_compare('>=19.40') ? ['/arch:SSE4.2'] : MSVC_SSE4 + X86_V2.update(args: MSVC_SSE4 + HWY_SSE4_FLAGS + ['/fp:contract']) clear_arch = '/arch:.*' - # only available on 32-bit. Its enabled by default on 64-bit mode - foreach fet : [SSE, SSE2] - if cpu_family == 'x86' - fet.update(args: {'val': '/arch:' + fet.get('name'), 'match': clear_arch}) - else - fet.update(args: '') - endif - endforeach - FMA3.update(args: {'val': '/arch:AVX2', 'match': clear_arch}) - # Add floating-point contract flag to fixes transcendental function accuracy on Windows Server 2022 - FMA3.update(args: {'val': '/fp:contract'}) - AVX2.update(args: {'val': '/arch:AVX2', 'match': clear_arch}) - AVX512_SKX.update(args: {'val': '/arch:AVX512', 'match': clear_arch}) + X86_V3.update(args: {'val': '/arch:AVX2', 'match': clear_arch}) + X86_V4.update(args: {'val': '/arch:AVX512', 'match': clear_arch}) + AVX512_ICL.update(args: '') endif +# legacy CPU features +X86_REDIRECT = { + 'SSE': 'X86_V2', 'SSE2': 'X86_V2', 'SSE3': 'X86_V2', 'SSSE3': 'X86_V2', + 'SSE41': 'X86_V2', 'SSE42': 'X86_V2', 'XOP': 'X86_V2', 'FMA4': 'X86_V2', + 'FMA3': 'X86_V3', 'AVX': 'X86_V3', 'F16C': 'X86_V3', + 'AVX512F': 'X86_V3', 'AVX512CD': 'X86_V3', + 'AVX512_KNL': 'X86_V3', 'AVX512_KNM': 'X86_V3', + 'AVX512_SKX': 'X86_V4', 'AVX512_CLX': 'X86_V4', 'AVX512_CNL': 'X86_V4', +} + X86_FEATURES = { - 'SSE': SSE, 'SSE2': SSE2, 'SSE3': SSE3, 'SSSE3': SSSE3, - 'SSE41': SSE41, 'POPCNT': POPCNT, 'SSE42': SSE42, 'AVX': AVX, - 'XOP': XOP, 'FMA4': FMA4, 'F16C': F16C, 'FMA3': FMA3, - 'AVX2': AVX2, 'AVX512F': AVX512F, 'AVX512CD': AVX512CD, - 'AVX512_KNL': AVX512_KNL, 'AVX512_KNM': AVX512_KNM, - 'AVX512_SKX': AVX512_SKX, 'AVX512_CLX': AVX512_CLX, - 'AVX512_CNL': AVX512_CNL, 'AVX512_ICL': AVX512_ICL, - 'AVX512_SPR': AVX512_SPR + 'X86_V2': X86_V2, 'X86_V3': X86_V3, 'X86_V4': X86_V4, + 'AVX512_ICL': AVX512_ICL, 'AVX512_SPR': AVX512_SPR } diff --git a/meson_cpu/x86/test_x86_v2.c b/meson_cpu/x86/test_x86_v2.c new file mode 100644 index 000000000000..f897957224d5 --- /dev/null +++ b/meson_cpu/x86/test_x86_v2.c @@ -0,0 +1,69 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. 
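+ * (Without these macro checks, each test would compile successfully under
+ * the Intel Compiler even when the matching ISA flag was never requested,
+ * and detection would then report every feature as supported.)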
+ */ + #if !defined(__SSE__) || !defined(__SSE2__) || !defined(__SSE3__) || \ + !defined(__SSSE3__) || !defined(__SSE4_1__) || !defined(__SSE4_2__) || !defined(__POPCNT__) + #error HOST/ARCH does not support x86_v2 + #endif +#endif + +#include // SSE +#include // SSE2 +#include // SSE3 +#include // SSSE3 +#include // SSE4.1 +#ifdef _MSC_VER + #include // SSE4.2 and POPCNT for MSVC +#else + #include // SSE4.2 + #include // POPCNT +#endif + +int main(int argc, char **argv) +{ + // to prevent optimization + int seed = (int)argv[argc-1][0]; + volatile int result = 0; + + // SSE test + __m128 a = _mm_set1_ps((float)seed); + __m128 b = _mm_set1_ps(2.0f); + __m128 c = _mm_add_ps(a, b); + result += (int)_mm_cvtss_f32(c); + + // SSE2 test + __m128i ai = _mm_set1_epi32(seed); + __m128i bi = _mm_set1_epi32(2); + __m128i ci = _mm_add_epi32(ai, bi); + result += _mm_cvtsi128_si32(ci); + + // SSE3 test + __m128 d = _mm_movehdup_ps(a); + result += (int)_mm_cvtss_f32(d); + + // SSSE3 test + __m128i di = _mm_abs_epi16(_mm_set1_epi16((short)seed)); + result += _mm_cvtsi128_si32(di); + + // SSE4.1 test + __m128i ei = _mm_max_epi32(ai, bi); + result += _mm_cvtsi128_si32(ei); + + // SSE4.2 test + __m128i str1 = _mm_set1_epi8((char)seed); + __m128i str2 = _mm_set1_epi8((char)(seed + 1)); + int res4_2 = _mm_cmpestra(str1, 4, str2, 4, 0); + result += res4_2; + + // POPCNT test + unsigned int test_val = (unsigned int)seed | 0x01234567; + int pcnt = _mm_popcnt_u32(test_val); + result += pcnt; + + return result; +} diff --git a/meson_cpu/x86/test_x86_v3.c b/meson_cpu/x86/test_x86_v3.c new file mode 100644 index 000000000000..0bc496a93ad0 --- /dev/null +++ b/meson_cpu/x86/test_x86_v3.c @@ -0,0 +1,66 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. 
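+ * With GCC and Clang the macros checked below are only defined when the
+ * matching flags are actually passed (e.g. `-mavx2` defines __AVX2__ and
+ * `-mfma` defines __FMA__), so this extra guard is not needed for them.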
+ */ + #if !defined(__AVX__) || !defined(__AVX2__) || !defined(__FMA__) || \ + !defined(__BMI__) || !defined(__BMI2__) || !defined(__LZCNT__) || !defined(__F16C__) + #error HOST/ARCH does not support x86_v3 + #endif +#endif + +#include + +int main(int argc, char **argv) +{ + // to prevent optimization + int seed = (int)argv[argc-1][0]; + volatile int result = 0; + + // AVX test + __m256 avx_a = _mm256_set1_ps((float)seed); + __m256 avx_b = _mm256_set1_ps(2.0f); + __m256 avx_c = _mm256_add_ps(avx_a, avx_b); + float avx_result = _mm256_cvtss_f32(avx_c); + result += (int)avx_result; + + // AVX2 test + __m256i avx2_a = _mm256_set1_epi32(seed); + __m256i avx2_b = _mm256_set1_epi32(2); + __m256i avx2_c = _mm256_add_epi32(avx2_a, avx2_b); + result += _mm256_extract_epi32(avx2_c, 0); + + // FMA test + __m256 fma_a = _mm256_set1_ps((float)seed); + __m256 fma_b = _mm256_set1_ps(2.0f); + __m256 fma_c = _mm256_set1_ps(3.0f); + __m256 fma_result = _mm256_fmadd_ps(fma_a, fma_b, fma_c); + result += (int)_mm256_cvtss_f32(fma_result); + + // BMI1 tests + unsigned int bmi1_src = (unsigned int)seed; + unsigned int tzcnt_result = _tzcnt_u32(bmi1_src); + result += tzcnt_result; + + // BMI2 tests + unsigned int bzhi_result = _bzhi_u32(bmi1_src, 17); + result += (int)bzhi_result; + + unsigned int pdep_result = _pdep_u32(bmi1_src, 0x10101010); + result += pdep_result; + + // LZCNT test + unsigned int lzcnt_result = _lzcnt_u32(bmi1_src); + result += lzcnt_result; + + // F16C tests + __m128 f16c_src = _mm_set1_ps((float)seed); + __m128i f16c_half = _mm_cvtps_ph(f16c_src, 0); + __m128 f16c_restored = _mm_cvtph_ps(f16c_half); + result += (int)_mm_cvtss_f32(f16c_restored); + + return result; +} diff --git a/meson_cpu/x86/test_x86_v4.c b/meson_cpu/x86/test_x86_v4.c new file mode 100644 index 000000000000..d49c3a78e3b3 --- /dev/null +++ b/meson_cpu/x86/test_x86_v4.c @@ -0,0 +1,88 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. 
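+ * Note that x86-64-v4 only covers the F/CD/VL/BW/DQ subsets of AVX-512;
+ * later extensions such as AVX512VNNI or AVX512FP16 are probed by the
+ * separate AVX512_ICL/AVX512_SPR feature tests, not here.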
+ */ + #if !defined(__AVX512F__) || !defined(__AVX512CD__) || !defined(__AVX512VL__) || \ + !defined(__AVX512BW__) || !defined(__AVX512DQ__) + #error HOST/ARCH does not support x86_v4 + #endif +#endif + +#include + +int main(int argc, char **argv) +{ + // to prevent optimization + int seed = (int)argv[argc-1][0]; + volatile int result = 0; + + // AVX512F tests (Foundation) + __m512 avx512f_a = _mm512_set1_ps((float)seed); + __m512 avx512f_b = _mm512_set1_ps(2.0f); + __m512 avx512f_c = _mm512_add_ps(avx512f_a, avx512f_b); + float avx512f_result = _mm512_cvtss_f32(avx512f_c); + result += (int)avx512f_result; + + // Test AVX512F mask operations + __mmask16 k1 = _mm512_cmpeq_ps_mask(avx512f_a, avx512f_b); + __m512 masked_result = _mm512_mask_add_ps(avx512f_a, k1, avx512f_b, avx512f_c); + result += _mm512_mask2int(k1); + + // AVX512CD tests (Conflict Detection) + __m512i avx512cd_a = _mm512_set1_epi32(seed); + __m512i avx512cd_b = _mm512_conflict_epi32(avx512cd_a); + result += _mm_cvtsi128_si32(_mm512_extracti32x4_epi32(avx512cd_b, 0)); + + __m512i avx512cd_lzcnt = _mm512_lzcnt_epi32(avx512cd_a); + result += _mm_cvtsi128_si32(_mm512_extracti32x4_epi32(avx512cd_lzcnt, 0)); + + // AVX512VL tests (Vector Length Extensions - 128/256-bit vectors with AVX512 features) + __m256 avx512vl_a = _mm256_set1_ps((float)seed); + __m256 avx512vl_b = _mm256_set1_ps(2.0f); + __mmask8 k2 = _mm256_cmp_ps_mask(avx512vl_a, avx512vl_b, _CMP_EQ_OQ); + __m256 avx512vl_c = _mm256_mask_add_ps(avx512vl_a, k2, avx512vl_a, avx512vl_b); + result += (int)_mm256_cvtss_f32(avx512vl_c); + + __m128 avx512vl_sm_a = _mm_set1_ps((float)seed); + __m128 avx512vl_sm_b = _mm_set1_ps(2.0f); + __mmask8 k3 = _mm_cmp_ps_mask(avx512vl_sm_a, avx512vl_sm_b, _CMP_EQ_OQ); + __m128 avx512vl_sm_c = _mm_mask_add_ps(avx512vl_sm_a, k3, avx512vl_sm_a, avx512vl_sm_b); + result += (int)_mm_cvtss_f32(avx512vl_sm_c); + + // AVX512BW tests (Byte and Word) + __m512i avx512bw_a = _mm512_set1_epi16((short)seed); + __m512i avx512bw_b = _mm512_set1_epi16(2); + __mmask32 k4 = _mm512_cmpeq_epi16_mask(avx512bw_a, avx512bw_b); + __m512i avx512bw_c = _mm512_mask_add_epi16(avx512bw_a, k4, avx512bw_a, avx512bw_b); + result += _mm_cvtsi128_si32(_mm512_extracti32x4_epi32(avx512bw_c, 0)); + + // Test byte operations + __m512i avx512bw_bytes_a = _mm512_set1_epi8((char)seed); + __m512i avx512bw_bytes_b = _mm512_set1_epi8(2); + __mmask64 k5 = _mm512_cmpeq_epi8_mask(avx512bw_bytes_a, avx512bw_bytes_b); + result += (k5 & 1); + + // AVX512DQ tests (Doubleword and Quadword) + __m512d avx512dq_a = _mm512_set1_pd((double)seed); + __m512d avx512dq_b = _mm512_set1_pd(2.0); + __mmask8 k6 = _mm512_cmpeq_pd_mask(avx512dq_a, avx512dq_b); + __m512d avx512dq_c = _mm512_mask_add_pd(avx512dq_a, k6, avx512dq_a, avx512dq_b); + double avx512dq_result = _mm512_cvtsd_f64(avx512dq_c); + result += (int)avx512dq_result; + + // Test integer to/from floating point conversion + __m512i avx512dq_back = _mm512_cvtps_epi32(masked_result); + result += _mm_cvtsi128_si32(_mm512_extracti32x4_epi32(avx512dq_back, 0)); + + // Test 64-bit integer operations + __m512i avx512dq_i64_a = _mm512_set1_epi64(seed); + __m512i avx512dq_i64_b = _mm512_set1_epi64(2); + __m512i avx512dq_i64_c = _mm512_add_epi64(avx512dq_i64_a, avx512dq_i64_b); + result += _mm_cvtsi128_si32(_mm512_extracti32x4_epi32(avx512dq_i64_c, 0)); + + return result; +} diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 61a9d53a42d0..2d239d0f5852 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -89,7 +89,7 @@ 
cpu_family = host_machine.cpu_family() use_svml = ( host_machine.system() == 'linux' and cpu_family == 'x86_64' and - ('AVX512_SKX' in CPU_DISPATCH_NAMES or 'AVX512_SKX' in CPU_BASELINE_NAMES) and + ('X86_V4' in CPU_DISPATCH_NAMES or 'X86_V4' in CPU_BASELINE_NAMES) and not get_option('disable-svml') ) if use_svml @@ -774,7 +774,7 @@ _umath_tests_mtargets = mod_features.multi_targets( '_umath_tests.dispatch.h', 'src/umath/_umath_tests.dispatch.c', dispatch: [ - AVX2, SSE41, SSE2, + X86_V3, X86_V2, ASIMDHP, ASIMD, NEON, VSX3, VSX2, VSX, VXE, VX, RVV, @@ -819,7 +819,7 @@ foreach gen_mtargets : [ 'argfunc.dispatch.h', src_file.process('src/multiarray/argfunc.dispatch.c.src'), [ - AVX512_SKX, AVX2, XOP, SSE42, SSE2, + X86_V4, X86_V3, X86_V2, VSX2, ASIMD, NEON, VXE, VX @@ -856,12 +856,12 @@ foreach gen_mtargets : [ [ 'x86_simd_argsort.dispatch.h', 'src/npysort/x86_simd_argsort.dispatch.cpp', - use_intel_sort ? [AVX512_SKX, AVX2] : [] + use_intel_sort ? [X86_V4, X86_V3] : [] ], [ 'x86_simd_qsort.dispatch.h', 'src/npysort/x86_simd_qsort.dispatch.cpp', - use_intel_sort ? [AVX512_SKX, AVX2] : [] + use_intel_sort ? [X86_V4, X86_V3] : [] ], [ 'x86_simd_qsort_16bit.dispatch.h', @@ -923,7 +923,7 @@ foreach gen_mtargets : [ 'loops_arithm_fp.dispatch.h', src_file.process('src/umath/loops_arithm_fp.dispatch.c.src'), [ - [AVX2, FMA3], SSE2, + X86_V3, X86_V2, ASIMD, NEON, VSX3, VSX2, VXE, VX, @@ -934,7 +934,7 @@ foreach gen_mtargets : [ 'loops_arithmetic.dispatch.h', src_file.process('src/umath/loops_arithmetic.dispatch.c.src'), [ - AVX512_SKX, AVX512F, AVX2, SSE41, SSE2, + X86_V4, X86_V3, X86_V2, NEON, VSX4, VSX2, VX, @@ -945,7 +945,7 @@ foreach gen_mtargets : [ 'loops_comparison.dispatch.h', src_file.process('src/umath/loops_comparison.dispatch.c.src'), [ - AVX512_SKX, AVX512F, AVX2, SSE42, SSE2, + X86_V4, X86_V3, X86_V2, VSX3, VSX2, NEON, VXE, VX, @@ -956,14 +956,14 @@ foreach gen_mtargets : [ 'loops_exponent_log.dispatch.h', src_file.process('src/umath/loops_exponent_log.dispatch.c.src'), [ - AVX512_SKX, AVX512F, [AVX2, FMA3] + X86_V4, X86_V3, ] ], [ 'loops_hyperbolic.dispatch.h', src_file.process('src/umath/loops_hyperbolic.dispatch.cpp.src'), [ - AVX512_SKX, [AVX2, FMA3], + X86_V4, X86_V3, VSX4, VSX2, NEON_VFPV4, VXE, @@ -975,7 +975,7 @@ foreach gen_mtargets : [ 'src/umath/loops_logical.dispatch.cpp', [ ASIMD, NEON, - AVX512_SKX, AVX2, SSE2, + X86_V4, X86_V3, X86_V2, VSX2, VX, LSX, @@ -987,7 +987,7 @@ foreach gen_mtargets : [ src_file.process('src/umath/loops_minmax.dispatch.c.src'), [ ASIMD, NEON, - AVX512_SKX, AVX2, SSE2, + X86_V4, X86_V3, X86_V2, VSX2, VXE, VX, LSX, @@ -1004,7 +1004,7 @@ foreach gen_mtargets : [ 'loops_trigonometric.dispatch.h', 'src/umath/loops_trigonometric.dispatch.cpp', [ - AVX512_SKX, [AVX2, FMA3], + X86_V4, X86_V3, VSX4, VSX3, VSX2, NEON_VFPV4, VXE2, VXE, @@ -1014,14 +1014,14 @@ foreach gen_mtargets : [ [ 'loops_umath_fp.dispatch.h', src_file.process('src/umath/loops_umath_fp.dispatch.c.src'), - [AVX512_SKX] + [X86_V4] ], [ 'loops_unary.dispatch.h', src_file.process('src/umath/loops_unary.dispatch.c.src'), [ ASIMD, NEON, - AVX512_SKX, AVX2, SSE2, + X86_V4, X86_V3, X86_V2, VSX2, VXE, VX, LSX, @@ -1031,7 +1031,7 @@ foreach gen_mtargets : [ 'loops_unary_fp.dispatch.h', src_file.process('src/umath/loops_unary_fp.dispatch.c.src'), [ - SSE41, SSE2, + X86_V2, VSX2, ASIMD, NEON, VXE, VX, @@ -1042,7 +1042,7 @@ foreach gen_mtargets : [ 'loops_unary_fp_le.dispatch.h', src_file.process('src/umath/loops_unary_fp_le.dispatch.c.src'), [ - SSE41, SSE2, + X86_V2, VSX2, ASIMD, NEON, LSX, @@ 
-1052,7 +1052,7 @@ foreach gen_mtargets : [ 'loops_unary_complex.dispatch.h', src_file.process('src/umath/loops_unary_complex.dispatch.c.src'), [ - AVX512F, [AVX2, FMA3], SSE2, + X86_V3, X86_V2, ASIMD, NEON, VSX3, VSX2, VXE, VX, @@ -1063,7 +1063,7 @@ foreach gen_mtargets : [ 'loops_autovec.dispatch.h', src_file.process('src/umath/loops_autovec.dispatch.c.src'), [ - AVX2, SSE2, + X86_V3, X86_V2, NEON, VSX2, VX, @@ -1074,7 +1074,7 @@ foreach gen_mtargets : [ [ 'loops_half.dispatch.h', src_file.process('src/umath/loops_half.dispatch.c.src'), - [AVX512_SPR, AVX512_SKX] + [AVX512_SPR, X86_V4] ], ] mtargets = mod_features.multi_targets( diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c index e9239a257181..5c53d85ad2ea 100644 --- a/numpy/_core/src/common/npy_cpu_features.c +++ b/numpy/_core/src/common/npy_cpu_features.c @@ -80,12 +80,23 @@ static struct { {NPY_CPU_FEATURE_SSE41, "SSE41"}, {NPY_CPU_FEATURE_POPCNT, "POPCNT"}, {NPY_CPU_FEATURE_SSE42, "SSE42"}, + {NPY_CPU_FEATURE_X86_V2, "X86_V2"}, {NPY_CPU_FEATURE_AVX, "AVX"}, {NPY_CPU_FEATURE_F16C, "F16C"}, {NPY_CPU_FEATURE_XOP, "XOP"}, {NPY_CPU_FEATURE_FMA4, "FMA4"}, {NPY_CPU_FEATURE_FMA3, "FMA3"}, {NPY_CPU_FEATURE_AVX2, "AVX2"}, + {NPY_CPU_FEATURE_LAHF, "LAHF"}, + {NPY_CPU_FEATURE_CX16, "CX16"}, + {NPY_CPU_FEATURE_MOVBE, "MOVBE"}, + {NPY_CPU_FEATURE_BMI, "BMI"}, + {NPY_CPU_FEATURE_BMI2, "BMI2"}, + {NPY_CPU_FEATURE_LZCNT, "LZCNT"}, + {NPY_CPU_FEATURE_GFNI, "GFNI"}, + {NPY_CPU_FEATURE_VPCLMULQDQ, "VPCLMULQDQ"}, + {NPY_CPU_FEATURE_VAES, "VAES"}, + {NPY_CPU_FEATURE_X86_V3, "X86_V3"}, {NPY_CPU_FEATURE_AVX512F, "AVX512F"}, {NPY_CPU_FEATURE_AVX512CD, "AVX512CD"}, {NPY_CPU_FEATURE_AVX512ER, "AVX512ER"}, @@ -105,6 +116,7 @@ static struct { {NPY_CPU_FEATURE_AVX512_KNL, "AVX512_KNL"}, {NPY_CPU_FEATURE_AVX512_KNM, "AVX512_KNM"}, {NPY_CPU_FEATURE_AVX512_SKX, "AVX512_SKX"}, + {NPY_CPU_FEATURE_X86_V4, "X86_V4"}, {NPY_CPU_FEATURE_AVX512_CLX, "AVX512_CLX"}, {NPY_CPU_FEATURE_AVX512_CNL, "AVX512_CNL"}, {NPY_CPU_FEATURE_AVX512_ICL, "AVX512_ICL"}, @@ -441,7 +453,13 @@ npy__cpu_init_features(void) #ifdef NPY_CPU_AMD64 npy__cpu_have[NPY_CPU_FEATURE_SSE3] = 1; #endif - return; + // For unsupported compilers, we default to NPY_CPU_X86_V2 availability + // as this is the minimum baseline required to bypass initial capability checks. + // However, we deliberately don't set any additional CPU feature flags, + // allowing us to detect this fallback behavior later via the Python + // __cpu_features__ dictionary. 
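+    // (In this series the build-time x86 minimum is X86_V2 as well, see
+    // `min_features` in meson_cpu/meson.build, so reporting it here cannot
+    // overstate what the binary actually requires.)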
+    npy__cpu_have[NPY_CPU_FEATURE_X86_V2] = 1;
+    return;
 }
 
     npy__cpu_cpuid(reg, 1);
@@ -453,36 +471,42 @@
     npy__cpu_have[NPY_CPU_FEATURE_SSE41] = (reg[2] & (1 << 19)) != 0;
     npy__cpu_have[NPY_CPU_FEATURE_POPCNT] = (reg[2] & (1 << 23)) != 0;
     npy__cpu_have[NPY_CPU_FEATURE_SSE42] = (reg[2] & (1 << 20)) != 0;
+    npy__cpu_have[NPY_CPU_FEATURE_CX16] = (reg[2] & (1 << 13)) != 0;
     npy__cpu_have[NPY_CPU_FEATURE_F16C] = (reg[2] & (1 << 29)) != 0;
+    npy__cpu_have[NPY_CPU_FEATURE_MOVBE] = (reg[2] & (1 << 22)) != 0;
 
-    // check OSXSAVE
-    if ((reg[2] & (1 << 27)) == 0)
-        return;
-    // check AVX OS support
-    int xcr = npy__cpu_getxcr0();
-    if ((xcr & 6) != 6)
-        return;
-    npy__cpu_have[NPY_CPU_FEATURE_AVX] = (reg[2] & (1 << 28)) != 0;
-    if (!npy__cpu_have[NPY_CPU_FEATURE_AVX])
-        return;
+    int osxsave = (reg[2] & (1 << 27)) != 0;
+    int xcr = 0;
+    if (osxsave) {
+        xcr = npy__cpu_getxcr0();
+    }
+    int avx_os = (xcr & 6) == 6;
+    npy__cpu_have[NPY_CPU_FEATURE_AVX] = (reg[2] & (1 << 28)) != 0 && avx_os;
     npy__cpu_have[NPY_CPU_FEATURE_FMA3] = (reg[2] & (1 << 12)) != 0;
 
     // second call to the cpuid to get extended AMD feature bits
     npy__cpu_cpuid(reg, 0x80000001);
-    npy__cpu_have[NPY_CPU_FEATURE_XOP] = (reg[2] & (1 << 11)) != 0;
-    npy__cpu_have[NPY_CPU_FEATURE_FMA4] = (reg[2] & (1 << 16)) != 0;
+#ifdef NPY_CPU_AMD64
+    // long mode only
+    npy__cpu_have[NPY_CPU_FEATURE_LAHF] = (reg[2] & (1 << 0)) != 0;
+#else
+    // always available on 32-bit
+    npy__cpu_have[NPY_CPU_FEATURE_LAHF] = 1;
+#endif
+    npy__cpu_have[NPY_CPU_FEATURE_LZCNT] = (reg[2] & (1 << 5)) != 0;
+    npy__cpu_have[NPY_CPU_FEATURE_POPCNT] |= npy__cpu_have[NPY_CPU_FEATURE_LZCNT];
+    npy__cpu_have[NPY_CPU_FEATURE_XOP] = (reg[2] & (1 << 11)) != 0 && npy__cpu_have[NPY_CPU_FEATURE_AVX];
+    npy__cpu_have[NPY_CPU_FEATURE_FMA4] = (reg[2] & (1 << 16)) != 0 && npy__cpu_have[NPY_CPU_FEATURE_AVX];
 
     // third call to the cpuid to get extended AVX2 & AVX512 feature bits
     npy__cpu_cpuid(reg, 7);
-    npy__cpu_have[NPY_CPU_FEATURE_AVX2] = (reg[1] & (1 << 5)) != 0;
-    npy__cpu_have[NPY_CPU_FEATURE_AVX2] = npy__cpu_have[NPY_CPU_FEATURE_AVX2] &&
-                                          npy__cpu_have[NPY_CPU_FEATURE_FMA3];
-    if (!npy__cpu_have[NPY_CPU_FEATURE_AVX2])
-        return;
-    // detect AVX2 & FMA3
-    npy__cpu_have[NPY_CPU_FEATURE_FMA] = npy__cpu_have[NPY_CPU_FEATURE_FMA3];
+    npy__cpu_have[NPY_CPU_FEATURE_AVX2] = (reg[1] & (1 << 5)) != 0 && npy__cpu_have[NPY_CPU_FEATURE_AVX];
+    npy__cpu_have[NPY_CPU_FEATURE_BMI] = (reg[1] & (1 << 3)) != 0;
+    npy__cpu_have[NPY_CPU_FEATURE_BMI2] = (reg[1] & (1 << 8)) != 0 && npy__cpu_have[NPY_CPU_FEATURE_BMI];
+    npy__cpu_have[NPY_CPU_FEATURE_GFNI] = (reg[2] & (1 << 8)) != 0;
+    npy__cpu_have[NPY_CPU_FEATURE_VAES] = (reg[2] & (1 << 9)) != 0;
+    npy__cpu_have[NPY_CPU_FEATURE_VPCLMULQDQ] = (reg[2] & (1 << 10)) != 0;
 
-    // check AVX512 OS support
     int avx512_os = (xcr & 0xe6) == 0xe6;
 #if defined(__APPLE__) && defined(__x86_64__)
     /**
@@ -494,7 +518,7 @@ npy__cpu_init_features(void)
      * - https://github.com/golang/go/issues/43089
      * - https://github.com/numpy/numpy/issues/19319
      */
-    if (!avx512_os) {
+    if (!avx512_os && avx_os) {
         npy_uintp commpage64_addr = 0x00007fffffe00000ULL;
         npy_uint16 commpage64_ver = *((npy_uint16*)(commpage64_addr + 0x01E));
         // cpu_capabilities64 undefined in versions < 13
@@ -504,58 +528,97 @@ npy__cpu_init_features(void)
         }
     }
 #endif
-    if (!avx512_os) {
-        return;
-    }
-    npy__cpu_have[NPY_CPU_FEATURE_AVX512F] = (reg[1] & (1 << 16)) != 0;
-    npy__cpu_have[NPY_CPU_FEATURE_AVX512CD] = (reg[1] & (1 << 28)) != 0;
-    if (npy__cpu_have[NPY_CPU_FEATURE_AVX512F] && 
npy__cpu_have[NPY_CPU_FEATURE_AVX512CD]) { + npy__cpu_have[NPY_CPU_FEATURE_AVX512F] = (reg[1] & (1 << 16)) != 0 && avx512_os; + if (npy__cpu_have[NPY_CPU_FEATURE_AVX512F]) { + npy__cpu_have[NPY_CPU_FEATURE_AVX512CD] = (reg[1] & (1 << 28)) != 0; // Knights Landing npy__cpu_have[NPY_CPU_FEATURE_AVX512PF] = (reg[1] & (1 << 26)) != 0; npy__cpu_have[NPY_CPU_FEATURE_AVX512ER] = (reg[1] & (1 << 27)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512_KNL] = npy__cpu_have[NPY_CPU_FEATURE_AVX512ER] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512PF]; // Knights Mill npy__cpu_have[NPY_CPU_FEATURE_AVX512VPOPCNTDQ] = (reg[2] & (1 << 14)) != 0; npy__cpu_have[NPY_CPU_FEATURE_AVX5124VNNIW] = (reg[3] & (1 << 2)) != 0; npy__cpu_have[NPY_CPU_FEATURE_AVX5124FMAPS] = (reg[3] & (1 << 3)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512_KNM] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_KNL] && - npy__cpu_have[NPY_CPU_FEATURE_AVX5124FMAPS] && - npy__cpu_have[NPY_CPU_FEATURE_AVX5124VNNIW] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512VPOPCNTDQ]; - // Skylake-X npy__cpu_have[NPY_CPU_FEATURE_AVX512DQ] = (reg[1] & (1 << 17)) != 0; npy__cpu_have[NPY_CPU_FEATURE_AVX512BW] = (reg[1] & (1 << 30)) != 0; npy__cpu_have[NPY_CPU_FEATURE_AVX512VL] = (reg[1] & (1 << 31)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512_SKX] = npy__cpu_have[NPY_CPU_FEATURE_AVX512BW] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512DQ] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512VL]; // Cascade Lake npy__cpu_have[NPY_CPU_FEATURE_AVX512VNNI] = (reg[2] & (1 << 11)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512_CLX] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_SKX] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512VNNI]; - // Cannon Lake npy__cpu_have[NPY_CPU_FEATURE_AVX512IFMA] = (reg[1] & (1 << 21)) != 0; npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI] = (reg[2] & (1 << 1)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512_CNL] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_SKX] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512IFMA] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI]; // Ice Lake npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI2] = (reg[2] & (1 << 6)) != 0; npy__cpu_have[NPY_CPU_FEATURE_AVX512BITALG] = (reg[2] & (1 << 12)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512_ICL] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_CLX] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512_CNL] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI2] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512BITALG] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512VPOPCNTDQ]; // Sapphire Rapids - npy__cpu_have[NPY_CPU_FEATURE_AVX512FP16] = (reg[3] & (1 << 23)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512_SPR] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_ICL] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512FP16]; - + npy__cpu_have[NPY_CPU_FEATURE_AVX512FP16] = (reg[3] & (1 << 23)) != 0; } + + // Groups + npy__cpu_have[NPY_CPU_FEATURE_X86_V2] = npy__cpu_have[NPY_CPU_FEATURE_SSE] && + npy__cpu_have[NPY_CPU_FEATURE_SSE2] && + npy__cpu_have[NPY_CPU_FEATURE_SSE3] && + npy__cpu_have[NPY_CPU_FEATURE_SSSE3] && + npy__cpu_have[NPY_CPU_FEATURE_SSE41] && + npy__cpu_have[NPY_CPU_FEATURE_SSE42] && + npy__cpu_have[NPY_CPU_FEATURE_POPCNT] && + #ifdef NPY_CPU_AMD64 + npy__cpu_have[NPY_CPU_FEATURE_CX16] && + #endif + npy__cpu_have[NPY_CPU_FEATURE_LAHF]; + + npy__cpu_have[NPY_CPU_FEATURE_X86_V3] = npy__cpu_have[NPY_CPU_FEATURE_X86_V2] && + npy__cpu_have[NPY_CPU_FEATURE_AVX] && + npy__cpu_have[NPY_CPU_FEATURE_AVX2] && + npy__cpu_have[NPY_CPU_FEATURE_F16C] && + npy__cpu_have[NPY_CPU_FEATURE_FMA3] && + npy__cpu_have[NPY_CPU_FEATURE_BMI] && + npy__cpu_have[NPY_CPU_FEATURE_BMI2] && + npy__cpu_have[NPY_CPU_FEATURE_LZCNT] && + 
npy__cpu_have[NPY_CPU_FEATURE_MOVBE];
+
+
+    npy__cpu_have[NPY_CPU_FEATURE_X86_V4] = npy__cpu_have[NPY_CPU_FEATURE_X86_V3] &&
+                                            npy__cpu_have[NPY_CPU_FEATURE_AVX512F] &&
+                                            npy__cpu_have[NPY_CPU_FEATURE_AVX512CD] &&
+                                            npy__cpu_have[NPY_CPU_FEATURE_AVX512BW] &&
+                                            npy__cpu_have[NPY_CPU_FEATURE_AVX512DQ] &&
+                                            npy__cpu_have[NPY_CPU_FEATURE_AVX512VL];
+
+
+    npy__cpu_have[NPY_CPU_FEATURE_AVX512_ICL] = npy__cpu_have[NPY_CPU_FEATURE_X86_V4] &&
+                                                npy__cpu_have[NPY_CPU_FEATURE_AVX512VNNI] &&
+                                                npy__cpu_have[NPY_CPU_FEATURE_AVX512IFMA] &&
+                                                npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI] &&
+                                                npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI2] &&
+                                                npy__cpu_have[NPY_CPU_FEATURE_AVX512BITALG] &&
+                                                npy__cpu_have[NPY_CPU_FEATURE_AVX512VPOPCNTDQ] &&
+                                                npy__cpu_have[NPY_CPU_FEATURE_GFNI] &&
+                                                npy__cpu_have[NPY_CPU_FEATURE_VAES] &&
+                                                npy__cpu_have[NPY_CPU_FEATURE_VPCLMULQDQ];
+
+    npy__cpu_have[NPY_CPU_FEATURE_AVX512_SPR] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_ICL] &&
+                                                npy__cpu_have[NPY_CPU_FEATURE_AVX512FP16];
+
+
+    // Legacy groups
+    npy__cpu_have[NPY_CPU_FEATURE_AVX512_KNL] = npy__cpu_have[NPY_CPU_FEATURE_AVX512F] &&
+                                                npy__cpu_have[NPY_CPU_FEATURE_AVX512CD] &&
+                                                npy__cpu_have[NPY_CPU_FEATURE_AVX512ER] &&
+                                                npy__cpu_have[NPY_CPU_FEATURE_AVX512PF];
+
+    npy__cpu_have[NPY_CPU_FEATURE_AVX512_KNM] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_KNL] &&
+                                                npy__cpu_have[NPY_CPU_FEATURE_AVX5124FMAPS] &&
+                                                npy__cpu_have[NPY_CPU_FEATURE_AVX5124VNNIW] &&
+                                                npy__cpu_have[NPY_CPU_FEATURE_AVX512VPOPCNTDQ];
+
+    npy__cpu_have[NPY_CPU_FEATURE_AVX512_CLX] = npy__cpu_have[NPY_CPU_FEATURE_X86_V4] &&
+                                                npy__cpu_have[NPY_CPU_FEATURE_AVX512VNNI];
+
+    npy__cpu_have[NPY_CPU_FEATURE_AVX512_CNL] = npy__cpu_have[NPY_CPU_FEATURE_X86_V4] &&
+                                                npy__cpu_have[NPY_CPU_FEATURE_AVX512IFMA] &&
+                                                npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI];
 }
 
 /***************** POWER ******************/
diff --git a/numpy/_core/src/common/npy_cpu_features.h b/numpy/_core/src/common/npy_cpu_features.h
index 7d6a406f8789..fa3c3c809015 100644
--- a/numpy/_core/src/common/npy_cpu_features.h
+++ b/numpy/_core/src/common/npy_cpu_features.h
@@ -26,8 +26,15 @@ enum npy_cpu_features
     NPY_CPU_FEATURE_FMA4 = 12,
     NPY_CPU_FEATURE_FMA3 = 13,
     NPY_CPU_FEATURE_AVX2 = 14,
-    NPY_CPU_FEATURE_FMA = 15, // AVX2 & FMA3, provides backward compatibility
-
+    NPY_CPU_FEATURE_LAHF = 15,
+    NPY_CPU_FEATURE_CX16 = 16,
+    NPY_CPU_FEATURE_MOVBE = 17,
+    NPY_CPU_FEATURE_BMI = 18,
+    NPY_CPU_FEATURE_BMI2 = 19,
+    NPY_CPU_FEATURE_LZCNT = 20,
+    NPY_CPU_FEATURE_GFNI = 21,
+    NPY_CPU_FEATURE_VAES = 22,
+    NPY_CPU_FEATURE_VPCLMULQDQ = 23,
     NPY_CPU_FEATURE_AVX512F = 30,
     NPY_CPU_FEATURE_AVX512CD = 31,
     NPY_CPU_FEATURE_AVX512ER = 32,
@@ -45,6 +52,7 @@ enum npy_cpu_features
     NPY_CPU_FEATURE_AVX512BITALG = 44,
     NPY_CPU_FEATURE_AVX512FP16 = 45,
 
+    // X86 CPU Groups
     // Knights Landing (F,CD,ER,PF)
     NPY_CPU_FEATURE_AVX512_KNL = 101,
@@ -56,10 +64,17 @@ enum npy_cpu_features
     NPY_CPU_FEATURE_AVX512_CLX = 104,
     // Cannon Lake (F,CD,BW,DQ,VL,IFMA,VBMI)
     NPY_CPU_FEATURE_AVX512_CNL = 105,
-    // Ice Lake (F,CD,BW,DQ,VL,IFMA,VBMI,VNNI,VBMI2,BITALG,VPOPCNTDQ)
+    // Ice Lake (F,CD,BW,DQ,VL,IFMA,VBMI,VNNI,VBMI2,BITALG,VPOPCNTDQ,GFNI,VPCLMULQDQ,VAES)
     NPY_CPU_FEATURE_AVX512_ICL = 106,
     // Sapphire Rapids (Ice Lake, AVX512FP16)
     NPY_CPU_FEATURE_AVX512_SPR = 107,
+    // x86-64-v2 microarchitectures (SSE[1-4.*], POPCNT, LAHF, CX16)
+    // On 32-bit, cx16 is not available so it is not included
+    NPY_CPU_FEATURE_X86_V2 = 108,
+    // x86-64-v3 microarchitectures (X86_V2, AVX, AVX2, FMA3, BMI, BMI2, LZCNT, F16C, MOVBE)
+    NPY_CPU_FEATURE_X86_V3 = 109,
+    // x86-64-v4
microarchitectures (X86_V3, AVX512F, AVX512CD, AVX512VL, AVX512BW, AVX512DQ) + NPY_CPU_FEATURE_X86_V4 = NPY_CPU_FEATURE_AVX512_SKX, // IBM/POWER VSX // POWER7 diff --git a/numpy/_core/tests/test_cpu_dispatcher.py b/numpy/_core/tests/test_cpu_dispatcher.py index f665fa6464ef..01fd53a3dbd0 100644 --- a/numpy/_core/tests/test_cpu_dispatcher.py +++ b/numpy/_core/tests/test_cpu_dispatcher.py @@ -12,7 +12,7 @@ def test_dispatcher(): Testing the utilities of the CPU dispatcher """ targets = ( - "SSE2", "SSE41", "AVX2", + "X86_V2", "X86_V3", "VSX", "VSX2", "VSX3", "NEON", "ASIMD", "ASIMDHP", "VX", "VXE", "LSX", "RVV" diff --git a/numpy/_core/tests/test_cpu_features.py b/numpy/_core/tests/test_cpu_features.py index ecc806e9c0e5..60e60e3f7c9d 100644 --- a/numpy/_core/tests/test_cpu_features.py +++ b/numpy/_core/tests/test_cpu_features.py @@ -338,30 +338,31 @@ def test_impossible_feature_enable(self): not (is_linux or is_cygwin) or not is_x86, reason="Only for Linux and x86" ) class Test_X86_Features(AbstractTest): - features = [ - "MMX", "SSE", "SSE2", "SSE3", "SSSE3", "SSE41", "POPCNT", "SSE42", - "AVX", "F16C", "XOP", "FMA4", "FMA3", "AVX2", "AVX512F", "AVX512CD", - "AVX512ER", "AVX512PF", "AVX5124FMAPS", "AVX5124VNNIW", "AVX512VPOPCNTDQ", - "AVX512VL", "AVX512BW", "AVX512DQ", "AVX512VNNI", "AVX512IFMA", - "AVX512VBMI", "AVX512VBMI2", "AVX512BITALG", "AVX512FP16", - ] + features = [] + features_groups = { - "AVX512_KNL": ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF"], - "AVX512_KNM": ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF", "AVX5124FMAPS", - "AVX5124VNNIW", "AVX512VPOPCNTDQ"], - "AVX512_SKX": ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL"], - "AVX512_CLX": ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512VNNI"], - "AVX512_CNL": ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA", - "AVX512VBMI"], - "AVX512_ICL": ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA", - "AVX512VBMI", "AVX512VNNI", "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ"], - "AVX512_SPR": ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", - "AVX512VL", "AVX512IFMA", "AVX512VBMI", "AVX512VNNI", - "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ", - "AVX512FP16"], + "X86_V2": [ + "SSE", "SSE2", "SSE3", "SSSE3", "SSE41", "SSE42", + "POPCNT", "LAHF", "CX16" + ], } + features_groups["X86_V3"] = features_groups["X86_V2"] + [ + "AVX", "AVX2", "FMA3", "BMI", "BMI2", + "LZCNT", "F16C", "MOVBE" + ] + features_groups["X86_V4"] = features_groups["X86_V3"] + [ + "AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL" + ] + features_groups["AVX512_ICL"] = features_groups["X86_V4"] + [ + "AVX512IFMA", "AVX512VBMI", "AVX512VNNI", + "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ", + "VAES", "VPCLMULQDQ", "GFNI" + ] + features_groups["AVX512_SPR"] = features_groups["AVX512_ICL"] + ["AVX512FP16"] + features_map = { "SSE3": "PNI", "SSE41": "SSE4_1", "SSE42": "SSE4_2", "FMA3": "FMA", + "BMI": "BMI1", "LZCNT": "ABM", "LAHF": "LAHF_LM", "AVX512VNNI": "AVX512_VNNI", "AVX512BITALG": "AVX512_BITALG", "AVX512VBMI2": "AVX512_VBMI2", "AVX5124FMAPS": "AVX512_4FMAPS", "AVX5124VNNIW": "AVX512_4VNNIW", "AVX512VPOPCNTDQ": "AVX512_VPOPCNTDQ", From 077c00962aeffe83e023ea826f7f479a0e51613f Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Wed, 14 May 2025 19:24:33 +0300 Subject: [PATCH 0449/1018] Fix: Clear FP status to prevent invalid left_shift warnings with `x86-64-v2` baseline Prevents "invalid value encountered in left_shift" warnings on clang-cl when testing bit shifts with long 
types under `x86-64-v2` baseline. --- numpy/_core/src/umath/loops_autovec.dispatch.c.src | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/_core/src/umath/loops_autovec.dispatch.c.src b/numpy/_core/src/umath/loops_autovec.dispatch.c.src index 983fa1b5eb80..7449308a8a2a 100644 --- a/numpy/_core/src/umath/loops_autovec.dispatch.c.src +++ b/numpy/_core/src/umath/loops_autovec.dispatch.c.src @@ -24,6 +24,8 @@ */ #define INT_left_shift_needs_clear_floatstatus #define UINT_left_shift_needs_clear_floatstatus +#define LONG_left_shift_needs_clear_floatstatus +#define ULONG_left_shift_needs_clear_floatstatus /**begin repeat * #TYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, From f2ad1f1f64cd930adc304334c48e244eb3609f5b Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Wed, 14 May 2025 19:30:11 +0300 Subject: [PATCH 0450/1018] Set `-mfpmath=sse` on `x86` for gcc/clang numeric consistency Force SSE-based floating-point on 32-bit x86 systems to fix inconsistent results between einsum and other math functions. Prevents test failures with int16 operations by avoiding the x87 FPU's extended precision. --- meson_cpu/x86/meson.build | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/meson_cpu/x86/meson.build b/meson_cpu/x86/meson.build index bd26c615624c..609d53a2d7c1 100644 --- a/meson_cpu/x86/meson.build +++ b/meson_cpu/x86/meson.build @@ -4,7 +4,11 @@ cpu_family = host_machine.cpu_family() mod_features = import('features') HWY_SSE4_FLAGS = ['-DHWY_WANT_SSE4', '-DHWY_DISABLE_PCLMUL_AES'] -X86_64_V2_FLAGS = cpu_family == 'x86'? [] : ['-mcx16'] +# Use SSE for floating-point on x86-32 to ensure numeric consistency. +# The x87 FPU's 80-bit internal precision causes unpredictable rounding +# and overflow behavior when converting to smaller types. SSE maintains +# strict 32/64-bit precision throughout all calculations. +X86_64_V2_FLAGS = cpu_family == 'x86'? ['-mfpmath=sse'] : ['-mcx16'] X86_64_V2_NAMES = cpu_family == 'x86'? [] : ['CX16'] X86_V2 = mod_features.new( 'X86_V2', 1, args: ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', From 6a9644fbf06a117e143175a6458861cfbad3b4fe Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Mon, 19 May 2025 12:48:23 +0300 Subject: [PATCH 0451/1018] Address review feedback on CPU features and CI workflows - Disable X86_V4 (AVX-512) for MSVC builds due to Highway incompatibility - Add FIXME comment for future MSVC compatibility investigation - Update SIMD CI workflow to reflect `x86-64-v2` baseline - Remove redundant test configurations - Add missing X86_V4 support to unary complex loops --- .github/workflows/linux_simd.yml | 11 +-------- doc/source/reference/simd/build-options.rst | 8 +----- meson.options | 4 --- meson_cpu/x86/meson.build | 18 +++++++++----- numpy/_core/meson.build | 2 +- numpy/_core/src/common/npy_cpu_features.c | 27 +++++++++++++++++---- numpy/_core/src/common/npy_cpu_features.h | 3 ++- numpy/_core/tests/test_cpu_features.py | 4 +-- 8 files changed, 41 insertions(+), 36 deletions(-) diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index c35093d63a14..8002e9f3c749 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -7,7 +7,7 @@ name: Linux SIMD tests # # - baseline_only: # Focuses on completing as quickly as possible and acts as a filter for other, more resource-intensive jobs. -# Utilizes only the default baseline targets (e.g., SSE3 on X86_64) without enabling any runtime dispatched features. 
+# Utilizes only the default baseline targets (e.g., X86_V2 on X86_64) without enabling any runtime dispatched features. # # - old_gcc: # Tests the oldest supported GCC version with default CPU/baseline/dispatch settings. @@ -19,10 +19,6 @@ name: Linux SIMD tests # Tests against the host CPU features set as the baseline without enabling any runtime dispatched features. # Intended to assess the entire NumPy codebase against host flags, even for code sections lacking handwritten SIMD intrinsics. # -# - without_avx512/avx2/fma3: -# Uses runtime SIMD dispatching but disables AVX2, FMA3, and AVX512. -# Intended to evaluate 128-bit SIMD extensions without FMA support. -# # - without_avx512: # Uses runtime SIMD dispatching but disables AVX512. # Intended to evaluate 128-bit/256-bit SIMD extensions. @@ -165,11 +161,6 @@ jobs: "-Dallow-noblas=true -Dcpu-dispatch=max-x86_v4", "3.11" ] - - [ - "without avx512/avx2/fma3", - "-Dallow-noblas=true -Dcpu-dispatch=max-x86_v3", - "3.11" - ] env: MESON_ARGS: ${{ matrix.BUILD_PROP[1] }} diff --git a/doc/source/reference/simd/build-options.rst b/doc/source/reference/simd/build-options.rst index 4bb925b05532..6cf27510103c 100644 --- a/doc/source/reference/simd/build-options.rst +++ b/doc/source/reference/simd/build-options.rst @@ -261,7 +261,7 @@ Enables all features supported by the host CPU. Detects the features enabled by the compiler. This option is appended by default to ``cpu-baseline`` if ``-march``, ``-mcpu``, ``-xhost``, or ``/QxHost`` is set in -the environment variable ``CFLAGS``. +the environment variable ``CFLAGS`` unless ``cpu-baseline-detect`` is ``disabled``. ``MIN`` ~~~~~~~ @@ -303,13 +303,7 @@ Remove or add specific features, useful with ``MAX``, ``MIN``, and ``NATIVE``: - Adding a feature (``+``) includes all implied features - Removing a feature (``-``) excludes all successor features that imply the removed feature -<<<<<<< HEAD -- ``cpu-baseline`` will be treated as "native" if compiler native flag - ``-march=native`` or ``-xHost`` or ``/QxHost`` is enabled through environment variable - ``CFLAGS`` and ``cpu-baseline-detect`` is not ``disabled``:: -======= Examples:: ->>>>>>> f68e178f88 (ENH: Modulate dispatched x86 CPU features) python -m build --wheel -Csetup-args=-Dcpu-dispatch="max-X86_V4" python -m build --wheel -Csetup-args=-Dcpu-baseline="min+X86_V4" diff --git a/meson.options b/meson.options index 236f44af6d6c..e7011a3b2f2e 100644 --- a/meson.options +++ b/meson.options @@ -28,13 +28,9 @@ option('disable-optimization', type: 'boolean', value: false, description: 'Disable CPU optimized code (dispatch,simd,unroll...)') option('cpu-baseline', type: 'string', value: 'min', description: 'Minimal set of required CPU features') -<<<<<<< HEAD option('cpu-baseline-detect', type: 'feature', value: 'auto', description: 'Detect CPU baseline from the compiler flags') -option('cpu-dispatch', type: 'string', value: 'max -xop -fma4', -======= option('cpu-dispatch', type: 'string', value: 'max', ->>>>>>> f68e178f88 (ENH: Modulate dispatched x86 CPU features) description: 'Dispatched set of additional CPU features') option('test-simd', type: 'array', value: [ diff --git a/meson_cpu/x86/meson.build b/meson_cpu/x86/meson.build index 609d53a2d7c1..8035883215e9 100644 --- a/meson_cpu/x86/meson.build +++ b/meson_cpu/x86/meson.build @@ -47,8 +47,8 @@ AVX512_ICL = mod_features.new( ) AVX512_SPR = mod_features.new( 'AVX512_SPR', 35, implies: AVX512_ICL, - args: ['-mavx512fp16'], - group: ['AVX512FP16'], + args: ['-mavx512fp16', '-mavx512bf16'], + group: 
['AVX512FP16', 'AVX512BF16'],
   detect: 'AVX512_SPR',
   test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_spr.c')[0]
 )
@@ -77,13 +77,19 @@ if compiler_id == 'intel'
 endif
 
 if compiler_id == 'msvc'
+  cc_ver = cc.version()
   MSVC_SSE4 = cpu_family == 'x86' ? ['/arch:SSE2'] : []
-  MSVC_SSE4 = cc.version().version_compare('>=19.40') ? ['/arch:SSE4.2'] : MSVC_SSE4
-  X86_V2.update(args: MSVC_SSE4 + HWY_SSE4_FLAGS + ['/fp:contract'])
+  MSVC_SSE4 = cc_ver.version_compare('>=19.40') ? ['/arch:SSE4.2'] : MSVC_SSE4
+  MSVC_SSE4 = cc_ver.version_compare('>=19.30') ? MSVC_SSE4 + ['/fp:contract'] : MSVC_SSE4
+  X86_V2.update(args: MSVC_SSE4)
   clear_arch = '/arch:.*'
   X86_V3.update(args: {'val': '/arch:AVX2', 'match': clear_arch})
-  X86_V4.update(args: {'val': '/arch:AVX512', 'match': clear_arch})
-  AVX512_ICL.update(args: '')
+  # FIXME: After completing transition from universal intrinsics to Highway,
+  # investigate which MSVC versions are incompatible with Highway's AVX-512 implementation.
+  X86_V4.update(disable: 'Considered broken by Highway on MSVC')
+  # To force enable AVX-512, use:
+  # X86_V4.update(args: [{'val': '/arch:AVX512', 'match': clear_arch}, '-DHWY_BROKEN_MSVC=0'])
+  AVX512_ICL.update(disable: 'unsupported by Highway on MSVC')
 endif
 
 # legacy CPU features
diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build
index 2d239d0f5852..e6871140492d 100644
--- a/numpy/_core/meson.build
+++ b/numpy/_core/meson.build
@@ -1052,7 +1052,7 @@ foreach gen_mtargets : [
     'loops_unary_complex.dispatch.h',
     src_file.process('src/umath/loops_unary_complex.dispatch.c.src'),
     [
-      X86_V3, X86_V2,
+      X86_V4, X86_V3, X86_V2,
       ASIMD, NEON,
       VSX3, VSX2,
       VXE, VX,
diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c
index 5c53d85ad2ea..46ac4a9d8362 100644
--- a/numpy/_core/src/common/npy_cpu_features.c
+++ b/numpy/_core/src/common/npy_cpu_features.c
@@ -113,6 +113,7 @@ static struct {
     {NPY_CPU_FEATURE_AVX512VBMI2, "AVX512VBMI2"},
     {NPY_CPU_FEATURE_AVX512BITALG, "AVX512BITALG"},
     {NPY_CPU_FEATURE_AVX512FP16 , "AVX512FP16"},
+    {NPY_CPU_FEATURE_AVX512BF16 , "AVX512BF16"},
    {NPY_CPU_FEATURE_AVX512_KNL, "AVX512_KNL"},
    {NPY_CPU_FEATURE_AVX512_KNM, "AVX512_KNM"},
    {NPY_CPU_FEATURE_AVX512_SKX, "AVX512_SKX"},
@@ -410,12 +411,18 @@ npy__cpu_getxcr0(void)
 }
 
 static void
-npy__cpu_cpuid(int reg[4], int func_id)
+npy__cpu_cpuid_count(int reg[4], int func_id, int count)
 {
 #if defined(_MSC_VER)
-    __cpuidex(reg, func_id, 0);
+    __cpuidex(reg, func_id, count);
 #elif defined(__INTEL_COMPILER)
     __cpuid(reg, func_id);
+    // classic Intel compilers do not support count
+    if (count != 0) {
+        for (int i = 0; i < 4; i++) {
+            reg[i] = 0;
+        }
+    }
 #elif defined(__GNUC__) || defined(__clang__)
     #if defined(NPY_CPU_X86) && defined(__PIC__)
         // %ebx may be the PIC register
@@ -424,13 +431,13 @@ npy__cpu_cpuid(int reg[4], int func_id)
             "xchg{l}\t{%%}ebx, %1\n\t"
             : "=a" (reg[0]), "=r" (reg[1]), "=c" (reg[2]),
              "=d" (reg[3])
-            : "a" (func_id), "c" (0)
+            : "a" (func_id), "c" (count)
         );
    #else
        __asm__("cpuid\n\t"
            : "=a" (reg[0]), "=b" (reg[1]), "=c" (reg[2]),
              "=d" (reg[3])
-            : "a" (func_id), "c" (0)
+            : "a" (func_id), "c" (count)
        );
    #endif
 #else
@@ -438,6 +445,12 @@
 #endif
 }
 
+static void
+npy__cpu_cpuid(int reg[4], int func_id)
+{
+    npy__cpu_cpuid_count(reg, func_id, 0);
+}
+
 static void
 npy__cpu_init_features(void)
 {
@@ -552,6 +565,8 @@ npy__cpu_init_features(void)
         npy__cpu_have[NPY_CPU_FEATURE_AVX512BITALG] = (reg[2] & (1 << 12)) != 0;
         // Sapphire Rapids
npy__cpu_have[NPY_CPU_FEATURE_AVX512FP16] = (reg[3] & (1 << 23)) != 0;
+        npy__cpu_cpuid_count(reg, 7, 1);
+        npy__cpu_have[NPY_CPU_FEATURE_AVX512BF16] = (reg[0] & (1 << 5)) != 0;
     }
 
     // Groups
@@ -598,7 +613,9 @@ npy__cpu_init_features(void)
                                                 npy__cpu_have[NPY_CPU_FEATURE_VPCLMULQDQ];
 
     npy__cpu_have[NPY_CPU_FEATURE_AVX512_SPR] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_ICL] &&
-                                                npy__cpu_have[NPY_CPU_FEATURE_AVX512FP16];
+                                                npy__cpu_have[NPY_CPU_FEATURE_AVX512FP16] &&
+                                                npy__cpu_have[NPY_CPU_FEATURE_AVX512BF16];
+
 
     // Legacy groups
diff --git a/numpy/_core/src/common/npy_cpu_features.h b/numpy/_core/src/common/npy_cpu_features.h
index fa3c3c809015..de05a17afdb8 100644
--- a/numpy/_core/src/common/npy_cpu_features.h
+++ b/numpy/_core/src/common/npy_cpu_features.h
@@ -51,6 +52,7 @@ enum npy_cpu_features
     NPY_CPU_FEATURE_AVX512VBMI2 = 43,
     NPY_CPU_FEATURE_AVX512BITALG = 44,
     NPY_CPU_FEATURE_AVX512FP16 = 45,
+    NPY_CPU_FEATURE_AVX512BF16 = 46,
 
     // X86 CPU Groups
 
@@ -66,7 +67,7 @@ enum npy_cpu_features
     NPY_CPU_FEATURE_AVX512_CNL = 105,
     // Ice Lake (F,CD,BW,DQ,VL,IFMA,VBMI,VNNI,VBMI2,BITALG,VPOPCNTDQ,GFNI,VPCLMULQDQ,VAES)
     NPY_CPU_FEATURE_AVX512_ICL = 106,
-    // Sapphire Rapids (Ice Lake, AVX512FP16)
+    // Sapphire Rapids (Ice Lake, AVX512FP16, AVX512BF16)
     NPY_CPU_FEATURE_AVX512_SPR = 107,
     // x86-64-v2 microarchitectures (SSE[1-4.*], POPCNT, LAHF, CX16)
     // On 32-bit, cx16 is not available so it is not included
diff --git a/numpy/_core/tests/test_cpu_features.py b/numpy/_core/tests/test_cpu_features.py
index 60e60e3f7c9d..ebf5b49fc357 100644
--- a/numpy/_core/tests/test_cpu_features.py
+++ b/numpy/_core/tests/test_cpu_features.py
@@ -358,7 +358,7 @@ class Test_X86_Features(AbstractTest):
         "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ",
         "VAES", "VPCLMULQDQ", "GFNI"
     ]
-    features_groups["AVX512_SPR"] = features_groups["AVX512_ICL"] + ["AVX512FP16"]
+    features_groups["AVX512_SPR"] = features_groups["AVX512_ICL"] + ["AVX512FP16", "AVX512BF16"]
 
     features_map = {
         "SSE3": "PNI", "SSE41": "SSE4_1", "SSE42": "SSE4_2", "FMA3": "FMA",
@@ -366,7 +366,7 @@ class Test_X86_Features(AbstractTest):
         "AVX512VNNI": "AVX512_VNNI", "AVX512BITALG": "AVX512_BITALG", "AVX512VBMI2": "AVX512_VBMI2",
         "AVX5124FMAPS": "AVX512_4FMAPS", "AVX5124VNNIW": "AVX512_4VNNIW",
         "AVX512VPOPCNTDQ": "AVX512_VPOPCNTDQ",
-        "AVX512FP16": "AVX512_FP16",
+        "AVX512FP16": "AVX512_FP16", "AVX512BF16": "AVX512_BF16"
     }
 
     def load_flags(self):

From 37966c6e179d6432f79ee69e2d02300bddc8abe5 Mon Sep 17 00:00:00 2001
From: Sayed Adel
Date: Sat, 13 Sep 2025 09:51:01 +0300
Subject: [PATCH 0452/1018] fix up msvc

---
 meson_cpu/x86/meson.build | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/meson_cpu/x86/meson.build b/meson_cpu/x86/meson.build
index 8035883215e9..add073376d98 100644
--- a/meson_cpu/x86/meson.build
+++ b/meson_cpu/x86/meson.build
@@ -78,10 +78,11 @@ endif
 
 if compiler_id == 'msvc'
   cc_ver = cc.version()
-  MSVC_SSE4 = cpu_family == 'x86' ? ['/arch:SSE2'] : []
-  MSVC_SSE4 = cc_ver.version_compare('>=19.40') ? ['/arch:SSE4.2'] : MSVC_SSE4
+  MSVC_SSE4 = cc_ver.version_compare('>=19.40') ? ['/arch:SSE4.2'] : []
+  # 32-bit MSVC does not support /arch:SSE4.2
+  MSVC_SSE4 = cpu_family == 'x86' ? ['/arch:SSE2'] : MSVC_SSE4
   MSVC_SSE4 = cc_ver.version_compare('>=19.30') ?
MSVC_SSE4 + ['/fp:contract'] : MSVC_SSE4
-  X86_V2.update(args: MSVC_SSE4)
+  X86_V2.update(args: MSVC_SSE4 + HWY_SSE4_FLAGS)
   clear_arch = '/arch:.*'
   X86_V3.update(args: {'val': '/arch:AVX2', 'match': clear_arch})
   # FIXME: After completing transition from universal intrinsics to Highway,

From df9770aa302f025d365fa077a42a55797395820b Mon Sep 17 00:00:00 2001
From: Ralf Gommers
Date: Fri, 12 Sep 2025 17:46:31 +0200
Subject: [PATCH 0453/1018] BLD: change file extension for libnpymath on
 win-arm64 from .a to .lib

Closes gh-29577.

---
 doc/release/upcoming_changes/29750.change.rst | 5 +++++
 numpy/meson.build                             | 2 +-
 2 files changed, 6 insertions(+), 1 deletion(-)
 create mode 100644 doc/release/upcoming_changes/29750.change.rst

diff --git a/doc/release/upcoming_changes/29750.change.rst b/doc/release/upcoming_changes/29750.change.rst
new file mode 100644
index 000000000000..5c72ef13db0f
--- /dev/null
+++ b/doc/release/upcoming_changes/29750.change.rst
@@ -0,0 +1,5 @@
+The ``npymath`` and ``npyrandom`` libraries now have a ``.lib`` rather than a
+``.a`` file extension, for compatibility when building with MSVC and
+``setuptools``. Please note that using these static libraries is discouraged;
+existing projects that use them should build with a matching compiler
+toolchain, which is ``clang-cl`` on Windows on Arm.
diff --git a/numpy/meson.build b/numpy/meson.build
index 67e4861d7ad6..45d5a2b52eb8 100644
--- a/numpy/meson.build
+++ b/numpy/meson.build
@@ -32,7 +32,7 @@ endif
 # than a `.a` file extension in order not to break including them in a
 # distutils-based build (see gh-23981 and
 # https://mesonbuild.com/FAQ.html#why-does-building-my-project-with-msvc-output-static-libraries-called-libfooa)
-if is_windows and cc.get_id() == 'msvc'
+if is_windows and cc.get_id() in ['msvc', 'clang-cl']
     name_prefix_staticlib = ''
     name_suffix_staticlib = 'lib'
 else

From 8cfbad023a871ebc18b1871fa331dd750474730e Mon Sep 17 00:00:00 2001
From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com>
Date: Sat, 13 Sep 2025 16:12:11 +0100
Subject: [PATCH 0454/1018] TYP: Add missing defaults to stubs (#29634)

---
 numpy/_array_api_info.pyi        |  24 +++---
 numpy/ctypeslib/_ctypeslib.pyi   |  32 ++++----
 numpy/fft/_pocketfft.pyi         | 112 +++++++++++++------
 numpy/linalg/_linalg.pyi         | 132 ++++++++++++++++---------
 numpy/matrixlib/defmatrix.pyi    |   6 +-
 numpy/polynomial/_polybase.pyi   |  74 ++++++++---------
 numpy/polynomial/chebyshev.pyi   |   4 +-
 numpy/polynomial/polyutils.pyi   |  56 ++++++-------
 numpy/testing/_private/utils.pyi |   8 +-
 9 files changed, 228 insertions(+), 220 deletions(-)

diff --git a/numpy/_array_api_info.pyi b/numpy/_array_api_info.pyi
index b4592ba2c2ee..069ef478de92 100644
--- a/numpy/_array_api_info.pyi
+++ b/numpy/_array_api_info.pyi
@@ -125,7 +125,7 @@ class __array_namespace_info__:
     def default_dtypes(
         self,
         *,
-        device: _DeviceLike = ...,
+        device: _DeviceLike = None,
     ) -> _DefaultDTypes: ...

     def devices(self) -> list[_Device]: ...
@@ -133,49 +133,49 @@ class __array_namespace_info__:
     def dtypes(
         self,
         *,
-        device: _DeviceLike = ...,
-        kind: None = ...,
+        device: _DeviceLike = None,
+        kind: None = None,
     ) -> _DTypes: ...
     @overload
     def dtypes(
         self,
         *,
-        device: _DeviceLike = ...,
+        device: _DeviceLike = None,
         kind: _Permute1[_KindBool],
     ) -> _DTypesBool: ...
     @overload
     def dtypes(
         self,
         *,
-        device: _DeviceLike = ...,
+        device: _DeviceLike = None,
         kind: _Permute1[_KindInt],
     ) -> _DTypesInt: ...
@overload def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: _Permute1[_KindUInt], ) -> _DTypesUInt: ... @overload def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: _Permute1[_KindFloat], ) -> _DTypesFloat: ... @overload def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: _Permute1[_KindComplex], ) -> _DTypesComplex: ... @overload def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: ( _Permute1[_KindInteger] | _Permute2[_KindInt, _KindUInt] @@ -185,7 +185,7 @@ class __array_namespace_info__: def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: ( _Permute1[_KindNumber] | _Permute3[_KindInteger, _KindFloat, _KindComplex] @@ -195,13 +195,13 @@ class __array_namespace_info__: def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: tuple[()], ) -> _EmptyDict: ... @overload def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: tuple[_Kind, ...], ) -> _DTypesUnion: ... diff --git a/numpy/ctypeslib/_ctypeslib.pyi b/numpy/ctypeslib/_ctypeslib.pyi index e10e9fb51ab7..ebe9dbf91f04 100644 --- a/numpy/ctypeslib/_ctypeslib.pyi +++ b/numpy/ctypeslib/_ctypeslib.pyi @@ -96,40 +96,40 @@ c_intp = _c_intp @overload def ndpointer( - dtype: None = ..., - ndim: int = ..., - shape: _ShapeLike | None = ..., - flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = ..., + dtype: None = None, + ndim: int | None = None, + shape: _ShapeLike | None = None, + flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, ) -> type[_ndptr[None]]: ... @overload def ndpointer( dtype: _DTypeLike[_ScalarT], - ndim: int = ..., + ndim: int | None = None, *, shape: _ShapeLike, - flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = ..., + flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, ) -> type[_concrete_ndptr[dtype[_ScalarT]]]: ... @overload def ndpointer( dtype: DTypeLike, - ndim: int = ..., + ndim: int | None = None, *, shape: _ShapeLike, - flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = ..., + flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, ) -> type[_concrete_ndptr[dtype]]: ... @overload def ndpointer( dtype: _DTypeLike[_ScalarT], - ndim: int = ..., - shape: None = ..., - flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = ..., + ndim: int | None = None, + shape: None = None, + flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, ) -> type[_ndptr[dtype[_ScalarT]]]: ... @overload def ndpointer( dtype: DTypeLike, - ndim: int = ..., - shape: None = ..., - flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = ..., + ndim: int | None = None, + shape: None = None, + flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, ) -> type[_ndptr[dtype]]: ... @overload @@ -170,9 +170,9 @@ def as_ctypes_type(dtype: str) -> type[Any]: ... @overload def as_array(obj: ctypes._PointerLike, shape: Sequence[int]) -> NDArray[Any]: ... @overload -def as_array(obj: _ArrayLike[_ScalarT], shape: _ShapeLike | None = ...) -> NDArray[_ScalarT]: ... +def as_array(obj: _ArrayLike[_ScalarT], shape: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... @overload -def as_array(obj: object, shape: _ShapeLike | None = ...) -> NDArray[Any]: ... +def as_array(obj: object, shape: _ShapeLike | None = None) -> NDArray[Any]: ... 
@overload def as_ctypes(obj: np.bool) -> ctypes.c_bool: ... diff --git a/numpy/fft/_pocketfft.pyi b/numpy/fft/_pocketfft.pyi index 4f5e5c944b4c..81064bb174fe 100644 --- a/numpy/fft/_pocketfft.pyi +++ b/numpy/fft/_pocketfft.pyi @@ -25,113 +25,113 @@ _NormKind: TypeAlias = L["backward", "ortho", "forward"] | None def fft( a: ArrayLike, - n: int | None = ..., - axis: int = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def ifft( a: ArrayLike, - n: int | None = ..., - axis: int = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def rfft( a: ArrayLike, - n: int | None = ..., - axis: int = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def irfft( a: ArrayLike, - n: int | None = ..., - axis: int = ..., - norm: _NormKind = ..., - out: NDArray[float64] | None = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... # Input array must be compatible with `np.conjugate` def hfft( a: _ArrayLikeNumber_co, - n: int | None = ..., - axis: int = ..., - norm: _NormKind = ..., - out: NDArray[float64] | None = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... def ihfft( a: ArrayLike, - n: int | None = ..., - axis: int = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def fftn( a: ArrayLike, - s: Sequence[int] | None = ..., - axes: Sequence[int] | None = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def ifftn( a: ArrayLike, - s: Sequence[int] | None = ..., - axes: Sequence[int] | None = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def rfftn( a: ArrayLike, - s: Sequence[int] | None = ..., - axes: Sequence[int] | None = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def irfftn( a: ArrayLike, - s: Sequence[int] | None = ..., - axes: Sequence[int] | None = ..., - norm: _NormKind = ..., - out: NDArray[float64] | None = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... 
def fft2( a: ArrayLike, - s: Sequence[int] | None = ..., - axes: Sequence[int] | None = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = ..., # = (-2, -1) + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def ifft2( a: ArrayLike, - s: Sequence[int] | None = ..., - axes: Sequence[int] | None = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = ..., # = (-2, -1) + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def rfft2( a: ArrayLike, - s: Sequence[int] | None = ..., - axes: Sequence[int] | None = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = ..., # = (-2, -1) + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def irfft2( a: ArrayLike, - s: Sequence[int] | None = ..., - axes: Sequence[int] | None = ..., - norm: _NormKind = ..., - out: NDArray[float64] | None = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = ..., # = (-2, -1) + norm: _NormKind = None, + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 51844817e2dc..a63492013ade 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -113,19 +113,19 @@ class SVDResult(NamedTuple): def tensorsolve( a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, - axes: Iterable[int] | None = ..., + axes: Iterable[int] | None = None, ) -> NDArray[float64]: ... @overload def tensorsolve( a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, - axes: Iterable[int] | None = ..., + axes: Iterable[int] | None = None, ) -> NDArray[floating]: ... @overload def tensorsolve( a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, - axes: Iterable[int] | None = ..., + axes: Iterable[int] | None = None, ) -> NDArray[complexfloating]: ... @overload @@ -147,17 +147,17 @@ def solve( @overload def tensorinv( a: _ArrayLikeInt_co, - ind: int = ..., + ind: int = 2, ) -> NDArray[float64]: ... @overload def tensorinv( a: _ArrayLikeFloat_co, - ind: int = ..., + ind: int = 2, ) -> NDArray[floating]: ... @overload def tensorinv( a: _ArrayLikeComplex_co, - ind: int = ..., + ind: int = 2, ) -> NDArray[complexfloating]: ... @overload @@ -211,11 +211,11 @@ def outer( ) -> _ArrayT: ... @overload -def qr(a: _ArrayLikeInt_co, mode: _ModeKind = ...) -> QRResult: ... +def qr(a: _ArrayLikeInt_co, mode: _ModeKind = "reduced") -> QRResult: ... @overload -def qr(a: _ArrayLikeFloat_co, mode: _ModeKind = ...) -> QRResult: ... +def qr(a: _ArrayLikeFloat_co, mode: _ModeKind = "reduced") -> QRResult: ... @overload -def qr(a: _ArrayLikeComplex_co, mode: _ModeKind = ...) -> QRResult: ... +def qr(a: _ArrayLikeComplex_co, mode: _ModeKind = "reduced") -> QRResult: ... @overload def eigvals(a: _ArrayLikeInt_co) -> NDArray[float64] | NDArray[complex128]: ... @@ -225,9 +225,9 @@ def eigvals(a: _ArrayLikeFloat_co) -> NDArray[floating] | NDArray[complexfloatin def eigvals(a: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... @overload -def eigvalsh(a: _ArrayLikeInt_co, UPLO: L["L", "U", "l", "u"] = ...) -> NDArray[float64]: ... +def eigvalsh(a: _ArrayLikeInt_co, UPLO: L["L", "U", "l", "u"] = "L") -> NDArray[float64]: ... @overload -def eigvalsh(a: _ArrayLikeComplex_co, UPLO: L["L", "U", "l", "u"] = ...) 
-> NDArray[floating]: ...
+def eigvalsh(a: _ArrayLikeComplex_co, UPLO: L["L", "U", "l", "u"] = "L") -> NDArray[floating]: ...
 
 @overload
 def eig(a: _ArrayLikeInt_co) -> EigResult: ...
@@ -239,69 +239,69 @@ def eig(a: _ArrayLikeComplex_co) -> EigResult: ...
 @overload
 def eigh(
     a: _ArrayLikeInt_co,
-    UPLO: L["L", "U", "l", "u"] = ...,
+    UPLO: L["L", "U", "l", "u"] = "L",
 ) -> EighResult: ...
 @overload
 def eigh(
     a: _ArrayLikeFloat_co,
-    UPLO: L["L", "U", "l", "u"] = ...,
+    UPLO: L["L", "U", "l", "u"] = "L",
 ) -> EighResult: ...
 @overload
 def eigh(
     a: _ArrayLikeComplex_co,
-    UPLO: L["L", "U", "l", "u"] = ...,
+    UPLO: L["L", "U", "l", "u"] = "L",
 ) -> EighResult: ...
 
 @overload
 def svd(
     a: _ArrayLikeInt_co,
-    full_matrices: bool = ...,
-    compute_uv: L[True] = ...,
-    hermitian: bool = ...,
+    full_matrices: bool = True,
+    compute_uv: L[True] = True,
+    hermitian: bool = False,
 ) -> SVDResult: ...
 @overload
 def svd(
     a: _ArrayLikeFloat_co,
-    full_matrices: bool = ...,
-    compute_uv: L[True] = ...,
-    hermitian: bool = ...,
+    full_matrices: bool = True,
+    compute_uv: L[True] = True,
+    hermitian: bool = False,
 ) -> SVDResult: ...
 @overload
 def svd(
     a: _ArrayLikeComplex_co,
-    full_matrices: bool = ...,
-    compute_uv: L[True] = ...,
-    hermitian: bool = ...,
+    full_matrices: bool = True,
+    compute_uv: L[True] = True,
+    hermitian: bool = False,
 ) -> SVDResult: ...
 @overload
 def svd(
     a: _ArrayLikeInt_co,
-    full_matrices: bool = ...,
+    full_matrices: bool = True,
     *,
     compute_uv: L[False],
-    hermitian: bool = ...,
+    hermitian: bool = False,
 ) -> NDArray[float64]: ...
 @overload
 def svd(
     a: _ArrayLikeInt_co,
     full_matrices: bool,
     compute_uv: L[False],
-    hermitian: bool = ...,
+    hermitian: bool = False,
 ) -> NDArray[float64]: ...
 @overload
 def svd(
     a: _ArrayLikeComplex_co,
-    full_matrices: bool = ...,
+    full_matrices: bool = True,
     *,
     compute_uv: L[False],
-    hermitian: bool = ...,
+    hermitian: bool = False,
 ) -> NDArray[floating]: ...
 @overload
 def svd(
     a: _ArrayLikeComplex_co,
     full_matrices: bool,
     compute_uv: L[False],
-    hermitian: bool = ...,
+    hermitian: bool = False,
 ) -> NDArray[floating]: ...
 
 def svdvals(
@@ -310,34 +310,34 @@
 
 # TODO: Returns a scalar for 2D arrays and
 # a `(x.ndim - 2)`` dimensionl array otherwise
-def cond(x: _ArrayLikeComplex_co, p: float | L["fro", "nuc"] | None = ...) -> Any: ...
+def cond(x: _ArrayLikeComplex_co, p: float | L["fro", "nuc"] | None = None) -> Any: ...
 
 # TODO: Returns `int` for <2D arrays and `intp` otherwise
 def matrix_rank(
     A: _ArrayLikeComplex_co,
-    tol: _ArrayLikeFloat_co | None = ...,
-    hermitian: bool = ...,
+    tol: _ArrayLikeFloat_co | None = None,
+    hermitian: bool = False,
     *,
-    rtol: _ArrayLikeFloat_co | None = ...,
+    rtol: _ArrayLikeFloat_co | None = None,
 ) -> Any: ...
 
 @overload
 def pinv(
     a: _ArrayLikeInt_co,
-    rcond: _ArrayLikeFloat_co = ...,
-    hermitian: bool = ...,
+    rcond: _ArrayLikeFloat_co | None = None,
+    hermitian: bool = False,
 ) -> NDArray[float64]: ...
 @overload
 def pinv(
     a: _ArrayLikeFloat_co,
-    rcond: _ArrayLikeFloat_co = ...,
-    hermitian: bool = ...,
+    rcond: _ArrayLikeFloat_co | None = None,
+    hermitian: bool = False,
 ) -> NDArray[floating]: ...
 @overload
 def pinv(
     a: _ArrayLikeComplex_co,
-    rcond: _ArrayLikeFloat_co = ...,
-    hermitian: bool = ...,
+    rcond: _ArrayLikeFloat_co | None = None,
+    hermitian: bool = False,
 ) -> NDArray[complexfloating]: ...
 
 # TODO: Returns a 2-tuple of scalars for 2D arrays and
@@ -349,21 +349,21 @@ def slogdet(a: _ArrayLikeComplex_co) -> SlogdetResult: ...
 
 def det(a: _ArrayLikeComplex_co) -> Any: ...
 
@overload -def lstsq(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, rcond: float | None = ...) -> tuple[ +def lstsq(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, rcond: float | None = None) -> tuple[ NDArray[float64], NDArray[float64], int32, NDArray[float64], ]: ... @overload -def lstsq(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: float | None = ...) -> tuple[ +def lstsq(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: float | None = None) -> tuple[ NDArray[floating], NDArray[floating], int32, NDArray[floating], ]: ... @overload -def lstsq(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: float | None = ...) -> tuple[ +def lstsq(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: float | None = None) -> tuple[ NDArray[complexfloating], NDArray[floating], int32, @@ -373,16 +373,24 @@ def lstsq(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: float | None @overload def norm( x: ArrayLike, - ord: float | L["fro", "nuc"] | None = ..., - axis: None = ..., - keepdims: bool = ..., + ord: float | L["fro", "nuc"] | None = None, + axis: None = None, + keepdims: L[False] = False, ) -> floating: ... @overload def norm( x: ArrayLike, - ord: float | L["fro", "nuc"] | None = ..., - axis: SupportsInt | SupportsIndex | tuple[int, ...] = ..., - keepdims: bool = ..., + ord: float | L["fro", "nuc"] | None, + axis: SupportsInt | SupportsIndex | tuple[int, ...] | None, + keepdims: bool = False, +) -> Any: ... +@overload +def norm( + x: ArrayLike, + ord: float | L["fro", "nuc"] | None = None, + *, + axis: SupportsInt | SupportsIndex | tuple[int, ...] | None, + keepdims: bool = False, ) -> Any: ... @overload @@ -390,16 +398,16 @@ def matrix_norm( x: ArrayLike, /, *, - ord: float | L["fro", "nuc"] | None = ..., - keepdims: bool = ..., + ord: float | L["fro", "nuc"] | None = "fro", + keepdims: L[False] = False, ) -> floating: ... @overload def matrix_norm( x: ArrayLike, /, *, - ord: float | L["fro", "nuc"] | None = ..., - keepdims: bool = ..., + ord: float | L["fro", "nuc"] | None = "fro", + keepdims: bool = False, ) -> Any: ... @overload @@ -407,40 +415,40 @@ def vector_norm( x: ArrayLike, /, *, - axis: None = ..., - ord: float | None = ..., - keepdims: bool = ..., + axis: None = None, + ord: float | None = 2, + keepdims: L[False] = False, ) -> floating: ... @overload def vector_norm( x: ArrayLike, /, *, - axis: SupportsInt | SupportsIndex | tuple[int, ...] = ..., - ord: float | None = ..., - keepdims: bool = ..., + axis: SupportsInt | SupportsIndex | tuple[int, ...], + ord: float | None = 2, + keepdims: bool = False, ) -> Any: ... # TODO: Returns a scalar or array def multi_dot( arrays: Iterable[_ArrayLikeComplex_co | _ArrayLikeObject_co | _ArrayLikeTD64_co], *, - out: NDArray[Any] | None = ..., + out: NDArray[Any] | None = None, ) -> Any: ... def diagonal( x: ArrayLike, # >= 2D array /, *, - offset: SupportsIndex = ..., + offset: SupportsIndex = 0, ) -> NDArray[Any]: ... def trace( x: ArrayLike, # >= 2D array /, *, - offset: SupportsIndex = ..., - dtype: DTypeLike | None = ..., + offset: SupportsIndex = 0, + dtype: DTypeLike | None = None, ) -> Any: ... 
@overload diff --git a/numpy/matrixlib/defmatrix.pyi b/numpy/matrixlib/defmatrix.pyi index ee8f83746998..05e4c77303c0 100644 --- a/numpy/matrixlib/defmatrix.pyi +++ b/numpy/matrixlib/defmatrix.pyi @@ -8,10 +8,10 @@ __all__ = ["asmatrix", "bmat", "matrix"] def bmat( obj: str | Sequence[ArrayLike] | NDArray[Any], - ldict: Mapping[str, Any] | None = ..., - gdict: Mapping[str, Any] | None = ..., + ldict: Mapping[str, Any] | None = None, + gdict: Mapping[str, Any] | None = None, ) -> matrix[tuple[int, int], Any]: ... def asmatrix( - data: ArrayLike, dtype: DTypeLike = ... + data: ArrayLike, dtype: DTypeLike = None ) -> matrix[tuple[int, int], Any]: ... diff --git a/numpy/polynomial/_polybase.pyi b/numpy/polynomial/_polybase.pyi index 30c906fa3b4b..e82e441e4c64 100644 --- a/numpy/polynomial/_polybase.pyi +++ b/numpy/polynomial/_polybase.pyi @@ -148,7 +148,7 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC): def copy(self, /) -> Self: ... def degree(self, /) -> int: ... def cutdeg(self, /) -> Self: ... - def trim(self, /, tol: _FloatLike_co = ...) -> Self: ... + def trim(self, /, tol: _FloatLike_co = 0) -> Self: ... def truncate(self, /, size: _AnyInt) -> Self: ... @overload @@ -157,24 +157,24 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC): /, domain: _SeriesLikeCoef_co | None, kind: type[_Other], - window: _SeriesLikeCoef_co | None = ..., + window: _SeriesLikeCoef_co | None = None, ) -> _Other: ... @overload def convert( self, /, - domain: _SeriesLikeCoef_co | None = ..., + domain: _SeriesLikeCoef_co | None = None, *, kind: type[_Other], - window: _SeriesLikeCoef_co | None = ..., + window: _SeriesLikeCoef_co | None = None, ) -> _Other: ... @overload def convert( self, /, - domain: _SeriesLikeCoef_co | None = ..., + domain: _SeriesLikeCoef_co | None = None, kind: None = None, - window: _SeriesLikeCoef_co | None = ..., + window: _SeriesLikeCoef_co | None = None, ) -> Self: ... def mapparms(self, /) -> _Tuple2[Any]: ... @@ -182,20 +182,20 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC): def integ( self, /, - m: SupportsIndex = ..., - k: _CoefLike_co | _SeriesLikeCoef_co = ..., - lbnd: _CoefLike_co | None = ..., + m: SupportsIndex = 1, + k: _CoefLike_co | _SeriesLikeCoef_co = ..., # = [] + lbnd: _CoefLike_co | None = None, ) -> Self: ... - def deriv(self, /, m: SupportsIndex = ...) -> Self: ... + def deriv(self, /, m: SupportsIndex = 1) -> Self: ... def roots(self, /) -> _CoefSeries: ... def linspace( self, /, - n: SupportsIndex = ..., - domain: _SeriesLikeCoef_co | None = ..., + n: SupportsIndex = 100, + domain: _SeriesLikeCoef_co | None = None, ) -> _Tuple2[_Series[np.float64 | np.complex128]]: ... @overload @@ -205,12 +205,12 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC): x: _SeriesLikeCoef_co, y: _SeriesLikeCoef_co, deg: int | _SeriesLikeInt_co, - domain: _SeriesLikeCoef_co | None = ..., - rcond: _FloatLike_co = ..., - full: Literal[False] = ..., - w: _SeriesLikeCoef_co | None = ..., - window: _SeriesLikeCoef_co | None = ..., - symbol: str = ..., + domain: _SeriesLikeCoef_co | None = None, + rcond: _FloatLike_co | None = None, + full: Literal[False] = False, + w: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", ) -> Self: ... 
@overload @classmethod @@ -219,13 +219,13 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC): x: _SeriesLikeCoef_co, y: _SeriesLikeCoef_co, deg: int | _SeriesLikeInt_co, - domain: _SeriesLikeCoef_co | None = ..., - rcond: _FloatLike_co = ..., + domain: _SeriesLikeCoef_co | None = None, + rcond: _FloatLike_co | None = None, *, full: Literal[True], - w: _SeriesLikeCoef_co | None = ..., - window: _SeriesLikeCoef_co | None = ..., - symbol: str = ..., + w: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", ) -> tuple[Self, Sequence[np.inexact | np.int32]]: ... @overload @classmethod @@ -237,43 +237,43 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC): domain: _SeriesLikeCoef_co | None, rcond: _FloatLike_co, full: Literal[True], /, - w: _SeriesLikeCoef_co | None = ..., - window: _SeriesLikeCoef_co | None = ..., - symbol: str = ..., + w: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", ) -> tuple[Self, Sequence[np.inexact | np.int32]]: ... @classmethod def fromroots( cls, roots: _ArrayLikeCoef_co, - domain: _SeriesLikeCoef_co | None = ..., - window: _SeriesLikeCoef_co | None = ..., - symbol: str = ..., + domain: _SeriesLikeCoef_co | None = ..., # = [] + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", ) -> Self: ... @classmethod def identity( cls, - domain: _SeriesLikeCoef_co | None = ..., - window: _SeriesLikeCoef_co | None = ..., - symbol: str = ..., + domain: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", ) -> Self: ... @classmethod def basis( cls, deg: _AnyInt, - domain: _SeriesLikeCoef_co | None = ..., - window: _SeriesLikeCoef_co | None = ..., - symbol: str = ..., + domain: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", ) -> Self: ... @classmethod def cast( cls, series: ABCPolyBase, - domain: _SeriesLikeCoef_co | None = ..., - window: _SeriesLikeCoef_co | None = ..., + domain: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, ) -> Self: ... @classmethod diff --git a/numpy/polynomial/chebyshev.pyi b/numpy/polynomial/chebyshev.pyi index 85c92816a261..30db7dfb9e6b 100644 --- a/numpy/polynomial/chebyshev.pyi +++ b/numpy/polynomial/chebyshev.pyi @@ -150,7 +150,7 @@ class Chebyshev(ABCPolyBase[L["T"]]): cls, func: Callable[[npt.NDArray[np.float64]], _CoefSeries], deg: _IntLike_co, - domain: _SeriesLikeCoef_co | None = ..., + domain: _SeriesLikeCoef_co | None = None, args: tuple[()] = ..., ) -> Self: ... @overload @@ -162,7 +162,7 @@ class Chebyshev(ABCPolyBase[L["T"]]): _CoefSeries, ], deg: _IntLike_co, - domain: _SeriesLikeCoef_co | None = ..., + domain: _SeriesLikeCoef_co | None = None, *, args: Iterable[Any], ) -> Self: ... diff --git a/numpy/polynomial/polyutils.pyi b/numpy/polynomial/polyutils.pyi index 65ae4e5503b2..8938c3cc8259 100644 --- a/numpy/polynomial/polyutils.pyi +++ b/numpy/polynomial/polyutils.pyi @@ -59,47 +59,47 @@ _AnyVanderF: TypeAlias = Callable[ @overload def as_series( alist: npt.NDArray[np.integer] | _FloatArray, - trim: bool = ..., + trim: bool = True, ) -> list[_FloatSeries]: ... @overload def as_series( alist: _ComplexArray, - trim: bool = ..., + trim: bool = True, ) -> list[_ComplexSeries]: ... @overload def as_series( alist: _ObjectArray, - trim: bool = ..., + trim: bool = True, ) -> list[_ObjectSeries]: ... 
@overload def as_series( # type: ignore[overload-overlap] alist: Iterable[_FloatArray | npt.NDArray[np.integer]], - trim: bool = ..., + trim: bool = True, ) -> list[_FloatSeries]: ... @overload def as_series( alist: Iterable[_ComplexArray], - trim: bool = ..., + trim: bool = True, ) -> list[_ComplexSeries]: ... @overload def as_series( alist: Iterable[_ObjectArray], - trim: bool = ..., + trim: bool = True, ) -> list[_ObjectSeries]: ... @overload def as_series( # type: ignore[overload-overlap] alist: Iterable[_SeriesLikeFloat_co | float], - trim: bool = ..., + trim: bool = True, ) -> list[_FloatSeries]: ... @overload def as_series( alist: Iterable[_SeriesLikeComplex_co | complex], - trim: bool = ..., + trim: bool = True, ) -> list[_ComplexSeries]: ... @overload def as_series( alist: Iterable[_SeriesLikeCoef_co | object], - trim: bool = ..., + trim: bool = True, ) -> list[_ObjectSeries]: ... _T_seq = TypeVar("_T_seq", bound=_CoefArray | Sequence[_CoefLike_co]) @@ -108,32 +108,32 @@ def trimseq(seq: _T_seq) -> _T_seq: ... @overload def trimcoef( # type: ignore[overload-overlap] c: npt.NDArray[np.integer] | _FloatArray, - tol: _FloatLike_co = ..., + tol: _FloatLike_co = 0, ) -> _FloatSeries: ... @overload def trimcoef( c: _ComplexArray, - tol: _FloatLike_co = ..., + tol: _FloatLike_co = 0, ) -> _ComplexSeries: ... @overload def trimcoef( c: _ObjectArray, - tol: _FloatLike_co = ..., + tol: _FloatLike_co = 0, ) -> _ObjectSeries: ... @overload def trimcoef( # type: ignore[overload-overlap] c: _SeriesLikeFloat_co | float, - tol: _FloatLike_co = ..., + tol: _FloatLike_co = 0, ) -> _FloatSeries: ... @overload def trimcoef( c: _SeriesLikeComplex_co | complex, - tol: _FloatLike_co = ..., + tol: _FloatLike_co = 0, ) -> _ComplexSeries: ... @overload def trimcoef( c: _SeriesLikeCoef_co | object, - tol: _FloatLike_co = ..., + tol: _FloatLike_co = 0, ) -> _ObjectSeries: ... @overload @@ -361,9 +361,9 @@ def _fit( # type: ignore[overload-overlap] y: _ArrayLikeFloat_co, deg: _SeriesLikeInt_co, domain: _SeriesLikeFloat_co | None = ..., - rcond: _FloatLike_co | None = ..., - full: Literal[False] = ..., - w: _SeriesLikeFloat_co | None = ..., + rcond: _FloatLike_co | None = None, + full: Literal[False] = False, + w: _SeriesLikeFloat_co | None = None, ) -> _FloatArray: ... @overload def _fit( @@ -372,9 +372,9 @@ def _fit( y: _ArrayLikeComplex_co, deg: _SeriesLikeInt_co, domain: _SeriesLikeComplex_co | None = ..., - rcond: _FloatLike_co | None = ..., - full: Literal[False] = ..., - w: _SeriesLikeComplex_co | None = ..., + rcond: _FloatLike_co | None = None, + full: Literal[False] = False, + w: _SeriesLikeComplex_co | None = None, ) -> _ComplexArray: ... @overload def _fit( @@ -383,9 +383,9 @@ def _fit( y: _ArrayLikeCoef_co, deg: _SeriesLikeInt_co, domain: _SeriesLikeCoef_co | None = ..., - rcond: _FloatLike_co | None = ..., - full: Literal[False] = ..., - w: _SeriesLikeCoef_co | None = ..., + rcond: _FloatLike_co | None = None, + full: Literal[False] = False, + w: _SeriesLikeCoef_co | None = None, ) -> _CoefArray: ... @overload def _fit( @@ -397,7 +397,7 @@ def _fit( rcond: _FloatLike_co | None, full: Literal[True], /, - w: _SeriesLikeCoef_co | None = ..., + w: _SeriesLikeCoef_co | None = None, ) -> tuple[_CoefSeries, Sequence[np.inexact | np.int32]]: ... 
@overload def _fit( @@ -406,11 +406,11 @@ def _fit( y: _SeriesLikeCoef_co, deg: _SeriesLikeInt_co, domain: _SeriesLikeCoef_co | None = ..., - rcond: _FloatLike_co | None = ..., + rcond: _FloatLike_co | None = None, *, full: Literal[True], - w: _SeriesLikeCoef_co | None = ..., + w: _SeriesLikeCoef_co | None = None, ) -> tuple[_CoefSeries, Sequence[np.inexact | np.int32]]: ... def _as_int(x: SupportsIndex, desc: str) -> int: ... -def format_float(x: _FloatLike_co, parens: bool = ...) -> str: ... +def format_float(x: _FloatLike_co, parens: bool = False) -> str: ... diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index 76455df87eb0..a9b773cf2247 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -178,10 +178,10 @@ else: def build_err_msg( arrays: Iterable[object], err_msg: object, - header: str = ..., - verbose: bool = ..., - names: Sequence[str] = ..., - precision: SupportsIndex | None = ..., + header: str = "Items are not equal:", + verbose: bool = True, + names: Sequence[str] = ..., # = ('ACTUAL', 'DESIRED') + precision: SupportsIndex | None = 8, ) -> str: ... # From beb8c9cf993faddc58811eda47c141e110aecde2 Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Sat, 13 Sep 2025 14:16:21 -0400 Subject: [PATCH 0455/1018] TST: Replace test_smoke xunit setup with methods (#29671) --- numpy/random/tests/test_smoke.py | 537 +++++++++++++++++-------------- 1 file changed, 300 insertions(+), 237 deletions(-) diff --git a/numpy/random/tests/test_smoke.py b/numpy/random/tests/test_smoke.py index 6f07443f79a9..5353a72a1174 100644 --- a/numpy/random/tests/test_smoke.py +++ b/numpy/random/tests/test_smoke.py @@ -1,4 +1,5 @@ import pickle +from dataclasses import dataclass from functools import partial import pytest @@ -7,12 +8,8 @@ from numpy.random import MT19937, PCG64, PCG64DXSM, SFC64, Generator, Philox from numpy.testing import assert_, assert_array_equal, assert_equal - -@pytest.fixture(scope='module', - params=(np.bool, np.int8, np.int16, np.int32, np.int64, - np.uint8, np.uint16, np.uint32, np.uint64)) -def dtype(request): - return request.param +DTYPES_BOOL_INT_UINT = (np.bool, np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64) def params_0(f): @@ -92,403 +89,459 @@ def warmup(rg, n=None): rg.random(n, dtype=np.float32) +@dataclass +class RNGData: + bit_generator: type[np.random.BitGenerator] + advance: int + seed: list[int] + rg: Generator + seed_vector_bits: int + + class RNG: @classmethod - def setup_class(cls): + def _create_rng(cls): # Overridden in test classes. 
Placeholder to silence IDE noise
-        cls.bit_generator = PCG64
-        cls.advance = None
-        cls.seed = [12345]
-        cls.rg = Generator(cls.bit_generator(*cls.seed))
-        cls.initial_state = cls.rg.bit_generator.state
-        cls.seed_vector_bits = 64
-        cls._extra_setup()
-
-    @classmethod
-    def _extra_setup(cls):
-        cls.vec_1d = np.arange(2.0, 102.0)
-        cls.vec_2d = np.arange(2.0, 102.0)[None, :]
-        cls.mat = np.arange(2.0, 102.0, 0.01).reshape((100, 100))
-        cls.seed_error = TypeError
-
-    def _reset_state(self):
-        self.rg.bit_generator.state = self.initial_state
+        bit_generator = PCG64
+        advance = None
+        seed = [12345]
+        rg = Generator(bit_generator(*seed))
+        seed_vector_bits = 64
+        return RNGData(bit_generator, advance, seed, rg, seed_vector_bits)
 
     def test_init(self):
-        rg = Generator(self.bit_generator())
-        state = rg.bit_generator.state
-        rg.standard_normal(1)
-        rg.standard_normal(1)
-        rg.bit_generator.state = state
-        new_state = rg.bit_generator.state
+        data = self._create_rng()
+        data.rg = Generator(data.bit_generator())
+        state = data.rg.bit_generator.state
+        data.rg.standard_normal(1)
+        data.rg.standard_normal(1)
+        data.rg.bit_generator.state = state
+        new_state = data.rg.bit_generator.state
         assert_(comp_state(state, new_state))
 
     def test_advance(self):
-        state = self.rg.bit_generator.state
-        if hasattr(self.rg.bit_generator, 'advance'):
-            self.rg.bit_generator.advance(self.advance)
-            assert_(not comp_state(state, self.rg.bit_generator.state))
+        data = self._create_rng()
+        state = data.rg.bit_generator.state
+        if hasattr(data.rg.bit_generator, 'advance'):
+            data.rg.bit_generator.advance(data.advance)
+            assert_(not comp_state(state, data.rg.bit_generator.state))
         else:
-            bitgen_name = self.rg.bit_generator.__class__.__name__
+            bitgen_name = data.rg.bit_generator.__class__.__name__
             pytest.skip(f'Advance is not supported by {bitgen_name}')
 
     def test_jump(self):
-        state = self.rg.bit_generator.state
-        if hasattr(self.rg.bit_generator, 'jumped'):
-            bit_gen2 = self.rg.bit_generator.jumped()
+        rg = self._create_rng().rg
+        state = rg.bit_generator.state
+        if hasattr(rg.bit_generator, 'jumped'):
+            bit_gen2 = rg.bit_generator.jumped()
             jumped_state = bit_gen2.state
             assert_(not comp_state(state, jumped_state))
-            self.rg.random(2 * 3 * 5 * 7 * 11 * 13 * 17)
-            self.rg.bit_generator.state = state
-            bit_gen3 = self.rg.bit_generator.jumped()
+            rg.random(2 * 3 * 5 * 7 * 11 * 13 * 17)
+            rg.bit_generator.state = state
+            bit_gen3 = rg.bit_generator.jumped()
             rejumped_state = bit_gen3.state
             assert_(comp_state(jumped_state, rejumped_state))
         else:
-            bitgen_name = self.rg.bit_generator.__class__.__name__
+            bitgen_name = rg.bit_generator.__class__.__name__
             if bitgen_name not in ('SFC64',):
                 raise AttributeError(f'no "jumped" in {bitgen_name}')
             pytest.skip(f'Jump is not supported by {bitgen_name}')
 
     def test_uniform(self):
-        r = self.rg.uniform(-1.0, 0.0, size=10)
+        rg = self._create_rng().rg
+        r = rg.uniform(-1.0, 0.0, size=10)
         assert_(len(r) == 10)
         assert_((r > -1).all())
         assert_((r <= 0).all())
 
     def test_uniform_array(self):
-        r = self.rg.uniform(np.array([-1.0] * 10), 0.0, size=10)
+        rg = self._create_rng().rg
+        r = rg.uniform(np.array([-1.0] * 10), 0.0, size=10)
         assert_(len(r) == 10)
         assert_((r > -1).all())
         assert_((r <= 0).all())
-        r = self.rg.uniform(np.array([-1.0] * 10),
+        r = rg.uniform(np.array([-1.0] * 10),
                            np.array([0.0] * 10), size=10)
         assert_(len(r) == 10)
         assert_((r > -1).all())
         assert_((r <= 0).all())
-        r = self.rg.uniform(-1.0, np.array([0.0] * 10), size=10)
+        r = rg.uniform(-1.0, np.array([0.0] * 10), size=10)
assert_(len(r) == 10) assert_((r > -1).all()) assert_((r <= 0).all()) def test_random(self): - assert_(len(self.rg.random(10)) == 10) - params_0(self.rg.random) + rg = self._create_rng().rg + assert_(len(rg.random(10)) == 10) + params_0(rg.random) def test_standard_normal_zig(self): - assert_(len(self.rg.standard_normal(10)) == 10) + rg = self._create_rng().rg + assert_(len(rg.standard_normal(10)) == 10) def test_standard_normal(self): - assert_(len(self.rg.standard_normal(10)) == 10) - params_0(self.rg.standard_normal) + rg = self._create_rng().rg + assert_(len(rg.standard_normal(10)) == 10) + params_0(rg.standard_normal) def test_standard_gamma(self): - assert_(len(self.rg.standard_gamma(10, 10)) == 10) - assert_(len(self.rg.standard_gamma(np.array([10] * 10), 10)) == 10) - params_1(self.rg.standard_gamma) + rg = self._create_rng().rg + assert_(len(rg.standard_gamma(10, 10)) == 10) + assert_(len(rg.standard_gamma(np.array([10] * 10), 10)) == 10) + params_1(rg.standard_gamma) def test_standard_exponential(self): - assert_(len(self.rg.standard_exponential(10)) == 10) - params_0(self.rg.standard_exponential) + rg = self._create_rng().rg + assert_(len(rg.standard_exponential(10)) == 10) + params_0(rg.standard_exponential) def test_standard_exponential_float(self): - randoms = self.rg.standard_exponential(10, dtype='float32') + rg = self._create_rng().rg + randoms = rg.standard_exponential(10, dtype='float32') assert_(len(randoms) == 10) assert randoms.dtype == np.float32 - params_0(partial(self.rg.standard_exponential, dtype='float32')) + params_0(partial(rg.standard_exponential, dtype='float32')) def test_standard_exponential_float_log(self): - randoms = self.rg.standard_exponential(10, dtype='float32', + rg = self._create_rng().rg + randoms = rg.standard_exponential(10, dtype='float32', method='inv') assert_(len(randoms) == 10) assert randoms.dtype == np.float32 - params_0(partial(self.rg.standard_exponential, dtype='float32', + params_0(partial(rg.standard_exponential, dtype='float32', method='inv')) def test_standard_cauchy(self): - assert_(len(self.rg.standard_cauchy(10)) == 10) - params_0(self.rg.standard_cauchy) + rg = self._create_rng().rg + assert_(len(rg.standard_cauchy(10)) == 10) + params_0(rg.standard_cauchy) def test_standard_t(self): - assert_(len(self.rg.standard_t(10, 10)) == 10) - params_1(self.rg.standard_t) + rg = self._create_rng().rg + assert_(len(rg.standard_t(10, 10)) == 10) + params_1(rg.standard_t) def test_binomial(self): - assert_(self.rg.binomial(10, .5) >= 0) - assert_(self.rg.binomial(1000, .5) >= 0) + rg = self._create_rng().rg + assert_(rg.binomial(10, .5) >= 0) + assert_(rg.binomial(1000, .5) >= 0) def test_reset_state(self): - state = self.rg.bit_generator.state - int_1 = self.rg.integers(2**31) - self.rg.bit_generator.state = state - int_2 = self.rg.integers(2**31) + rg = self._create_rng().rg + state = rg.bit_generator.state + int_1 = rg.integers(2**31) + rg.bit_generator.state = state + int_2 = rg.integers(2**31) assert_(int_1 == int_2) def test_entropy_init(self): - rg = Generator(self.bit_generator()) - rg2 = Generator(self.bit_generator()) + bit_generator = self._create_rng().bit_generator + rg = Generator(bit_generator()) + rg2 = Generator(bit_generator()) assert_(not comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_seed(self): - rg = Generator(self.bit_generator(*self.seed)) - rg2 = Generator(self.bit_generator(*self.seed)) + data = self._create_rng() + rg = Generator(data.bit_generator(*data.seed)) + rg2 = 
Generator(data.bit_generator(*data.seed)) rg.random() rg2.random() assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_reset_state_gauss(self): - rg = Generator(self.bit_generator(*self.seed)) + data = self._create_rng() + rg = Generator(data.bit_generator(*data.seed)) rg.standard_normal() state = rg.bit_generator.state n1 = rg.standard_normal(size=10) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(data.bit_generator()) rg2.bit_generator.state = state n2 = rg2.standard_normal(size=10) assert_array_equal(n1, n2) def test_reset_state_uint32(self): - rg = Generator(self.bit_generator(*self.seed)) + data = self._create_rng() + rg = Generator(data.bit_generator(*data.seed)) rg.integers(0, 2 ** 24, 120, dtype=np.uint32) state = rg.bit_generator.state n1 = rg.integers(0, 2 ** 24, 10, dtype=np.uint32) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(data.bit_generator()) rg2.bit_generator.state = state n2 = rg2.integers(0, 2 ** 24, 10, dtype=np.uint32) assert_array_equal(n1, n2) def test_reset_state_float(self): - rg = Generator(self.bit_generator(*self.seed)) + data = self._create_rng() + rg = Generator(data.bit_generator(*data.seed)) rg.random(dtype='float32') state = rg.bit_generator.state n1 = rg.random(size=10, dtype='float32') - rg2 = Generator(self.bit_generator()) + rg2 = Generator(data.bit_generator()) rg2.bit_generator.state = state n2 = rg2.random(size=10, dtype='float32') assert_((n1 == n2).all()) def test_shuffle(self): + rg = self._create_rng().rg original = np.arange(200, 0, -1) - permuted = self.rg.permutation(original) + permuted = rg.permutation(original) assert_((original != permuted).any()) def test_permutation(self): + rg = self._create_rng().rg original = np.arange(200, 0, -1) - permuted = self.rg.permutation(original) + permuted = rg.permutation(original) assert_((original != permuted).any()) def test_beta(self): - vals = self.rg.beta(2.0, 2.0, 10) + rg = self._create_rng().rg + vals = rg.beta(2.0, 2.0, 10) assert_(len(vals) == 10) - vals = self.rg.beta(np.array([2.0] * 10), 2.0) + vals = rg.beta(np.array([2.0] * 10), 2.0) assert_(len(vals) == 10) - vals = self.rg.beta(2.0, np.array([2.0] * 10)) + vals = rg.beta(2.0, np.array([2.0] * 10)) assert_(len(vals) == 10) - vals = self.rg.beta(np.array([2.0] * 10), np.array([2.0] * 10)) + vals = rg.beta(np.array([2.0] * 10), np.array([2.0] * 10)) assert_(len(vals) == 10) - vals = self.rg.beta(np.array([2.0] * 10), np.array([[2.0]] * 10)) + vals = rg.beta(np.array([2.0] * 10), np.array([[2.0]] * 10)) assert_(vals.shape == (10, 10)) def test_bytes(self): - vals = self.rg.bytes(10) + rg = self._create_rng().rg + vals = rg.bytes(10) assert_(len(vals) == 10) def test_chisquare(self): - vals = self.rg.chisquare(2.0, 10) + rg = self._create_rng().rg + vals = rg.chisquare(2.0, 10) assert_(len(vals) == 10) - params_1(self.rg.chisquare) + params_1(rg.chisquare) def test_exponential(self): - vals = self.rg.exponential(2.0, 10) + rg = self._create_rng().rg + vals = rg.exponential(2.0, 10) assert_(len(vals) == 10) - params_1(self.rg.exponential) + params_1(rg.exponential) def test_f(self): - vals = self.rg.f(3, 1000, 10) + rg = self._create_rng().rg + vals = rg.f(3, 1000, 10) assert_(len(vals) == 10) def test_gamma(self): - vals = self.rg.gamma(3, 2, 10) + rg = self._create_rng().rg + vals = rg.gamma(3, 2, 10) assert_(len(vals) == 10) def test_geometric(self): - vals = self.rg.geometric(0.5, 10) + rg = self._create_rng().rg + vals = rg.geometric(0.5, 10) assert_(len(vals) == 10) - 
params_1(self.rg.exponential, bounded=True) + params_1(rg.exponential, bounded=True) def test_gumbel(self): - vals = self.rg.gumbel(2.0, 2.0, 10) + rg = self._create_rng().rg + vals = rg.gumbel(2.0, 2.0, 10) assert_(len(vals) == 10) def test_laplace(self): - vals = self.rg.laplace(2.0, 2.0, 10) + rg = self._create_rng().rg + vals = rg.laplace(2.0, 2.0, 10) assert_(len(vals) == 10) def test_logitic(self): - vals = self.rg.logistic(2.0, 2.0, 10) + rg = self._create_rng().rg + vals = rg.logistic(2.0, 2.0, 10) assert_(len(vals) == 10) def test_logseries(self): - vals = self.rg.logseries(0.5, 10) + rg = self._create_rng().rg + vals = rg.logseries(0.5, 10) assert_(len(vals) == 10) def test_negative_binomial(self): - vals = self.rg.negative_binomial(10, 0.2, 10) + rg = self._create_rng().rg + vals = rg.negative_binomial(10, 0.2, 10) assert_(len(vals) == 10) def test_noncentral_chisquare(self): - vals = self.rg.noncentral_chisquare(10, 2, 10) + rg = self._create_rng().rg + vals = rg.noncentral_chisquare(10, 2, 10) assert_(len(vals) == 10) def test_noncentral_f(self): - vals = self.rg.noncentral_f(3, 1000, 2, 10) + rg = self._create_rng().rg + vals = rg.noncentral_f(3, 1000, 2, 10) assert_(len(vals) == 10) - vals = self.rg.noncentral_f(np.array([3] * 10), 1000, 2) + vals = rg.noncentral_f(np.array([3] * 10), 1000, 2) assert_(len(vals) == 10) - vals = self.rg.noncentral_f(3, np.array([1000] * 10), 2) + vals = rg.noncentral_f(3, np.array([1000] * 10), 2) assert_(len(vals) == 10) - vals = self.rg.noncentral_f(3, 1000, np.array([2] * 10)) + vals = rg.noncentral_f(3, 1000, np.array([2] * 10)) assert_(len(vals) == 10) def test_normal(self): - vals = self.rg.normal(10, 0.2, 10) + rg = self._create_rng().rg + vals = rg.normal(10, 0.2, 10) assert_(len(vals) == 10) def test_pareto(self): - vals = self.rg.pareto(3.0, 10) + rg = self._create_rng().rg + vals = rg.pareto(3.0, 10) assert_(len(vals) == 10) def test_poisson(self): - vals = self.rg.poisson(10, 10) + rg = self._create_rng().rg + vals = rg.poisson(10, 10) assert_(len(vals) == 10) - vals = self.rg.poisson(np.array([10] * 10)) + vals = rg.poisson(np.array([10] * 10)) assert_(len(vals) == 10) - params_1(self.rg.poisson) + params_1(rg.poisson) def test_power(self): - vals = self.rg.power(0.2, 10) + rg = self._create_rng().rg + vals = rg.power(0.2, 10) assert_(len(vals) == 10) def test_integers(self): - vals = self.rg.integers(10, 20, 10) + rg = self._create_rng().rg + vals = rg.integers(10, 20, 10) assert_(len(vals) == 10) def test_rayleigh(self): - vals = self.rg.rayleigh(0.2, 10) + rg = self._create_rng().rg + vals = rg.rayleigh(0.2, 10) assert_(len(vals) == 10) - params_1(self.rg.rayleigh, bounded=True) + params_1(rg.rayleigh, bounded=True) def test_vonmises(self): - vals = self.rg.vonmises(10, 0.2, 10) + rg = self._create_rng().rg + vals = rg.vonmises(10, 0.2, 10) assert_(len(vals) == 10) def test_wald(self): - vals = self.rg.wald(1.0, 1.0, 10) + rg = self._create_rng().rg + vals = rg.wald(1.0, 1.0, 10) assert_(len(vals) == 10) def test_weibull(self): - vals = self.rg.weibull(1.0, 10) + rg = self._create_rng().rg + vals = rg.weibull(1.0, 10) assert_(len(vals) == 10) def test_zipf(self): - vals = self.rg.zipf(10, 10) + rg = self._create_rng().rg + vec_1d = np.arange(2.0, 102.0) + vec_2d = np.arange(2.0, 102.0)[None, :] + mat = np.arange(2.0, 102.0, 0.01).reshape((100, 100)) + vals = rg.zipf(10, 10) assert_(len(vals) == 10) - vals = self.rg.zipf(self.vec_1d) + vals = rg.zipf(vec_1d) assert_(len(vals) == 100) - vals = self.rg.zipf(self.vec_2d) + vals = 
rg.zipf(vec_2d) assert_(vals.shape == (1, 100)) - vals = self.rg.zipf(self.mat) + vals = rg.zipf(mat) assert_(vals.shape == (100, 100)) def test_hypergeometric(self): - vals = self.rg.hypergeometric(25, 25, 20) + rg = self._create_rng().rg + vals = rg.hypergeometric(25, 25, 20) assert_(np.isscalar(vals)) - vals = self.rg.hypergeometric(np.array([25] * 10), 25, 20) + vals = rg.hypergeometric(np.array([25] * 10), 25, 20) assert_(vals.shape == (10,)) def test_triangular(self): - vals = self.rg.triangular(-5, 0, 5) + rg = self._create_rng().rg + vals = rg.triangular(-5, 0, 5) assert_(np.isscalar(vals)) - vals = self.rg.triangular(-5, np.array([0] * 10), 5) + vals = rg.triangular(-5, np.array([0] * 10), 5) assert_(vals.shape == (10,)) def test_multivariate_normal(self): + rg = self._create_rng().rg mean = [0, 0] cov = [[1, 0], [0, 100]] # diagonal covariance - x = self.rg.multivariate_normal(mean, cov, 5000) + x = rg.multivariate_normal(mean, cov, 5000) assert_(x.shape == (5000, 2)) - x_zig = self.rg.multivariate_normal(mean, cov, 5000) + x_zig = rg.multivariate_normal(mean, cov, 5000) assert_(x.shape == (5000, 2)) - x_inv = self.rg.multivariate_normal(mean, cov, 5000) + x_inv = rg.multivariate_normal(mean, cov, 5000) assert_(x.shape == (5000, 2)) assert_((x_zig != x_inv).any()) def test_multinomial(self): - vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3]) + rg = self._create_rng().rg + vals = rg.multinomial(100, [1.0 / 3, 2.0 / 3]) assert_(vals.shape == (2,)) - vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3], size=10) + vals = rg.multinomial(100, [1.0 / 3, 2.0 / 3], size=10) assert_(vals.shape == (10, 2)) def test_dirichlet(self): - s = self.rg.dirichlet((10, 5, 3), 20) + rg = self._create_rng().rg + s = rg.dirichlet((10, 5, 3), 20) assert_(s.shape == (20, 3)) def test_pickle(self): - pick = pickle.dumps(self.rg) + rg = self._create_rng().rg + pick = pickle.dumps(rg) unpick = pickle.loads(pick) - assert_(type(self.rg) == type(unpick)) - assert_(comp_state(self.rg.bit_generator.state, + assert_(type(rg) == type(unpick)) + assert_(comp_state(rg.bit_generator.state, unpick.bit_generator.state)) - pick = pickle.dumps(self.rg) + pick = pickle.dumps(rg) unpick = pickle.loads(pick) - assert_(type(self.rg) == type(unpick)) - assert_(comp_state(self.rg.bit_generator.state, + assert_(type(rg) == type(unpick)) + assert_(comp_state(rg.bit_generator.state, unpick.bit_generator.state)) def test_seed_array(self): - if self.seed_vector_bits is None: - bitgen_name = self.bit_generator.__name__ + data = self._create_rng() + if data.seed_vector_bits is None: + bitgen_name = data.bit_generator.__name__ pytest.skip(f'Vector seeding is not supported by {bitgen_name}') - if self.seed_vector_bits == 32: + if data.seed_vector_bits == 32: dtype = np.uint32 else: dtype = np.uint64 seed = np.array([1], dtype=dtype) - bg = self.bit_generator(seed) + bg = data.bit_generator(seed) state1 = bg.state - bg = self.bit_generator(1) + bg = data.bit_generator(1) state2 = bg.state assert_(comp_state(state1, state2)) seed = np.arange(4, dtype=dtype) - bg = self.bit_generator(seed) + bg = data.bit_generator(seed) state1 = bg.state - bg = self.bit_generator(seed[0]) + bg = data.bit_generator(seed[0]) state2 = bg.state assert_(not comp_state(state1, state2)) seed = np.arange(1500, dtype=dtype) - bg = self.bit_generator(seed) + bg = data.bit_generator(seed) state1 = bg.state - bg = self.bit_generator(seed[0]) + bg = data.bit_generator(seed[0]) state2 = bg.state assert_(not comp_state(state1, state2)) seed = 2 ** 
np.mod(np.arange(1500, dtype=dtype), - self.seed_vector_bits - 1) + 1 - bg = self.bit_generator(seed) + data.seed_vector_bits - 1) + 1 + bg = data.bit_generator(seed) state1 = bg.state - bg = self.bit_generator(seed[0]) + bg = data.bit_generator(seed[0]) state2 = bg.state assert_(not comp_state(state1, state2)) def test_uniform_float(self): - rg = Generator(self.bit_generator(12345)) + bit_generator = self._create_rng().bit_generator + rg = Generator(bit_generator(12345)) warmup(rg) state = rg.bit_generator.state r1 = rg.random(11, dtype=np.float32) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(bit_generator()) warmup(rg2) rg2.bit_generator.state = state r2 = rg2.random(11, dtype=np.float32) @@ -497,11 +550,12 @@ def test_uniform_float(self): assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_gamma_floats(self): - rg = Generator(self.bit_generator()) + bit_generator = self._create_rng().bit_generator + rg = Generator(bit_generator()) warmup(rg) state = rg.bit_generator.state r1 = rg.standard_gamma(4.0, 11, dtype=np.float32) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(bit_generator()) warmup(rg2) rg2.bit_generator.state = state r2 = rg2.standard_gamma(4.0, 11, dtype=np.float32) @@ -510,11 +564,12 @@ def test_gamma_floats(self): assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_normal_floats(self): - rg = Generator(self.bit_generator()) + bit_generator = self._create_rng().bit_generator + rg = Generator(bit_generator()) warmup(rg) state = rg.bit_generator.state r1 = rg.standard_normal(11, dtype=np.float32) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(bit_generator()) warmup(rg2) rg2.bit_generator.state = state r2 = rg2.standard_normal(11, dtype=np.float32) @@ -523,11 +578,12 @@ def test_normal_floats(self): assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_normal_zig_floats(self): - rg = Generator(self.bit_generator()) + bit_generator = self._create_rng().bit_generator + rg = Generator(bit_generator()) warmup(rg) state = rg.bit_generator.state r1 = rg.standard_normal(11, dtype=np.float32) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(bit_generator()) warmup(rg2) rg2.bit_generator.state = state r2 = rg2.standard_normal(11, dtype=np.float32) @@ -536,7 +592,7 @@ def test_normal_zig_floats(self): assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_output_fill(self): - rg = self.rg + rg = self._create_rng().rg state = rg.bit_generator.state size = (31, 7, 97) existing = np.empty(size) @@ -558,7 +614,7 @@ def test_output_fill(self): assert_equal(direct, existing) def test_output_filling_uniform(self): - rg = self.rg + rg = self._create_rng().rg state = rg.bit_generator.state size = (31, 7, 97) existing = np.empty(size) @@ -576,7 +632,7 @@ def test_output_filling_uniform(self): assert_equal(direct, existing) def test_output_filling_exponential(self): - rg = self.rg + rg = self._create_rng().rg state = rg.bit_generator.state size = (31, 7, 97) existing = np.empty(size) @@ -594,7 +650,7 @@ def test_output_filling_exponential(self): assert_equal(direct, existing) def test_output_filling_gamma(self): - rg = self.rg + rg = self._create_rng().rg state = rg.bit_generator.state size = (31, 7, 97) existing = np.zeros(size) @@ -612,7 +668,7 @@ def test_output_filling_gamma(self): assert_equal(direct, existing) def test_output_filling_gamma_broadcast(self): - rg = self.rg + rg = self._create_rng().rg state = rg.bit_generator.state size = (31, 7, 97) mu 
= np.arange(97.0) + 1.0
@@ -631,7 +687,7 @@ def test_output_filling_gamma_broadcast(self):
         assert_equal(direct, existing)
 
     def test_output_fill_error(self):
-        rg = self.rg
+        rg = self._create_rng().rg
         size = (31, 7, 97)
         existing = np.empty(size)
         with pytest.raises(TypeError):
@@ -653,7 +709,14 @@ def test_output_fill_error(self):
         with pytest.raises(ValueError):
             rg.standard_gamma(1.0, out=existing[::3])
 
+    @pytest.mark.parametrize("dtype", DTYPES_BOOL_INT_UINT)
     def test_integers_broadcast(self, dtype):
+        rg = self._create_rng().rg
+        initial_state = rg.bit_generator.state
+
+        def reset_state(rng):
+            rng.bit_generator.state = initial_state
+
         if dtype == np.bool:
             upper = 2
             lower = 0
@@ -661,45 +724,49 @@ def test_integers_broadcast(self, dtype):
             info = np.iinfo(dtype)
             upper = int(info.max) + 1
             lower = info.min
-        self._reset_state()
-        a = self.rg.integers(lower, [upper] * 10, dtype=dtype)
-        self._reset_state()
-        b = self.rg.integers([lower] * 10, upper, dtype=dtype)
+        reset_state(rg)
+        a = rg.integers(lower, [upper] * 10, dtype=dtype)
+        reset_state(rg)
+        b = rg.integers([lower] * 10, upper, dtype=dtype)
         assert_equal(a, b)
-        self._reset_state()
-        c = self.rg.integers(lower, upper, size=10, dtype=dtype)
+        reset_state(rg)
+        c = rg.integers(lower, upper, size=10, dtype=dtype)
         assert_equal(a, c)
-        self._reset_state()
-        d = self.rg.integers(np.array(
+        reset_state(rg)
+        d = rg.integers(np.array(
             [lower] * 10), np.array([upper], dtype=object), size=10,
             dtype=dtype)
         assert_equal(a, d)
-        self._reset_state()
-        e = self.rg.integers(
+        reset_state(rg)
+        e = rg.integers(
             np.array([lower] * 10), np.array([upper] * 10), size=10,
             dtype=dtype)
         assert_equal(a, e)
 
-        self._reset_state()
-        a = self.rg.integers(0, upper, size=10, dtype=dtype)
-        self._reset_state()
-        b = self.rg.integers([upper] * 10, dtype=dtype)
+        reset_state(rg)
+        a = rg.integers(0, upper, size=10, dtype=dtype)
+        reset_state(rg)
+        b = rg.integers([upper] * 10, dtype=dtype)
         assert_equal(a, b)
 
+    @pytest.mark.parametrize("dtype", DTYPES_BOOL_INT_UINT)
     def test_integers_numpy(self, dtype):
+        rg = self._create_rng().rg
         high = np.array([1])
         low = np.array([0])
-        out = self.rg.integers(low, high, dtype=dtype)
+        out = rg.integers(low, high, dtype=dtype)
         assert out.shape == (1,)
-        out = self.rg.integers(low[0], high, dtype=dtype)
+        out = rg.integers(low[0], high, dtype=dtype)
         assert out.shape == (1,)
-        out = self.rg.integers(low, high[0], dtype=dtype)
+        out = rg.integers(low, high[0], dtype=dtype)
         assert out.shape == (1,)
 
+    @pytest.mark.parametrize("dtype", DTYPES_BOOL_INT_UINT)
     def test_integers_broadcast_errors(self, dtype):
+        rg = self._create_rng().rg
         if dtype == np.bool:
             upper = 2
             lower = 0
@@ -708,102 +776,97 @@ def test_integers_broadcast_errors(self, dtype):
             upper = int(info.max) + 1
             lower = info.min
         with pytest.raises(ValueError):
-            self.rg.integers(lower, [upper + 1] * 10, dtype=dtype)
+            rg.integers(lower, [upper + 1] * 10, dtype=dtype)
         with pytest.raises(ValueError):
-            self.rg.integers(lower - 1, [upper] * 10, dtype=dtype)
+            rg.integers(lower - 1, [upper] * 10, dtype=dtype)
         with pytest.raises(ValueError):
-            self.rg.integers([lower - 1], [upper] * 10, dtype=dtype)
+            rg.integers([lower - 1], [upper] * 10, dtype=dtype)
         with pytest.raises(ValueError):
-            self.rg.integers([0], [0], dtype=dtype)
+            rg.integers([0], [0], dtype=dtype)
 
 
 class TestMT19937(RNG):
     @classmethod
-    def setup_class(cls):
-        cls.bit_generator = MT19937
-        cls.advance = None
-        cls.seed = [2 ** 21 + 2 ** 16 + 2 ** 5 + 1]
-        cls.rg =
Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 32 - cls._extra_setup() - cls.seed_error = ValueError + def _create_rng(cls): + bit_generator = MT19937 + advance = None + seed = [2 ** 21 + 2 ** 16 + 2 ** 5 + 1] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 32 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) def test_numpy_state(self): + rg = self._create_rng().rg nprg = np.random.RandomState() nprg.standard_normal(99) state = nprg.get_state() - self.rg.bit_generator.state = state - state2 = self.rg.bit_generator.state + rg.bit_generator.state = state + state2 = rg.bit_generator.state assert_((state[1] == state2['state']['key']).all()) assert_(state[2] == state2['state']['pos']) class TestPhilox(RNG): @classmethod - def setup_class(cls): - cls.bit_generator = Philox - cls.advance = 2**63 + 2**31 + 2**15 + 1 - cls.seed = [12345] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 64 - cls._extra_setup() + def _create_rng(cls): + bit_generator = Philox + advance = 2**63 + 2**31 + 2**15 + 1 + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 64 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) class TestSFC64(RNG): @classmethod - def setup_class(cls): - cls.bit_generator = SFC64 - cls.advance = None - cls.seed = [12345] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 192 - cls._extra_setup() + def _create_rng(cls): + bit_generator = SFC64 + advance = None + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 192 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) class TestPCG64(RNG): @classmethod - def setup_class(cls): - cls.bit_generator = PCG64 - cls.advance = 2**63 + 2**31 + 2**15 + 1 - cls.seed = [12345] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 64 - cls._extra_setup() + def _create_rng(cls): + bit_generator = PCG64 + advance = 2**63 + 2**31 + 2**15 + 1 + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 64 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) class TestPCG64DXSM(RNG): @classmethod - def setup_class(cls): - cls.bit_generator = PCG64DXSM - cls.advance = 2**63 + 2**31 + 2**15 + 1 - cls.seed = [12345] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 64 - cls._extra_setup() + def _create_rng(cls): + bit_generator = PCG64DXSM + advance = 2**63 + 2**31 + 2**15 + 1 + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 64 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) class TestDefaultRNG(RNG): @classmethod - def setup_class(cls): + def _create_rng(cls): # This will duplicate some tests that directly instantiate a fresh # Generator(), but that's okay. 
-        cls.bit_generator = PCG64
-        cls.advance = 2**63 + 2**31 + 2**15 + 1
-        cls.seed = [12345]
-        cls.rg = np.random.default_rng(*cls.seed)
-        cls.initial_state = cls.rg.bit_generator.state
-        cls.seed_vector_bits = 64
-        cls._extra_setup()
+        bit_generator = PCG64
+        advance = 2**63 + 2**31 + 2**15 + 1
+        seed = [12345]
+        rg = np.random.default_rng(*seed)
+        seed_vector_bits = 64
+        return RNGData(bit_generator, advance, seed, rg, seed_vector_bits)
 
     def test_default_is_pcg64(self):
         # In order to change the default BitGenerator, we'll go through
         # a deprecation cycle to move to a different function.
-        assert_(isinstance(self.rg.bit_generator, PCG64))
+        rg = self._create_rng().rg
+        assert_(isinstance(rg.bit_generator, PCG64))
 
     def test_seed(self):
         np.random.default_rng()

From 6bf63e712010ba059f7a37b9a3d1628361c330ca Mon Sep 17 00:00:00 2001
From: Ralf Gommers
Date: Sun, 14 Sep 2025 14:57:57 +0200
Subject: [PATCH 0456/1018] apply suggested edit [skip ci]

Co-authored-by: Matti Picus
---
 doc/release/upcoming_changes/29750.change.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/release/upcoming_changes/29750.change.rst b/doc/release/upcoming_changes/29750.change.rst
index 5c72ef13db0f..2759c08d8349 100644
--- a/doc/release/upcoming_changes/29750.change.rst
+++ b/doc/release/upcoming_changes/29750.change.rst
@@ -1,5 +1,5 @@
 The ``npymath`` and ``npyrandom`` libraries now have a ``.lib`` rather than a
-``.a`` file extension, for compatibility for building with MSVC and
+``.a`` file extension on win-arm64, for compatibility when building with MSVC and
 ``setuptools``. Please note that using these static libraries is discouraged
 and for existing projects using it, it's best to use it with a matching
 compiler toolchain, which is ``clang-cl`` on Windows on Arm.
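
The test_smoke refactor above swaps xunit-style class state for a factory that
every test calls itself, so no test can observe bit-generator state mutated by
another. A minimal sketch of the pattern, with hypothetical names (``make_rng``
and its ``RNGData`` bundle here are illustrative, not the actual NumPy test
helpers)::

    from dataclasses import dataclass

    import numpy as np
    from numpy.random import PCG64, Generator


    @dataclass
    class RNGData:
        # Per-test bundle: rebuilt on every call instead of shared per class.
        bit_generator: type[np.random.BitGenerator]
        seed: list[int]
        rg: Generator


    def make_rng(seed=12345):
        # A fresh Generator per call; mutating its state in one test cannot
        # leak into any other test, and tests stay safe to run in parallel.
        return RNGData(PCG64, [seed], Generator(PCG64(seed)))


    def test_reset_state_sketch():
        data = make_rng()
        saved = data.rg.bit_generator.state
        first = data.rg.integers(2**31)
        data.rg.bit_generator.state = saved  # rewind to the saved state
        assert first == data.rg.integers(2**31)

The thread-safety patch further below applies the same idea to the legacy API:
tests construct a local ``random.RandomState(seed)`` instead of calling
``np.random.seed``, which mutates global state shared across threads.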
From 9fab0d3a065b44bc358d749b1185b9eed4beb79a Mon Sep 17 00:00:00 2001 From: mattip Date: Sun, 14 Sep 2025 18:18:47 +0300 Subject: [PATCH 0457/1018] pin asv<0.6.5 [skip azp][skip cirrus][skip circleci] --- .github/workflows/linux.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 5a453521f2dd..b33213449561 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -218,7 +218,7 @@ jobs: run: | sudo apt-get update sudo apt-get install libopenblas-dev ninja-build - pip install asv virtualenv packaging -r requirements/build_requirements.txt + pip install "asv<0.6.5" virtualenv packaging -r requirements/build_requirements.txt - name: Install NumPy run: | spin build -- -Dcpu-dispatch=none From 76ec13aff566e94379c34045d7e152e0d8c6b918 Mon Sep 17 00:00:00 2001 From: Vineet Kumar Date: Mon, 15 Sep 2025 00:56:35 +0530 Subject: [PATCH 0458/1018] DOC: Clarify description of diagonal covariance in multivariate_normal function --- numpy/random/_generator.pyx | 3 ++- numpy/random/mtrand.pyx | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index c067a0821563..c1b78ce5b97e 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -3791,7 +3791,8 @@ cdef class Generator: >>> mean = [0, 0] >>> cov = [[1, 0], [0, 100]] # diagonal covariance - Diagonal covariance means that points are oriented along x or y-axis: + Diagonal covariance means that the variables are independent, and the + probability density contours have their axes aligned with the coordinate axes: >>> import matplotlib.pyplot as plt >>> rng = np.random.default_rng() diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index beaf96c06921..719d1d860d3c 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -4147,7 +4147,8 @@ cdef class RandomState: >>> mean = [0, 0] >>> cov = [[1, 0], [0, 100]] # diagonal covariance - Diagonal covariance means that points are oriented along x or y-axis: + Diagonal covariance means that the variables are independent, and the + probability density contours have their axes aligned with the coordinate axes: >>> import matplotlib.pyplot as plt >>> x, y = np.random.multivariate_normal(mean, cov, 5000).T From dca33b3df733c485fc228e89c9459cec10aebe63 Mon Sep 17 00:00:00 2001 From: Phoenix studio <59125767+phoenixstudiodz@users.noreply.github.com> Date: Mon, 15 Sep 2025 16:10:07 +0100 Subject: [PATCH 0459/1018] DOC: Fix typo in absolute_beginners.rst (#29753) Co-authored-by: phx --- doc/source/user/absolute_beginners.rst | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst index 3ba12b84052e..5f620fa36cef 100644 --- a/doc/source/user/absolute_beginners.rst +++ b/doc/source/user/absolute_beginners.rst @@ -863,12 +863,13 @@ NumPy also performs aggregation functions. In addition to ``min``, ``max``, and result of multiplying the elements together, ``std`` to get the standard deviation, and more. :: + >>> data = np.array([1, 2, 3]) >>> data.max() - 2.0 + 3 >>> data.min() - 1.0 + 1 >>> data.sum() - 3.0 + 6 .. 
image:: images/np_aggregation.png From 5f8296630d0102029175164a76fc1eae6da3fd20 Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Mon, 15 Sep 2025 11:52:42 -0400 Subject: [PATCH 0460/1018] TST: Fix np.random thread test failures (#29729) * TST: Fix np.random thread failures * TST: Use RandomState to fix thread safety issues. * TST: Remove rfunc from TestRandint * TST: Remove setSeed in TestBroadcast --- numpy/lib/tests/test_function_base.py | 4 +- numpy/random/tests/test_random.py | 681 ++++++++------- numpy/random/tests/test_randomstate.py | 783 +++++++++--------- .../tests/test_randomstate_regression.py | 24 +- numpy/random/tests/test_regression.py | 12 +- 5 files changed, 723 insertions(+), 781 deletions(-) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index ce293ec2f256..6ca7892c6cbe 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1186,8 +1186,8 @@ def test_second_order_accurate(self): assert_(np.all(num_error < 0.03) == True) # test with unevenly spaced - np.random.seed(0) - x = np.sort(np.random.random(10)) + rng = np.random.default_rng(0) + x = np.sort(rng.random(10)) y = 2 * x ** 3 + 4 * x ** 2 + 2 * x analytical = 6 * x ** 2 + 8 * x + 2 num_error = np.abs((np.gradient(y, x, edge_order=2) / analytical) - 1) diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index b3e69b41956b..f110aa892b31 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -165,47 +165,49 @@ def test_set_invalid_state(self): class TestRandint: - rfunc = np.random.randint - # valid integer/boolean types itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64] def test_unsupported_type(self): - assert_raises(TypeError, self.rfunc, 1, dtype=float) + rng = random.RandomState() + assert_raises(TypeError, rng.randint, 1, dtype=float) def test_bounds_checking(self): + rng = random.RandomState() for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 - assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt) - assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt) - assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt) - assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt) + assert_raises(ValueError, rng.randint, lbnd - 1, ubnd, dtype=dt) + assert_raises(ValueError, rng.randint, lbnd, ubnd + 1, dtype=dt) + assert_raises(ValueError, rng.randint, ubnd, lbnd, dtype=dt) + assert_raises(ValueError, rng.randint, 1, 0, dtype=dt) def test_rng_zero_and_extremes(self): + rng = random.RandomState() for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 tgt = ubnd - 1 - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) tgt = lbnd - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) tgt = (lbnd + ubnd) // 2 - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) def test_full_range(self): # Test for ticket #1690 + rng = random.RandomState() for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 try: - self.rfunc(lbnd, 
ubnd, dtype=dt) + rng.randint(lbnd, ubnd, dtype=dt) except Exception as e: raise AssertionError("No error should have been raised, " "but one was with the following " @@ -213,15 +215,15 @@ def test_full_range(self): def test_in_bounds_fuzz(self): # Don't use fixed seed - np.random.seed() + rng = random.RandomState() for dt in self.itype[1:]: for ubnd in [4, 8, 16]: - vals = self.rfunc(2, ubnd, size=2**16, dtype=dt) + vals = rng.randint(2, ubnd, size=2**16, dtype=dt) assert_(vals.max() < ubnd) assert_(vals.min() >= 2) - vals = self.rfunc(0, 2, size=2**16, dtype=np.bool) + vals = rng.randint(0, 2, size=2**16, dtype=np.bool) assert_(vals.max() < 2) assert_(vals.min() >= 0) @@ -242,20 +244,20 @@ def test_repeatability(self): 'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'} # noqa: E501 for dt in self.itype[1:]: - np.random.seed(1234) + rng = random.RandomState(1234) # view as little endian for hash if sys.byteorder == 'little': - val = self.rfunc(0, 6, size=1000, dtype=dt) + val = rng.randint(0, 6, size=1000, dtype=dt) else: - val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap() + val = rng.randint(0, 6, size=1000, dtype=dt).byteswap() res = hashlib.sha256(val.view(np.int8)).hexdigest() assert_(tgt[np.dtype(dt).name] == res) # bools do not depend on endianness - np.random.seed(1234) - val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8) + rng = random.RandomState(1234) + val = rng.randint(0, 2, size=1000, dtype=bool).view(np.int8) res = hashlib.sha256(val).hexdigest() assert_(tgt[np.dtype(bool).name] == res) @@ -284,11 +286,12 @@ def test_int64_uint64_corner_case(self): def test_respect_dtype_singleton(self): # See gh-7203 + rng = random.RandomState() for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 - sample = self.rfunc(lbnd, ubnd, dtype=dt) + sample = rng.randint(lbnd, ubnd, dtype=dt) assert_equal(sample.dtype, np.dtype(dt)) for dt in (bool, int): @@ -297,7 +300,7 @@ def test_respect_dtype_singleton(self): ubnd = 2 if dt is bool else np.iinfo("long").max + 1 # gh-7284: Ensure that we get Python data types - sample = self.rfunc(lbnd, ubnd, dtype=dt) + sample = rng.randint(lbnd, ubnd, dtype=dt) assert_(not hasattr(sample, 'dtype')) assert_equal(type(sample), dt) @@ -308,33 +311,33 @@ class TestRandomDist: seed = 1234567890 def test_rand(self): - np.random.seed(self.seed) - actual = np.random.rand(3, 2) + rng = random.RandomState(self.seed) + actual = rng.rand(3, 2) desired = np.array([[0.61879477158567997, 0.59162362775974664], [0.88868358904449662, 0.89165480011560816], [0.4575674820298663, 0.7781880808593471]]) assert_array_almost_equal(actual, desired, decimal=15) def test_randn(self): - np.random.seed(self.seed) - actual = np.random.randn(3, 2) + rng = random.RandomState(self.seed) + actual = rng.randn(3, 2) desired = np.array([[1.34016345771863121, 1.73759122771936081], [1.498988344300628, -0.2286433324536169], [2.031033998682787, 2.17032494605655257]]) assert_array_almost_equal(actual, desired, decimal=15) def test_randint(self): - np.random.seed(self.seed) - actual = np.random.randint(-99, 99, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.randint(-99, 99, size=(3, 2)) desired = np.array([[31, 3], [-52, 41], [-48, -66]]) assert_array_equal(actual, desired) def test_random_integers(self): - np.random.seed(self.seed) + rng = random.RandomState(self.seed) with pytest.warns(DeprecationWarning): - actual = np.random.random_integers(-99, 99, size=(3, 2)) + actual = 
rng.random_integers(-99, 99, size=(3, 2)) desired = np.array([[31, 3], [-52, 41], [-48, -66]]) @@ -368,41 +371,41 @@ def test_random_integers_deprecated(self): np.iinfo('l').max, np.iinfo('l').max) def test_random(self): - np.random.seed(self.seed) - actual = np.random.random((3, 2)) + rng = random.RandomState(self.seed) + actual = rng.random((3, 2)) desired = np.array([[0.61879477158567997, 0.59162362775974664], [0.88868358904449662, 0.89165480011560816], [0.4575674820298663, 0.7781880808593471]]) assert_array_almost_equal(actual, desired, decimal=15) def test_choice_uniform_replace(self): - np.random.seed(self.seed) - actual = np.random.choice(4, 4) + rng = random.RandomState(self.seed) + actual = rng.choice(4, 4) desired = np.array([2, 3, 2, 3]) assert_array_equal(actual, desired) def test_choice_nonuniform_replace(self): - np.random.seed(self.seed) - actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) + rng = random.RandomState(self.seed) + actual = rng.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) desired = np.array([1, 1, 2, 2]) assert_array_equal(actual, desired) def test_choice_uniform_noreplace(self): - np.random.seed(self.seed) - actual = np.random.choice(4, 3, replace=False) + rng = random.RandomState(self.seed) + actual = rng.choice(4, 3, replace=False) desired = np.array([0, 1, 3]) assert_array_equal(actual, desired) def test_choice_nonuniform_noreplace(self): - np.random.seed(self.seed) - actual = np.random.choice(4, 3, replace=False, + rng = random.RandomState(self.seed) + actual = rng.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1]) desired = np.array([2, 3, 1]) assert_array_equal(actual, desired) def test_choice_noninteger(self): - np.random.seed(self.seed) - actual = np.random.choice(['a', 'b', 'c', 'd'], 4) + rng = random.RandomState(self.seed) + actual = rng.choice(['a', 'b', 'c', 'd'], 4) desired = np.array(['c', 'd', 'c', 'd']) assert_array_equal(actual, desired) @@ -477,8 +480,8 @@ def test_choice_nan_probabilities(self): assert_raises(ValueError, np.random.choice, a, p=p) def test_bytes(self): - np.random.seed(self.seed) - actual = np.random.bytes(10) + rng = random.RandomState(self.seed) + actual = rng.bytes(10) desired = b'\x82Ui\x9e\xff\x97+Wf\xa5' assert_equal(actual, desired) @@ -501,9 +504,9 @@ def test_shuffle(self): # gh-4270 lambda x: np.asarray([(i, i) for i in x], [("a", object), ("b", np.int32)])]: - np.random.seed(self.seed) + rng = random.RandomState(self.seed) alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) - np.random.shuffle(alist) + rng.shuffle(alist) actual = alist desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3]) assert_array_equal(actual, desired) @@ -563,11 +566,11 @@ def test_shuffle_memoryview(self): # gh-18273 # allow graceful handling of memoryviews # (treat the same as arrays) - np.random.seed(self.seed) + rng = random.RandomState(self.seed) a = np.arange(5).data - np.random.shuffle(a) + rng.shuffle(a) assert_equal(np.asarray(a), [0, 1, 4, 3, 2]) - rng = np.random.RandomState(self.seed) + rng = random.RandomState(self.seed) rng.shuffle(a) assert_equal(np.asarray(a), [0, 1, 2, 3, 4]) rng = np.random.default_rng(self.seed) @@ -581,8 +584,8 @@ def test_shuffle_not_writeable(self): np.random.shuffle(a) def test_beta(self): - np.random.seed(self.seed) - actual = np.random.beta(.1, .9, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.beta(.1, .9, size=(3, 2)) desired = np.array( [[1.45341850513746058e-02, 5.31297615662868145e-04], [1.85366619058432324e-06, 4.19214516800110563e-03], @@ -590,25 +593,25 @@ def test_beta(self): 
assert_array_almost_equal(actual, desired, decimal=15) def test_binomial(self): - np.random.seed(self.seed) - actual = np.random.binomial(100, .456, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.binomial(100, .456, size=(3, 2)) desired = np.array([[37, 43], [42, 48], [46, 45]]) assert_array_equal(actual, desired) def test_chisquare(self): - np.random.seed(self.seed) - actual = np.random.chisquare(50, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.chisquare(50, size=(3, 2)) desired = np.array([[63.87858175501090585, 68.68407748911370447], [65.77116116901505904, 47.09686762438974483], [72.3828403199695174, 74.18408615260374006]]) assert_array_almost_equal(actual, desired, decimal=13) def test_dirichlet(self): - np.random.seed(self.seed) + rng = random.RandomState(self.seed) alpha = np.array([51.72840233779265162, 39.74494232180943953]) - actual = np.random.mtrand.dirichlet(alpha, size=(3, 2)) + actual = rng.dirichlet(alpha, size=(3, 2)) desired = np.array([[[0.54539444573611562, 0.45460555426388438], [0.62345816822039413, 0.37654183177960598]], [[0.55206000085785778, 0.44793999914214233], @@ -641,8 +644,8 @@ def test_dirichlet_bad_alpha(self): assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]])) def test_exponential(self): - np.random.seed(self.seed) - actual = np.random.exponential(1.1234, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.exponential(1.1234, size=(3, 2)) desired = np.array([[1.08342649775011624, 1.00607889924557314], [2.46628830085216721, 2.49668106809923884], [0.68717433461363442, 1.69175666993575979]]) @@ -653,16 +656,16 @@ def test_exponential_0(self): assert_raises(ValueError, np.random.exponential, scale=-0.) def test_f(self): - np.random.seed(self.seed) - actual = np.random.f(12, 77, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.f(12, 77, size=(3, 2)) desired = np.array([[1.21975394418575878, 1.75135759791559775], [1.44803115017146489, 1.22108959480396262], [1.02176975757740629, 1.34431827623300415]]) assert_array_almost_equal(actual, desired, decimal=15) def test_gamma(self): - np.random.seed(self.seed) - actual = np.random.gamma(5, 3, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.gamma(5, 3, size=(3, 2)) desired = np.array([[24.60509188649287182, 28.54993563207210627], [26.13476110204064184, 12.56988482927716078], [31.71863275789960568, 33.30143302795922011]]) @@ -673,16 +676,16 @@ def test_gamma_0(self): assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.) def test_geometric(self): - np.random.seed(self.seed) - actual = np.random.geometric(.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.geometric(.123456789, size=(3, 2)) desired = np.array([[8, 7], [17, 17], [5, 12]]) assert_array_equal(actual, desired) def test_gumbel(self): - np.random.seed(self.seed) - actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[0.19591898743416816, 0.34405539668096674], [-1.4492522252274278, -1.47374816298446865], [1.10651090478803416, -0.69535848626236174]]) @@ -693,34 +696,34 @@ def test_gumbel_0(self): assert_raises(ValueError, np.random.gumbel, scale=-0.) 
def test_hypergeometric(self): - np.random.seed(self.seed) - actual = np.random.hypergeometric(10, 5, 14, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(10, 5, 14, size=(3, 2)) desired = np.array([[10, 10], [10, 10], [9, 9]]) assert_array_equal(actual, desired) # Test nbad = 0 - actual = np.random.hypergeometric(5, 0, 3, size=4) + actual = rng.hypergeometric(5, 0, 3, size=4) desired = np.array([3, 3, 3, 3]) assert_array_equal(actual, desired) - actual = np.random.hypergeometric(15, 0, 12, size=4) + actual = rng.hypergeometric(15, 0, 12, size=4) desired = np.array([12, 12, 12, 12]) assert_array_equal(actual, desired) # Test ngood = 0 - actual = np.random.hypergeometric(0, 5, 3, size=4) + actual = rng.hypergeometric(0, 5, 3, size=4) desired = np.array([0, 0, 0, 0]) assert_array_equal(actual, desired) - actual = np.random.hypergeometric(0, 15, 12, size=4) + actual = rng.hypergeometric(0, 15, 12, size=4) desired = np.array([0, 0, 0, 0]) assert_array_equal(actual, desired) def test_laplace(self): - np.random.seed(self.seed) - actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.laplace(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[0.66599721112760157, 0.52829452552221945], [3.12791959514407125, 3.18202813572992005], [-0.05391065675859356, 1.74901336242837324]]) @@ -731,16 +734,16 @@ def test_laplace_0(self): assert_raises(ValueError, np.random.laplace, scale=-0.) def test_logistic(self): - np.random.seed(self.seed) - actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.logistic(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[1.09232835305011444, 0.8648196662399954], [4.27818590694950185, 4.33897006346929714], [-0.21682183359214885, 2.63373365386060332]]) assert_array_almost_equal(actual, desired, decimal=15) def test_lognormal(self): - np.random.seed(self.seed) - actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) desired = np.array([[16.50698631688883822, 36.54846706092654784], [22.67886599981281748, 0.71617561058995771], [65.72798501792723869, 86.84341601437161273]]) @@ -751,16 +754,16 @@ def test_lognormal_0(self): assert_raises(ValueError, np.random.lognormal, sigma=-0.) def test_logseries(self): - np.random.seed(self.seed) - actual = np.random.logseries(p=.923456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.logseries(p=.923456789, size=(3, 2)) desired = np.array([[2, 2], [6, 17], [3, 6]]) assert_array_equal(actual, desired) def test_multinomial(self): - np.random.seed(self.seed) - actual = np.random.multinomial(20, [1 / 6.] * 6, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.multinomial(20, [1 / 6.] 
* 6, size=(3, 2)) desired = np.array([[[4, 3, 5, 4, 2, 2], [5, 2, 8, 2, 2, 1]], [[3, 4, 3, 6, 0, 4], @@ -770,11 +773,11 @@ def test_multinomial(self): assert_array_equal(actual, desired) def test_multivariate_normal(self): - np.random.seed(self.seed) + rng = random.RandomState(self.seed) mean = (.123456789, 10) cov = [[1, 0], [0, 1]] size = (3, 2) - actual = np.random.multivariate_normal(mean, cov, size) + actual = rng.multivariate_normal(mean, cov, size) desired = np.array([[[1.463620246718631, 11.73759122771936], [1.622445133300628, 9.771356667546383]], [[2.154490787682787, 12.170324946056553], @@ -785,7 +788,7 @@ def test_multivariate_normal(self): assert_array_almost_equal(actual, desired, decimal=15) # Check for default size, was raising deprecation warning - actual = np.random.multivariate_normal(mean, cov) + actual = rng.multivariate_normal(mean, cov) desired = np.array([0.895289569463708, 9.17180864067987]) assert_array_almost_equal(actual, desired, decimal=15) @@ -793,53 +796,53 @@ def test_multivariate_normal(self): # RuntimeWarning mean = [0, 0] cov = [[1, 2], [2, 1]] - pytest.warns(RuntimeWarning, np.random.multivariate_normal, mean, cov) + pytest.warns(RuntimeWarning, rng.multivariate_normal, mean, cov) # and that it doesn't warn with RuntimeWarning check_valid='ignore' - assert_no_warnings(np.random.multivariate_normal, mean, cov, + assert_no_warnings(rng.multivariate_normal, mean, cov, check_valid='ignore') # and that it raises with RuntimeWarning check_valid='raises' - assert_raises(ValueError, np.random.multivariate_normal, mean, cov, + assert_raises(ValueError, rng.multivariate_normal, mean, cov, check_valid='raise') cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) with warnings.catch_warnings(): warnings.simplefilter('error') - np.random.multivariate_normal(mean, cov) + rng.multivariate_normal(mean, cov) def test_negative_binomial(self): - np.random.seed(self.seed) - actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n=100, p=.12345, size=(3, 2)) desired = np.array([[848, 841], [892, 611], [779, 647]]) assert_array_equal(actual, desired) def test_noncentral_chisquare(self): - np.random.seed(self.seed) - actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) desired = np.array([[23.91905354498517511, 13.35324692733826346], [31.22452661329736401, 16.60047399466177254], [5.03461598262724586, 17.94973089023519464]]) assert_array_almost_equal(actual, desired, decimal=14) - actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) + actual = rng.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) desired = np.array([[1.47145377828516666, 0.15052899268012659], [0.00943803056963588, 1.02647251615666169], [0.332334982684171, 0.15451287602753125]]) assert_array_almost_equal(actual, desired, decimal=14) - np.random.seed(self.seed) - actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) desired = np.array([[9.597154162763948, 11.725484450296079], [10.413711048138335, 3.694475922923986], [13.484222138963087, 14.377255424602957]]) assert_array_almost_equal(actual, desired, decimal=14) def test_noncentral_f(self): - np.random.seed(self.seed) - actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1, + rng = random.RandomState(self.seed) + actual = 
rng.noncentral_f(dfnum=5, dfden=2, nonc=1, size=(3, 2)) desired = np.array([[1.40598099674926669, 0.34207973179285761], [3.57715069265772545, 7.92632662577829805], @@ -847,8 +850,8 @@ def test_noncentral_f(self): assert_array_almost_equal(actual, desired, decimal=14) def test_normal(self): - np.random.seed(self.seed) - actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.normal(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[2.80378370443726244, 3.59863924443872163], [3.121433477601256, -0.33382987590723379], [4.18552478636557357, 4.46410668111310471]]) @@ -859,8 +862,8 @@ def test_normal_0(self): assert_raises(ValueError, np.random.normal, scale=-0.) def test_pareto(self): - np.random.seed(self.seed) - actual = np.random.pareto(a=.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.pareto(a=.123456789, size=(3, 2)) desired = np.array( [[2.46852460439034849e+03, 1.41286880810518346e+03], [5.28287797029485181e+07, 6.57720981047328785e+07], @@ -874,8 +877,8 @@ def test_pareto(self): np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) def test_poisson(self): - np.random.seed(self.seed) - actual = np.random.poisson(lam=.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.poisson(lam=.123456789, size=(3, 2)) desired = np.array([[0, 0], [1, 0], [0, 0]]) @@ -890,16 +893,16 @@ def test_poisson_exceptions(self): assert_raises(ValueError, np.random.poisson, [lambig] * 10) def test_power(self): - np.random.seed(self.seed) - actual = np.random.power(a=.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.power(a=.123456789, size=(3, 2)) desired = np.array([[0.02048932883240791, 0.01424192241128213], [0.38446073748535298, 0.39499689943484395], [0.00177699707563439, 0.13115505880863756]]) assert_array_almost_equal(actual, desired, decimal=15) def test_rayleigh(self): - np.random.seed(self.seed) - actual = np.random.rayleigh(scale=10, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.rayleigh(scale=10, size=(3, 2)) desired = np.array([[13.8882496494248393, 13.383318339044731], [20.95413364294492098, 21.08285015800712614], [11.06066537006854311, 17.35468505778271009]]) @@ -910,24 +913,24 @@ def test_rayleigh_0(self): assert_raises(ValueError, np.random.rayleigh, scale=-0.) 
     def test_standard_cauchy(self):
-        np.random.seed(self.seed)
-        actual = np.random.standard_cauchy(size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.standard_cauchy(size=(3, 2))
         desired = np.array([[0.77127660196445336, -6.55601161955910605],
                             [0.93582023391158309, -2.07479293013759447],
                             [-4.74601644297011926, 0.18338989290760804]])
         assert_array_almost_equal(actual, desired, decimal=15)
 
     def test_standard_exponential(self):
-        np.random.seed(self.seed)
-        actual = np.random.standard_exponential(size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.standard_exponential(size=(3, 2))
         desired = np.array([[0.96441739162374596, 0.89556604882105506],
                             [2.1953785836319808, 2.22243285392490542],
                             [0.6116915921431676, 1.50592546727413201]])
         assert_array_almost_equal(actual, desired, decimal=15)
 
     def test_standard_gamma(self):
-        np.random.seed(self.seed)
-        actual = np.random.standard_gamma(shape=3, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.standard_gamma(shape=3, size=(3, 2))
         desired = np.array([[5.50841531318455058, 6.62953470301903103],
                             [5.93988484943779227, 2.31044849402133989],
                             [7.54838614231317084, 8.012756093271868]])
@@ -938,24 +941,24 @@ def test_standard_gamma_0(self):
         assert_raises(ValueError, np.random.standard_gamma, shape=-0.)
 
     def test_standard_normal(self):
-        np.random.seed(self.seed)
-        actual = np.random.standard_normal(size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.standard_normal(size=(3, 2))
         desired = np.array([[1.34016345771863121, 1.73759122771936081],
                             [1.498988344300628, -0.2286433324536169],
                             [2.031033998682787, 2.17032494605655257]])
         assert_array_almost_equal(actual, desired, decimal=15)
 
     def test_standard_t(self):
-        np.random.seed(self.seed)
-        actual = np.random.standard_t(df=10, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.standard_t(df=10, size=(3, 2))
         desired = np.array([[0.97140611862659965, -0.08830486548450577],
                             [1.36311143689505321, -0.55317463909867071],
                             [-0.18473749069684214, 0.61181537341755321]])
         assert_array_almost_equal(actual, desired, decimal=15)
 
     def test_triangular(self):
-        np.random.seed(self.seed)
-        actual = np.random.triangular(left=5.12, mode=10.23, right=20.34,
+        rng = random.RandomState(self.seed)
+        actual = rng.triangular(left=5.12, mode=10.23, right=20.34,
                                       size=(3, 2))
         desired = np.array([[12.68117178949215784, 12.4129206149193152],
                             [16.20131377335158263, 16.25692138747600524],
@@ -963,8 +966,8 @@ def test_triangular(self):
         assert_array_almost_equal(actual, desired, decimal=14)
 
     def test_uniform(self):
-        np.random.seed(self.seed)
-        actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.uniform(low=1.23, high=10.54, size=(3, 2))
         desired = np.array([[6.99097932346268003, 6.73801597444323974],
                             [9.50364421400426274, 9.53130618907631089],
                             [5.48995325769805476, 8.47493103280052118]])
@@ -1011,8 +1014,8 @@ def __int__(self):
         assert_raises(TypeError, np.random.hypergeometric, throwing_int, 1, 1)
 
     def test_vonmises(self):
-        np.random.seed(self.seed)
-        actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
         desired = np.array([[2.28567572673902042, 2.89163838442285037],
                             [0.38198375564286025, 2.57638023113890746],
                             [1.19153771588353052, 1.83509849681825354]])
@@ -1025,16 +1028,16 @@ def test_vonmises_small(self):
         np.testing.assert_(np.isfinite(r).all())
 
     def test_wald(self):
-        np.random.seed(self.seed)
-        actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.wald(mean=1.23, scale=1.54, size=(3, 2))
         desired = np.array([[3.82935265715889983, 5.13125249184285526],
                             [0.35045403618358717, 1.50832396872003538],
                             [0.24124319895843183, 0.22031101461955038]])
         assert_array_almost_equal(actual, desired, decimal=14)
 
     def test_weibull(self):
-        np.random.seed(self.seed)
-        actual = np.random.weibull(a=1.23, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.weibull(a=1.23, size=(3, 2))
         desired = np.array([[0.97097342648766727, 0.91422896443565516],
                             [1.89517770034962929, 1.91414357960479564],
                             [0.67057783752390987, 1.39494046635066793]])
@@ -1046,8 +1049,8 @@ def test_weibull_0(self):
         assert_raises(ValueError, np.random.weibull, a=-0.)
 
     def test_zipf(self):
-        np.random.seed(self.seed)
-        actual = np.random.zipf(a=1.23, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.zipf(a=1.23, size=(3, 2))
         desired = np.array([[66, 29],
                             [1, 1],
                             [3, 13]])
@@ -1059,138 +1062,128 @@ class TestBroadcast:
     # correctly when presented with non-scalar arguments
     seed = 123456789
 
-    def setSeed(self):
-        np.random.seed(self.seed)
-
     # TODO: Include test for randint once it can broadcast
     # Can steal the test written in PR #6938
 
     def test_uniform(self):
         low = [0]
         high = [1]
-        uniform = np.random.uniform
         desired = np.array([0.53283302478975902,
                             0.53413660089041659,
                             0.50955303552646702])
 
-        self.setSeed()
-        actual = uniform(low * 3, high)
+        rng = random.RandomState(self.seed)
+        actual = rng.uniform(low * 3, high)
         assert_array_almost_equal(actual, desired, decimal=14)
 
-        self.setSeed()
-        actual = uniform(low, high * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.uniform(low, high * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
 
     def test_normal(self):
         loc = [0]
         scale = [1]
         bad_scale = [-1]
-        normal = np.random.normal
         desired = np.array([2.2129019979039612,
                             2.1283977976520019,
                             1.8417114045748335])
 
-        self.setSeed()
-        actual = normal(loc * 3, scale)
+        rng = random.RandomState(self.seed)
+        actual = rng.normal(loc * 3, scale)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, normal, loc * 3, bad_scale)
+        assert_raises(ValueError, rng.normal, loc * 3, bad_scale)
 
-        self.setSeed()
-        actual = normal(loc, scale * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.normal(loc, scale * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, normal, loc, bad_scale * 3)
+        assert_raises(ValueError, rng.normal, loc, bad_scale * 3)
 
     def test_beta(self):
         a = [1]
         b = [2]
         bad_a = [-1]
        bad_b = [-2]
-        beta = np.random.beta
         desired = np.array([0.19843558305989056,
                             0.075230336409423643,
                             0.24976865978980844])
 
-        self.setSeed()
-        actual = beta(a * 3, b)
+        rng = random.RandomState(self.seed)
+        actual = rng.beta(a * 3, b)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, beta, bad_a * 3, b)
-        assert_raises(ValueError, beta, a * 3, bad_b)
+        assert_raises(ValueError, rng.beta, bad_a * 3, b)
+        assert_raises(ValueError, rng.beta, a * 3, bad_b)
 
-        self.setSeed()
-        actual = beta(a, b * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.beta(a, b * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, beta, bad_a, b * 3)
-        assert_raises(ValueError, beta, a, bad_b * 3)
+        assert_raises(ValueError, rng.beta, bad_a, b * 3)
+        assert_raises(ValueError, rng.beta, a, bad_b * 3)
 
     def test_exponential(self):
         scale = [1]
         bad_scale = [-1]
-        exponential = np.random.exponential
         desired = np.array([0.76106853658845242,
                             0.76386282278691653,
                             0.71243813125891797])
 
-        self.setSeed()
-        actual = exponential(scale * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.exponential(scale * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, exponential, bad_scale * 3)
+        assert_raises(ValueError, rng.exponential, bad_scale * 3)
 
     def test_standard_gamma(self):
         shape = [1]
         bad_shape = [-1]
-        std_gamma = np.random.standard_gamma
         desired = np.array([0.76106853658845242,
                             0.76386282278691653,
                             0.71243813125891797])
 
-        self.setSeed()
-        actual = std_gamma(shape * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.standard_gamma(shape * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, std_gamma, bad_shape * 3)
+        assert_raises(ValueError, rng.standard_gamma, bad_shape * 3)
 
     def test_gamma(self):
         shape = [1]
         scale = [2]
         bad_shape = [-1]
         bad_scale = [-2]
-        gamma = np.random.gamma
         desired = np.array([1.5221370731769048,
                             1.5277256455738331,
                             1.4248762625178359])
 
-        self.setSeed()
-        actual = gamma(shape * 3, scale)
+        rng = random.RandomState(self.seed)
+        actual = rng.gamma(shape * 3, scale)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, gamma, bad_shape * 3, scale)
-        assert_raises(ValueError, gamma, shape * 3, bad_scale)
+        assert_raises(ValueError, rng.gamma, bad_shape * 3, scale)
+        assert_raises(ValueError, rng.gamma, shape * 3, bad_scale)
 
-        self.setSeed()
-        actual = gamma(shape, scale * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.gamma(shape, scale * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, gamma, bad_shape, scale * 3)
-        assert_raises(ValueError, gamma, shape, bad_scale * 3)
 
+        assert_raises(ValueError, rng.gamma, bad_shape, scale * 3)
+        assert_raises(ValueError, rng.gamma, shape, bad_scale * 3)
 
     def test_f(self):
         dfnum = [1]
         dfden = [2]
         bad_dfnum = [-1]
         bad_dfden = [-2]
-        f = np.random.f
         desired = np.array([0.80038951638264799,
                             0.86768719635363512,
                             2.7251095168386801])
 
-        self.setSeed()
-        actual = f(dfnum * 3, dfden)
+        rng = random.RandomState(self.seed)
+        actual = rng.f(dfnum * 3, dfden)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, f, bad_dfnum * 3, dfden)
-        assert_raises(ValueError, f, dfnum * 3, bad_dfden)
+        assert_raises(ValueError, rng.f, bad_dfnum * 3, dfden)
+        assert_raises(ValueError, rng.f, dfnum * 3, bad_dfden)
 
-        self.setSeed()
-        actual = f(dfnum, dfden * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.f(dfnum, dfden * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, f, bad_dfnum, dfden * 3)
-        assert_raises(ValueError, f, dfnum, bad_dfden * 3)
+        assert_raises(ValueError, rng.f, bad_dfnum, dfden * 3)
+        assert_raises(ValueError, rng.f, dfnum, bad_dfden * 3)
 
     def test_noncentral_f(self):
         dfnum = [2]
@@ -1199,256 +1192,242 @@ def test_noncentral_f(self):
         dfden = [3]
         nonc = [4]
         bad_dfnum = [0]
         bad_dfden = [-1]
         bad_nonc = [-2]
-        nonc_f = np.random.noncentral_f
         desired = np.array([9.1393943263705211,
                             13.025456344595602,
                             8.8018098359100545])
 
-        self.setSeed()
-        actual = nonc_f(dfnum * 3, dfden, nonc)
+        rng = random.RandomState(self.seed)
+        actual = rng.noncentral_f(dfnum * 3, dfden, nonc)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
-        assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
-        assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
+        assert_raises(ValueError, rng.noncentral_f, bad_dfnum * 3, dfden, nonc)
+        assert_raises(ValueError, rng.noncentral_f, dfnum * 3, bad_dfden, nonc)
+        assert_raises(ValueError, rng.noncentral_f, dfnum * 3, dfden, bad_nonc)
 
-        self.setSeed()
-        actual = nonc_f(dfnum, dfden * 3, nonc)
+        rng = random.RandomState(self.seed)
+        actual = rng.noncentral_f(dfnum, dfden * 3, nonc)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
-        assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
-        assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
+        assert_raises(ValueError, rng.noncentral_f, bad_dfnum, dfden * 3, nonc)
+        assert_raises(ValueError, rng.noncentral_f, dfnum, bad_dfden * 3, nonc)
+        assert_raises(ValueError, rng.noncentral_f, dfnum, dfden * 3, bad_nonc)
 
-        self.setSeed()
-        actual = nonc_f(dfnum, dfden, nonc * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.noncentral_f(dfnum, dfden, nonc * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
-        assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
-        assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
+        assert_raises(ValueError, rng.noncentral_f, bad_dfnum, dfden, nonc * 3)
+        assert_raises(ValueError, rng.noncentral_f, dfnum, bad_dfden, nonc * 3)
+        assert_raises(ValueError, rng.noncentral_f, dfnum, dfden, bad_nonc * 3)
 
     def test_noncentral_f_small_df(self):
-        self.setSeed()
+        rng = random.RandomState(self.seed)
         desired = np.array([6.869638627492048, 0.785880199263955])
-        actual = np.random.noncentral_f(0.9, 0.9, 2, size=2)
+        actual = rng.noncentral_f(0.9, 0.9, 2, size=2)
         assert_array_almost_equal(actual, desired, decimal=14)
 
     def test_chisquare(self):
         df = [1]
         bad_df = [-1]
-        chisquare = np.random.chisquare
         desired = np.array([0.57022801133088286,
                             0.51947702108840776,
                             0.1320969254923558])
 
-        self.setSeed()
-        actual = chisquare(df * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.chisquare(df * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, chisquare, bad_df * 3)
+        assert_raises(ValueError, rng.chisquare, bad_df * 3)
 
     def test_noncentral_chisquare(self):
         df = [1]
         nonc = [2]
         bad_df = [-1]
         bad_nonc = [-2]
-        nonc_chi = np.random.noncentral_chisquare
         desired = np.array([9.0015599467913763,
                             4.5804135049718742,
                             6.0872302432834564])
 
-        self.setSeed()
-        actual = nonc_chi(df * 3, nonc)
+        rng = random.RandomState(self.seed)
+        actual = rng.noncentral_chisquare(df * 3, nonc)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
-        assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
+        assert_raises(ValueError, rng.noncentral_chisquare, bad_df * 3, nonc)
+        assert_raises(ValueError, rng.noncentral_chisquare, df * 3, bad_nonc)
 
-        self.setSeed()
-        actual = nonc_chi(df, nonc * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.noncentral_chisquare(df, nonc * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
-        assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
+        assert_raises(ValueError, rng.noncentral_chisquare, bad_df, nonc * 3)
+        assert_raises(ValueError, rng.noncentral_chisquare, df, bad_nonc * 3)
 
     def test_standard_t(self):
         df = [1]
         bad_df = [-1]
-        t = np.random.standard_t
         desired = np.array([3.0702872575217643,
                             5.8560725167361607,
                             1.0274791436474273])
 
-        self.setSeed()
-        actual = t(df * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.standard_t(df * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, t, bad_df * 3)
+        assert_raises(ValueError, rng.standard_t, bad_df * 3)
 
     def test_vonmises(self):
         mu = [2]
         kappa = [1]
         bad_kappa = [-1]
-        vonmises = np.random.vonmises
         desired = np.array([2.9883443664201312,
                             -2.7064099483995943,
                             -1.8672476700665914])
 
-        self.setSeed()
-        actual = vonmises(mu * 3, kappa)
+        rng = random.RandomState(self.seed)
+        actual = rng.vonmises(mu * 3, kappa)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, vonmises, mu * 3, bad_kappa)
+        assert_raises(ValueError, rng.vonmises, mu * 3, bad_kappa)
 
-        self.setSeed()
-        actual = vonmises(mu, kappa * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.vonmises(mu, kappa * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, vonmises, mu, bad_kappa * 3)
+        assert_raises(ValueError, rng.vonmises, mu, bad_kappa * 3)
 
     def test_pareto(self):
         a = [1]
         bad_a = [-1]
-        pareto = np.random.pareto
         desired = np.array([1.1405622680198362,
                             1.1465519762044529,
                             1.0389564467453547])
 
-        self.setSeed()
-        actual = pareto(a * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.pareto(a * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, pareto, bad_a * 3)
+        assert_raises(ValueError, rng.pareto, bad_a * 3)
 
     def test_weibull(self):
         a = [1]
         bad_a = [-1]
-        weibull = np.random.weibull
         desired = np.array([0.76106853658845242,
                             0.76386282278691653,
                             0.71243813125891797])
 
-        self.setSeed()
-        actual = weibull(a * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.weibull(a * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, weibull, bad_a * 3)
+        assert_raises(ValueError, rng.weibull, bad_a * 3)
 
     def test_power(self):
         a = [1]
         bad_a = [-1]
-        power = np.random.power
         desired = np.array([0.53283302478975902,
                             0.53413660089041659,
                             0.50955303552646702])
 
-        self.setSeed()
-        actual = power(a * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.power(a * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, power, bad_a * 3)
+        assert_raises(ValueError, rng.power, bad_a * 3)
 
     def test_laplace(self):
         loc = [0]
         scale = [1]
         bad_scale = [-1]
-        laplace = np.random.laplace
         desired = np.array([0.067921356028507157,
                             0.070715642226971326,
                             0.019290950698972624])
 
-        self.setSeed()
-        actual = laplace(loc * 3, scale)
+        rng = random.RandomState(self.seed)
+        actual = rng.laplace(loc * 3, scale)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, laplace, loc * 3, bad_scale)
+        assert_raises(ValueError, rng.laplace, loc * 3, bad_scale)
 
-        self.setSeed()
-        actual = laplace(loc, scale * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.laplace(loc, scale * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, laplace, loc, bad_scale * 3)
+        assert_raises(ValueError, rng.laplace, loc, bad_scale * 3)
 
     def test_gumbel(self):
         loc = [0]
         scale = [1]
         bad_scale = [-1]
-        gumbel = np.random.gumbel
         desired = np.array([0.2730318639556768,
                             0.26936705726291116,
                             0.33906220393037939])
 
-        self.setSeed()
-        actual = gumbel(loc * 3, scale)
+        rng = random.RandomState(self.seed)
+        actual = rng.gumbel(loc * 3, scale)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, gumbel, loc * 3, bad_scale)
+        assert_raises(ValueError, rng.gumbel, loc * 3, bad_scale)
 
-        self.setSeed()
-        actual = gumbel(loc, scale * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.gumbel(loc, scale * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, gumbel, loc, bad_scale * 3)
+        assert_raises(ValueError, rng.gumbel, loc, bad_scale * 3)
 
     def test_logistic(self):
         loc = [0]
         scale = [1]
         bad_scale = [-1]
-        logistic = np.random.logistic
         desired = np.array([0.13152135837586171,
                             0.13675915696285773,
                             0.038216792802833396])
 
-        self.setSeed()
-        actual = logistic(loc * 3, scale)
+        rng = random.RandomState(self.seed)
+        actual = rng.logistic(loc * 3, scale)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, logistic, loc * 3, bad_scale)
+        assert_raises(ValueError, rng.logistic, loc * 3, bad_scale)
 
-        self.setSeed()
-        actual = logistic(loc, scale * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.logistic(loc, scale * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, logistic, loc, bad_scale * 3)
+        assert_raises(ValueError, rng.logistic, loc, bad_scale * 3)
 
     def test_lognormal(self):
         mean = [0]
         sigma = [1]
         bad_sigma = [-1]
-        lognormal = np.random.lognormal
         desired = np.array([9.1422086044848427,
                             8.4013952870126261,
                             6.3073234116578671])
 
-        self.setSeed()
-        actual = lognormal(mean * 3, sigma)
+        rng = random.RandomState(self.seed)
+        actual = rng.lognormal(mean * 3, sigma)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
+        assert_raises(ValueError, rng.lognormal, mean * 3, bad_sigma)
 
-        self.setSeed()
-        actual = lognormal(mean, sigma * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.lognormal(mean, sigma * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, lognormal, mean, bad_sigma * 3)
+        assert_raises(ValueError, rng.lognormal, mean, bad_sigma * 3)
 
     def test_rayleigh(self):
         scale = [1]
         bad_scale = [-1]
-        rayleigh = np.random.rayleigh
         desired = np.array([1.2337491937897689,
                             1.2360119924878694,
                             1.1936818095781789])
 
-        self.setSeed()
-        actual = rayleigh(scale * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.rayleigh(scale * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, rayleigh, bad_scale * 3)
+        assert_raises(ValueError, rng.rayleigh, bad_scale * 3)
 
     def test_wald(self):
         mean = [0.5]
         scale = [1]
         bad_mean = [0]
         bad_scale = [-2]
-        wald = np.random.wald
         desired = np.array([0.11873681120271318,
                             0.12450084820795027,
                             0.9096122728408238])
 
-        self.setSeed()
-        actual = wald(mean * 3, scale)
+        rng = random.RandomState(self.seed)
+        actual = rng.wald(mean * 3, scale)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, wald, bad_mean * 3, scale)
-        assert_raises(ValueError, wald, mean * 3, bad_scale)
+        assert_raises(ValueError, rng.wald, bad_mean * 3, scale)
+        assert_raises(ValueError, rng.wald, mean * 3, bad_scale)
 
-        self.setSeed()
-        actual = wald(mean, scale * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.wald(mean, scale * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, wald, bad_mean, scale * 3)
-        assert_raises(ValueError, wald, mean, bad_scale * 3)
-        assert_raises(ValueError, wald, 0.0, 1)
-        assert_raises(ValueError, wald, 0.5, 0.0)
+        assert_raises(ValueError, rng.wald, bad_mean, scale * 3)
+        assert_raises(ValueError, rng.wald, mean, bad_scale * 3)
+        assert_raises(ValueError, rng.wald, 0.0, 1)
+        assert_raises(ValueError, rng.wald, 0.5, 0.0)
 
     def test_triangular(self):
         left = [1]
@@ -1457,33 +1436,32 @@ def test_triangular(self):
         bad_left_one = [3]
         bad_mode_one = [4]
         bad_left_two, bad_mode_two = right * 2
-        triangular = np.random.triangular
         desired = np.array([2.03339048710429,
                             2.0347400359389356,
                             2.0095991069536208])
 
-        self.setSeed()
-        actual = triangular(left * 3, mode, right)
+        rng = random.RandomState(self.seed)
+        actual = rng.triangular(left * 3, mode, right)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
-        assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
-        assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
+        assert_raises(ValueError, rng.triangular, bad_left_one * 3, mode, right)
+        assert_raises(ValueError, rng.triangular, left * 3, bad_mode_one, right)
+        assert_raises(ValueError, rng.triangular, bad_left_two * 3, bad_mode_two,
                       right)
 
-        self.setSeed()
-        actual = triangular(left, mode * 3, right)
+        rng = random.RandomState(self.seed)
+        actual = rng.triangular(left, mode * 3, right)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
-        assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
-        assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
+        assert_raises(ValueError, rng.triangular, bad_left_one, mode * 3, right)
+        assert_raises(ValueError, rng.triangular, left, bad_mode_one * 3, right)
+        assert_raises(ValueError, rng.triangular, bad_left_two, bad_mode_two * 3,
                       right)
 
-        self.setSeed()
-        actual = triangular(left, mode, right * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.triangular(left, mode, right * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
-        assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
-        assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
+        assert_raises(ValueError, rng.triangular, bad_left_one, mode, right * 3)
+        assert_raises(ValueError, rng.triangular, left, bad_mode_one, right * 3)
+        assert_raises(ValueError, rng.triangular, bad_left_two, bad_mode_two,
                       right * 3)
 
     def test_binomial(self):
@@ -1492,22 +1470,21 @@ def test_binomial(self):
         n = [1]
         p = [0.5]
         bad_n = [-1]
         bad_p_one = [-1]
         bad_p_two = [1.5]
-        binom = np.random.binomial
         desired = np.array([1, 1, 1])
 
-        self.setSeed()
-        actual = binom(n * 3, p)
+        rng = random.RandomState(self.seed)
+        actual = rng.binomial(n * 3, p)
         assert_array_equal(actual, desired)
-        assert_raises(ValueError, binom, bad_n * 3, p)
-        assert_raises(ValueError, binom, n * 3, bad_p_one)
-        assert_raises(ValueError, binom, n * 3, bad_p_two)
+        assert_raises(ValueError, rng.binomial, bad_n * 3, p)
+        assert_raises(ValueError, rng.binomial, n * 3, bad_p_one)
+        assert_raises(ValueError, rng.binomial, n * 3, bad_p_two)
 
-        self.setSeed()
-        actual = binom(n, p * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.binomial(n, p * 3)
         assert_array_equal(actual, desired)
-        assert_raises(ValueError, binom, bad_n, p * 3)
-        assert_raises(ValueError, binom, n, bad_p_one * 3)
-        assert_raises(ValueError, binom, n, bad_p_two * 3)
+        assert_raises(ValueError, rng.binomial, bad_n, p * 3)
+        assert_raises(ValueError, rng.binomial, n, bad_p_one * 3)
+        assert_raises(ValueError, rng.binomial, n, bad_p_two * 3)
 
     def test_negative_binomial(self):
         n = [1]
@@ -1515,22 +1492,21 @@ def test_negative_binomial(self):
         p = [0.5]
         bad_n = [-1]
         bad_p_one = [-1]
         bad_p_two = [1.5]
-        neg_binom = np.random.negative_binomial
         desired = np.array([1, 0, 1])
 
-        self.setSeed()
-        actual = neg_binom(n * 3, p)
+        rng = random.RandomState(self.seed)
+        actual = rng.negative_binomial(n * 3, p)
         assert_array_equal(actual, desired)
-        assert_raises(ValueError, neg_binom, bad_n * 3, p)
-        assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
-        assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
+        assert_raises(ValueError, rng.negative_binomial, bad_n * 3, p)
+        assert_raises(ValueError, rng.negative_binomial, n * 3, bad_p_one)
+        assert_raises(ValueError, rng.negative_binomial, n * 3, bad_p_two)
 
-        self.setSeed()
-        actual = neg_binom(n, p * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.negative_binomial(n, p * 3)
         assert_array_equal(actual, desired)
-        assert_raises(ValueError, neg_binom, bad_n, p * 3)
-        assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
-        assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
+        assert_raises(ValueError, rng.negative_binomial, bad_n, p * 3)
+        assert_raises(ValueError, rng.negative_binomial, n, bad_p_one * 3)
+        assert_raises(ValueError, rng.negative_binomial, n, bad_p_two * 3)
 
     def test_poisson(self):
         max_lam = np.random.RandomState()._poisson_lam_max
@@ -1538,41 +1514,38 @@ def test_poisson(self):
 
         lam = [1]
         bad_lam_one = [-1]
         bad_lam_two = [max_lam * 2]
-        poisson = np.random.poisson
         desired = np.array([1, 1, 0])
 
-        self.setSeed()
-        actual = poisson(lam * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.poisson(lam * 3)
         assert_array_equal(actual, desired)
-        assert_raises(ValueError, poisson, bad_lam_one * 3)
-        assert_raises(ValueError, poisson, bad_lam_two * 3)
+        assert_raises(ValueError, rng.poisson, bad_lam_one * 3)
+        assert_raises(ValueError, rng.poisson, bad_lam_two * 3)
 
     def test_zipf(self):
         a = [2]
         bad_a = [0]
-        zipf = np.random.zipf
         desired = np.array([2, 2, 1])
 
-        self.setSeed()
-        actual = zipf(a * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.zipf(a * 3)
         assert_array_equal(actual, desired)
-        assert_raises(ValueError, zipf, bad_a * 3)
+        assert_raises(ValueError, rng.zipf, bad_a * 3)
         with np.errstate(invalid='ignore'):
-            assert_raises(ValueError, zipf, np.nan)
-            assert_raises(ValueError, zipf, [0, 0, np.nan])
+            assert_raises(ValueError, rng.zipf, np.nan)
+            assert_raises(ValueError, rng.zipf, [0, 0, np.nan])
 
     def test_geometric(self):
         p = [0.5]
         bad_p_one = [-1]
         bad_p_two = [1.5]
-        geom = np.random.geometric
         desired = np.array([2, 2, 2])
 
-        self.setSeed()
-        actual = geom(p * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.geometric(p * 3)
         assert_array_equal(actual, desired)
-        assert_raises(ValueError, geom, bad_p_one * 3)
-        assert_raises(ValueError, geom, bad_p_two * 3)
+        assert_raises(ValueError, rng.geometric, bad_p_one * 3)
+        assert_raises(ValueError, rng.geometric, bad_p_two * 3)
 
     def test_hypergeometric(self):
         ngood = [1]
@@ -1582,45 +1555,43 @@ def test_hypergeometric(self):
         nbad = [2]
         nsample = [2]
         bad_ngood = [-1]
         bad_nbad = [-2]
         bad_nsample_one = [0]
         bad_nsample_two = [4]
-        hypergeom = np.random.hypergeometric
         desired = np.array([1, 1, 1])
 
-        self.setSeed()
-        actual = hypergeom(ngood * 3, nbad, nsample)
+        rng = random.RandomState(self.seed)
+        actual = rng.hypergeometric(ngood * 3, nbad, nsample)
         assert_array_equal(actual, desired)
-        assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample)
-        assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample)
-        assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one)
-        assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two)
+        assert_raises(ValueError, rng.hypergeometric, bad_ngood * 3, nbad, nsample)
+        assert_raises(ValueError, rng.hypergeometric, ngood * 3, bad_nbad, nsample)
+        assert_raises(ValueError, rng.hypergeometric, ngood * 3, nbad, bad_nsample_one)
+        assert_raises(ValueError, rng.hypergeometric, ngood * 3, nbad, bad_nsample_two)
 
-        self.setSeed()
-        actual = hypergeom(ngood, nbad * 3, nsample)
+        rng = random.RandomState(self.seed)
+        actual = rng.hypergeometric(ngood, nbad * 3, nsample)
         assert_array_equal(actual, desired)
-        assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample)
-        assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample)
-        assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one)
-        assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two)
+        assert_raises(ValueError, rng.hypergeometric, bad_ngood, nbad * 3, nsample)
+        assert_raises(ValueError, rng.hypergeometric, ngood, bad_nbad * 3, nsample)
+        assert_raises(ValueError, rng.hypergeometric, ngood, nbad * 3, bad_nsample_one)
+        assert_raises(ValueError, rng.hypergeometric, ngood, nbad * 3, bad_nsample_two)
 
-        self.setSeed()
-        actual = hypergeom(ngood, nbad, nsample * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.hypergeometric(ngood, nbad, nsample * 3)
         assert_array_equal(actual, desired)
-        assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
-        assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
-        assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
-        assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
+        assert_raises(ValueError, rng.hypergeometric, bad_ngood, nbad, nsample * 3)
+        assert_raises(ValueError, rng.hypergeometric, ngood, bad_nbad, nsample * 3)
+        assert_raises(ValueError, rng.hypergeometric, ngood, nbad, bad_nsample_one * 3)
+        assert_raises(ValueError, rng.hypergeometric, ngood, nbad, bad_nsample_two * 3)
 
     def test_logseries(self):
         p = [0.5]
         bad_p_one = [2]
         bad_p_two = [-1]
-        logseries = np.random.logseries
         desired = np.array([1, 1, 1])
 
-        self.setSeed()
-        actual = logseries(p * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.logseries(p * 3)
         assert_array_equal(actual, desired)
-        assert_raises(ValueError, logseries, bad_p_one * 3)
-        assert_raises(ValueError, logseries, bad_p_two * 3)
+        assert_raises(ValueError, rng.logseries, bad_p_one * 3)
+        assert_raises(ValueError, rng.logseries, bad_p_two * 3)
 
 
 @pytest.mark.skipif(IS_WASM, reason="can't start thread")
diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py
index ff7daf0496bb..fd371218616c 100644
--- a/numpy/random/tests/test_randomstate.py
+++ b/numpy/random/tests/test_randomstate.py
@@ -173,10 +173,10 @@ def test_p_non_contiguous(self):
         p = np.arange(15.)
         p /= np.sum(p[1::3])
         pvals = p[1::3]
-        random.seed(1432985819)
-        non_contig = random.multinomial(100, pvals=pvals)
-        random.seed(1432985819)
-        contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals))
+        rng = random.RandomState(1432985819)
+        non_contig = rng.multinomial(100, pvals=pvals)
+        rng = random.RandomState(1432985819)
+        contig = rng.multinomial(100, pvals=np.ascontiguousarray(pvals))
         assert_array_equal(non_contig, contig)
 
     def test_multinomial_pvals_float32(self):
@@ -288,47 +288,49 @@ def test_repr(self):
 
 
 class TestRandint:
 
-    rfunc = random.randint
-
     # valid integer/boolean types
     itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16,
              np.int32, np.uint32, np.int64, np.uint64]
 
     def test_unsupported_type(self):
-        assert_raises(TypeError, self.rfunc, 1, dtype=float)
+        rng = np.random.RandomState()
+        assert_raises(TypeError, rng.randint, 1, dtype=float)
 
     def test_bounds_checking(self):
+        rng = np.random.RandomState()
         for dt in self.itype:
             lbnd = 0 if dt is np.bool else np.iinfo(dt).min
             ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1
-            assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
-            assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
-            assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
-            assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
+            assert_raises(ValueError, rng.randint, lbnd - 1, ubnd, dtype=dt)
+            assert_raises(ValueError, rng.randint, lbnd, ubnd + 1, dtype=dt)
+            assert_raises(ValueError, rng.randint, ubnd, lbnd, dtype=dt)
+            assert_raises(ValueError, rng.randint, 1, 0, dtype=dt)
 
     def test_rng_zero_and_extremes(self):
+        rng = np.random.RandomState()
         for dt in self.itype:
             lbnd = 0 if dt is np.bool else np.iinfo(dt).min
             ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1
 
             tgt = ubnd - 1
-            assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
+            assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt)
 
             tgt = lbnd
-            assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
+            assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt)
 
             tgt = (lbnd + ubnd) // 2
-            assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
+            assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt)
 
     def test_full_range(self):
         # Test for ticket #1690
+        rng = np.random.RandomState()
         for dt in self.itype:
             lbnd = 0 if dt is np.bool else np.iinfo(dt).min
             ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1
 
             try:
-                self.rfunc(lbnd, ubnd, dtype=dt)
+                rng.randint(lbnd, ubnd, dtype=dt)
             except Exception as e:
                 raise AssertionError("No error should have been raised, "
                                      "but one was with the following "
@@ -336,15 +338,15 @@ def test_in_bounds_fuzz(self):
         # Don't use fixed seed
-        random.seed()
+        rng = np.random.RandomState()
 
         for dt in self.itype[1:]:
             for ubnd in [4, 8, 16]:
-                vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
+                vals = rng.randint(2, ubnd, size=2**16, dtype=dt)
                 assert_(vals.max() < ubnd)
                 assert_(vals.min() >= 2)
 
-        vals = self.rfunc(0, 2, size=2**16, dtype=np.bool)
+        vals = rng.randint(0, 2, size=2**16, dtype=np.bool)
 
         assert_(vals.max() < 2)
         assert_(vals.min() >= 0)
@@ -364,20 +366,20 @@ def test_repeatability(self):
               'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'}  # noqa: E501
 
         for dt in self.itype[1:]:
-            random.seed(1234)
+            rng = random.RandomState(1234)
 
             # view as little endian for hash
             if sys.byteorder == 'little':
-                val = self.rfunc(0, 6, size=1000, dtype=dt)
+                val = rng.randint(0, 6, size=1000, dtype=dt)
             else:
-                val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
+                val = rng.randint(0, 6, size=1000, dtype=dt).byteswap()
 
             res = hashlib.sha256(val.view(np.int8)).hexdigest()
            assert_(tgt[np.dtype(dt).name] == res)
 
         # bools do not depend on endianness
-        random.seed(1234)
-        val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
+        rng = random.RandomState(1234)
+        val = rng.randint(0, 2, size=1000, dtype=bool).view(np.int8)
         res = hashlib.sha256(val).hexdigest()
         assert_(tgt[np.dtype(bool).name] == res)
 
@@ -400,8 +402,8 @@ def test_repeatability_32bit_boundary_broadcasting(self):
                              [2978368172,  764731833, 2282559898],
                              [ 105711276,  720447391, 3596512484]]])
         for size in [None, (5, 3, 3)]:
-            random.seed(12345)
-            x = self.rfunc([[-1], [0], [1]], [2**32 - 1, 2**32, 2**32 + 1],
+            rng = random.RandomState(12345)
+            x = rng.randint([[-1], [0], [1]], [2**32 - 1, 2**32, 2**32 + 1],
                            size=size)
             assert_array_equal(x, desired if size is not None else desired[0])
 
@@ -430,11 +432,13 @@ def test_int64_uint64_corner_case(self):
 
     def test_respect_dtype_singleton(self):
         # See gh-7203
+        rng = np.random.RandomState()
+
        for dt in self.itype:
             lbnd = 0 if dt is np.bool else np.iinfo(dt).min
             ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1
 
-            sample = self.rfunc(lbnd, ubnd, dtype=dt)
+            sample = rng.randint(lbnd, ubnd, dtype=dt)
             assert_equal(sample.dtype, np.dtype(dt))
 
         for dt in (bool, int):
@@ -445,7 +449,7 @@ def test_respect_dtype_singleton(self):
             lbnd = 0 if dt is bool else np.iinfo(op_dtype).min
             ubnd = 2 if dt is bool else np.iinfo(op_dtype).max + 1
 
-            sample = self.rfunc(lbnd, ubnd, dtype=dt)
+            sample = rng.randint(lbnd, ubnd, dtype=dt)
             assert_(not hasattr(sample, 'dtype'))
             assert_equal(type(sample), dt)
 
@@ -456,55 +460,54 @@ class TestRandomDist:
     seed = 1234567890
 
     def test_rand(self):
-        random.seed(self.seed)
-        actual = random.rand(3, 2)
+        rng = random.RandomState(self.seed)
+        actual = rng.rand(3, 2)
         desired = np.array([[0.61879477158567997, 0.59162362775974664],
                             [0.88868358904449662, 0.89165480011560816],
                             [0.4575674820298663, 0.7781880808593471]])
         assert_array_almost_equal(actual, desired, decimal=15)
 
     def test_rand_singleton(self):
-        random.seed(self.seed)
-        actual = random.rand()
+        rng = random.RandomState(self.seed)
+        actual = rng.rand()
         desired = 0.61879477158567997
         assert_array_almost_equal(actual, desired, decimal=15)
 
     def test_randn(self):
-        random.seed(self.seed)
-        actual = random.randn(3, 2)
+        rng = random.RandomState(self.seed)
+        actual = rng.randn(3, 2)
         desired = np.array([[1.34016345771863121, 1.73759122771936081],
                             [1.498988344300628, -0.2286433324536169],
                             [2.031033998682787, 2.17032494605655257]])
         assert_array_almost_equal(actual, desired, decimal=15)
 
-        random.seed(self.seed)
-        actual = random.randn()
+        rng = random.RandomState(self.seed)
+        actual = rng.randn()
         assert_array_almost_equal(actual, desired[0, 0], decimal=15)
 
     def test_randint(self):
-        random.seed(self.seed)
-        actual = random.randint(-99, 99, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.randint(-99, 99, size=(3, 2))
         desired = np.array([[31, 3],
                             [-52, 41],
                             [-48, -66]])
         assert_array_equal(actual, desired)
 
     def test_random_integers(self):
-        random.seed(self.seed)
+        rng = random.RandomState(self.seed)
         with pytest.warns(DeprecationWarning):
-            actual = random.random_integers(-99, 99, size=(3, 2))
+            actual = rng.random_integers(-99, 99, size=(3, 2))
         desired = np.array([[31, 3],
                             [-52, 41],
                             [-48, -66]])
         assert_array_equal(actual, desired)
 
-        random.seed(self.seed)
+        rng = random.RandomState(self.seed)
         with pytest.warns(DeprecationWarning):
-            actual = random.random_integers(198, size=(3, 2))
+            actual = rng.random_integers(198, size=(3, 2))
         assert_array_equal(actual, desired + 100)
 
     def test_tomaxint(self):
-        random.seed(self.seed)
         rs = random.RandomState(self.seed)
         actual = rs.tomaxint(size=(3, 2))
         if np.iinfo(np.long).max == 2147483647:
@@ -556,44 +559,44 @@ def test_random_integers_deprecated(self):
                           np.iinfo('l').max, np.iinfo('l').max)
 
     def test_random_sample(self):
-        random.seed(self.seed)
-        actual = random.random_sample((3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.random_sample((3, 2))
         desired = np.array([[0.61879477158567997, 0.59162362775974664],
                             [0.88868358904449662, 0.89165480011560816],
                             [0.4575674820298663, 0.7781880808593471]])
         assert_array_almost_equal(actual, desired, decimal=15)
 
-        random.seed(self.seed)
-        actual = random.random_sample()
+        rng = random.RandomState(self.seed)
+        actual = rng.random_sample()
         assert_array_almost_equal(actual, desired[0, 0], decimal=15)
 
     def test_choice_uniform_replace(self):
-        random.seed(self.seed)
-        actual = random.choice(4, 4)
+        rng = random.RandomState(self.seed)
+        actual = rng.choice(4, 4)
         desired = np.array([2, 3, 2, 3])
         assert_array_equal(actual, desired)
 
     def test_choice_nonuniform_replace(self):
-        random.seed(self.seed)
-        actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
+        rng = random.RandomState(self.seed)
+        actual = rng.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
        desired = np.array([1, 1, 2, 2])
         assert_array_equal(actual, desired)
 
     def test_choice_uniform_noreplace(self):
-        random.seed(self.seed)
-        actual = random.choice(4, 3, replace=False)
+        rng = random.RandomState(self.seed)
+        actual = rng.choice(4, 3, replace=False)
         desired = np.array([0, 1, 3])
         assert_array_equal(actual, desired)
 
     def test_choice_nonuniform_noreplace(self):
-        random.seed(self.seed)
-        actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
+        rng = random.RandomState(self.seed)
+        actual = rng.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
         desired = np.array([2, 3, 1])
         assert_array_equal(actual, desired)
 
     def test_choice_noninteger(self):
-        random.seed(self.seed)
-        actual = random.choice(['a', 'b', 'c', 'd'], 4)
+        rng = random.RandomState(self.seed)
+        actual = rng.choice(['a', 'b', 'c', 'd'], 4)
         desired = np.array(['c', 'd', 'c', 'd'])
         assert_array_equal(actual, desired)
 
@@ -670,15 +673,15 @@ def test_choice_nan_probabilities(self):
     def test_choice_p_non_contiguous(self):
         p = np.ones(10) / 5
         p[1::2] = 3.0
-        random.seed(self.seed)
-        non_contig = random.choice(5, 3, p=p[::2])
-        random.seed(self.seed)
-        contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))
+        rng = random.RandomState(self.seed)
+        non_contig = rng.choice(5, 3, p=p[::2])
+        rng = random.RandomState(self.seed)
+        contig = rng.choice(5, 3, p=np.ascontiguousarray(p[::2]))
         assert_array_equal(non_contig, contig)
 
     def test_bytes(self):
-        random.seed(self.seed)
-        actual = random.bytes(10)
+        rng = random.RandomState(self.seed)
+        actual = rng.bytes(10)
         desired = b'\x82Ui\x9e\xff\x97+Wf\xa5'
         assert_equal(actual, desired)
 
@@ -702,9 +705,9 @@ def test_shuffle(self):
                      lambda x: np.asarray([(i, i) for i in x],
                                           [("a", object, (1,)),
                                            ("b", np.int32, (1,))])]:
-            random.seed(self.seed)
+            rng = random.RandomState(self.seed)
             alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
-            random.shuffle(alist)
+            rng.shuffle(alist)
             actual = alist
             desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
             assert_array_equal(actual, desired)
@@ -728,35 +731,35 @@ def test_shuffle_invalid_objects(self):
         assert_raises(TypeError, random.shuffle, x)
 
     def test_permutation(self):
-        random.seed(self.seed)
+        rng = random.RandomState(self.seed)
         alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
-        actual = random.permutation(alist)
+        actual = rng.permutation(alist)
         desired = [0, 1, 9, 6, 2, 4, 5, 8, 7, 3]
         assert_array_equal(actual, desired)
 
-        random.seed(self.seed)
+        rng = random.RandomState(self.seed)
         arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
-        actual = random.permutation(arr_2d)
+        actual = rng.permutation(arr_2d)
         assert_array_equal(actual, np.atleast_2d(desired).T)
 
-        random.seed(self.seed)
+        rng = random.RandomState(self.seed)
         bad_x_str = "abcd"
         assert_raises(IndexError, random.permutation, bad_x_str)
 
-        random.seed(self.seed)
+        rng = random.RandomState(self.seed)
         bad_x_float = 1.2
         assert_raises(IndexError, random.permutation, bad_x_float)
 
         integer_val = 10
         desired = [9, 0, 8, 5, 1, 3, 4, 7, 6, 2]
 
-        random.seed(self.seed)
-        actual = random.permutation(integer_val)
+        rng = random.RandomState(self.seed)
+        actual = rng.permutation(integer_val)
         assert_array_equal(actual, desired)
 
     def test_beta(self):
-        random.seed(self.seed)
-        actual = random.beta(.1, .9, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.beta(.1, .9, size=(3, 2))
         desired = np.array(
             [[1.45341850513746058e-02, 5.31297615662868145e-04],
              [1.85366619058432324e-06, 4.19214516800110563e-03],
@@ -764,30 +767,30 @@ def test_beta(self):
         assert_array_almost_equal(actual, desired, decimal=15)
 
     def test_binomial(self):
-        random.seed(self.seed)
-        actual = random.binomial(100.123, .456, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.binomial(100.123, .456, size=(3, 2))
         desired = np.array([[37, 43],
                             [42, 48],
                             [46, 45]])
         assert_array_equal(actual, desired)
 
-        random.seed(self.seed)
-        actual = random.binomial(100.123, .456)
+        rng = random.RandomState(self.seed)
+        actual = rng.binomial(100.123, .456)
         desired = 37
         assert_array_equal(actual, desired)
 
     def test_chisquare(self):
-        random.seed(self.seed)
-        actual = random.chisquare(50, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.chisquare(50, size=(3, 2))
         desired = np.array([[63.87858175501090585, 68.68407748911370447],
                             [65.77116116901505904, 47.09686762438974483],
                             [72.3828403199695174, 74.18408615260374006]])
         assert_array_almost_equal(actual, desired, decimal=13)
 
     def test_dirichlet(self):
-        random.seed(self.seed)
+        rng = random.RandomState(self.seed)
         alpha = np.array([51.72840233779265162, 39.74494232180943953])
-        actual = random.dirichlet(alpha, size=(3, 2))
+        actual = rng.dirichlet(alpha, size=(3, 2))
         desired = np.array([[[0.54539444573611562, 0.45460555426388438],
                              [0.62345816822039413, 0.37654183177960598]],
                             [[0.55206000085785778, 0.44793999914214233],
@@ -798,9 +801,9 @@ def test_dirichlet(self):
         bad_alpha = np.array([5.4e-01, -1.0e-16])
         assert_raises(ValueError, random.dirichlet, bad_alpha)
 
-        random.seed(self.seed)
+        rng = random.RandomState(self.seed)
         alpha = np.array([51.72840233779265162, 39.74494232180943953])
-        actual = random.dirichlet(alpha)
+        actual = rng.dirichlet(alpha)
         assert_array_almost_equal(actual, desired[0, 0], decimal=15)
 
     def test_dirichlet_size(self):
@@ -823,16 +826,16 @@ def test_dirichlet_bad_alpha(self):
     def test_dirichlet_alpha_non_contiguous(self):
         a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])
         alpha = a[::2]
-        random.seed(self.seed)
-        non_contig = random.dirichlet(alpha, size=(3, 2))
-        random.seed(self.seed)
-        contig = random.dirichlet(np.ascontiguousarray(alpha),
+        rng = random.RandomState(self.seed)
+        non_contig = rng.dirichlet(alpha, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        contig = rng.dirichlet(np.ascontiguousarray(alpha),
                                   size=(3, 2))
         assert_array_almost_equal(non_contig, contig)
 
     def test_exponential(self):
-        random.seed(self.seed)
-        actual = random.exponential(1.1234, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.exponential(1.1234, size=(3, 2))
         desired = np.array([[1.08342649775011624, 1.00607889924557314],
                             [2.46628830085216721, 2.49668106809923884],
                             [0.68717433461363442, 1.69175666993575979]])
@@ -843,16 +846,16 @@ def test_exponential_0(self):
         assert_raises(ValueError, random.exponential, scale=-0.)
 
     def test_f(self):
-        random.seed(self.seed)
-        actual = random.f(12, 77, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.f(12, 77, size=(3, 2))
         desired = np.array([[1.21975394418575878, 1.75135759791559775],
                             [1.44803115017146489, 1.22108959480396262],
                             [1.02176975757740629, 1.34431827623300415]])
         assert_array_almost_equal(actual, desired, decimal=15)
 
     def test_gamma(self):
-        random.seed(self.seed)
-        actual = random.gamma(5, 3, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.gamma(5, 3, size=(3, 2))
         desired = np.array([[24.60509188649287182, 28.54993563207210627],
                             [26.13476110204064184, 12.56988482927716078],
                             [31.71863275789960568, 33.30143302795922011]])
@@ -863,8 +866,8 @@ def test_gamma_0(self):
         assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)
 
     def test_geometric(self):
-        random.seed(self.seed)
-        actual = random.geometric(.123456789, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.geometric(.123456789, size=(3, 2))
         desired = np.array([[8, 7],
                             [17, 17],
                             [5, 12]])
@@ -881,8 +884,8 @@ def test_geometric_exceptions(self):
            assert_raises(ValueError, random.geometric, [np.nan] * 10)
 
     def test_gumbel(self):
-        random.seed(self.seed)
-        actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
         desired = np.array([[0.19591898743416816, 0.34405539668096674],
                             [-1.4492522252274278, -1.47374816298446865],
                             [1.10651090478803416, -0.69535848626236174]])
         assert_array_almost_equal(actual, desired, decimal=15)
 
@@ -893,34 +896,34 @@ def test_gumbel_0(self):
         assert_raises(ValueError, random.gumbel, scale=-0.)
 
     def test_hypergeometric(self):
-        random.seed(self.seed)
-        actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.hypergeometric(10.1, 5.5, 14, size=(3, 2))
         desired = np.array([[10, 10],
                             [10, 10],
                             [9, 9]])
         assert_array_equal(actual, desired)
 
         # Test nbad = 0
-        actual = random.hypergeometric(5, 0, 3, size=4)
+        actual = rng.hypergeometric(5, 0, 3, size=4)
         desired = np.array([3, 3, 3, 3])
         assert_array_equal(actual, desired)
 
-        actual = random.hypergeometric(15, 0, 12, size=4)
+        actual = rng.hypergeometric(15, 0, 12, size=4)
         desired = np.array([12, 12, 12, 12])
         assert_array_equal(actual, desired)
 
         # Test ngood = 0
-        actual = random.hypergeometric(0, 5, 3, size=4)
+        actual = rng.hypergeometric(0, 5, 3, size=4)
         desired = np.array([0, 0, 0, 0])
         assert_array_equal(actual, desired)
 
-        actual = random.hypergeometric(0, 15, 12, size=4)
+        actual = rng.hypergeometric(0, 15, 12, size=4)
         desired = np.array([0, 0, 0, 0])
         assert_array_equal(actual, desired)
 
     def test_laplace(self):
-        random.seed(self.seed)
-        actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.laplace(loc=.123456789, scale=2.0, size=(3, 2))
         desired = np.array([[0.66599721112760157, 0.52829452552221945],
                             [3.12791959514407125, 3.18202813572992005],
                             [-0.05391065675859356, 1.74901336242837324]])
@@ -931,16 +934,16 @@ def test_laplace_0(self):
         assert_raises(ValueError, random.laplace, scale=-0.)
 
     def test_logistic(self):
-        random.seed(self.seed)
-        actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.logistic(loc=.123456789, scale=2.0, size=(3, 2))
         desired = np.array([[1.09232835305011444, 0.8648196662399954],
                             [4.27818590694950185, 4.33897006346929714],
                             [-0.21682183359214885, 2.63373365386060332]])
         assert_array_almost_equal(actual, desired, decimal=15)
 
     def test_lognormal(self):
-        random.seed(self.seed)
-        actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
         desired = np.array([[16.50698631688883822, 36.54846706092654784],
                             [22.67886599981281748, 0.71617561058995771],
                             [65.72798501792723869, 86.84341601437161273]])
         assert_array_almost_equal(actual, desired, decimal=13)
 
@@ -951,8 +954,8 @@ def test_lognormal_0(self):
         assert_raises(ValueError, random.lognormal, sigma=-0.)
 
     def test_logseries(self):
-        random.seed(self.seed)
-        actual = random.logseries(p=.923456789, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.logseries(p=.923456789, size=(3, 2))
         desired = np.array([[2, 2],
                             [6, 17],
                             [3, 6]])
         assert_array_equal(actual, desired)
 
@@ -974,8 +977,8 @@ def test_logseries_exceptions(self, value):
             random.logseries(np.array([value] * 10)[::2])
 
     def test_multinomial(self):
-        random.seed(self.seed)
-        actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.multinomial(20, [1 / 6.] * 6, size=(3, 2))
         desired = np.array([[[4, 3, 5, 4, 2, 2],
                              [5, 2, 8, 2, 2, 1]],
                             [[3, 4, 3, 6, 0, 4],
@@ -985,11 +988,11 @@ def test_multinomial(self):
         assert_array_equal(actual, desired)
 
     def test_multivariate_normal(self):
-        random.seed(self.seed)
+        rng = random.RandomState(self.seed)
         mean = (.123456789, 10)
         cov = [[1, 0], [0, 1]]
         size = (3, 2)
-        actual = random.multivariate_normal(mean, cov, size)
+        actual = rng.multivariate_normal(mean, cov, size)
         desired = np.array([[[1.463620246718631, 11.73759122771936],
                              [1.622445133300628, 9.771356667546383]],
                             [[2.154490787682787, 12.170324946056553],
@@ -1000,7 +1003,7 @@ def test_multivariate_normal(self):
         assert_array_almost_equal(actual, desired, decimal=15)
 
         # Check for default size, was raising deprecation warning
-        actual = random.multivariate_normal(mean, cov)
+        actual = rng.multivariate_normal(mean, cov)
         desired = np.array([0.895289569463708, 9.17180864067987])
         assert_array_almost_equal(actual, desired, decimal=15)
 
@@ -1008,35 +1011,35 @@ def test_multivariate_normal(self):
         # RuntimeWarning
         mean = [0, 0]
         cov = [[1, 2], [2, 1]]
-        pytest.warns(RuntimeWarning, random.multivariate_normal, mean, cov)
+        pytest.warns(RuntimeWarning, rng.multivariate_normal, mean, cov)
 
         # and that it doesn't warn with RuntimeWarning check_valid='ignore'
-        assert_no_warnings(random.multivariate_normal, mean, cov,
+        assert_no_warnings(rng.multivariate_normal, mean, cov,
                            check_valid='ignore')
 
        # and that it raises with RuntimeWarning check_valid='raises'
-        assert_raises(ValueError, random.multivariate_normal, mean, cov,
+        assert_raises(ValueError, rng.multivariate_normal, mean, cov,
                       check_valid='raise')
 
         cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
         with warnings.catch_warnings():
             warnings.simplefilter('error', RuntimeWarning)
-            random.multivariate_normal(mean, cov)
+            rng.multivariate_normal(mean, cov)
 
         mu = np.zeros(2)
         cov = np.eye(2)
-        assert_raises(ValueError, random.multivariate_normal, mean, cov,
+        assert_raises(ValueError, rng.multivariate_normal, mean, cov,
                       check_valid='other')
-        assert_raises(ValueError, random.multivariate_normal,
+        assert_raises(ValueError, rng.multivariate_normal,
                       np.zeros((2, 1, 1)), cov)
-        assert_raises(ValueError, random.multivariate_normal,
+        assert_raises(ValueError, rng.multivariate_normal,
                       mu, np.empty((3, 2)))
-        assert_raises(ValueError, random.multivariate_normal,
+        assert_raises(ValueError, rng.multivariate_normal,
                       mu, np.eye(3))
 
     def test_negative_binomial(self):
-        random.seed(self.seed)
-        actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.negative_binomial(n=100, p=.12345, size=(3, 2))
         desired = np.array([[848, 841],
                             [892, 611],
                             [779, 647]])
        assert_array_equal(actual, desired)
 
@@ -1050,29 +1053,29 @@ def test_negative_binomial_exceptions(self):
                           [np.nan] * 10)
 
     def test_noncentral_chisquare(self):
-        random.seed(self.seed)
-        actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
         desired = np.array([[23.91905354498517511, 13.35324692733826346],
                             [31.22452661329736401, 16.60047399466177254],
                             [5.03461598262724586, 17.94973089023519464]])
         assert_array_almost_equal(actual, desired, decimal=14)
 
-        actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
+        actual = rng.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
         desired = np.array([[1.47145377828516666, 0.15052899268012659],
                             [0.00943803056963588, 1.02647251615666169],
                             [0.332334982684171, 0.15451287602753125]])
         assert_array_almost_equal(actual, desired, decimal=14)
 
-        random.seed(self.seed)
-        actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
         desired = np.array([[9.597154162763948, 11.725484450296079],
                             [10.413711048138335, 3.694475922923986],
                             [13.484222138963087, 14.377255424602957]])
         assert_array_almost_equal(actual, desired, decimal=14)
 
     def test_noncentral_f(self):
-        random.seed(self.seed)
-        actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,
+        rng = random.RandomState(self.seed)
+        actual = rng.noncentral_f(dfnum=5, dfden=2, nonc=1,
                                      size=(3, 2))
         desired = np.array([[1.40598099674926669, 0.34207973179285761],
                             [3.57715069265772545, 7.92632662577829805],
@@ -1085,8 +1088,8 @@ def test_noncentral_f_nan(self):
         assert np.isnan(actual)
 
     def test_normal(self):
-        random.seed(self.seed)
-        actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.normal(loc=.123456789, scale=2.0, size=(3, 2))
         desired = np.array([[2.80378370443726244, 3.59863924443872163],
                             [3.121433477601256, -0.33382987590723379],
                             [4.18552478636557357, 4.46410668111310471]])
         assert_array_almost_equal(actual, desired, decimal=15)
 
@@ -1097,8 +1100,8 @@ def test_normal_0(self):
         assert_raises(ValueError, random.normal, scale=-0.)
 
     def test_pareto(self):
-        random.seed(self.seed)
-        actual = random.pareto(a=.123456789, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.pareto(a=.123456789, size=(3, 2))
         desired = np.array(
             [[2.46852460439034849e+03, 1.41286880810518346e+03],
              [5.28287797029485181e+07, 6.57720981047328785e+07],
@@ -1112,8 +1115,8 @@ def test_pareto(self):
         np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
 
     def test_poisson(self):
-        random.seed(self.seed)
-        actual = random.poisson(lam=.123456789, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.poisson(lam=.123456789, size=(3, 2))
         desired = np.array([[0, 0],
                             [1, 0],
                             [0, 0]])
         assert_array_equal(actual, desired)
 
@@ -1132,16 +1135,16 @@ def test_poisson_exceptions(self):
             assert_raises(ValueError, random.poisson, [np.nan] * 10)
 
     def test_power(self):
-        random.seed(self.seed)
-        actual = random.power(a=.123456789, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.power(a=.123456789, size=(3, 2))
         desired = np.array([[0.02048932883240791, 0.01424192241128213],
                             [0.38446073748535298, 0.39499689943484395],
                             [0.00177699707563439, 0.13115505880863756]])
         assert_array_almost_equal(actual, desired, decimal=15)
 
     def test_rayleigh(self):
-        random.seed(self.seed)
-        actual = random.rayleigh(scale=10, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.rayleigh(scale=10, size=(3, 2))
        desired = np.array([[13.8882496494248393, 13.383318339044731],
                             [20.95413364294492098, 21.08285015800712614],
                             [11.06066537006854311, 17.35468505778271009]])
         assert_array_almost_equal(actual, desired, decimal=14)
 
@@ -1152,24 +1155,24 @@ def test_rayleigh_0(self):
         assert_raises(ValueError, random.rayleigh, scale=-0.)
 
     def test_standard_cauchy(self):
-        random.seed(self.seed)
-        actual = random.standard_cauchy(size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.standard_cauchy(size=(3, 2))
         desired = np.array([[0.77127660196445336, -6.55601161955910605],
                             [0.93582023391158309, -2.07479293013759447],
                             [-4.74601644297011926, 0.18338989290760804]])
         assert_array_almost_equal(actual, desired, decimal=15)
 
     def test_standard_exponential(self):
-        random.seed(self.seed)
-        actual = random.standard_exponential(size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.standard_exponential(size=(3, 2))
         desired = np.array([[0.96441739162374596, 0.89556604882105506],
                             [2.1953785836319808, 2.22243285392490542],
                             [0.6116915921431676, 1.50592546727413201]])
         assert_array_almost_equal(actual, desired, decimal=15)
 
     def test_standard_gamma(self):
-        random.seed(self.seed)
-        actual = random.standard_gamma(shape=3, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.standard_gamma(shape=3, size=(3, 2))
         desired = np.array([[5.50841531318455058, 6.62953470301903103],
                             [5.93988484943779227, 2.31044849402133989],
                             [7.54838614231317084, 8.012756093271868]])
@@ -1180,30 +1183,30 @@ def test_standard_gamma_0(self):
         assert_raises(ValueError, random.standard_gamma, shape=-0.)
 
     def test_standard_normal(self):
-        random.seed(self.seed)
-        actual = random.standard_normal(size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.standard_normal(size=(3, 2))
         desired = np.array([[1.34016345771863121, 1.73759122771936081],
                             [1.498988344300628, -0.2286433324536169],
                             [2.031033998682787, 2.17032494605655257]])
         assert_array_almost_equal(actual, desired, decimal=15)
 
     def test_randn_singleton(self):
-        random.seed(self.seed)
-        actual = random.randn()
+        rng = random.RandomState(self.seed)
+        actual = rng.randn()
         desired = np.array(1.34016345771863121)
         assert_array_almost_equal(actual, desired, decimal=15)
 
     def test_standard_t(self):
-        random.seed(self.seed)
-        actual = random.standard_t(df=10, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.standard_t(df=10, size=(3, 2))
         desired = np.array([[0.97140611862659965, -0.08830486548450577],
                             [1.36311143689505321, -0.55317463909867071],
                             [-0.18473749069684214, 0.61181537341755321]])
         assert_array_almost_equal(actual, desired, decimal=15)
 
     def test_triangular(self):
-        random.seed(self.seed)
-        actual = random.triangular(left=5.12, mode=10.23, right=20.34,
+        rng = random.RandomState(self.seed)
+        actual = rng.triangular(left=5.12, mode=10.23, right=20.34,
                                    size=(3, 2))
         desired = np.array([[12.68117178949215784, 12.4129206149193152],
                             [16.20131377335158263, 16.25692138747600524],
@@ -1211,8 +1214,8 @@ def test_triangular(self):
         assert_array_almost_equal(actual, desired, decimal=14)
 
     def test_uniform(self):
-        random.seed(self.seed)
-        actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.uniform(low=1.23, high=10.54, size=(3, 2))
         desired = np.array([[6.99097932346268003, 6.73801597444323974],
                             [9.50364421400426274, 9.53130618907631089],
                             [5.48995325769805476, 8.47493103280052118]])
@@ -1257,8 +1260,8 @@ def __int__(self):
         assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)
 
     def test_vonmises(self):
-        random.seed(self.seed)
-        actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
         desired = np.array([[2.28567572673902042, 2.89163838442285037],
                             [0.38198375564286025, 2.57638023113890746],
                             [1.19153771588353052, 1.83509849681825354]])
         assert_array_almost_equal(actual, desired, decimal=15)
 
@@ -1272,8 +1275,8 @@ def test_vonmises_small(self):
 
     def test_vonmises_large(self):
         # guard against changes in RandomState when Generator is fixed
-        random.seed(self.seed)
-        actual = random.vonmises(mu=0., kappa=1e7, size=3)
+        rng = random.RandomState(self.seed)
+        actual = rng.vonmises(mu=0., kappa=1e7, size=3)
         desired = np.array([4.634253748521111e-04,
                             3.558873596114509e-04,
                             -2.337119622577433e-04])
@@ -1285,16 +1288,16 @@ def test_vonmises_nan(self):
         assert_(np.isnan(r))
 
     def test_wald(self):
-        random.seed(self.seed)
-        actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.wald(mean=1.23, scale=1.54, size=(3, 2))
         desired = np.array([[3.82935265715889983, 5.13125249184285526],
                             [0.35045403618358717, 1.50832396872003538],
                             [0.24124319895843183, 0.22031101461955038]])
         assert_array_almost_equal(actual, desired, decimal=14)
 
     def test_weibull(self):
-        random.seed(self.seed)
-        actual = random.weibull(a=1.23, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.weibull(a=1.23, size=(3, 2))
         desired = np.array([[0.97097342648766727, 0.91422896443565516],
                             [1.89517770034962929, 1.91414357960479564],
                             [0.67057783752390987, 1.39494046635066793]])
         assert_array_almost_equal(actual, desired, decimal=15)
 
@@ -1306,8 +1309,8 @@ def test_weibull_0(self):
         assert_raises(ValueError, random.weibull, a=-0.)
 
     def test_zipf(self):
-        random.seed(self.seed)
-        actual = random.zipf(a=1.23, size=(3, 2))
+        rng = random.RandomState(self.seed)
+        actual = rng.zipf(a=1.23, size=(3, 2))
         desired = np.array([[66, 29],
                             [1, 1],
                             [3, 13]])
         assert_array_equal(actual, desired)
 
@@ -1319,135 +1322,125 @@ class TestBroadcast:
     # correctly when presented with non-scalar arguments
     seed = 123456789
 
-    def set_seed(self):
-        random.seed(self.seed)
-
     def test_uniform(self):
         low = [0]
         high = [1]
-        uniform = random.uniform
         desired = np.array([0.53283302478975902,
                             0.53413660089041659,
                             0.50955303552646702])
 
-        self.set_seed()
-        actual = uniform(low * 3, high)
+        rng = random.RandomState(self.seed)
+        actual = rng.uniform(low * 3, high)
         assert_array_almost_equal(actual, desired, decimal=14)
 
-        self.set_seed()
-        actual = uniform(low, high * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.uniform(low, high * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
 
     def test_normal(self):
         loc = [0]
         scale = [1]
         bad_scale = [-1]
-        normal = random.normal
         desired = np.array([2.2129019979039612,
                             2.1283977976520019,
                             1.8417114045748335])
 
-        self.set_seed()
-        actual = normal(loc * 3, scale)
+        rng = random.RandomState(self.seed)
+        actual = rng.normal(loc * 3, scale)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, normal, loc * 3, bad_scale)
+        assert_raises(ValueError, rng.normal, loc * 3, bad_scale)
 
-        self.set_seed()
-        actual = normal(loc, scale * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.normal(loc, scale * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, normal, loc, bad_scale * 3)
+        assert_raises(ValueError, rng.normal, loc, bad_scale * 3)
 
     def test_beta(self):
         a = [1]
         b = [2]
         bad_a = [-1]
         bad_b = [-2]
-        beta = random.beta
        desired = np.array([0.19843558305989056,
                             0.075230336409423643,
                             0.24976865978980844])
 
-        self.set_seed()
-        actual = beta(a * 3, b)
+        rng = random.RandomState(self.seed)
+        actual = rng.beta(a * 3, b)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, beta, bad_a * 3, b)
-        assert_raises(ValueError, beta, a * 3, bad_b)
+        assert_raises(ValueError, rng.beta, bad_a * 3, b)
+        assert_raises(ValueError, rng.beta, a * 3, bad_b)
 
-        self.set_seed()
-        actual = beta(a, b * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.beta(a, b * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, beta, bad_a, b * 3)
-        assert_raises(ValueError, beta, a, bad_b * 3)
+        assert_raises(ValueError, rng.beta, bad_a, b * 3)
+        assert_raises(ValueError, rng.beta, a, bad_b * 3)
 
     def test_exponential(self):
         scale = [1]
         bad_scale = [-1]
-        exponential = random.exponential
         desired = np.array([0.76106853658845242,
                             0.76386282278691653,
                             0.71243813125891797])
 
-        self.set_seed()
-        actual = exponential(scale * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.exponential(scale * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, exponential, bad_scale * 3)
+        assert_raises(ValueError, rng.exponential, bad_scale * 3)
 
     def test_standard_gamma(self):
         shape = [1]
         bad_shape = [-1]
-        std_gamma = random.standard_gamma
         desired = np.array([0.76106853658845242,
                             0.76386282278691653,
                             0.71243813125891797])
 
-        self.set_seed()
-        actual = std_gamma(shape * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.standard_gamma(shape * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, std_gamma, bad_shape * 3)
+        assert_raises(ValueError, rng.standard_gamma, bad_shape * 3)
 
     def test_gamma(self):
         shape = [1]
         scale = [2]
         bad_shape = [-1]
         bad_scale = [-2]
-        gamma = random.gamma
         desired = np.array([1.5221370731769048,
                             1.5277256455738331,
                             1.4248762625178359])
 
-        self.set_seed()
-        actual = gamma(shape * 3, scale)
+        rng = random.RandomState(self.seed)
+        actual = rng.gamma(shape * 3, scale)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, gamma, bad_shape * 3, scale)
-        assert_raises(ValueError, gamma, shape * 3, bad_scale)
+        assert_raises(ValueError, rng.gamma, bad_shape * 3, scale)
+        assert_raises(ValueError, rng.gamma, shape * 3, bad_scale)
 
-        self.set_seed()
-        actual = gamma(shape, scale * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.gamma(shape, scale * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, gamma, bad_shape, scale * 3)
-        assert_raises(ValueError, gamma, shape, bad_scale * 3)
+        assert_raises(ValueError, rng.gamma, bad_shape, scale * 3)
+        assert_raises(ValueError, rng.gamma, shape, bad_scale * 3)
 
     def test_f(self):
         dfnum = [1]
         dfden = [2]
         bad_dfnum = [-1]
         bad_dfden = [-2]
-        f = random.f
         desired = np.array([0.80038951638264799,
                             0.86768719635363512,
                             2.7251095168386801])
 
-        self.set_seed()
-        actual = f(dfnum * 3, dfden)
+        rng = random.RandomState(self.seed)
+        actual = rng.f(dfnum * 3, dfden)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, f, bad_dfnum * 3, dfden)
-        assert_raises(ValueError, f, dfnum * 3, bad_dfden)
+        assert_raises(ValueError, rng.f, bad_dfnum * 3, dfden)
+        assert_raises(ValueError, rng.f, dfnum * 3, bad_dfden)
 
-        self.set_seed()
-        actual = f(dfnum, dfden * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.f(dfnum, dfden * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, f, bad_dfnum, dfden * 3)
-        assert_raises(ValueError, f, dfnum, bad_dfden * 3)
+        assert_raises(ValueError, rng.f, bad_dfnum, dfden * 3)
+        assert_raises(ValueError, rng.f, dfnum, bad_dfden * 3)
 
     def test_noncentral_f(self):
         dfnum = [2]
@@ -1456,267 +1449,253 @@ def test_noncentral_f(self):
         dfden = [3]
         nonc = [4]
         bad_dfnum = [0]
         bad_dfden = [-1]
         bad_nonc = [-2]
-        nonc_f = random.noncentral_f
         desired = np.array([9.1393943263705211,
                             13.025456344595602,
                             8.8018098359100545])
 
-        self.set_seed()
-        actual = nonc_f(dfnum * 3, dfden, nonc)
+        rng = random.RandomState(self.seed)
+        actual = rng.noncentral_f(dfnum * 3, dfden, nonc)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))
+        assert np.all(np.isnan(rng.noncentral_f(dfnum, dfden, [np.nan] * 3)))
 
-        assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
-        assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
-        assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
+        assert_raises(ValueError, rng.noncentral_f, bad_dfnum * 3, dfden, nonc)
+        assert_raises(ValueError, rng.noncentral_f, dfnum * 3, bad_dfden, nonc)
+        assert_raises(ValueError, rng.noncentral_f, dfnum * 3, dfden, bad_nonc)
 
-        self.set_seed()
-        actual = nonc_f(dfnum, dfden * 3, nonc)
+        rng = random.RandomState(self.seed)
+        actual = rng.noncentral_f(dfnum, dfden * 3, nonc)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
-        assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
-        assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
+        assert_raises(ValueError, rng.noncentral_f, bad_dfnum, dfden * 3, nonc)
+        assert_raises(ValueError, rng.noncentral_f, dfnum, bad_dfden * 3, nonc)
+        assert_raises(ValueError, rng.noncentral_f, dfnum, dfden * 3, bad_nonc)
 
-        self.set_seed()
-        actual = nonc_f(dfnum, dfden, nonc * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.noncentral_f(dfnum, dfden, nonc * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
-        assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
-        assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
+        assert_raises(ValueError, rng.noncentral_f, bad_dfnum, dfden, nonc * 3)
+        assert_raises(ValueError, rng.noncentral_f, dfnum, bad_dfden, nonc * 3)
+        assert_raises(ValueError, rng.noncentral_f, dfnum, dfden, bad_nonc * 3)
 
     def test_noncentral_f_small_df(self):
-        self.set_seed()
+        rng = random.RandomState(self.seed)
        desired = np.array([6.869638627492048, 0.785880199263955])
-        actual = random.noncentral_f(0.9, 0.9, 2, size=2)
+        actual = rng.noncentral_f(0.9, 0.9, 2, size=2)
         assert_array_almost_equal(actual, desired, decimal=14)
 
     def test_chisquare(self):
         df = [1]
         bad_df = [-1]
-        chisquare = random.chisquare
         desired = np.array([0.57022801133088286,
                             0.51947702108840776,
                             0.1320969254923558])
 
-        self.set_seed()
-        actual = chisquare(df * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.chisquare(df * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, chisquare, bad_df * 3)
+        assert_raises(ValueError, rng.chisquare, bad_df * 3)
 
     def test_noncentral_chisquare(self):
         df = [1]
         nonc = [2]
         bad_df = [-1]
         bad_nonc = [-2]
-        nonc_chi = random.noncentral_chisquare
         desired = np.array([9.0015599467913763,
                             4.5804135049718742,
                             6.0872302432834564])
 
-        self.set_seed()
-        actual = nonc_chi(df * 3, nonc)
+        rng = random.RandomState(self.seed)
+        actual = rng.noncentral_chisquare(df * 3, nonc)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
-        assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
+        assert_raises(ValueError, rng.noncentral_chisquare, bad_df * 3, nonc)
+        assert_raises(ValueError, rng.noncentral_chisquare, df * 3, bad_nonc)
 
-        self.set_seed()
-        actual = nonc_chi(df, nonc * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.noncentral_chisquare(df, nonc * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
-        assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
+        assert_raises(ValueError, rng.noncentral_chisquare, bad_df, nonc * 3)
+        assert_raises(ValueError, rng.noncentral_chisquare, df, bad_nonc * 3)
 
     def test_standard_t(self):
         df = [1]
         bad_df = [-1]
-        t = random.standard_t
         desired = np.array([3.0702872575217643,
                             5.8560725167361607,
                             1.0274791436474273])
 
-        self.set_seed()
-        actual = t(df * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.standard_t(df * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, t, bad_df * 3)
+        assert_raises(ValueError, rng.standard_t, bad_df * 3)
         assert_raises(ValueError, random.standard_t, bad_df * 3)
 
     def test_vonmises(self):
         mu = [2]
         kappa = [1]
         bad_kappa = [-1]
-        vonmises = random.vonmises
         desired = np.array([2.9883443664201312,
                             -2.7064099483995943,
                             -1.8672476700665914])
 
-        self.set_seed()
-        actual = vonmises(mu * 3, kappa)
+        rng = random.RandomState(self.seed)
+        actual = rng.vonmises(mu * 3, kappa)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, vonmises, mu * 3, bad_kappa)
+        assert_raises(ValueError, rng.vonmises, mu * 3, bad_kappa)
 
-        self.set_seed()
-        actual = vonmises(mu, kappa * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.vonmises(mu, kappa * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, vonmises, mu, bad_kappa * 3)
+        assert_raises(ValueError, rng.vonmises, mu, bad_kappa * 3)
 
     def test_pareto(self):
         a = [1]
         bad_a = [-1]
-        pareto = random.pareto
         desired = np.array([1.1405622680198362,
                             1.1465519762044529,
                             1.0389564467453547])
 
-        self.set_seed()
-        actual = pareto(a * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.pareto(a * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, pareto, bad_a * 3)
+        assert_raises(ValueError, rng.pareto, bad_a * 3)
         assert_raises(ValueError, random.pareto, bad_a * 3)
 
     def test_weibull(self):
         a = [1]
         bad_a = [-1]
-        weibull = random.weibull
         desired = np.array([0.76106853658845242,
                             0.76386282278691653,
                             0.71243813125891797])
 
-        self.set_seed()
-        actual = weibull(a * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.weibull(a * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, weibull, bad_a * 3)
+        assert_raises(ValueError, rng.weibull, bad_a * 3)
         assert_raises(ValueError, random.weibull, bad_a * 3)
 
     def test_power(self):
         a = [1]
         bad_a = [-1]
-        power = random.power
         desired = np.array([0.53283302478975902,
                             0.53413660089041659,
                             0.50955303552646702])
 
-        self.set_seed()
-        actual = power(a * 3)
+        rng = random.RandomState(self.seed)
+        actual = rng.power(a * 3)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, power, bad_a * 3)
+        assert_raises(ValueError, rng.power, bad_a * 3)
         assert_raises(ValueError, random.power, bad_a * 3)
 
     def test_laplace(self):
         loc = [0]
         scale = [1]
         bad_scale = [-1]
-        laplace = random.laplace
         desired = np.array([0.067921356028507157,
                             0.070715642226971326,
                             0.019290950698972624])
 
-        self.set_seed()
-        actual = laplace(loc * 3, scale)
+        rng = random.RandomState(self.seed)
+        actual = rng.laplace(loc * 3, scale)
         assert_array_almost_equal(actual, desired, decimal=14)
-        assert_raises(ValueError, laplace, loc * 3, bad_scale)
+        assert_raises(ValueError, rng.laplace, loc * 3, bad_scale)
 
-        self.set_seed()
-        actual = laplace(loc, scale * 3)
+        rng = random.RandomState(self.seed)
+
actual = rng.laplace(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, laplace, loc, bad_scale * 3) + assert_raises(ValueError, rng.laplace, loc, bad_scale * 3) def test_gumbel(self): loc = [0] scale = [1] bad_scale = [-1] - gumbel = random.gumbel desired = np.array([0.2730318639556768, 0.26936705726291116, 0.33906220393037939]) - self.set_seed() - actual = gumbel(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gumbel, loc * 3, bad_scale) + assert_raises(ValueError, rng.gumbel, loc * 3, bad_scale) - self.set_seed() - actual = gumbel(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gumbel, loc, bad_scale * 3) + assert_raises(ValueError, rng.gumbel, loc, bad_scale * 3) def test_logistic(self): loc = [0] scale = [1] bad_scale = [-1] - logistic = random.logistic desired = np.array([0.13152135837586171, 0.13675915696285773, 0.038216792802833396]) - self.set_seed() - actual = logistic(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.logistic(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, logistic, loc * 3, bad_scale) + assert_raises(ValueError, rng.logistic, loc * 3, bad_scale) - self.set_seed() - actual = logistic(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.logistic(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, logistic, loc, bad_scale * 3) - assert_equal(random.logistic(1.0, 0.0), 1.0) + assert_raises(ValueError, rng.logistic, loc, bad_scale * 3) + assert_equal(rng.logistic(1.0, 0.0), 1.0) def test_lognormal(self): mean = [0] sigma = [1] bad_sigma = [-1] - lognormal = random.lognormal desired = np.array([9.1422086044848427, 8.4013952870126261, 6.3073234116578671]) - self.set_seed() - actual = lognormal(mean * 3, sigma) + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean * 3, sigma) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, lognormal, mean * 3, bad_sigma) + assert_raises(ValueError, rng.lognormal, mean * 3, bad_sigma) assert_raises(ValueError, random.lognormal, mean * 3, bad_sigma) - self.set_seed() - actual = lognormal(mean, sigma * 3) + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean, sigma * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, lognormal, mean, bad_sigma * 3) + assert_raises(ValueError, rng.lognormal, mean, bad_sigma * 3) assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3) def test_rayleigh(self): scale = [1] bad_scale = [-1] - rayleigh = random.rayleigh desired = np.array([1.2337491937897689, 1.2360119924878694, 1.1936818095781789]) - self.set_seed() - actual = rayleigh(scale * 3) + rng = random.RandomState(self.seed) + actual = rng.rayleigh(scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, rayleigh, bad_scale * 3) + assert_raises(ValueError, rng.rayleigh, bad_scale * 3) def test_wald(self): mean = [0.5] scale = [1] bad_mean = [0] bad_scale = [-2] - wald = random.wald desired = np.array([0.11873681120271318, 0.12450084820795027, 0.9096122728408238]) - self.set_seed() - actual = wald(mean * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.wald(mean * 3, 
scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, wald, bad_mean * 3, scale) - assert_raises(ValueError, wald, mean * 3, bad_scale) + assert_raises(ValueError, rng.wald, bad_mean * 3, scale) + assert_raises(ValueError, rng.wald, mean * 3, bad_scale) assert_raises(ValueError, random.wald, bad_mean * 3, scale) assert_raises(ValueError, random.wald, mean * 3, bad_scale) - self.set_seed() - actual = wald(mean, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.wald(mean, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, wald, bad_mean, scale * 3) - assert_raises(ValueError, wald, mean, bad_scale * 3) - assert_raises(ValueError, wald, 0.0, 1) - assert_raises(ValueError, wald, 0.5, 0.0) + assert_raises(ValueError, rng.wald, bad_mean, scale * 3) + assert_raises(ValueError, rng.wald, mean, bad_scale * 3) + assert_raises(ValueError, rng.wald, 0.0, 1) + assert_raises(ValueError, rng.wald, 0.5, 0.0) def test_triangular(self): left = [1] @@ -1725,38 +1704,37 @@ def test_triangular(self): bad_left_one = [3] bad_mode_one = [4] bad_left_two, bad_mode_two = right * 2 - triangular = random.triangular desired = np.array([2.03339048710429, 2.0347400359389356, 2.0095991069536208]) - self.set_seed() - actual = triangular(left * 3, mode, right) + rng = random.RandomState(self.seed) + actual = rng.triangular(left * 3, mode, right) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one * 3, mode, right) - assert_raises(ValueError, triangular, left * 3, bad_mode_one, right) - assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, + assert_raises(ValueError, rng.triangular, bad_left_one * 3, mode, right) + assert_raises(ValueError, rng.triangular, left * 3, bad_mode_one, right) + assert_raises(ValueError, rng.triangular, bad_left_two * 3, bad_mode_two, right) - self.set_seed() - actual = triangular(left, mode * 3, right) + rng = random.RandomState(self.seed) + actual = rng.triangular(left, mode * 3, right) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one, mode * 3, right) - assert_raises(ValueError, triangular, left, bad_mode_one * 3, right) - assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, + assert_raises(ValueError, rng.triangular, bad_left_one, mode * 3, right) + assert_raises(ValueError, rng.triangular, left, bad_mode_one * 3, right) + assert_raises(ValueError, rng.triangular, bad_left_two, bad_mode_two * 3, right) - self.set_seed() - actual = triangular(left, mode, right * 3) + rng = random.RandomState(self.seed) + actual = rng.triangular(left, mode, right * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one, mode, right * 3) - assert_raises(ValueError, triangular, left, bad_mode_one, right * 3) - assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, + assert_raises(ValueError, rng.triangular, bad_left_one, mode, right * 3) + assert_raises(ValueError, rng.triangular, left, bad_mode_one, right * 3) + assert_raises(ValueError, rng.triangular, bad_left_two, bad_mode_two, right * 3) - assert_raises(ValueError, triangular, 10., 0., 20.) - assert_raises(ValueError, triangular, 10., 25., 20.) - assert_raises(ValueError, triangular, 10., 10., 10.) + assert_raises(ValueError, rng.triangular, 10., 0., 20.) + assert_raises(ValueError, rng.triangular, 10., 25., 20.) 
+ assert_raises(ValueError, rng.triangular, 10., 10., 10.) def test_binomial(self): n = [1] @@ -1764,22 +1742,21 @@ def test_binomial(self): bad_n = [-1] bad_p_one = [-1] bad_p_two = [1.5] - binom = random.binomial desired = np.array([1, 1, 1]) - self.set_seed() - actual = binom(n * 3, p) + rng = random.RandomState(self.seed) + actual = rng.binomial(n * 3, p) assert_array_equal(actual, desired) - assert_raises(ValueError, binom, bad_n * 3, p) - assert_raises(ValueError, binom, n * 3, bad_p_one) - assert_raises(ValueError, binom, n * 3, bad_p_two) + assert_raises(ValueError, rng.binomial, bad_n * 3, p) + assert_raises(ValueError, rng.binomial, n * 3, bad_p_one) + assert_raises(ValueError, rng.binomial, n * 3, bad_p_two) - self.set_seed() - actual = binom(n, p * 3) + rng = random.RandomState(self.seed) + actual = rng.binomial(n, p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, binom, bad_n, p * 3) - assert_raises(ValueError, binom, n, bad_p_one * 3) - assert_raises(ValueError, binom, n, bad_p_two * 3) + assert_raises(ValueError, rng.binomial, bad_n, p * 3) + assert_raises(ValueError, rng.binomial, n, bad_p_one * 3) + assert_raises(ValueError, rng.binomial, n, bad_p_two * 3) def test_negative_binomial(self): n = [1] @@ -1787,22 +1764,21 @@ def test_negative_binomial(self): bad_n = [-1] bad_p_one = [-1] bad_p_two = [1.5] - neg_binom = random.negative_binomial desired = np.array([1, 0, 1]) - self.set_seed() - actual = neg_binom(n * 3, p) + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n * 3, p) assert_array_equal(actual, desired) - assert_raises(ValueError, neg_binom, bad_n * 3, p) - assert_raises(ValueError, neg_binom, n * 3, bad_p_one) - assert_raises(ValueError, neg_binom, n * 3, bad_p_two) + assert_raises(ValueError, rng.negative_binomial, bad_n * 3, p) + assert_raises(ValueError, rng.negative_binomial, n * 3, bad_p_one) + assert_raises(ValueError, rng.negative_binomial, n * 3, bad_p_two) - self.set_seed() - actual = neg_binom(n, p * 3) + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n, p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, neg_binom, bad_n, p * 3) - assert_raises(ValueError, neg_binom, n, bad_p_one * 3) - assert_raises(ValueError, neg_binom, n, bad_p_two * 3) + assert_raises(ValueError, rng.negative_binomial, bad_n, p * 3) + assert_raises(ValueError, rng.negative_binomial, n, bad_p_one * 3) + assert_raises(ValueError, rng.negative_binomial, n, bad_p_two * 3) def test_poisson(self): max_lam = random.RandomState()._poisson_lam_max @@ -1810,41 +1786,38 @@ def test_poisson(self): lam = [1] bad_lam_one = [-1] bad_lam_two = [max_lam * 2] - poisson = random.poisson desired = np.array([1, 1, 0]) - self.set_seed() - actual = poisson(lam * 3) + rng = random.RandomState(self.seed) + actual = rng.poisson(lam * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, poisson, bad_lam_one * 3) - assert_raises(ValueError, poisson, bad_lam_two * 3) + assert_raises(ValueError, rng.poisson, bad_lam_one * 3) + assert_raises(ValueError, rng.poisson, bad_lam_two * 3) def test_zipf(self): a = [2] bad_a = [0] - zipf = random.zipf desired = np.array([2, 2, 1]) - self.set_seed() - actual = zipf(a * 3) + rng = random.RandomState(self.seed) + actual = rng.zipf(a * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, zipf, bad_a * 3) + assert_raises(ValueError, rng.zipf, bad_a * 3) with np.errstate(invalid='ignore'): - assert_raises(ValueError, zipf, np.nan) - assert_raises(ValueError, zipf, 
[0, 0, np.nan]) + assert_raises(ValueError, rng.zipf, np.nan) + assert_raises(ValueError, rng.zipf, [0, 0, np.nan]) def test_geometric(self): p = [0.5] bad_p_one = [-1] bad_p_two = [1.5] - geom = random.geometric desired = np.array([2, 2, 2]) - self.set_seed() - actual = geom(p * 3) + rng = random.RandomState(self.seed) + actual = rng.geometric(p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, geom, bad_p_one * 3) - assert_raises(ValueError, geom, bad_p_two * 3) + assert_raises(ValueError, rng.geometric, bad_p_one * 3) + assert_raises(ValueError, rng.geometric, bad_p_two * 3) def test_hypergeometric(self): ngood = [1] @@ -1854,50 +1827,48 @@ def test_hypergeometric(self): bad_nbad = [-2] bad_nsample_one = [0] bad_nsample_two = [4] - hypergeom = random.hypergeometric desired = np.array([1, 1, 1]) - self.set_seed() - actual = hypergeom(ngood * 3, nbad, nsample) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood * 3, nbad, nsample) assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample) - assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample) - assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one) - assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two) + assert_raises(ValueError, rng.hypergeometric, bad_ngood * 3, nbad, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, bad_nbad, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, nbad, bad_nsample_one) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, nbad, bad_nsample_two) - self.set_seed() - actual = hypergeom(ngood, nbad * 3, nsample) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood, nbad * 3, nsample) assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample) - assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample) - assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one) - assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two) + assert_raises(ValueError, rng.hypergeometric, bad_ngood, nbad * 3, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood, bad_nbad * 3, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad * 3, bad_nsample_one) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad * 3, bad_nsample_two) - self.set_seed() - actual = hypergeom(ngood, nbad, nsample * 3) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood, nbad, nsample * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3) - assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3) - assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3) - assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3) + assert_raises(ValueError, rng.hypergeometric, bad_ngood, nbad, nsample * 3) + assert_raises(ValueError, rng.hypergeometric, ngood, bad_nbad, nsample * 3) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad, bad_nsample_one * 3) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad, bad_nsample_two * 3) - assert_raises(ValueError, hypergeom, -1, 10, 20) - assert_raises(ValueError, hypergeom, 10, -1, 20) - assert_raises(ValueError, hypergeom, 10, 10, 0) - assert_raises(ValueError, hypergeom, 10, 10, 25) + assert_raises(ValueError, rng.hypergeometric, -1, 10, 20) + assert_raises(ValueError, rng.hypergeometric, 10, -1, 20) + 
assert_raises(ValueError, rng.hypergeometric, 10, 10, 0) + assert_raises(ValueError, rng.hypergeometric, 10, 10, 25) def test_logseries(self): p = [0.5] bad_p_one = [2] bad_p_two = [-1] - logseries = random.logseries desired = np.array([1, 1, 1]) - self.set_seed() - actual = logseries(p * 3) + rng = random.RandomState(self.seed) + actual = rng.logseries(p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, logseries, bad_p_one * 3) - assert_raises(ValueError, logseries, bad_p_two * 3) + assert_raises(ValueError, rng.logseries, bad_p_one * 3) + assert_raises(ValueError, rng.logseries, bad_p_two * 3) @pytest.mark.skipif(IS_WASM, reason="can't start thread") @@ -2025,9 +1996,9 @@ def test_integer_dtype(int_func): def test_integer_repeat(int_func): - random.seed(123456789) + rng = random.RandomState(123456789) fname, args, sha256 = int_func - f = getattr(random, fname) + f = getattr(rng, fname) val = f(*args, size=1000000) if sys.byteorder != 'little': val = val.byteswap() diff --git a/numpy/random/tests/test_randomstate_regression.py b/numpy/random/tests/test_randomstate_regression.py index e71be8acd981..1c8882d1b672 100644 --- a/numpy/random/tests/test_randomstate_regression.py +++ b/numpy/random/tests/test_randomstate_regression.py @@ -54,9 +54,9 @@ def test_shuffle_mixed_dimension(self): [(1, 1), (2, 2), (3, 3), None], [1, (2, 2), (3, 3), None], [(1, 1), 2, 3, None]]: - random.seed(12345) + rng = random.RandomState(12345) shuffled = list(t) - random.shuffle(shuffled) + rng.shuffle(shuffled) expected = np.array([t[0], t[3], t[1], t[2]], dtype=object) assert_array_equal(np.array(shuffled, dtype=object), expected) @@ -131,9 +131,9 @@ def test_permutation_subclass(self): class N(np.ndarray): pass - random.seed(1) + rng = random.RandomState(1) orig = np.arange(3).view(N) - perm = random.permutation(orig) + perm = rng.permutation(orig) assert_array_equal(perm, np.array([0, 2, 1])) assert_array_equal(orig, np.arange(3).view(N)) @@ -143,9 +143,9 @@ class M: def __array__(self, dtype=None, copy=None): return self.a - random.seed(1) + rng = random.RandomState(1) m = M() - perm = random.permutation(m) + perm = rng.permutation(m) assert_array_equal(perm, np.array([2, 1, 4, 0, 3])) assert_array_equal(m.__array__(), np.arange(5)) @@ -176,27 +176,27 @@ def test_choice_retun_dtype(self): reason='Cannot test with 32-bit C long') def test_randint_117(self): # GH 14189 - random.seed(0) + rng = random.RandomState(0) expected = np.array([2357136044, 2546248239, 3071714933, 3626093760, 2588848963, 3684848379, 2340255427, 3638918503, 1819583497, 2678185683], dtype='int64') - actual = random.randint(2**32, size=10) + actual = rng.randint(2**32, size=10) assert_array_equal(actual, expected) def test_p_zero_stream(self): # Regression test for gh-14522. Ensure that future versions # generate the same variates as version 1.16. - np.random.seed(12345) - assert_array_equal(random.binomial(1, [0, 0.25, 0.5, 0.75, 1]), + rng = random.RandomState(12345) + assert_array_equal(rng.binomial(1, [0, 0.25, 0.5, 0.75, 1]), [0, 0, 0, 1, 1]) def test_n_zero_stream(self): # Regression test for gh-14522. Ensure that future versions # generate the same variates as version 1.16. 
- np.random.seed(8675309) + rng = random.RandomState(8675309) expected = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [3, 4, 2, 3, 3, 1, 5, 3, 1, 3]]) - assert_array_equal(random.binomial([[0], [10]], 0.25, size=(2, 10)), + assert_array_equal(rng.binomial([[0], [10]], 0.25, size=(2, 10)), expected) diff --git a/numpy/random/tests/test_regression.py b/numpy/random/tests/test_regression.py index de52582c2b56..b29bd526266e 100644 --- a/numpy/random/tests/test_regression.py +++ b/numpy/random/tests/test_regression.py @@ -52,9 +52,9 @@ def test_shuffle_mixed_dimension(self): [(1, 1), (2, 2), (3, 3), None], [1, (2, 2), (3, 3), None], [(1, 1), 2, 3, None]]: - np.random.seed(12345) + rng = np.random.RandomState(12345) shuffled = list(t) - random.shuffle(shuffled) + rng.shuffle(shuffled) expected = np.array([t[0], t[3], t[1], t[2]], dtype=object) assert_array_equal(np.array(shuffled, dtype=object), expected) @@ -129,9 +129,9 @@ def test_permutation_subclass(self): class N(np.ndarray): pass - np.random.seed(1) + rng = np.random.RandomState(1) orig = np.arange(3).view(N) - perm = np.random.permutation(orig) + perm = rng.permutation(orig) assert_array_equal(perm, np.array([0, 2, 1])) assert_array_equal(orig, np.arange(3).view(N)) @@ -141,8 +141,8 @@ class M: def __array__(self, dtype=None, copy=None): return self.a - np.random.seed(1) + rng = np.random.RandomState(1) m = M() - perm = np.random.permutation(m) + perm = rng.permutation(m) assert_array_equal(perm, np.array([2, 1, 4, 0, 3])) assert_array_equal(m.__array__(), np.arange(5)) From 1f5b7392f6a1d57886a4b32f71d7a44f09f22e26 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 15 Sep 2025 13:12:42 -0600 Subject: [PATCH 0461/1018] DOC: add dev docs on C debuggers and compiler sanitizers --- .../dev/development_advanced_debugging.rst | 214 +++++++++++++++++- 1 file changed, 212 insertions(+), 2 deletions(-) diff --git a/doc/source/dev/development_advanced_debugging.rst b/doc/source/dev/development_advanced_debugging.rst index 6df7a3ecb64a..23b06085b371 100644 --- a/doc/source/dev/development_advanced_debugging.rst +++ b/doc/source/dev/development_advanced_debugging.rst @@ -5,11 +5,15 @@ Advanced debugging tools ======================== If you reached here, you want to dive into, or use, more advanced tooling. -This is usually not necessary for first time contributors and most +This is usually not necessary for first-time contributors and most day-to-day development. These are used more rarely, for example close to a new NumPy release, or when a large or particular complex change was made. +Some of these tools are used in NumPy's continuous integration tests. If you +see a test failure that only happens under a debugging tool, these instructions +should hopefully enable you to reproduce the test failure locally. + Since not all of these tools are used on a regular basis and only available on some systems, please expect differences, issues, or quirks; we will be happy to help if you get stuck and appreciate any improvements @@ -20,7 +24,7 @@ Finding C errors with additional tooling ######################################## Most development will not require more than a typical debugging toolchain -as shown in :ref:`Debugging `. +as shown in :ref:`Debugging `. But for example memory leaks can be particularly subtle or difficult to narrow down. @@ -213,3 +217,209 @@ command for NumPy). .. 
_pytest-valgrind: https://github.com/seberg/pytest-valgrind + +C debuggers +=========== + +Whenever NumPy crashes or when working on changes to NumPy's low-level C or C++ +code, it's often convenient to run Python under a C debugger to get more +information. A debugger can aid in understanding an interpreter crash (e.g. due +to a segmentation fault) by providing a C call stack at the site of the +crash. The call stack often provides valuable context to understand the nature +of a crash. C debuggers are also very useful during development, allowing +interactive debugging in the C implementation of NumPy. + +The NumPy developers often use both ``gdb`` and ``lldb`` to debug Numpy. It does +not matter which debugger you use - although on a Mac it is often far easier to +use ``lldb`` than ``gdb``. That said, they have disjoint user interfaces, so you +will need to learn how to use whichever one you land on. The ``gdb`` to ``lldb`` +`command map `_ is a convnient reference for +how to accomplish common recipes in both debuggers. + + +Use together with ``spin`` +-------------------------- + +The ``spin`` `development workflow tool +`_. has built-in support for working +with both ``gdb`` and ``ldb`` via the ``spin gdb`` and ``spin lldb`` commands. + +For both debuggers, it's advisable to build NumPy in either the ``debug`` or +``debugoptimized`` meson build profile. To use ``debug`` you can pass the option +via ``spin build``: + +.. code-block:: bash + + spin build -- -Dbuildtype=debug + +You can pass additional arguments to `meson setup +`_ besides ``buildtype`` using the +same positional argument syntax for ``spin build``. + +Let's say you have a test script named `test.py` that lives in a ``test`` folder +in the same directory as the NumPy source checking. You could execute the test +script using the ``spin`` build of NumPy with the following incantation: + +.. code-block:: bash + + spin gdb ../test/test.py + +This will launch into gdb. If all you care about is a call stack for a crash, +type "r" and hit enter. Your test script will run and if a crash happens, you +type "bt" to get a traceback. For ``lldb``, the instructions are similar, just +replace ``spin gdb`` with ``spin lldb``. + +You can also set breakpoints and use other more advanced techniques. See the +documentation for your debugger for more details. + +One common issue with breakpoints in NumPy is that some code paths get hit +repeatedly during the import of the ``numpy`` module. This can make it tricky or +tedious to find the first "real" call after the NumPy import has completed and +the ``numpy`` module is fully initialized. + +One workaround is to use a script like this: + +.. code-block:: python + + import os + import signal + + import numpy as np + + PID = os.getpid() + + def do_nothing(*args): + pass + + signal.signal(signal.SIGUSR1, do_nothing) + + os.kill(PID, signal.SIGUSR1) + + # the code to run under a debugger follows + + +This example installs a signal handler for the ``SIGUSR1`` signal that does +nothing and then calls ``os.kill`` on the Python process with the ``SIGUSR1`` +signal. This causes the signal handler to fire and critically also causes both +``gdb`` and ``lldb`` to halt execution inside of the ``kill`` syscall. + +Since the ``os.kill`` call happens after the ``numpy`` module is already fully +initialized, this means any breakpoints set inside of ``kill`` will happen +*after* ``numpy`` is finished initializing. 
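+
+With this workaround in place, a typical ``gdb`` session looks roughly like
+the following (a minimal sketch; ``some_numpy_c_function`` is an illustrative
+placeholder for whatever C function you actually want to stop in):
+
+.. code-block::
+
+    (gdb) run
+    ...execution halts inside the kill syscall once numpy is imported...
+    (gdb) break some_numpy_c_function
+    (gdb) continue
+
+In ``lldb`` the equivalent commands are ``run``, ``breakpoint set --name
+some_numpy_c_function``, and ``continue``.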
+
+Use together with ``pytest``
+----------------------------
+
+You can also run ``pytest`` tests under a debugger. This requires using
+the debugger in a slightly more manual fashion, since ``spin`` does not yet
+automate this process. First, run ``spin build`` to ensure there is a fully
+built copy of NumPy managed by ``spin``. Then, to run the tests under ``lldb``
+you would do something like this:
+
+.. code-block:: bash
+
+    spin lldb $(which python) $(which pytest) build-install/usr/lib/python3.13/site-packages/numpy/_core/tests/test_multiarray.py
+
+This will execute the tests in ``test_multiarray.py`` under lldb after typing
+'r' and hitting enter. Note that this command comes from a session using Python
+3.13 on a Mac. If you are using a different Python version or operating system,
+the directory layout inside ``build-install`` may be slightly different.
+
+You can set breakpoints as described above. The issue about breakpoints
+commonly being hit during NumPy import also applies; consider refactoring your
+test workflow into a test script so you can adopt the workaround using
+``os.kill`` described above.
+
+Note the use of ``$(which python)`` to ensure the debugger receives a path to a
+Python executable. If you are using ``pyenv``, you may need to replace ``which
+python`` with ``pyenv which python``, since ``pyenv`` relies on shim scripts
+that ``which`` doesn't know about.
+
+
+Compiler Sanitizers
+===================
+
+The `compiler sanitizer `_ suites
+shipped by both GCC and LLVM offer a means to detect many common programming
+errors at runtime. The sanitizers work by instrumenting the application code at
+build time so additional runtime checks fire. Typically, sanitizers are run
+during the course of regular testing and if a sanitizer check fails, this leads
+to a test failure or crash, along with a report about the nature of the failure.
+
+While it is possible to use sanitizers with a "regular" build of CPython, it is
+best if you can set up a Python environment based on a from-source Python build
+with sanitizer instrumentation, and then use the instrumented Python to build
+NumPy and run the tests. If the entire Python stack is instrumented using the
+same sanitizer runtime, it becomes possible to identify issues that happen
+across the Python stack. This enables detecting memory leaks in NumPy due to
+misuse of memory allocated in CPython, for example.
+
+Build Python with Sanitizer Instrumentation
+-------------------------------------------
+
+See the `section in the Python developer's guide
+`_ on this topic for
+more information about building Python from source. To enable address sanitizer,
+you will need to pass ``--with-address-sanitizer`` to the ``configure`` script
+invocation when you build Python.
+
+You can also use `pyenv `_ to automate the
+process of building Python and quickly activate or deactivate a Python
+installation using a command-line interface similar to virtual
+environments. With ``pyenv`` you could install an ASAN-instrumented build of
+Python 3.13 like this:
+
+.. code-block:: bash
+
+    CONFIGURE_OPTS="--with-address-sanitizer" pyenv install 3.13
+
+If you are interested in thread sanitizer, the ``cpython_sanity`` `docker images
+`_ might also be a quicker choice
+that bypasses building Python from source, although it may be annoying to do
+debugging work inside of a docker image.
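+
+If you would rather not use ``pyenv`` or docker, a plain from-source build
+works too. Roughly (a minimal sketch, assuming a CPython source checkout; the
+``--prefix`` location is an arbitrary choice, not a required path):
+
+.. code-block:: bash
+
+    ./configure --with-address-sanitizer --prefix=$HOME/asan-python
+    make -j
+    make install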
+
+Use together with ``spin``
+--------------------------
+
+However you build Python, once you have an instrumented Python build, you can
+install NumPy's development and test dependencies and build NumPy with address
+sanitizer instrumentation. For example, to build NumPy with the ``debug``
+profile and address sanitizer, you would pass additional build options to
+``meson`` like this:
+
+.. code-block:: bash
+
+   spin build -- -Dbuildtype=debug -Db_sanitize=address
+
+
+Once the build is finished, you can use other ``spin`` commands like ``spin
+test`` and ``spin gdb`` as with any other Python build.
+
+Special considerations
+----------------------
+
+Some NumPy tests intentionally lead to ``malloc`` returning ``NULL``. In its
+default configuration, some of the compiler sanitizers flag this as an
+error. You can disable that check by passing ``allocator_may_return_null=1`` to
+the sanitizer as an option. For example, with address sanitizer:
+
+.. code-block:: bash
+
+   ASAN_OPTIONS=allocator_may_return_null=1 spin test
+
+You may see memory leaks coming from the Python interpreter, particularly on
+MacOS. If the memory leak reports are not useful, you can disable leak detection
+by passing ``detect_leaks=0`` in ``ASAN_OPTIONS``. You can pass more than one
+option using a comma-delimited list, like this:
+
+.. code-block:: bash
+
+   ASAN_OPTIONS=allocator_may_return_null=1:halt_on_error=1:detect_leaks=1 spin test
+
+The ``halt_on_error`` option can be particularly useful -- it hard-crashes the
+Python executable whenever it detects an error, along with a report about the
+error that includes a stack trace.
+
+You can also take a look at the ``compiler_sanitizers.yml`` GitHub actions
+workflow configuration. It describes several different CI jobs that are run as
+part of the NumPy tests using the Thread, Address, and Undefined Behavior
+sanitizers.

From 4a0d4d4b0c971696c4858fb95819cde56f83e2a4 Mon Sep 17 00:00:00 2001
From: Aniket Singh Yadav
Date: Wed, 17 Sep 2025 00:09:39 +0530
Subject: [PATCH 0462/1018] DOC: Improve documentation for f2py and Meson usage, add ufunc extension example

---
 doc/source/f2py/index.rst | 67 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 67 insertions(+)

diff --git a/doc/source/f2py/index.rst b/doc/source/f2py/index.rst
index b5cfb168073a..3427ffbbd2fc 100644
--- a/doc/source/f2py/index.rst
+++ b/doc/source/f2py/index.rst
@@ -45,6 +45,73 @@ end
 matches the NumPy version printed from ``python -m numpy.f2py``, then you can
 use the shorter version. If not, or if you cannot run ``f2py``, you should
 replace all calls to ``f2py`` mentioned in this guide with the longer version.
 
+=======================
+Using f2py with Meson
+=======================
+
+Meson is a modern build system recommended for building Python extension
+modules. It is especially relevant for Python 3.12 and later, where
+``distutils`` is no longer part of the standard library, and for NumPy 2.x.
+Meson provides a robust and maintainable way to build Fortran extensions with
+f2py.
+
+To build a Fortran extension using f2py and Meson, you can use Meson's
+``custom_target`` to invoke f2py and generate the extension module. The
+following minimal example demonstrates how to do this:
+
+.. 
code-block:: meson

+    # List your Fortran source files
+    fortran_sources = files('your_module.f90')
+
+    # Find the Python installation
+    py = import('python').find_installation()
+
+    # Create a custom target to build the extension with f2py
+    f2py_wrapper = custom_target(
+        'your_module_wrapper',
+        output: 'your_module.so',
+        input: fortran_sources,
+        command: [
+            py.full_path(), '-m', 'numpy.f2py',
+            '-c', '@INPUT@', '-m', 'your_module'
+        ],
+        # Install the built extension to the Python site-packages directory
+        install: true,
+        install_dir: py.get_install_dir()
+    )
+
+For more details and advanced usage, see the Meson build guide in
+the user documentation or refer to SciPy's Meson build files for
+real-world examples: https://github.com/scipy/scipy/tree/main/meson.build
+
+==========================================
+Building NumPy ufunc Extensions with Meson
+==========================================
+
+To build a NumPy ufunc extension (C API) using Meson, you can use the
+following template:
+
+.. code-block:: meson
+
+    # List your C source files
+    c_sources = files('your_ufunc_module.c')
+
+    # Find the Python installation
+    py = import('python').find_installation()
+
+    # Create an extension module
+    extension_module = py.extension_module(
+        'your_ufunc_module',
+        c_sources,
+        dependencies: py.dependency(),
+        install: true
+    )
+
+For more information on writing NumPy ufunc extensions, see the
+official NumPy documentation: https://numpy.org/doc/stable/reference/c-api.ufunc.html
+
+You can also refer to SciPy's Meson build files for real-world
+examples: https://github.com/scipy/scipy/tree/main/meson.build
+
 .. toctree::
    :maxdepth: 3

From a362d7888fef57d0926be81b3ece4257bdce6e94 Mon Sep 17 00:00:00 2001
From: Aniket Singh Yadav
Date: Wed, 17 Sep 2025 00:17:16 +0530
Subject: [PATCH 0463/1018] DOC: Improve documentation for f2py and Meson usage, add ufunc extension example

---
 doc/source/f2py/index.rst | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/doc/source/f2py/index.rst b/doc/source/f2py/index.rst
index 3427ffbbd2fc..fb1c573d1269 100644
--- a/doc/source/f2py/index.rst
+++ b/doc/source/f2py/index.rst
@@ -109,9 +109,6 @@ following template:
 For more information on writing NumPy ufunc extensions, see the
 official NumPy documentation: https://numpy.org/doc/stable/reference/c-api.ufunc.html
 
-You can also refer to SciPy's Meson build files for real-world
-examples: https://github.com/scipy/scipy/tree/main/meson.build
-
 .. toctree::
    :maxdepth: 3

From 2fcfde1804e222040d45a56dfe35608b2c562e29 Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Tue, 16 Sep 2025 14:01:16 -0600
Subject: [PATCH 0464/1018] Apply suggestions from code review

Co-authored-by: Lucas Colley

---
 doc/source/dev/development_advanced_debugging.rst | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/doc/source/dev/development_advanced_debugging.rst b/doc/source/dev/development_advanced_debugging.rst
index 23b06085b371..805b7251e626 100644
--- a/doc/source/dev/development_advanced_debugging.rst
+++ b/doc/source/dev/development_advanced_debugging.rst
@@ -242,7 +242,7 @@ Use together with ``spin``
 
 The ``spin`` `development workflow tool
 `_. has built-in support for working
-with both ``gdb`` and ``ldb`` via the ``spin gdb`` and ``spin lldb`` commands.
+with both ``gdb`` and ``lldb`` via the ``spin gdb`` and ``spin lldb`` commands.
 
 For both debuggers, it's advisable to build NumPy in either the ``debug`` or
 ``debugoptimized`` meson build profile. 
To use ``debug`` you can pass the option
@@ -256,8 +256,8 @@ You can pass additional arguments to `meson setup
 `_ besides ``buildtype`` using the
 same positional argument syntax for ``spin build``.
 
-Let's say you have a test script named `test.py` that lives in a ``test`` folder
-in the same directory as the NumPy source checking. You could execute the test
+Let's say you have a test script named ``test.py`` that lives in a ``test`` folder
+in the same directory as the NumPy source checkout. You could execute the test
 script using the ``spin`` build of NumPy with the following incantation:

From 9273b2fef33e3ac5a022e67fb84f8f5f66bdc30a Mon Sep 17 00:00:00 2001
From: jorenham
Date: Tue, 16 Sep 2025 22:55:45 +0200
Subject: [PATCH 0465/1018] BUG: Stable ``ScalarType`` ordering

---
 numpy/_core/numerictypes.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/_core/numerictypes.py b/numpy/_core/numerictypes.py
index 135dc1b51d97..265ad4f8eb1f 100644
--- a/numpy/_core/numerictypes.py
+++ b/numpy/_core/numerictypes.py
@@ -598,7 +598,7 @@ def _scalar_type_key(typ):
 
 ScalarType = [int, float, complex, bool, bytes, str, memoryview]
-ScalarType += sorted(set(sctypeDict.values()), key=_scalar_type_key)
+ScalarType += sorted(dict.fromkeys(sctypeDict.values()), key=_scalar_type_key)
 ScalarType = tuple(ScalarType)

From 707fee62a0f084d6068bc0624d3fb79f974d57bb Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Tue, 16 Sep 2025 14:48:30 -0600
Subject: [PATCH 0466/1018] DOC: respond to code review comments [skip actions]
 [skip azp] [skip cirrus]

---
 .../dev/development_advanced_debugging.rst    | 61 +++++++++++++++----
 1 file changed, 50 insertions(+), 11 deletions(-)

diff --git a/doc/source/dev/development_advanced_debugging.rst b/doc/source/dev/development_advanced_debugging.rst
index 805b7251e626..07c80314da1b 100644
--- a/doc/source/dev/development_advanced_debugging.rst
+++ b/doc/source/dev/development_advanced_debugging.rst
@@ -229,16 +229,16 @@ crash. The call stack often provides valuable context to understand the nature
 of a crash. C debuggers are also very useful during development, allowing
 interactive debugging in the C implementation of NumPy.
 
-The NumPy developers often use both ``gdb`` and ``lldb`` to debug Numpy. It does
-not matter which debugger you use - although on a Mac it is often far easier to
-use ``lldb`` than ``gdb``. That said, they have disjoint user interfaces, so you
-will need to learn how to use whichever one you land on. The ``gdb`` to ``lldb``
-`command map `_ is a convnient reference for
-how to accomplish common recipes in both debuggers.
+The NumPy developers often use both ``gdb`` and ``lldb`` to debug NumPy. As a
+rule of thumb, ``gdb`` is often easier to use on Linux while ``lldb`` is easier
+to use on a Mac. They have disjoint user interfaces, so you will need to learn
+how to use whichever one you land on. The ``gdb`` to ``lldb`` `command map
+`_ is a convenient reference for how to
+accomplish common recipes in both debuggers.
 
 
-Use together with ``spin``
---------------------------
+Building With Debug Symbols
+---------------------------
 
 The ``spin`` `development workflow tool
 `_. has built-in support for working
@@ -252,11 +256,16 @@
 
     spin build -- -Dbuildtype=debug
 
+to use ``debugoptimized``, pass ``-Dbuildtype=debugoptimized`` instead.
+
 You can pass additional arguments to `meson setup
 `_ besides ``buildtype`` using the
 same positional argument syntax for ``spin build``.
 
+Running a Test Script
+---------------------
+
 Let's say you have a test script named ``test.py`` that lives in a ``test`` folder
 in the same directory as the NumPy source checkout. You could execute the test
@@ -303,7 +308,41 @@ nothing and then calls ``os.kill`` on the Python process with the ``SIGUSR1``
 signal. This causes the signal handler to fire and critically also causes both
 ``gdb`` and ``lldb`` to halt execution inside of the ``kill`` syscall.
 
-Since the ``os.kill`` call happens after the ``numpy`` module is already fully
+If you run ``lldb`` you should see output like this:
+
+.. code-block::
+
+    Process 67365 stopped
+    * thread #1, queue = 'com.apple.main-thread', stop reason = signal SIGUSR1
+        frame #0: 0x000000019c4b9da4 libsystem_kernel.dylib`__kill + 8
+    libsystem_kernel.dylib`__kill:
+    ->  0x19c4b9da4 <+8>:  b.lo   0x19c4b9dc4    ; <+40>
+        0x19c4b9da8 <+12>: pacibsp
+        0x19c4b9dac <+16>: stp    x29, x30, [sp, #-0x10]!
+        0x19c4b9db0 <+20>: mov    x29, sp
+    Target 0: (python3.13) stopped.
+    (lldb) bt
+    * thread #1, queue = 'com.apple.main-thread', stop reason = signal SIGUSR1
+      * frame #0: 0x000000019c4b9da4 libsystem_kernel.dylib`__kill + 8
+        frame #1: 0x000000010087f5c4 libpython3.13.dylib`os_kill + 104
+        frame #2: 0x000000010071374c libpython3.13.dylib`cfunction_vectorcall_FASTCALL + 276
+        frame #3: 0x00000001006c1e3c libpython3.13.dylib`PyObject_Vectorcall + 88
+        frame #4: 0x00000001007edd1c libpython3.13.dylib`_PyEval_EvalFrameDefault + 23608
+        frame #5: 0x00000001007e7e6c libpython3.13.dylib`PyEval_EvalCode + 252
+        frame #6: 0x0000000100852944 libpython3.13.dylib`run_eval_code_obj + 180
+        frame #7: 0x0000000100852610 libpython3.13.dylib`run_mod + 220
+        frame #8: 0x000000010084fa4c libpython3.13.dylib`_PyRun_SimpleFileObject + 868
+        frame #9: 0x000000010084f400 libpython3.13.dylib`_PyRun_AnyFileObject + 160
+        frame #10: 0x0000000100874ab8 libpython3.13.dylib`pymain_run_file + 336
+        frame #11: 0x0000000100874324 libpython3.13.dylib`Py_RunMain + 1516
+        frame #12: 0x000000010087459c libpython3.13.dylib`pymain_main + 324
+        frame #13: 0x000000010087463c libpython3.13.dylib`Py_BytesMain + 40
+        frame #14: 0x000000019c152b98 dyld`start + 6076
+    (lldb)
+
+As you can see, the C stack trace is inside of the ``kill`` syscall and an
+``lldb`` prompt is active, allowing you to set breakpoints interactively. Since
+the ``os.kill`` call happens after the ``numpy`` module is already fully
 initialized, this means any breakpoints set inside of ``kill`` will happen
 *after* ``numpy`` is finished initializing.
 
@@ -410,7 +449,7 @@ the sanitizer as an option. For example, with address sanitizer:
 You may see memory leaks coming from the Python interpreter, particularly on
 MacOS. If the memory leak reports are not useful, you can disable leak detection
 by passing ``detect_leaks=0`` in ``ASAN_OPTIONS``. You can pass more than one
-option using a comma-delimited list, like this:
+option using a colon-delimited list, like this:
 
 .. 
code-block:: bash From c9e75927a43c5a52fc8ad7c645d8fa5c6c33f8ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= <8431159+mtsokol@users.noreply.github.com> Date: Wed, 17 Sep 2025 08:35:15 +0200 Subject: [PATCH 0467/1018] BUG: Fix `dtype` refcount in `__array__` (#29715) * BUG: Fix `dtype` refcount in `__array__` * Consider all possible code paths * Remove else-if branch * Move refcount checks to a separate test * Add code comments * Add missing `Py_DECREF` for error path * Apply review comments --- numpy/_core/src/multiarray/methods.c | 17 +++++++++----- numpy/_core/tests/test_api.py | 35 +++++++++++++++++++++++++++- 2 files changed, 45 insertions(+), 7 deletions(-) diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index f2150d63c496..2d754d7c6e91 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -934,6 +934,11 @@ array_getarray(PyArrayObject *self, PyObject *args, PyObject *kwds) return NULL; } + if (newtype == NULL) { + newtype = PyArray_DESCR(self); + Py_INCREF(newtype); // newtype is owned. + } + /* convert to PyArray_Type */ if (!PyArray_CheckExact(self)) { PyArrayObject *new; @@ -951,6 +956,7 @@ array_getarray(PyArrayObject *self, PyObject *args, PyObject *kwds) (PyObject *)self ); if (new == NULL) { + Py_DECREF(newtype); return NULL; } self = new; @@ -960,22 +966,21 @@ array_getarray(PyArrayObject *self, PyObject *args, PyObject *kwds) } if (copy == NPY_COPY_ALWAYS) { - if (newtype == NULL) { - newtype = PyArray_DESCR(self); - } - ret = PyArray_CastToType(self, newtype, 0); + ret = PyArray_CastToType(self, newtype, 0); // steals newtype reference Py_DECREF(self); return ret; } else { // copy == NPY_COPY_IF_NEEDED || copy == NPY_COPY_NEVER - if (newtype == NULL || PyArray_EquivTypes(PyArray_DESCR(self), newtype)) { + if (PyArray_EquivTypes(PyArray_DESCR(self), newtype)) { + Py_DECREF(newtype); return (PyObject *)self; } if (copy == NPY_COPY_IF_NEEDED) { - ret = PyArray_CastToType(self, newtype, 0); + ret = PyArray_CastToType(self, newtype, 0); // steals newtype reference. 
Py_DECREF(self); return ret; } else { // copy == NPY_COPY_NEVER PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg); + Py_DECREF(newtype); Py_DECREF(self); return NULL; } diff --git a/numpy/_core/tests/test_api.py b/numpy/_core/tests/test_api.py index c2e5bf8909f9..83e0a0179e0a 100644 --- a/numpy/_core/tests/test_api.py +++ b/numpy/_core/tests/test_api.py @@ -91,7 +91,7 @@ def test_array_array(): # instead we get a array([...], dtype=">V18") assert_equal(bytes(np.array(o).data), bytes(a.data)) - # test array + # test __array__ def custom__array__(self, dtype=None, copy=None): return np.array(100.0, dtype=dtype, copy=copy) @@ -157,6 +157,39 @@ def custom__array__(self, dtype=None, copy=None): assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64), np.ones((10, 10), dtype=np.float64)) + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test___array___refcount(): + class MyArray: + def __init__(self, dtype): + self.val = np.array(-1, dtype=dtype) + + def __array__(self, dtype=None, copy=None): + return self.val.__array__(dtype=dtype, copy=copy) + + # test all possible scenarios: + # dtype(none | same | different) x copy(true | false | none) + dt = np.dtype(np.int32) + old_refcount = sys.getrefcount(dt) + np.array(MyArray(dt)) + assert_equal(old_refcount, sys.getrefcount(dt)) + np.array(MyArray(dt), dtype=dt) + assert_equal(old_refcount, sys.getrefcount(dt)) + np.array(MyArray(dt), copy=None) + assert_equal(old_refcount, sys.getrefcount(dt)) + np.array(MyArray(dt), dtype=dt, copy=None) + assert_equal(old_refcount, sys.getrefcount(dt)) + dt2 = np.dtype(np.int16) + old_refcount2 = sys.getrefcount(dt2) + np.array(MyArray(dt), dtype=dt2) + assert_equal(old_refcount2, sys.getrefcount(dt2)) + np.array(MyArray(dt), dtype=dt2, copy=None) + assert_equal(old_refcount2, sys.getrefcount(dt2)) + with pytest.raises(ValueError): + np.array(MyArray(dt), dtype=dt2, copy=False) + assert_equal(old_refcount2, sys.getrefcount(dt2)) + + @pytest.mark.parametrize("array", [True, False]) def test_array_impossible_casts(array): # All builtin types can be forcibly cast, at least theoretically, From fbfaf4276a9f101525b9d7696b03d95acf32a0e3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 17 Sep 2025 17:21:09 +0000 Subject: [PATCH 0468/1018] MAINT: Bump larsoner/circleci-artifacts-redirector-action Bumps [larsoner/circleci-artifacts-redirector-action](https://github.com/larsoner/circleci-artifacts-redirector-action) from 1.2.0 to 1.3.1. - [Release notes](https://github.com/larsoner/circleci-artifacts-redirector-action/releases) - [Commits](https://github.com/larsoner/circleci-artifacts-redirector-action/compare/839631420e45a08af893032e5a5e8843bf47e8ff...5d358ff96e96429a5c64a969bb4a574555439f4f) --- updated-dependencies: - dependency-name: larsoner/circleci-artifacts-redirector-action dependency-version: 1.3.1 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/circleci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/circleci.yml b/.github/workflows/circleci.yml index 12c51735bf81..eafe61098588 100644 --- a/.github/workflows/circleci.yml +++ b/.github/workflows/circleci.yml @@ -17,7 +17,7 @@ jobs: statuses: write steps: - name: GitHub Action step - uses: larsoner/circleci-artifacts-redirector-action@839631420e45a08af893032e5a5e8843bf47e8ff # master + uses: larsoner/circleci-artifacts-redirector-action@5d358ff96e96429a5c64a969bb4a574555439f4f # master with: repo-token: ${{ secrets.GITHUB_TOKEN }} api-token: ${{ secrets.CIRCLE_TOKEN }} From abb8dac07f9364c0e3f3fe943b5ba9d4f6db1ed5 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Wed, 17 Sep 2025 21:16:04 +0300 Subject: [PATCH 0469/1018] ENH: add a casting option 'same_value' and use it in np.astype (#29129) * start implementing same_value casting * work through more places that check 'cast', add a TODO * add a test, percolate casting closer to inner loops * use SAME_VALUE_CAST flag for one inner loop variant * aligned test of same_value passes. Need more tests * handle unaligned casting with 'same_value' * extend tests to use source-is-complex * fix more interfaces to pass casting around, disallow using 'same_value' in raw_array_assign_scalar and raw_array_wheremasked_assign_scalar * raise in places that have a kwarg casting, besides np.astype * refactor based on review comments * CHAR_MAX,MIN -> SCHAR_MAX,MIN * copy context flags * add 'same_value' to typing stubs * document new feature * test, check exact float->int casting: refactor same_value check into a function * enable astype same_value casting for scalars * typo * fix ptr-to-src_value -> value casting errors * fix linting and docs, ignore warning better * gcc warning is different * fixes from review, typos * fix compile warning ignore and make filter in tests more specific, disallow non-numeric 'same_value' * fix warning filters * emit PyErr inside the loop * macOS can emit FPEs when touching NAN * Fix can-cast logic everywhere for same-value casts (only allow numeric) * reorder and simplify, from review * revert last commit and remove redundant checks * gate and document SAME_VALUE_CASTING for v2.4 * make SAME_VALUE a flag, not an enum * fixes from review * fixes from review * inline inner casting loop and float16 -> float64 calls * typo * fixes for AVX512 and debug builds, inline npy_halfbits_to_doublebits only on specific platforms * use optimize instead of inline --------- Co-authored-by: Matti Picus Co-authored-by: Sebastian Berg --- benchmarks/benchmarks/bench_ufunc.py | 2 +- .../upcoming_changes/29129.enhancement.rst | 7 + doc/source/reference/c-api/array.rst | 7 + numpy/__init__.cython-30.pxd | 1 + numpy/__init__.pxd | 1 + numpy/__init__.pyi | 2 +- numpy/_core/_add_newdocs.py | 18 +- numpy/_core/code_generators/cversions.txt | 3 +- numpy/_core/include/numpy/dtype_api.h | 14 +- numpy/_core/include/numpy/ndarraytypes.h | 13 + numpy/_core/include/numpy/numpyconfig.h | 5 +- numpy/_core/meson.build | 3 +- numpy/_core/src/common/array_assign.h | 4 +- .../src/multiarray/_multiarray_tests.c.src | 7 +- .../_core/src/multiarray/array_assign_array.c | 82 +++--- .../src/multiarray/array_assign_scalar.c | 27 +- numpy/_core/src/multiarray/array_method.c | 23 +- numpy/_core/src/multiarray/common.c | 9 - numpy/_core/src/multiarray/conversion_utils.c | 29 +- numpy/_core/src/multiarray/convert.c | 2 +- numpy/_core/src/multiarray/convert_datatype.c | 50 +++- 
numpy/_core/src/multiarray/ctors.c            |   1 -
 numpy/_core/src/multiarray/datetime.c         |  16 ++
 numpy/_core/src/multiarray/datetime_strings.c |   2 +-
 numpy/_core/src/multiarray/dtype_transfer.c   |  10 +-
 numpy/_core/src/multiarray/dtype_transfer.h   |  16 +-
 .../multiarray/legacy_dtype_implementation.c  |   8 +-
 .../multiarray/lowlevel_strided_loops.c.src   | 247 +++++++++++++++++-
 numpy/_core/src/multiarray/mapping.c          |   3 +-
 numpy/_core/src/multiarray/methods.c          |  13 +-
 numpy/_core/src/umath/legacy_array_method.c   |   9 +-
 numpy/_core/src/umath/reduction.c             |   2 +-
 numpy/_core/src/umath/ufunc_object.c          |  56 ++--
 numpy/_core/tests/test_casting_unittests.py   | 146 ++++++++++-
 numpy/_core/tests/test_conversion_utils.py    |   5 +-
 numpy/_core/tests/test_datetime.py            |   4 +
 numpy/_core/tests/test_einsum.py              |   5 +
 numpy/_core/tests/test_shape_base.py          |   5 +
 38 files changed, 703 insertions(+), 154 deletions(-)
 create mode 100644 doc/release/upcoming_changes/29129.enhancement.rst

diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py
index 155e7d4f7421..ac978981faba 100644
--- a/benchmarks/benchmarks/bench_ufunc.py
+++ b/benchmarks/benchmarks/bench_ufunc.py
@@ -304,7 +304,7 @@ def time_ndarray_dlp(self, methname, npdtypes):
 
 class NDArrayAsType(Benchmark):
     """ Benchmark for type conversion """
-    params = [list(itertools.combinations(TYPES1, 2))]
+    params = [list(itertools.product(TYPES1, TYPES1))]
     param_names = ['typeconv']
     timeout = 10
 
diff --git a/doc/release/upcoming_changes/29129.enhancement.rst b/doc/release/upcoming_changes/29129.enhancement.rst
new file mode 100644
index 000000000000..9a14f13c1f4a
--- /dev/null
+++ b/doc/release/upcoming_changes/29129.enhancement.rst
@@ -0,0 +1,7 @@
+``'same_value'`` for casting by value
+-------------------------------------
+The ``casting`` kwarg now has a ``'same_value'`` option that checks that the
+actual values can be round-trip cast without any change in value. Currently it
+is only implemented in `ndarray.astype`. This will raise a ``ValueError`` if
+any of the values in the array would change as a result of the cast, including
+rounding of floats or overflow of ints.
diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst
index 667b9948a3d9..95f5cc033b4d 100644
--- a/doc/source/reference/c-api/array.rst
+++ b/doc/source/reference/c-api/array.rst
@@ -4492,5 +4492,12 @@ Enumerated Types
 
       Allow any cast, no matter what kind of data loss may occur.
 
+.. c:macro:: NPY_SAME_VALUE_CASTING
+
+   Raise an error if any values would change during the cast. Currently
+   supported only in ``ndarray.astype(..., casting='same_value')``.
+
+   .. versionadded:: 2.4
+
 .. 
index:: pair: ndarray; C-API diff --git a/numpy/__init__.cython-30.pxd b/numpy/__init__.cython-30.pxd index 86c91cf617a5..c71898626070 100644 --- a/numpy/__init__.cython-30.pxd +++ b/numpy/__init__.cython-30.pxd @@ -156,6 +156,7 @@ cdef extern from "numpy/arrayobject.h": NPY_SAFE_CASTING NPY_SAME_KIND_CASTING NPY_UNSAFE_CASTING + NPY_SAME_VALUE_CASTING ctypedef enum NPY_CLIPMODE: NPY_CLIP diff --git a/numpy/__init__.pxd b/numpy/__init__.pxd index eb0764126116..40a24b6c7cc1 100644 --- a/numpy/__init__.pxd +++ b/numpy/__init__.pxd @@ -165,6 +165,7 @@ cdef extern from "numpy/arrayobject.h": NPY_SAFE_CASTING NPY_SAME_KIND_CASTING NPY_UNSAFE_CASTING + NPY_SAME_VALUE_CASTING ctypedef enum NPY_CLIPMODE: NPY_CLIP diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 39b256474b3f..caa5b6f5a724 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -940,7 +940,7 @@ _DTypeBuiltinKind: TypeAlias = L[0, 1, 2] _ArrayAPIVersion: TypeAlias = L["2021.12", "2022.12", "2023.12", "2024.12"] -_CastingKind: TypeAlias = L["no", "equiv", "safe", "same_kind", "unsafe"] +_CastingKind: TypeAlias = L["no", "equiv", "safe", "same_kind", "same_value", "unsafe"] _OrderKACF: TypeAlias = L["K", "A", "C", "F"] | None _OrderACF: TypeAlias = L["A", "C", "F"] | None diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index ad7029deaa0f..264bb8e25e7c 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -3212,7 +3212,7 @@ 'C' order otherwise, and 'K' means as close to the order the array elements appear in memory as possible. Default is 'K'. - casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + casting : {'no', 'equiv', 'safe', 'same_kind', 'same_value', 'unsafe'}, optional Controls what kind of data casting may occur. Defaults to 'unsafe' for backwards compatibility. @@ -3222,6 +3222,12 @@ * 'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. * 'unsafe' means any data conversions may be done. + * 'same_value' means any data conversions may be done, but the values + must not change, including rounding of floats or overflow of ints. + + .. versionadded:: 2.4 + Support for ``'same_value'`` was added. + subok : bool, optional If True, then sub-classes will be passed-through (default), otherwise the returned array will be forced to be a base-class array. @@ -3244,6 +3250,9 @@ ComplexWarning When casting from complex to float or int. To avoid this, one should use ``a.real.astype(t)``. + ValueError + When casting using ``'same_value'`` and the values would change or + overflow. Examples -------- @@ -3255,6 +3264,13 @@ >>> x.astype(int) array([1, 2, 2]) + >>> x.astype(int, casting="same_value") + Traceback (most recent call last): + ...
+ ValueError: could not cast 'same_value' double to long + + >>> x[:2].astype(int, casting="same_value") + array([1, 2]) """)) diff --git a/numpy/_core/code_generators/cversions.txt b/numpy/_core/code_generators/cversions.txt index 0d642d760b21..a04dd784c67f 100644 --- a/numpy/_core/code_generators/cversions.txt +++ b/numpy/_core/code_generators/cversions.txt @@ -79,5 +79,6 @@ # Version 19 (NumPy 2.2.0) No change 0x00000013 = 2b8f1f4da822491ff030b2b37dff07e3 # Version 20 (NumPy 2.3.0) -# Version 20 (NumPy 2.4.0) No change 0x00000014 = e56b74d32a934d085e7c3414cb9999b8, +# Version 21 (NumPy 2.4.0) Add 'same_value' casting, header additions +0x00000015 = e56b74d32a934d085e7c3414cb9999b8, diff --git a/numpy/_core/include/numpy/dtype_api.h b/numpy/_core/include/numpy/dtype_api.h index b37c9fbb6821..8c3ff720b372 100644 --- a/numpy/_core/include/numpy/dtype_api.h +++ b/numpy/_core/include/numpy/dtype_api.h @@ -99,6 +99,11 @@ typedef enum { } NPY_ARRAYMETHOD_FLAGS; +typedef enum { + /* Casting via same_value logic */ + NPY_SAME_VALUE_CONTEXT_FLAG=1, +} NPY_ARRAYMETHOD_CONTEXT_FLAGS; + typedef struct PyArrayMethod_Context_tag { /* The caller, which is typically the original ufunc. May be NULL */ PyObject *caller; @@ -107,7 +112,15 @@ typedef struct PyArrayMethod_Context_tag { /* Operand descriptors, filled in by resolve_descriptors */ PyArray_Descr *const *descriptors; + #if NPY_FEATURE_VERSION > NPY_2_3_API_VERSION + void * _reserved; + /* + * Optional flag to pass information into the inner loop + * NPY_ARRAYMETHOD_CONTEXT_FLAGS + */ + uint64_t flags; /* Structure may grow (this is harmless for DType authors) */ + #endif } PyArrayMethod_Context; @@ -144,7 +157,6 @@ typedef struct { #define NPY_METH_contiguous_indexed_loop 9 #define _NPY_METH_static_data 10 - /* * The resolve descriptors function, must be able to handle NULL values for * all output (but not input) `given_descrs` and fill `loop_descrs`. diff --git a/numpy/_core/include/numpy/ndarraytypes.h b/numpy/_core/include/numpy/ndarraytypes.h index ecd5d9724528..f740788f3720 100644 --- a/numpy/_core/include/numpy/ndarraytypes.h +++ b/numpy/_core/include/numpy/ndarraytypes.h @@ -233,6 +233,16 @@ typedef enum { NPY_KEEPORDER=2 } NPY_ORDER; +#if NPY_FEATURE_VERSION >= NPY_2_4_API_VERSION +/* + * check that no values overflow/change during casting + * Used explicitly only in the ArrayMethod creation or resolve_dtypes functions to + * indicate that a same-value cast is supported. 
In external APIs, use only + * NPY_SAME_VALUE_CASTING + */ +#define NPY_SAME_VALUE_CASTING_FLAG 64 +#endif + /* For specifying allowed casting in operations which support it */ typedef enum { _NPY_ERROR_OCCURRED_IN_CAST = -1, @@ -246,6 +256,9 @@ typedef enum { NPY_SAME_KIND_CASTING=3, /* Allow any casts */ NPY_UNSAFE_CASTING=4, +#if NPY_FEATURE_VERSION >= NPY_2_4_API_VERSION + NPY_SAME_VALUE_CASTING=NPY_UNSAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG, +#endif } NPY_CASTING; typedef enum { diff --git a/numpy/_core/include/numpy/numpyconfig.h b/numpy/_core/include/numpy/numpyconfig.h index 52d7e2b5d7d7..c129a3aceb6d 100644 --- a/numpy/_core/include/numpy/numpyconfig.h +++ b/numpy/_core/include/numpy/numpyconfig.h @@ -84,6 +84,7 @@ #define NPY_2_1_API_VERSION 0x00000013 #define NPY_2_2_API_VERSION 0x00000013 #define NPY_2_3_API_VERSION 0x00000014 +#define NPY_2_4_API_VERSION 0x00000015 /* @@ -172,8 +173,10 @@ #define NPY_FEATURE_VERSION_STRING "2.0" #elif NPY_FEATURE_VERSION == NPY_2_1_API_VERSION #define NPY_FEATURE_VERSION_STRING "2.1" -#elif NPY_FEATURE_VERSION == NPY_2_3_API_VERSION /* also 2.4 */ +#elif NPY_FEATURE_VERSION == NPY_2_3_API_VERSION #define NPY_FEATURE_VERSION_STRING "2.3" +#elif NPY_FEATURE_VERSION == NPY_2_4_API_VERSION + #define NPY_FEATURE_VERSION_STRING "2.4" #else #error "Missing version string define for new NumPy version." #endif diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 61a9d53a42d0..e05f25da39ca 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -50,7 +50,8 @@ C_ABI_VERSION = '0x02000000' # 0x00000013 - 2.1.x # 0x00000013 - 2.2.x # 0x00000014 - 2.3.x -C_API_VERSION = '0x00000014' +# 0x00000015 - 2.4.x +C_API_VERSION = '0x00000015' # Check whether we have a mismatch between the set C API VERSION and the # actual C API VERSION. Will raise a MismatchCAPIError if so. 
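A minimal sketch of the Python-level behavior added here, distilled from the docstring and tests in this patch; the exact error messages are an assumption:

    import numpy as np

    x = np.array([1.0, 2.0, 2.5])
    # 1.0 and 2.0 round-trip through int64 exactly, so this cast is allowed.
    x[:2].astype(np.int64, casting='same_value')   # -> array([1, 2])
    try:
        # 2.5 would be truncated, so the whole cast is rejected.
        x.astype(np.int64, casting='same_value')
    except ValueError:
        pass
    try:
        # Overflow is also rejected: 300 does not fit in int8.
        np.array([1, 300], dtype=np.int16).astype(np.int8, casting='same_value')
    except ValueError:
        pass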
diff --git a/numpy/_core/src/common/array_assign.h b/numpy/_core/src/common/array_assign.h index 8a28ed1d3a01..cc5f044ef080 100644 --- a/numpy/_core/src/common/array_assign.h +++ b/numpy/_core/src/common/array_assign.h @@ -46,7 +46,7 @@ PyArray_AssignRawScalar(PyArrayObject *dst, NPY_NO_EXPORT int raw_array_assign_scalar(int ndim, npy_intp const *shape, PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides, - PyArray_Descr *src_dtype, char *src_data); + PyArray_Descr *src_dtype, char *src_data, NPY_CASTING casting); /* * Assigns the scalar value to every element of the destination raw array @@ -59,7 +59,7 @@ raw_array_wheremasked_assign_scalar(int ndim, npy_intp const *shape, PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides, PyArray_Descr *src_dtype, char *src_data, PyArray_Descr *wheremask_dtype, char *wheremask_data, - npy_intp const *wheremask_strides); + npy_intp const *wheremask_strides, NPY_CASTING casting); /******** LOW-LEVEL ARRAY MANIPULATION HELPERS ********/ diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src index 4b58b2789e65..8b0c4b3f85d1 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -2170,12 +2170,17 @@ run_casting_converter(PyObject* NPY_UNUSED(self), PyObject *args) if (!PyArg_ParseTuple(args, "O&", PyArray_CastingConverter, &casting)) { return NULL; } - switch (casting) { + switch ((int)casting) { case NPY_NO_CASTING: return PyUnicode_FromString("NPY_NO_CASTING"); case NPY_EQUIV_CASTING: return PyUnicode_FromString("NPY_EQUIV_CASTING"); case NPY_SAFE_CASTING: return PyUnicode_FromString("NPY_SAFE_CASTING"); case NPY_SAME_KIND_CASTING: return PyUnicode_FromString("NPY_SAME_KIND_CASTING"); case NPY_UNSAFE_CASTING: return PyUnicode_FromString("NPY_UNSAFE_CASTING"); + case NPY_SAME_VALUE_CASTING: return PyUnicode_FromString("NPY_SAME_VALUE_CASTING"); + case NPY_NO_CASTING | NPY_SAME_VALUE_CASTING_FLAG: return PyUnicode_FromString("NPY_NO_CASTING | NPY_SAME_VALUE_CASTING_FLAG"); + case NPY_EQUIV_CASTING | NPY_SAME_VALUE_CASTING_FLAG: return PyUnicode_FromString("NPY_EQUIV_CASTING | NPY_SAME_VALUE_CASTING_FLAG"); + case NPY_SAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG: return PyUnicode_FromString("NPY_SAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG"); + case NPY_SAME_KIND_CASTING | NPY_SAME_VALUE_CASTING_FLAG: return PyUnicode_FromString("NPY_SAME_KIND_CASTING | NPY_SAME_VALUE_CASTING_FLAG"); default: return PyLong_FromLong(casting); } } diff --git a/numpy/_core/src/multiarray/array_assign_array.c b/numpy/_core/src/multiarray/array_assign_array.c index 8886d1cacb40..306ed07b0ace 100644 --- a/numpy/_core/src/multiarray/array_assign_array.c +++ b/numpy/_core/src/multiarray/array_assign_array.c @@ -29,6 +29,8 @@ #include "umathmodule.h" +#define NPY_ALIGNED_CASTING_FLAG 1 + /* * Check that array data is both uint-aligned and true-aligned for all array * elements, as required by the copy/casting code in lowlevel_strided_loops.c @@ -79,7 +81,8 @@ copycast_isaligned(int ndim, npy_intp const *shape, NPY_NO_EXPORT int raw_array_assign_array(int ndim, npy_intp const *shape, PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides, - PyArray_Descr *src_dtype, char *src_data, npy_intp const *src_strides) + PyArray_Descr *src_dtype, char *src_data, npy_intp const *src_strides, + int flags) { int idim; npy_intp shape_it[NPY_MAXDIMS]; @@ -87,14 +90,11 @@ raw_array_assign_array(int ndim, npy_intp const 
*shape, npy_intp src_strides_it[NPY_MAXDIMS]; npy_intp coord[NPY_MAXDIMS]; - int aligned; + int aligned = (flags & NPY_ALIGNED_CASTING_FLAG) != 0; + int same_value_cast = (flags & NPY_SAME_VALUE_CASTING_FLAG) != 0; NPY_BEGIN_THREADS_DEF; - aligned = - copycast_isaligned(ndim, shape, dst_dtype, dst_data, dst_strides) && - copycast_isaligned(ndim, shape, src_dtype, src_data, src_strides); - /* Use raw iteration with no heap allocation */ if (PyArray_PrepareTwoRawArrayIter( ndim, shape, @@ -120,21 +120,25 @@ raw_array_assign_array(int ndim, npy_intp const *shape, /* Get the function to do the casting */ NPY_cast_info cast_info; - NPY_ARRAYMETHOD_FLAGS flags; + NPY_ARRAYMETHOD_FLAGS method_flags; if (PyArray_GetDTypeTransferFunction(aligned, src_strides_it[0], dst_strides_it[0], src_dtype, dst_dtype, 0, - &cast_info, &flags) != NPY_SUCCEED) { + &cast_info, &method_flags) != NPY_SUCCEED) { return -1; } - if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + if (!(method_flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { npy_clear_floatstatus_barrier((char*)&src_data); } + if (same_value_cast) { + cast_info.context.flags |= NPY_SAME_VALUE_CONTEXT_FLAG; + } + /* Ensure number of elements exceeds threshold for threading */ - if (!(flags & NPY_METH_REQUIRES_PYAPI)) { + if (!(method_flags & NPY_METH_REQUIRES_PYAPI)) { npy_intp nitems = 1, i; for (i = 0; i < ndim; i++) { nitems *= shape_it[i]; @@ -144,11 +148,14 @@ raw_array_assign_array(int ndim, npy_intp const *shape, npy_intp strides[2] = {src_strides_it[0], dst_strides_it[0]}; + int result = 0; NPY_RAW_ITER_START(idim, ndim, coord, shape_it) { /* Process the innermost dimension */ char *args[2] = {src_data, dst_data}; - if (cast_info.func(&cast_info.context, - args, &shape_it[0], strides, cast_info.auxdata) < 0) { + result = cast_info.func(&cast_info.context, + args, &shape_it[0], strides, + cast_info.auxdata); + if (result < 0) { goto fail; } } NPY_RAW_ITER_TWO_NEXT(idim, ndim, coord, shape_it, @@ -158,7 +165,7 @@ raw_array_assign_array(int ndim, npy_intp const *shape, NPY_END_THREADS; NPY_cast_info_xfree(&cast_info); - if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + if (!(method_flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { int fpes = npy_get_floatstatus_barrier((char*)&src_data); if (fpes && PyUFunc_GiveFloatingpointErrors("cast", fpes) < 0) { return -1; @@ -183,7 +190,7 @@ raw_array_wheremasked_assign_array(int ndim, npy_intp const *shape, PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides, PyArray_Descr *src_dtype, char *src_data, npy_intp const *src_strides, PyArray_Descr *wheremask_dtype, char *wheremask_data, - npy_intp const *wheremask_strides) + npy_intp const *wheremask_strides, int flags) { int idim; npy_intp shape_it[NPY_MAXDIMS]; @@ -192,14 +199,11 @@ raw_array_wheremasked_assign_array(int ndim, npy_intp const *shape, npy_intp wheremask_strides_it[NPY_MAXDIMS]; npy_intp coord[NPY_MAXDIMS]; - int aligned; + int aligned = (flags & NPY_ALIGNED_CASTING_FLAG) != 0; + int same_value_cast = (flags & NPY_SAME_VALUE_CASTING_FLAG) != 0; NPY_BEGIN_THREADS_DEF; - aligned = - copycast_isaligned(ndim, shape, dst_dtype, dst_data, dst_strides) && - copycast_isaligned(ndim, shape, src_dtype, src_data, src_strides); - /* Use raw iteration with no heap allocation */ if (PyArray_PrepareThreeRawArrayIter( ndim, shape, @@ -229,39 +233,45 @@ raw_array_wheremasked_assign_array(int ndim, npy_intp const *shape, /* Get the function to do the casting */ NPY_cast_info cast_info; - NPY_ARRAYMETHOD_FLAGS flags; + NPY_ARRAYMETHOD_FLAGS 
method_flags; if (PyArray_GetMaskedDTypeTransferFunction(aligned, src_strides_it[0], dst_strides_it[0], wheremask_strides_it[0], src_dtype, dst_dtype, wheremask_dtype, 0, - &cast_info, &flags) != NPY_SUCCEED) { + &cast_info, &method_flags) != NPY_SUCCEED) { return -1; } + if (same_value_cast) { + cast_info.context.flags |= NPY_SAME_VALUE_CONTEXT_FLAG; + } - if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + if (!(method_flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { npy_clear_floatstatus_barrier(src_data); } - if (!(flags & NPY_METH_REQUIRES_PYAPI)) { + if (!(method_flags & NPY_METH_REQUIRES_PYAPI)) { npy_intp nitems = 1, i; for (i = 0; i < ndim; i++) { nitems *= shape_it[i]; } NPY_BEGIN_THREADS_THRESHOLDED(nitems); } + npy_intp strides[2] = {src_strides_it[0], dst_strides_it[0]}; + int result = 0; NPY_RAW_ITER_START(idim, ndim, coord, shape_it) { PyArray_MaskedStridedUnaryOp *stransfer; stransfer = (PyArray_MaskedStridedUnaryOp *)cast_info.func; /* Process the innermost dimension */ char *args[2] = {src_data, dst_data}; - if (stransfer(&cast_info.context, - args, &shape_it[0], strides, - (npy_bool *)wheremask_data, wheremask_strides_it[0], - cast_info.auxdata) < 0) { + result = stransfer(&cast_info.context, + args, &shape_it[0], strides, + (npy_bool *)wheremask_data, wheremask_strides_it[0], + cast_info.auxdata); + if (result < 0) { goto fail; } } NPY_RAW_ITER_THREE_NEXT(idim, ndim, coord, shape_it, @@ -272,15 +282,13 @@ raw_array_wheremasked_assign_array(int ndim, npy_intp const *shape, NPY_END_THREADS; NPY_cast_info_xfree(&cast_info); - if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + if (!(method_flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { int fpes = npy_get_floatstatus_barrier(src_data); if (fpes && PyUFunc_GiveFloatingpointErrors("cast", fpes) < 0) { return -1; } } - return 0; - fail: NPY_END_THREADS; NPY_cast_info_xfree(&cast_info); @@ -307,7 +315,6 @@ PyArray_AssignArray(PyArrayObject *dst, PyArrayObject *src, NPY_CASTING casting) { int copied_src = 0; - npy_intp src_strides[NPY_MAXDIMS]; /* Use array_assign_scalar if 'src' NDIM is 0 */ @@ -438,12 +445,21 @@ PyArray_AssignArray(PyArrayObject *dst, PyArrayObject *src, } } + int flags = (NPY_SAME_VALUE_CASTING_FLAG & casting); + if (copycast_isaligned(PyArray_NDIM(dst), PyArray_DIMS(dst), PyArray_DESCR(dst), + PyArray_DATA(dst), PyArray_STRIDES(dst)) && + copycast_isaligned(PyArray_NDIM(dst), PyArray_DIMS(dst), PyArray_DESCR(src), + PyArray_DATA(src), src_strides)) { + /* NPY_ALIGNED_CASTING_FLAG is internal to this file */ + flags |= NPY_ALIGNED_CASTING_FLAG; + } + if (wheremask == NULL) { /* A straightforward value assignment */ /* Do the assignment with raw array iteration */ if (raw_array_assign_array(PyArray_NDIM(dst), PyArray_DIMS(dst), PyArray_DESCR(dst), PyArray_DATA(dst), PyArray_STRIDES(dst), - PyArray_DESCR(src), PyArray_DATA(src), src_strides) < 0) { + PyArray_DESCR(src), PyArray_DATA(src), src_strides, flags) < 0){ goto fail; } } @@ -465,7 +481,7 @@ PyArray_AssignArray(PyArrayObject *dst, PyArrayObject *src, PyArray_DESCR(dst), PyArray_DATA(dst), PyArray_STRIDES(dst), PyArray_DESCR(src), PyArray_DATA(src), src_strides, PyArray_DESCR(wheremask), PyArray_DATA(wheremask), - wheremask_strides) < 0) { + wheremask_strides, flags) < 0) { goto fail; } } diff --git a/numpy/_core/src/multiarray/array_assign_scalar.c b/numpy/_core/src/multiarray/array_assign_scalar.c index 0199ba969eb9..f7d04ed0a39f 100644 --- a/numpy/_core/src/multiarray/array_assign_scalar.c +++ b/numpy/_core/src/multiarray/array_assign_scalar.c @@ -37,7 
+37,7 @@ NPY_NO_EXPORT int raw_array_assign_scalar(int ndim, npy_intp const *shape, PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides, - PyArray_Descr *src_dtype, char *src_data) + PyArray_Descr *src_dtype, char *src_data, NPY_CASTING casting) { int idim; npy_intp shape_it[NPY_MAXDIMS], dst_strides_it[NPY_MAXDIMS]; @@ -86,13 +86,19 @@ raw_array_assign_scalar(int ndim, npy_intp const *shape, NPY_BEGIN_THREADS_THRESHOLDED(nitems); } + if (((int)casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { + cast_info.context.flags |= NPY_SAME_VALUE_CONTEXT_FLAG; + } + npy_intp strides[2] = {0, dst_strides_it[0]}; + int result = 0; NPY_RAW_ITER_START(idim, ndim, coord, shape_it) { /* Process the innermost dimension */ char *args[2] = {src_data, dst_data}; - if (cast_info.func(&cast_info.context, - args, &shape_it[0], strides, cast_info.auxdata) < 0) { + result = cast_info.func(&cast_info.context, + args, &shape_it[0], strides, cast_info.auxdata); + if (result < 0) { goto fail; } } NPY_RAW_ITER_ONE_NEXT(idim, ndim, coord, @@ -126,7 +132,7 @@ raw_array_wheremasked_assign_scalar(int ndim, npy_intp const *shape, PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides, PyArray_Descr *src_dtype, char *src_data, PyArray_Descr *wheremask_dtype, char *wheremask_data, - npy_intp const *wheremask_strides) + npy_intp const *wheremask_strides, NPY_CASTING casting) { int idim; npy_intp shape_it[NPY_MAXDIMS], dst_strides_it[NPY_MAXDIMS]; @@ -177,8 +183,12 @@ raw_array_wheremasked_assign_scalar(int ndim, npy_intp const *shape, } NPY_BEGIN_THREADS_THRESHOLDED(nitems); } + if (((int)casting & NPY_SAME_VALUE_CASTING_FLAG) != 0) { + cast_info.context.flags |= NPY_SAME_VALUE_CONTEXT_FLAG; + } npy_intp strides[2] = {0, dst_strides_it[0]}; + int result = 0; NPY_RAW_ITER_START(idim, ndim, coord, shape_it) { /* Process the innermost dimension */ @@ -186,10 +196,11 @@ raw_array_wheremasked_assign_scalar(int ndim, npy_intp const *shape, stransfer = (PyArray_MaskedStridedUnaryOp *)cast_info.func; char *args[2] = {src_data, dst_data}; - if (stransfer(&cast_info.context, + result = stransfer(&cast_info.context, args, &shape_it[0], strides, (npy_bool *)wheremask_data, wheremask_strides_it[0], - cast_info.auxdata) < 0) { + cast_info.auxdata); + if (result < 0) { goto fail; } } NPY_RAW_ITER_TWO_NEXT(idim, ndim, coord, shape_it, @@ -298,7 +309,7 @@ PyArray_AssignRawScalar(PyArrayObject *dst, /* Do the assignment with raw array iteration */ if (raw_array_assign_scalar(PyArray_NDIM(dst), PyArray_DIMS(dst), PyArray_DESCR(dst), PyArray_DATA(dst), PyArray_STRIDES(dst), - src_dtype, src_data) < 0) { + src_dtype, src_data, casting) < 0) { goto fail; } } @@ -319,7 +330,7 @@ PyArray_AssignRawScalar(PyArrayObject *dst, PyArray_DESCR(dst), PyArray_DATA(dst), PyArray_STRIDES(dst), src_dtype, src_data, PyArray_DESCR(wheremask), PyArray_DATA(wheremask), - wheremask_strides) < 0) { + wheremask_strides, casting) < 0) { goto fail; } } diff --git a/numpy/_core/src/multiarray/array_method.c b/numpy/_core/src/multiarray/array_method.c index 5554cad5e2dd..755735ede290 100644 --- a/numpy/_core/src/multiarray/array_method.c +++ b/numpy/_core/src/multiarray/array_method.c @@ -39,6 +39,7 @@ #include "convert_datatype.h" #include "common.h" #include "numpy/ufuncobject.h" +#include "dtype_transfer.h" /* @@ -184,12 +185,17 @@ validate_spec(PyArrayMethod_Spec *spec) "not exceed %d. 
(method: %s)", NPY_MAXARGS, spec->name); return -1; } - switch (spec->casting) { + switch ((int)spec->casting) { case NPY_NO_CASTING: case NPY_EQUIV_CASTING: case NPY_SAFE_CASTING: case NPY_SAME_KIND_CASTING: case NPY_UNSAFE_CASTING: + case NPY_NO_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + case NPY_EQUIV_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + case NPY_SAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + case NPY_SAME_KIND_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + case NPY_UNSAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG: break; default: if (spec->casting != -1) { @@ -668,10 +674,11 @@ boundarraymethod__resolve_descripors( if (!parametric) { /* * Non-parametric can only mismatch if it switches from equiv to no - * (e.g. due to byteorder changes). + * (e.g. due to byteorder changes). Throw away same_value casting flag */ + int method_casting = self->method->casting & ~NPY_SAME_VALUE_CASTING_FLAG; if (cast != self->method->casting && - self->method->casting != NPY_EQUIV_CASTING) { + method_casting != NPY_EQUIV_CASTING) { PyErr_Format(PyExc_RuntimeError, "resolve_descriptors cast level changed even though " "the cast is non-parametric where the only possible " @@ -792,11 +799,10 @@ boundarraymethod__simple_strided_call( return NULL; } - PyArrayMethod_Context context = { - .caller = NULL, - .method = self->method, - .descriptors = descrs, - }; + PyArrayMethod_Context context; + NPY_context_init(&context, descrs); + context.method = self->method; + PyArrayMethod_StridedLoop *strided_loop = NULL; NpyAuxData *loop_data = NULL; NPY_ARRAYMETHOD_FLAGS flags = 0; @@ -984,3 +990,4 @@ NPY_NO_EXPORT PyTypeObject PyBoundArrayMethod_Type = { .tp_methods = boundarraymethod_methods, .tp_getset = boundarraymethods_getters, }; + diff --git a/numpy/_core/src/multiarray/common.c b/numpy/_core/src/multiarray/common.c index 8236ec5c65ae..4d1d9c238418 100644 --- a/numpy/_core/src/multiarray/common.c +++ b/numpy/_core/src/multiarray/common.c @@ -25,15 +25,6 @@ * variable is misnamed, but it's part of the public API so I'm not sure we * can just change it. Maybe someone should try and see if anyone notices. */ -/* - * In numpy 1.6 and earlier, this was NPY_UNSAFE_CASTING. In a future - * release, it will become NPY_SAME_KIND_CASTING. Right now, during the - * transitional period, we continue to follow the NPY_UNSAFE_CASTING rules (to - * avoid breaking people's code), but we also check for whether the cast would - * be allowed under the NPY_SAME_KIND_CASTING rules, and if not we issue a - * warning (that people's code will be broken in a future release.) 
- */ - NPY_NO_EXPORT NPY_CASTING NPY_DEFAULT_ASSIGN_CASTING = NPY_SAME_KIND_CASTING; diff --git a/numpy/_core/src/multiarray/conversion_utils.c b/numpy/_core/src/multiarray/conversion_utils.c index d487aa16727d..164aa2e4c8b4 100644 --- a/numpy/_core/src/multiarray/conversion_utils.c +++ b/numpy/_core/src/multiarray/conversion_utils.c @@ -911,7 +911,7 @@ PyArray_CorrelatemodeConverter(PyObject *object, NPY_CORRELATEMODE *val) } } -static int casting_parser(char const *str, Py_ssize_t length, void *data) +static int casting_parser_full(char const *str, Py_ssize_t length, void *data, int can_use_same_value) { NPY_CASTING *casting = (NPY_CASTING *)data; if (length < 2) { @@ -941,6 +941,10 @@ static int casting_parser(char const *str, Py_ssize_t length, void *data) *casting = NPY_SAME_KIND_CASTING; return 0; } + if (can_use_same_value && length == 10 && strcmp(str, "same_value") == 0) { + *casting = NPY_SAME_VALUE_CASTING; + return 0; + } break; case 's': if (length == 6 && strcmp(str, "unsafe") == 0) { @@ -952,6 +956,11 @@ static int casting_parser(char const *str, Py_ssize_t length, void *data) return -1; } +static int casting_parser(char const *str, Py_ssize_t length, void *data) +{ + return casting_parser_full(str, length, data, 0); +} + /*NUMPY_API * Convert any Python object, *obj*, to an NPY_CASTING enum. */ @@ -961,10 +970,26 @@ PyArray_CastingConverter(PyObject *obj, NPY_CASTING *casting) return string_converter_helper( obj, (void *)casting, casting_parser, "casting", "must be one of 'no', 'equiv', 'safe', " - "'same_kind', or 'unsafe'"); + "'same_kind', 'unsafe'"); return 0; } + +static int casting_parser_same_value(char const *str, Py_ssize_t length, void *data) +{ + return casting_parser_full(str, length, data, 1); +} + +NPY_NO_EXPORT int +PyArray_CastingConverterSameValue(PyObject *obj, NPY_CASTING *casting) +{ + return string_converter_helper( + obj, (void *)casting, casting_parser_same_value, "casting", + "must be one of 'no', 'equiv', 'safe', " + "'same_kind', 'unsafe', 'same_value'"); return 0; } + /***************************** * Other conversion functions *****************************/ diff --git a/numpy/_core/src/multiarray/convert.c b/numpy/_core/src/multiarray/convert.c index 8e0177616955..983d9bc19ce6 100644 --- a/numpy/_core/src/multiarray/convert.c +++ b/numpy/_core/src/multiarray/convert.c @@ -429,7 +429,7 @@ PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj) int retcode = raw_array_assign_scalar( PyArray_NDIM(arr), PyArray_DIMS(arr), descr, PyArray_BYTES(arr), PyArray_STRIDES(arr), - descr, (void *)value); + descr, (void *)value, NPY_UNSAFE_CASTING); if (PyDataType_REFCHK(descr)) { PyArray_ClearBuffer(descr, (void *)value, 0, 1, 1); diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index d34d852a706b..dbab8b4253d8 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -260,6 +260,10 @@ _get_castingimpl(PyObject *NPY_UNUSED(module), PyObject *args) * Supports the NPY_CAST_IS_VIEW check, and should be preferred to allow * extending cast-levels if necessary. * It is not valid for one of the arguments to be -1 to indicate an error. + * NPY_SAME_VALUE_CASTING_FLAG is kept only when both casting levels carry the + * flag, in which case return max_casting | NPY_SAME_VALUE_CASTING_FLAG.
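+ * For example, combining (NPY_SAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG) with + * plain NPY_SAME_KIND_CASTING gives plain NPY_SAME_KIND_CASTING, while + * combining it with (NPY_SAME_KIND_CASTING | NPY_SAME_VALUE_CASTING_FLAG) + * gives (NPY_SAME_KIND_CASTING | NPY_SAME_VALUE_CASTING_FLAG).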
+ * Usually this will be exactly NPY_SAME_VALUE_CASTING, but the logic here + * should handle other 'casting with same_value' options * * @param casting1 First (left-hand) casting level to compare * @param casting2 Second (right-hand) casting level to compare @@ -271,11 +275,14 @@ PyArray_MinCastSafety(NPY_CASTING casting1, NPY_CASTING casting2) if (casting1 < 0 || casting2 < 0) { return -1; } + int both_same_casting = casting1 & casting2 & NPY_SAME_VALUE_CASTING_FLAG; + casting1 &= ~NPY_SAME_VALUE_CASTING_FLAG; + casting2 &= ~NPY_SAME_VALUE_CASTING_FLAG; /* larger casting values are less safe */ if (casting1 > casting2) { - return casting1; + return casting1 | both_same_casting; } - return casting2; + return casting2 | both_same_casting; } @@ -746,13 +753,13 @@ can_cast_pyscalar_scalar_to( } else if (PyDataType_ISFLOAT(to)) { if (flags & NPY_ARRAY_WAS_PYTHON_COMPLEX) { - return casting == NPY_UNSAFE_CASTING; + return ((casting == NPY_UNSAFE_CASTING) || ((casting & NPY_SAME_VALUE_CASTING_FLAG) > 0)); } return 1; } else if (PyDataType_ISINTEGER(to)) { if (!(flags & NPY_ARRAY_WAS_PYTHON_INT)) { - return casting == NPY_UNSAFE_CASTING; + return ((casting == NPY_UNSAFE_CASTING) || ((casting & NPY_SAME_VALUE_CASTING_FLAG) > 0)); } return 1; } @@ -828,7 +835,7 @@ PyArray_CanCastArrayTo(PyArrayObject *arr, PyArray_Descr *to, NPY_NO_EXPORT const char * npy_casting_to_string(NPY_CASTING casting) { - switch (casting) { + switch ((int)casting) { case NPY_NO_CASTING: return "'no'"; case NPY_EQUIV_CASTING: @@ -839,6 +846,16 @@ npy_casting_to_string(NPY_CASTING casting) return "'same_kind'"; case NPY_UNSAFE_CASTING: return "'unsafe'"; + case NPY_NO_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + return "'no and same_value'"; + case NPY_EQUIV_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + return "'equiv and same_value'"; + case NPY_SAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + return "'safe and same_value'"; + case NPY_SAME_KIND_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + return "'same_kind and same_value'"; + case NPY_UNSAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + return "'same_value'"; default: return ""; } @@ -2116,9 +2133,9 @@ legacy_same_dtype_resolve_descriptors( if (PyDataType_ISNOTSWAPPED(loop_descrs[0]) == PyDataType_ISNOTSWAPPED(loop_descrs[1])) { *view_offset = 0; - return NPY_NO_CASTING; + return NPY_NO_CASTING | NPY_SAME_VALUE_CASTING_FLAG; } - return NPY_EQUIV_CASTING; + return NPY_EQUIV_CASTING | NPY_SAME_VALUE_CASTING_FLAG; } @@ -2305,6 +2322,7 @@ add_numeric_cast(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) if (dtypes[0]->singleton->kind == dtypes[1]->singleton->kind && from_itemsize == to_itemsize) { spec.casting = NPY_EQUIV_CASTING; + spec.casting |= NPY_SAME_VALUE_CASTING_FLAG; /* When there is no casting (equivalent C-types) use byteswap loops */ slots[0].slot = NPY_METH_resolve_descriptors; @@ -2319,13 +2337,17 @@ add_numeric_cast(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) } else if (_npy_can_cast_safely_table[from->type_num][to->type_num]) { spec.casting = NPY_SAFE_CASTING; - } - else if (dtype_kind_to_ordering(dtypes[0]->singleton->kind) <= - dtype_kind_to_ordering(dtypes[1]->singleton->kind)) { - spec.casting = NPY_SAME_KIND_CASTING; + spec.casting |= NPY_SAME_VALUE_CASTING_FLAG; } else { - spec.casting = NPY_UNSAFE_CASTING; + if (dtype_kind_to_ordering(dtypes[0]->singleton->kind) <= + dtype_kind_to_ordering(dtypes[1]->singleton->kind)) { + spec.casting = NPY_SAME_KIND_CASTING; + } + else { + spec.casting = NPY_UNSAFE_CASTING; + } + spec.casting |= NPY_SAME_VALUE_CASTING_FLAG; } /* 
Create a bound method, unbind and store it */ @@ -2463,10 +2485,10 @@ cast_to_string_resolve_descriptors( return -1; } - if (self->casting == NPY_UNSAFE_CASTING) { + if ((self->casting == NPY_UNSAFE_CASTING) || ((self->casting & NPY_SAME_VALUE_CASTING_FLAG) > 0)){ assert(dtypes[0]->type_num == NPY_UNICODE && dtypes[1]->type_num == NPY_STRING); - return NPY_UNSAFE_CASTING; + return self->casting; } if (loop_descrs[1]->elsize >= size) { diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 5bcf1cdf7ba3..b7f0dcb521bb 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -2830,7 +2830,6 @@ PyArray_CopyAsFlat(PyArrayObject *dst, PyArrayObject *src, NPY_ORDER order) count = (src_count < dst_count) ? src_count : dst_count; if (cast_info.func(&cast_info.context, args, &count, strides, cast_info.auxdata) < 0) { - res = -1; break; } diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index 0dac36a0903b..d7d56d086669 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -1233,6 +1233,10 @@ can_cast_datetime64_units(NPY_DATETIMEUNIT src_unit, NPY_DATETIMEUNIT dst_unit, NPY_CASTING casting) { + if ((casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { + /* TODO: support this */ + return 0; + } switch (casting) { /* Allow anything with unsafe casting */ case NPY_UNSAFE_CASTING: @@ -1278,6 +1282,10 @@ can_cast_timedelta64_units(NPY_DATETIMEUNIT src_unit, NPY_DATETIMEUNIT dst_unit, NPY_CASTING casting) { + if ((casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { + /* Use SAFE_CASTING, which implies SAME_VALUE */ + casting = NPY_SAFE_CASTING; + } switch (casting) { /* Allow anything with unsafe casting */ case NPY_UNSAFE_CASTING: @@ -1325,6 +1333,10 @@ can_cast_datetime64_metadata(PyArray_DatetimeMetaData *src_meta, PyArray_DatetimeMetaData *dst_meta, NPY_CASTING casting) { + if ((casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { + /* Force SAFE_CASTING */ + casting = NPY_SAFE_CASTING; + } switch (casting) { case NPY_UNSAFE_CASTING: return 1; @@ -1352,6 +1364,10 @@ can_cast_timedelta64_metadata(PyArray_DatetimeMetaData *src_meta, PyArray_DatetimeMetaData *dst_meta, NPY_CASTING casting) { + if ((casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { + /* Use SAFE_CASTING, which implies SAME_VALUE */ + casting = NPY_SAFE_CASTING; + } switch (casting) { case NPY_UNSAFE_CASTING: return 1; diff --git a/numpy/_core/src/multiarray/datetime_strings.c b/numpy/_core/src/multiarray/datetime_strings.c index f92eec3f5a59..97f24cfe821e 100644 --- a/numpy/_core/src/multiarray/datetime_strings.c +++ b/numpy/_core/src/multiarray/datetime_strings.c @@ -984,7 +984,7 @@ NpyDatetime_MakeISO8601Datetime( * the string representation, so ensure that the data * is being cast according to the casting rule. 
*/ - if (casting != NPY_UNSAFE_CASTING) { + if ((casting != NPY_UNSAFE_CASTING) && ((casting & NPY_SAME_VALUE_CASTING_FLAG) == 0)) { /* Producing a date as a local time is always 'unsafe' */ if (base <= NPY_FR_D && local) { PyErr_SetString(PyExc_TypeError, "Cannot create a local " diff --git a/numpy/_core/src/multiarray/dtype_transfer.c b/numpy/_core/src/multiarray/dtype_transfer.c index 64d5bfa89e8e..dbad10842aff 100644 --- a/numpy/_core/src/multiarray/dtype_transfer.c +++ b/numpy/_core/src/multiarray/dtype_transfer.c @@ -2910,8 +2910,6 @@ _clear_cast_info_after_get_loop_failure(NPY_cast_info *cast_info) * TODO: Expand the view functionality for general offsets, not just 0: * Partial casts could be skipped also for `view_offset != 0`. * - * The `out_needs_api` flag must be initialized. - * * NOTE: In theory casting errors here could be slightly misleading in case * of a multi-step casting scenario. It should be possible to improve * this in the future. @@ -3428,11 +3426,13 @@ PyArray_CastRawArrays(npy_intp count, /* Cast */ char *args[2] = {src, dst}; npy_intp strides[2] = {src_stride, dst_stride}; - cast_info.func(&cast_info.context, args, &count, strides, cast_info.auxdata); + int result = cast_info.func(&cast_info.context, args, &count, strides, cast_info.auxdata); /* Cleanup */ NPY_cast_info_xfree(&cast_info); - + if (result < 0) { + return NPY_FAIL; + } if (flags & NPY_METH_REQUIRES_PYAPI && PyErr_Occurred()) { return NPY_FAIL; } @@ -3831,4 +3831,4 @@ PyArray_PrepareThreeRawArrayIter(int ndim, npy_intp const *shape, *out_dataC = dataC; *out_ndim = ndim; return 0; -} \ No newline at end of file +} diff --git a/numpy/_core/src/multiarray/dtype_transfer.h b/numpy/_core/src/multiarray/dtype_transfer.h index 04df5cb64c22..a354820e5d45 100644 --- a/numpy/_core/src/multiarray/dtype_transfer.h +++ b/numpy/_core/src/multiarray/dtype_transfer.h @@ -25,6 +25,15 @@ typedef struct { } NPY_cast_info; +static inline void +NPY_context_init(PyArrayMethod_Context *context, PyArray_Descr *descr[2]) +{ + context->descriptors = descr; + context->caller = NULL; + context->_reserved = NULL; + context->flags = 0; +} + /* * Create a new cast-info struct with cast_info->context.descriptors linked. * Compilers should inline this to ensure the whole struct is not actually @@ -40,13 +49,9 @@ NPY_cast_info_init(NPY_cast_info *cast_info) * a scratch space to `NPY_cast_info` and link to that instead. */ cast_info->auxdata = NULL; - cast_info->context.descriptors = cast_info->descriptors; - - // TODO: Delete this again probably maybe create a new minimal init macro - cast_info->context.caller = NULL; + NPY_context_init(&(cast_info->context), cast_info->descriptors); } - /* * Free's all references and data held inside the struct (not the struct). 
* First checks whether `cast_info.func == NULL`, and assume it is @@ -100,6 +105,7 @@ NPY_cast_info_copy(NPY_cast_info *cast_info, NPY_cast_info *original) Py_XINCREF(cast_info->descriptors[1]); cast_info->context.caller = original->context.caller; Py_XINCREF(cast_info->context.caller); + cast_info->context.flags = original->context.flags; cast_info->context.method = original->context.method; Py_XINCREF(cast_info->context.method); if (original->auxdata == NULL) { diff --git a/numpy/_core/src/multiarray/legacy_dtype_implementation.c b/numpy/_core/src/multiarray/legacy_dtype_implementation.c index abfc1bd0e3cd..eee7ce492fab 100644 --- a/numpy/_core/src/multiarray/legacy_dtype_implementation.c +++ b/numpy/_core/src/multiarray/legacy_dtype_implementation.c @@ -367,7 +367,7 @@ PyArray_LegacyCanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to, * field; recurse just in case the single field is itself structured. */ if (!PyDataType_HASFIELDS(to) && !PyDataType_ISOBJECT(to)) { - if (casting == NPY_UNSAFE_CASTING && + if ((casting == NPY_UNSAFE_CASTING || ((casting & NPY_SAME_VALUE_CASTING_FLAG) > 0)) && PyDict_Size(lfrom->fields) == 1) { Py_ssize_t ppos = 0; PyObject *tuple; @@ -399,7 +399,7 @@ PyArray_LegacyCanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to, * casting; this is not correct, but needed since the treatment in can_cast * below got out of sync with astype; see gh-13667. */ - if (casting == NPY_UNSAFE_CASTING) { + if (casting == NPY_UNSAFE_CASTING || (casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { return 1; } } @@ -408,14 +408,14 @@ PyArray_LegacyCanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to, * If "from" is a simple data type and "to" has fields, then only * unsafe casting works (and that works always, even to multiple fields). */ - return casting == NPY_UNSAFE_CASTING; + return (casting == NPY_UNSAFE_CASTING || (casting & NPY_SAME_VALUE_CASTING_FLAG) > 0); } /* * Everything else we consider castable for unsafe for now. * FIXME: ensure what we do here is consistent with "astype", * i.e., deal more correctly with subarrays and user-defined dtype. 
*/ - else if (casting == NPY_UNSAFE_CASTING) { + else if (casting == NPY_UNSAFE_CASTING || (casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { return 1; } /* diff --git a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src index 0a69a08e678e..13a126c53850 100644 --- a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src +++ b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src @@ -17,6 +17,7 @@ #include #include #include +#include #include "lowlevel_strided_loops.h" #include "array_assign.h" @@ -24,6 +25,7 @@ #include "usertypes.h" #include "umathmodule.h" +#include "gil_utils.h" /* * x86 platform works with unaligned access but the compiler is allowed to @@ -77,6 +79,57 @@ a = (x)[7]; (x)[7] = (x)[8]; (x)[8] = a; \ } +/* half-to-double, copied from CPP to allow inlining */ + +static NPY_GCC_OPT_3 +uint64_t ToDoubleBits(uint16_t h) +{ + uint16_t h_exp = (h&0x7c00u); + uint64_t d_sgn = ((uint64_t)h&0x8000u) << 48; + switch (h_exp) { + case 0x0000u: { // 0 or subnormal + uint16_t h_sig = (h&0x03ffu); + // Signed zero + if (h_sig == 0) { + return d_sgn; + } + // Subnormal + h_sig <<= 1; + while ((h_sig&0x0400u) == 0) { + h_sig <<= 1; + h_exp++; + } + uint64_t d_exp = ((uint64_t)(1023 - 15 - h_exp)) << 52; + uint64_t d_sig = ((uint64_t)(h_sig&0x03ffu)) << 42; + return d_sgn + d_exp + d_sig; + } + case 0x7c00u: // inf or NaN + // All-ones exponent and a copy of the significand + return d_sgn + 0x7ff0000000000000ULL + (((uint64_t)(h&0x03ffu)) << 42); + default: // normalized + // Just need to adjust the exponent and shift + return d_sgn + (((uint64_t)(h&0x7fffu) + 0xfc000u) << 42); + } +} + +NPY_FINLINE +npy_uint64 _npy_halfbits_to_doublebits(npy_uint16 h){ + /* + * Use npymath versions for all the special cases, only inline the + * x86_64 non-intrinsic case. 
Someday we will rewrite this in CPP and + * can then explore inlining more + */ + #if defined(NPY_HAVE_AVX512FP16) + return npy_halfbits_to_doublebits(h); + #elif defined(NPY_HAVE_VSX3) && defined(NPY_HAVE_VSX3_HALF_DOUBLE) + return npy_halfbits_to_doublebits(h); + #elif defined(__ARM_FP16_FORMAT_IEEE) + return npy_halfbits_to_doublebits(h); + #else + return (double)(ToDoubleBits(h)); + #endif +} + /************* STRIDED COPYING/SWAPPING SPECIALIZED FUNCTIONS *************/ /**begin repeat @@ -742,6 +795,7 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * * #is_float1 = 0*12, 1, 0, 0, 1, 0, 0# * #is_double1 = 0*13, 1, 0, 0, 1, 0# * #is_complex1 = 0*15, 1*3# + * #is_unsigned1 = 1*6, 0*12# */ /**begin repeat1 @@ -766,6 +820,16 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * * npy_byte, npy_short, npy_int, npy_long, npy_longlong, * _npy_half, npy_float, npy_double, npy_longdouble, * npy_float, npy_double, npy_longdouble# + * #type2max = 0, + * UCHAR_MAX, USHRT_MAX, UINT_MAX, ULONG_MAX, ULLONG_MAX, + * SCHAR_MAX, SHRT_MAX, INT_MAX, LONG_MAX, LLONG_MAX, + * 65500, FLT_MAX, DBL_MAX, LDBL_MAX, + * FLT_MAX, DBL_MAX, LDBL_MAX# + * #type2min = 0, + * 0, 0, 0, 0, 0, + * SCHAR_MIN, SHRT_MIN, INT_MIN, LONG_MIN, LLONG_MIN, + * -65500, -FLT_MAX, -DBL_MAX, -LDBL_MAX, + * -FLT_MAX, -DBL_MAX, -LDBL_MAX# * #is_bool2 = 1, 0*17# * #is_emu_half2 = 0*11, EMULATED_FP16, 0*6# * #is_native_half2 = 0*11, NATIVE_FP16, 0*6# @@ -810,40 +874,52 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * /* Determine an appropriate casting conversion function */ #if @is_emu_half1@ - +# define _TO_RTYPE1(x) npy_half_to_float(x) # if @is_float2@ # define _CONVERT_FN(x) npy_halfbits_to_floatbits(x) +# define _ROUND_TRIP(x) npy_floatbits_to_halfbits(_CONVERT_FN(x)) # elif @is_double2@ -# define _CONVERT_FN(x) npy_halfbits_to_doublebits(x) +# define _CONVERT_FN(x) _npy_halfbits_to_doublebits(x) +# define _ROUND_TRIP(x) npy_doublebits_to_halfbits(_CONVERT_FN(x)) # elif @is_emu_half2@ # define _CONVERT_FN(x) (x) +# define _ROUND_TRIP(x) (x) # elif @is_bool2@ # define _CONVERT_FN(x) ((npy_bool)!npy_half_iszero(x)) +# define _ROUND_TRIP(x) npy_float_to_half((float)(!npy_half_iszero(x))) # else # define _CONVERT_FN(x) ((_TYPE2)npy_half_to_float(x)) +# define _ROUND_TRIP(x) npy_float_to_half((float)_CONVERT_FN(x)) # endif #elif @is_emu_half2@ +# define _TO_RTYPE1(x) (@rtype1@)(x) # if @is_float1@ # define _CONVERT_FN(x) npy_floatbits_to_halfbits(x) +# define _ROUND_TRIP(x) (@rtype1@)npy_halfbits_to_floatbits(_CONVERT_FN(x)) # elif @is_double1@ # define _CONVERT_FN(x) npy_doublebits_to_halfbits(x) +# define _ROUND_TRIP(x) (@rtype1@)_npy_halfbits_to_doublebits(_CONVERT_FN(x)) # elif @is_emu_half1@ # define _CONVERT_FN(x) (x) +# define _ROUND_TRIP(x) (x) # elif @is_bool1@ # define _CONVERT_FN(x) npy_float_to_half((float)(x!=0)) +# define _ROUND_TRIP(x) (x) # else # define _CONVERT_FN(x) npy_float_to_half((float)x) +# define _ROUND_TRIP(x) ((@rtype1@)npy_half_to_float(_CONVERT_FN(x))) # endif #else - # if @is_bool2@ || @is_bool1@ # define _CONVERT_FN(x) ((npy_bool)(x != 0)) # else # define _CONVERT_FN(x) ((_TYPE2)x) # endif +# define _TO_RTYPE1(x) (@rtype1@)(x) +# define _ROUND_TRIP(x) _TO_RTYPE1(_CONVERT_FN(x)) #endif @@ -869,8 +945,109 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * #define GCC_CAST_OPT_LEVEL NPY_GCC_OPT_3 #endif +#define _RETURN_SAME_VALUE_FAILURE \ + npy_gil_error(PyExc_ValueError, "could not cast 'same_value' @name1@ to @name2@"); \ + return -1 + +#if !@is_bool2@ +/* + * Check various modes of failure to accurately cast src_value to dst + 
*/ +static GCC_CAST_OPT_LEVEL int +@prefix@_check_same_value_@name1@_to_@name2@(@rtype1@ src_value) { + + /* 1. NaN/Infs always work for float to float and otherwise never */ +#if (@is_float1@ || @is_emu_half1@ || @is_double1@ || @is_native_half1@) + if (!npy_isfinite(_TO_RTYPE1(src_value))) { +# if (@is_float2@ || @is_emu_half2@ || @is_double2@ || @is_native_half2@) + return 0; /* float to float can preserve NaN/Inf */ +# else + _RETURN_SAME_VALUE_FAILURE; /* cannot preserve NaN/Inf */ +# endif + } +#endif + /* + * 2. Check that the src does not overflow the dst. + * This is complicated by a warning that, for instance, int8 cannot + * overflow int64max + */ +# ifdef __GNUC__ +# pragma GCC diagnostic push +# ifdef __clang__ +# pragma GCC diagnostic ignored "-Wtautological-constant-out-of-range-compare" +# endif +# pragma GCC diagnostic ignored "-Wtautological-compare" +# endif +# if !@is_bool1@ + if (_TO_RTYPE1(src_value) > @type2max@) { + _RETURN_SAME_VALUE_FAILURE; + } +# if !@is_unsigned1@ + if (_TO_RTYPE1(src_value) < @type2min@) { + _RETURN_SAME_VALUE_FAILURE; + } +# endif +# endif /* !is_bool1 */ + /* 3. Check that the value can round trip exactly */ + if (src_value != _ROUND_TRIP(src_value)) { + _RETURN_SAME_VALUE_FAILURE; + } +# ifdef __GNUC__ +# pragma GCC diagnostic pop +# endif /* __GNUC__ */ + return 0; +} +#endif + +/* + * Use a declaration instead of moving the function definition to here to make reviewing + * easier. TODO: move the repeat3 up here instead of these declarations + */ + +static GCC_CAST_OPT_LEVEL int +@prefix@_cast_@name1@_to_@name2@_no_same_value( + PyArrayMethod_Context *context, char *const *args, + const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *NPY_UNUSED(data)); + +#if !@is_bool2@ +static GCC_CAST_OPT_LEVEL int +@prefix@_cast_@name1@_to_@name2@_same_value( + PyArrayMethod_Context *context, char *const *args, + const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *NPY_UNUSED(data)); +#endif + +/* + * This is the entry point function called outside this file + */ + static GCC_CAST_OPT_LEVEL int @prefix@_cast_@name1@_to_@name2@( + PyArrayMethod_Context *context, char *const *args, + const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *data) +{ +#if !@is_bool2@ + int same_value_casting = ((context->flags & NPY_SAME_VALUE_CONTEXT_FLAG) == NPY_SAME_VALUE_CONTEXT_FLAG); + if (same_value_casting) { + return @prefix@_cast_@name1@_to_@name2@_same_value(context, args, dimensions, strides, data); + } else { +#else + { +#endif + return @prefix@_cast_@name1@_to_@name2@_no_same_value(context, args, dimensions, strides, data); +}} + +/**begin repeat3 + * #func_name = no_same_value,same_value# + * #same_value = 0,1# + */ + + +#if !(@is_bool2@ && @same_value@) +static GCC_CAST_OPT_LEVEL int +@prefix@_cast_@name1@_to_@name2@_@func_name@( PyArrayMethod_Context *context, char *const *args, const npy_intp *dimensions, const npy_intp *strides, NpyAuxData *NPY_UNUSED(data)) @@ -898,7 +1075,7 @@ static GCC_CAST_OPT_LEVEL int assert(N == 0 || npy_is_aligned(dst, NPY_ALIGNOF(_TYPE2))); #endif - /*printf("@prefix@_cast_@name1@_to_@name2@\n");*/ + /* printf("@prefix@_cast_@name1@_to_@name2@_@func_name@, N=%ld\n", N); */ while (N--) { #if @aligned@ @@ -915,31 +1092,81 @@ static GCC_CAST_OPT_LEVEL int # if @is_complex2@ dst_value[0] = _CONVERT_FN(src_value[0]); dst_value[1] = _CONVERT_FN(src_value[1]); -# elif !@aligned@ +# if @same_value@ + if (@prefix@_check_same_value_@name1@_to_@name2@(*(@rtype1@ *)&src_value[0]) < 0) { + return -1; + } 
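+ /* for complex sources, the imaginary component must round-trip as well */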
+ if (@prefix@_check_same_value_@name1@_to_@name2@(*(@rtype1@ *)&src_value[1]) < 0) { + return -1; + } +# endif //same_value +# elif !@aligned@ # if @is_bool2@ dst_value = _CONVERT_FN(src_value[0]) || _CONVERT_FN(src_value[1]); # else dst_value = _CONVERT_FN(src_value[0]); +# if @same_value@ + if (@prefix@_check_same_value_@name1@_to_@name2@(*(@rtype1@ *)&src_value[0]) < 0) { + return -1; + } + if (src_value[1] != 0) { + npy_gil_error(PyExc_ValueError, "could not cast 'same_value' @name1@ to @name2@: imag is not 0"); + return -1; + } +# endif //same_value # endif # else # if @is_bool2@ *(_TYPE2 *)dst = _CONVERT_FN(src_value[0]) || _CONVERT_FN(src_value[1]); # else *(_TYPE2 *)dst = _CONVERT_FN(src_value[0]); +# if @same_value@ + if (@prefix@_check_same_value_@name1@_to_@name2@(*(@rtype1@ *)&src_value[0]) < 0) { + return -1; + } + if (src_value[1] != 0) { + npy_gil_error(PyExc_ValueError, "could not cast 'same_value' @name1@ to @name2@: imag is not 0"); + return -1; + } +# endif //same_value # endif # endif -#else +#else // @is_complex1@ # if @is_complex2@ # if !@aligned@ dst_value[0] = _CONVERT_FN(src_value); -# else +# if @same_value@ + if (@prefix@_check_same_value_@name1@_to_@name2@(*(@rtype1@ *)&src_value) < 0) { + return -1; + } +# endif //same_value +# else //!aligned dst_value[0] = _CONVERT_FN(*(_TYPE1 *)src); +# if @same_value@ + if (@prefix@_check_same_value_@name1@_to_@name2@(*(@rtype1@ *)src) < 0) { + return -1; + } +# endif //same_value # endif dst_value[1] = 0; # elif !@aligned@ dst_value = _CONVERT_FN(src_value); +# if !@is_bool2@ +# if @same_value@ + if (@prefix@_check_same_value_@name1@_to_@name2@(*(@rtype1@ *)&src_value) < 0) { + return -1; + } +# endif //same_value +# endif // @is_bool2@ # else *(_TYPE2 *)dst = _CONVERT_FN(*(_TYPE1 *)src); +# if !@is_bool2@ +# if @same_value@ + if (@prefix@_check_same_value_@name1@_to_@name2@(*((@rtype1@ *)src)) < 0) { + return -1; + } +# endif //same_value +# endif // @is_bool2@ # endif #endif @@ -962,6 +1189,9 @@ static GCC_CAST_OPT_LEVEL int } return 0; } +#endif // !@is_bool2@ + +/**end repeat3**/ #if @is_native_half1@ || @is_float1@ || @is_double1@ #if @is_native_half2@ || @is_float2@ || @is_double2@ @@ -977,6 +1207,9 @@ static GCC_CAST_OPT_LEVEL int #undef _CONVERT_FN #undef _TYPE2 #undef _TYPE1 +#undef _TO_RTYPE1 +#undef _ROUND_TRIP +#undef _RETURN_SAME_VALUE_FAILURE #endif diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index 28d6a5a26938..997f21928db3 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -2169,7 +2169,8 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) * Could add a casting check, but apparently most assignments do * not care about safe casting. 
*/ - if (mapiter_set(mit, &cast_info, meth_flags, is_aligned) < 0) { + int result = mapiter_set(mit, &cast_info, meth_flags, is_aligned); + if (result < 0) { goto fail; } diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 2d754d7c6e91..c856fc52742d 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -752,6 +752,10 @@ array_toscalar(PyArrayObject *self, PyObject *args) return PyArray_MultiIndexGetItem(self, multi_index); } + +NPY_NO_EXPORT int +PyArray_CastingConverterSameValue(PyObject *obj, NPY_CASTING *casting); + static PyObject * array_astype(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) @@ -770,7 +774,7 @@ array_astype(PyArrayObject *self, if (npy_parse_arguments("astype", args, len_args, kwnames, "dtype", &PyArray_DTypeOrDescrConverterRequired, &dt_info, "|order", &PyArray_OrderConverter, &order, - "|casting", &PyArray_CastingConverter, &casting, + "|casting", &PyArray_CastingConverterSameValue, &casting, "|subok", &PyArray_PythonPyIntFromInt, &subok, "|copy", &PyArray_AsTypeCopyConverter, &forcecopy, NULL, NULL, NULL) < 0) { @@ -840,7 +844,12 @@ array_astype(PyArrayObject *self, ((PyArrayObject_fields *)ret)->nd = PyArray_NDIM(self); ((PyArrayObject_fields *)ret)->descr = dtype; } - int success = PyArray_CopyInto(ret, self); + int success; + if (((int)casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { + success = PyArray_AssignArray(ret, self, NULL, casting); + } else { + success = PyArray_AssignArray(ret, self, NULL, NPY_UNSAFE_CASTING); + } Py_DECREF(dtype); ((PyArrayObject_fields *)ret)->nd = out_ndim; diff --git a/numpy/_core/src/umath/legacy_array_method.c b/numpy/_core/src/umath/legacy_array_method.c index 705262fedd38..7a85937fcc8f 100644 --- a/numpy/_core/src/umath/legacy_array_method.c +++ b/numpy/_core/src/umath/legacy_array_method.c @@ -439,11 +439,10 @@ PyArray_NewLegacyWrappingArrayMethod(PyUFuncObject *ufunc, descrs[i] = bound_res->dtypes[i]->singleton; } - PyArrayMethod_Context context = { - (PyObject *)ufunc, - bound_res->method, - descrs, - }; + PyArrayMethod_Context context; + NPY_context_init(&context, descrs); + context.caller = (PyObject *)ufunc; + context.method = bound_res->method; int ret = get_initial_from_ufunc(&context, 0, context.method->legacy_initial); diff --git a/numpy/_core/src/umath/reduction.c b/numpy/_core/src/umath/reduction.c index b376b94936bc..384ac052b226 100644 --- a/numpy/_core/src/umath/reduction.c +++ b/numpy/_core/src/umath/reduction.c @@ -372,7 +372,7 @@ PyUFunc_ReduceWrapper(PyArrayMethod_Context *context, PyArray_NDIM(result), PyArray_DIMS(result), PyArray_DESCR(result), PyArray_BYTES(result), PyArray_STRIDES(result), - op_dtypes[0], initial_buf); + op_dtypes[0], initial_buf, NPY_UNSAFE_CASTING); if (ret < 0) { goto fail; } diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 6b8cc1789f94..75e651c29088 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -2085,11 +2085,10 @@ PyUFunc_GeneralizedFunctionInternal(PyUFuncObject *ufunc, NPY_SIZEOF_INTP * nop); /* Final preparation of the arraymethod call */ - PyArrayMethod_Context context = { - .caller = (PyObject *)ufunc, - .method = ufuncimpl, - .descriptors = operation_descrs, - }; + PyArrayMethod_Context context; + NPY_context_init(&context, operation_descrs); + context.caller = (PyObject *)ufunc; + context.method = ufuncimpl; PyArrayMethod_StridedLoop *strided_loop; 
NPY_ARRAYMETHOD_FLAGS flags = 0; @@ -2204,11 +2203,10 @@ PyUFunc_GenericFunctionInternal(PyUFuncObject *ufunc, } /* Final preparation of the arraymethod call */ - PyArrayMethod_Context context = { - .caller = (PyObject *)ufunc, - .method = ufuncimpl, - .descriptors = operation_descrs, - }; + PyArrayMethod_Context context; + NPY_context_init(&context, operation_descrs); + context.caller = (PyObject *)ufunc; + context.method = ufuncimpl; /* Do the ufunc loop */ if (wheremask != NULL) { @@ -2554,11 +2552,10 @@ PyUFunc_Reduce(PyUFuncObject *ufunc, return NULL; } - PyArrayMethod_Context context = { - .caller = (PyObject *)ufunc, - .method = ufuncimpl, - .descriptors = descrs, - }; + PyArrayMethod_Context context; + NPY_context_init(&context, descrs); + context.caller = (PyObject *)ufunc; + context.method = ufuncimpl; PyArrayObject *result = PyUFunc_ReduceWrapper(&context, arr, out, wheremask, axis_flags, keepdims, @@ -2630,12 +2627,10 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, assert(PyArray_EquivTypes(descrs[0], descrs[1]) && PyArray_EquivTypes(descrs[0], descrs[2])); - PyArrayMethod_Context context = { - .caller = (PyObject *)ufunc, - .method = ufuncimpl, - .descriptors = descrs, - }; - + PyArrayMethod_Context context; + NPY_context_init(&context, descrs); + context.caller = (PyObject *)ufunc; + context.method = ufuncimpl; ndim = PyArray_NDIM(arr); #if NPY_UF_DBG_TRACING @@ -3062,12 +3057,10 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, goto fail; } - PyArrayMethod_Context context = { - .caller = (PyObject *)ufunc, - .method = ufuncimpl, - .descriptors = descrs, - }; - + PyArrayMethod_Context context; + NPY_context_init(&context, descrs); + context.caller = (PyObject *)ufunc; + context.method = ufuncimpl; ndim = PyArray_NDIM(arr); #if NPY_UF_DBG_TRACING @@ -5918,11 +5911,10 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) } } - PyArrayMethod_Context context = { - .caller = (PyObject *)ufunc, - .method = ufuncimpl, - .descriptors = operation_descrs, - }; + PyArrayMethod_Context context; + NPY_context_init(&context, operation_descrs); + context.caller = (PyObject *)ufunc; + context.method = ufuncimpl; /* Use contiguous strides; if there is such a loop it may be faster */ npy_intp strides[3] = { diff --git a/numpy/_core/tests/test_casting_unittests.py b/numpy/_core/tests/test_casting_unittests.py index 91ecc0dc75b0..5f643f8045ba 100644 --- a/numpy/_core/tests/test_casting_unittests.py +++ b/numpy/_core/tests/test_casting_unittests.py @@ -10,13 +10,14 @@ import enum import random import textwrap +import warnings import pytest import numpy as np from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl from numpy.lib.stride_tricks import as_strided -from numpy.testing import assert_array_equal +from numpy.testing import assert_array_equal, assert_equal # Simple skips object, parametric and long double (unsupported by struct) simple_dtypes = "?bhilqBHILQefdFD" @@ -76,8 +77,11 @@ class Casting(enum.IntEnum): safe = 2 same_kind = 3 unsafe = 4 + same_value = 64 +same_value_dtypes = tuple(type(np.dtype(c)) for c in "?bhilqBHILQefdgFDG") + def _get_cancast_table(): table = textwrap.dedent(""" X ?
b h i l q B H I L Q e f d g F D G S U V O M m @@ -117,6 +121,9 @@ def _get_cancast_table(): cancast[from_dt] = {} for to_dt, c in zip(dtypes, row[2::2]): cancast[from_dt][to_dt] = convert_cast[c] + # Of the types checked, numeric casts support same-value + if from_dt in same_value_dtypes and to_dt in same_value_dtypes: + cancast[from_dt][to_dt] |= Casting.same_value return cancast @@ -272,9 +279,11 @@ def test_simple_cancast(self, from_Dt): if view_off is not None: # If a view is acceptable, this is "no" casting # and byte order must be matching. - assert casting == Casting.no - # The above table lists this as "equivalent" - assert Casting.equiv == CAST_TABLE[from_Dt][to_Dt] + assert casting == Casting.no | Casting.same_value + # The above table lists this as "equivalent", perhaps + # with "same_value" + v = CAST_TABLE[from_Dt][to_Dt] & ~Casting.same_value + assert Casting.equiv == v # Note that to_res may not be the same as from_dt assert from_res.isnative == to_res.isnative else: @@ -304,6 +313,7 @@ def test_simple_direct_casts(self, from_dt): to_dt = to_dt.values[0] cast = get_castingimpl(type(from_dt), type(to_dt)) + # print("from_dt", from_dt, "to_dt", to_dt) casting, (from_res, to_res), view_off = cast._resolve_descriptors( (from_dt, to_dt)) @@ -317,7 +327,9 @@ arr1, arr2, values = self.get_data(from_dt, to_dt) + # print("2", arr1, arr2, cast) cast._simple_strided_call((arr1, arr2)) + # print("3") # Check via python list assert arr2.tolist() == values @@ -815,3 +827,129 @@ def test_nonstandard_bool_to_other(self, dtype): res = nonstandard_bools.astype(dtype) expected = [0, 1, 1] assert_array_equal(res, expected) + + @pytest.mark.parametrize("to_dtype", + np.typecodes["AllInteger"] + np.typecodes["AllFloat"]) + @pytest.mark.parametrize("from_dtype", + np.typecodes["AllInteger"] + np.typecodes["AllFloat"]) + @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") + def test_same_value_overflow(self, from_dtype, to_dtype): + if from_dtype == to_dtype: + return + top1 = 0 + top2 = 0 + try: + top1 = np.iinfo(from_dtype).max + except ValueError: + top1 = np.finfo(from_dtype).max + try: + top2 = np.iinfo(to_dtype).max + except ValueError: + top2 = np.finfo(to_dtype).max + # No need to test if top2 >= top1, since the test will also run with + # the dtypes reversed. Catch the warning if the comparison warns, + # i.e.
np.int16(65535) < np.float16(6.55e4)
+        with warnings.catch_warnings(record=True):
+            warnings.simplefilter("always", RuntimeWarning)
+            if top2 >= top1:
+                # will be tested when the dtypes are reversed
+                return
+        # Happy path
+        arr1 = np.array([0] * 10, dtype=from_dtype)
+        arr2 = np.array([0] * 10, dtype=to_dtype)
+        arr1_astype = arr1.astype(to_dtype, casting='same_value')
+        assert_equal(arr1_astype, arr2, strict=True)
+        # Make it overflow, both aligned and unaligned
+        arr1[0] = top1
+        aligned = np.empty(arr1.itemsize * arr1.size + 1, 'uint8')
+        unaligned = aligned[1:].view(arr1.dtype)
+        unaligned[:] = arr1
+        with pytest.raises(ValueError):
+            # Casting float to float with overflow should raise
+            # RuntimeWarning (fperror)
+            # Casting float to int with overflow sometimes raises
+            # RuntimeWarning (fperror)
+            # Casting with overflow and 'same_value' should raise ValueError
+            with warnings.catch_warnings(record=True) as w:
+                warnings.simplefilter("always", RuntimeWarning)
+                arr1.astype(to_dtype, casting='same_value')
+                assert len(w) < 2
+        with pytest.raises(ValueError):
+            # again, unaligned
+            with warnings.catch_warnings(record=True) as w:
+                warnings.simplefilter("always", RuntimeWarning)
+                unaligned.astype(to_dtype, casting='same_value')
+                assert len(w) < 2
+
+    @pytest.mark.parametrize("to_dtype",
+            np.typecodes["AllInteger"])
+    @pytest.mark.parametrize("from_dtype",
+            np.typecodes["AllFloat"])
+    @pytest.mark.filterwarnings("ignore::RuntimeWarning")
+    def test_same_value_float_to_int(self, from_dtype, to_dtype):
+        # Should not raise, since the values can round trip
+        arr1 = np.arange(10, dtype=from_dtype)
+        aligned = np.empty(arr1.itemsize * arr1.size + 1, 'uint8')
+        unaligned = aligned[1:].view(arr1.dtype)
+        unaligned[:] = arr1
+        arr2 = np.arange(10, dtype=to_dtype)
+        assert_array_equal(arr1.astype(to_dtype, casting='same_value'), arr2)
+        assert_array_equal(unaligned.astype(to_dtype, casting='same_value'), arr2)
+
+        # Should raise, since values cannot round trip.
Might warn too about + # FPE errors + arr1_66 = arr1 + 0.666 + unaligned_66 = unaligned + 0.66 + with pytest.raises(ValueError): + arr1_66.astype(to_dtype, casting='same_value') + with pytest.raises(ValueError): + unaligned_66.astype(to_dtype, casting='same_value') + + @pytest.mark.parametrize("to_dtype", + np.typecodes["AllInteger"]) + @pytest.mark.parametrize("from_dtype", + np.typecodes["AllFloat"]) + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_same_value_float_to_int_scalar(self, from_dtype, to_dtype): + # Should not raise, since the values can round trip + s1 = np.array(10, dtype=from_dtype) + assert s1.astype(to_dtype, casting='same_value') == 10 + + # Should raise, since values cannot round trip + s1_66 = s1 + 0.666 + with pytest.raises(ValueError): + s1_66.astype(to_dtype, casting='same_value') + + @pytest.mark.parametrize("value", [np.nan, np.inf, -np.inf]) + @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_same_value_naninf(self, value): + # These work, but may trigger FPE warnings on macOS + np.array([value], dtype=np.half).astype(np.cdouble, casting='same_value') + np.array([value], dtype=np.half).astype(np.double, casting='same_value') + np.array([value], dtype=np.float32).astype(np.cdouble, casting='same_value') + np.array([value], dtype=np.float32).astype(np.double, casting='same_value') + np.array([value], dtype=np.float32).astype(np.half, casting='same_value') + np.array([value], dtype=np.complex64).astype(np.half, casting='same_value') + # These fail + with pytest.raises(ValueError): + np.array([value], dtype=np.half).astype(np.int64, casting='same_value') + with pytest.raises(ValueError): + np.array([value], dtype=np.complex64).astype(np.int64, casting='same_value') + with pytest.raises(ValueError): + np.array([value], dtype=np.float32).astype(np.int64, casting='same_value') + + @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") + def test_same_value_complex(self): + arr = np.array([complex(1, 1)], dtype=np.cdouble) + # This works + arr.astype(np.complex64, casting='same_value') + # Casting with a non-zero imag part fails + with pytest.raises(ValueError): + arr.astype(np.float32, casting='same_value') + + def test_same_value_scalar(self): + i = np.array(123, dtype=np.int64) + f = np.array(123, dtype=np.float64) + assert i.astype(np.float64, casting='same_value') == f + assert f.astype(np.int64, casting='same_value') == f diff --git a/numpy/_core/tests/test_conversion_utils.py b/numpy/_core/tests/test_conversion_utils.py index d63ca9e58df5..067c2973c592 100644 --- a/numpy/_core/tests/test_conversion_utils.py +++ b/numpy/_core/tests/test_conversion_utils.py @@ -172,9 +172,12 @@ def test_valid(self): self._check("no", "NPY_NO_CASTING") self._check("equiv", "NPY_EQUIV_CASTING") self._check("safe", "NPY_SAFE_CASTING") - self._check("same_kind", "NPY_SAME_KIND_CASTING") self._check("unsafe", "NPY_UNSAFE_CASTING") + self._check("same_kind", "NPY_SAME_KIND_CASTING") + def test_invalid(self): + # Currently, 'same_value' is supported only in ndarray.astype + self._check_value_error("same_value") class TestIntpConverter: """ Tests of PyArray_IntpConverter """ diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index 21151ed11ba0..18b32ea42da2 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -1870,6 +1870,10 @@ def test_datetime_as_string(self): '2032-07-18') 
assert_equal(np.datetime_as_string(a, unit='D', casting='unsafe'), '2032-07-18') + + with pytest.raises(ValueError): + np.datetime_as_string(a, unit='Y', casting='same_value') + assert_equal(np.datetime_as_string(a, unit='h'), '2032-07-18T12') assert_equal(np.datetime_as_string(a, unit='m'), '2032-07-18T12:23') diff --git a/numpy/_core/tests/test_einsum.py b/numpy/_core/tests/test_einsum.py index 82789eae0679..6403d47034d7 100644 --- a/numpy/_core/tests/test_einsum.py +++ b/numpy/_core/tests/test_einsum.py @@ -79,6 +79,11 @@ def test_einsum_errors(self, do_opt, einsum_fn): b = np.ones((3, 4, 5)) einsum_fn('aabcb,abc', a, b) + with pytest.raises(ValueError): + a = np.arange(3) + # einsum_path does not yet accept kwarg 'casting' + np.einsum('ij->j', [a, a], casting='same_value') + def test_einsum_sorting_behavior(self): # Case 1: 26 dimensions (all lowercase indices) n1 = 26 diff --git a/numpy/_core/tests/test_shape_base.py b/numpy/_core/tests/test_shape_base.py index 1b9728e5c006..2d606a2d33fd 100644 --- a/numpy/_core/tests/test_shape_base.py +++ b/numpy/_core/tests/test_shape_base.py @@ -383,6 +383,11 @@ def test_concatenate(self): assert_(out is rout) assert_equal(res, rout) + def test_concatenate_same_value(self): + r4 = list(range(4)) + with pytest.raises(ValueError, match="^casting must be one of"): + concatenate([r4, r4], casting="same_value") + @pytest.mark.skipif( IS_PYPY, reason="PYPY handles sq_concat, nb_add differently than cpython" From eb354d7f470e263da747044b05dd735093be0374 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 17 Sep 2025 23:57:39 +0200 Subject: [PATCH 0470/1018] TST: Test the ``ScalarType`` ordering assumptions --- numpy/_core/tests/test_numerictypes.py | 29 ++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/numpy/_core/tests/test_numerictypes.py b/numpy/_core/tests/test_numerictypes.py index c6ecd5327850..5763a964c41d 100644 --- a/numpy/_core/tests/test_numerictypes.py +++ b/numpy/_core/tests/test_numerictypes.py @@ -616,6 +616,35 @@ def test_names_are_undersood_by_dtype(self, t): assert np.dtype(t.__name__).type is t +class TestScalarTypeOrder: + @pytest.mark.parametrize(('a', 'b'), [ + # signedinteger + (np.byte, np.short), + (np.short, np.intc), + (np.intc, np.long), + (np.long, np.longlong), + # unsignedinteger + (np.ubyte, np.ushort), + (np.ushort, np.uintc), + (np.uintc, np.ulong), + (np.ulong, np.ulonglong), + # floating + (np.half, np.single), + (np.single, np.double), + (np.double, np.longdouble), + # complexfloating + (np.csingle, np.cdouble), + (np.cdouble, np.clongdouble), + # flexible + (np.bytes_, np.str_), + (np.str_, np.void), + # bouncy castles + (np.datetime64, np.timedelta64), + ]) + def test_stable_ordering(self, a: type[np.generic], b: type[np.generic]): + assert np.ScalarType.index(a) <= np.ScalarType.index(b) + + class TestBoolDefinition: def test_bool_definition(self): assert nt.bool is np.bool From 0a8763c168335278e2a4728eb1703cd3ca7ef146 Mon Sep 17 00:00:00 2001 From: mattip Date: Thu, 18 Sep 2025 09:26:40 +0300 Subject: [PATCH 0471/1018] DOC: add another mention of 'same_value' [skip actions][skip azp][skip cirrus] --- doc/source/user/basics.types.rst | 34 +++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/doc/source/user/basics.types.rst b/doc/source/user/basics.types.rst index a605d32fcd51..41c100e519db 100644 --- a/doc/source/user/basics.types.rst +++ b/doc/source/user/basics.types.rst @@ -35,18 +35,6 @@ See :ref:`arrays.dtypes.constructing` for more information 
about specifying and constructing data type objects, including how
to specify parameters like the byte order.
 
-To convert the type of an array, use the .astype() method. For example: ::
-
-    >>> z.astype(np.float64) #doctest: +NORMALIZE_WHITESPACE
-    array([0., 1., 2.])
-
-Note that, above, we could have used the *Python* float object as a dtype
-instead of `numpy.float64`. NumPy knows that
-:class:`int` refers to `numpy.int_`, :class:`bool` means
-`numpy.bool`, that :class:`float` is `numpy.float64` and
-:class:`complex` is `numpy.complex128`. The other data-types do not have
-Python equivalents.
-
 To determine the type of an array, look at the dtype attribute::
 
     >>> z.dtype
@@ -66,6 +54,28 @@ properties of the type, such as whether it is an integer::
 
     >>> np.issubdtype(d, np.floating)
     False
 
+To convert the type of an array, use the .astype() method. For example: ::
+
+    >>> z.astype(np.float64) #doctest: +NORMALIZE_WHITESPACE
+    array([0., 1., 2.])
+
+Note that, above, we could have used the *Python* float object as a dtype
+instead of `numpy.float64`. NumPy knows that
+:class:`int` refers to `numpy.int_`, :class:`bool` means
+`numpy.bool`, that :class:`float` is `numpy.float64` and
+:class:`complex` is `numpy.complex128`. The other data-types do not have
+Python equivalents.
+
+Sometimes the conversion can overflow, for instance when converting a `numpy.int64` value
+300 to `numpy.int8`. NumPy follows C casting rules, so that value would overflow and
+become 44 ``(300 - 256)``. If you wish to avoid such overflows, you can specify that the
+overflow action fail by using ``same_value`` for the ``casting`` argument (see also
+:ref:`overflow-errors`): ::
+
+    >>> z.astype(np.float64, casting="same_value") #doctest: +NORMALIZE_WHITESPACE
+    array([0., 1., 2.])
+
+
 Numerical Data Types
 --------------------
 
From 22cd369bec46b557f9bf38cf8e3a8ff57ac71016 Mon Sep 17 00:00:00 2001
From: DWesl <22566757+DWesl@users.noreply.github.com>
Date: Thu, 18 Sep 2025 04:04:04 -0400
Subject: [PATCH 0472/1018] BUG: Include python-including headers first
 (#29281)

Makes sure that Python headers always come first (even if only implicitly).
Also adds a script to check this and a CI run to enforce it.

NOTE(seberg): If that script/CI run becomes problematic and not super easy
to fix, maybe we shouldn't worry too much about just deleting it again.
It's worthwhile for sure, but I am not sure it is worthwhile to spend too
many cycles making it pretty.
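For context: CPython's C API documentation requires that Python.h be
included before any standard headers, because it defines feature-test
macros (such as _POSIX_C_SOURCE) that must be visible before the libc
headers are parsed. A minimal sketch of the ordering this change enforces
(illustrative only; the stdlib header chosen below is just an example,
not taken from a specific file in this patch):

    /* Python.h, or a header that itself includes it, must come first */
    #include <Python.h>

    /* standard library headers only after Python.h */
    #include <stdlib.h>

    /* project headers that rely on Python.h being set up */
    #include "numpy/arrayobject.h"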
--- azure-pipelines.yml | 3 + numpy/_core/src/common/binop_override.h | 2 +- numpy/_core/src/common/blas_utils.c | 8 +- numpy/_core/src/common/blas_utils.h | 4 +- numpy/_core/src/multiarray/array_method.c | 2 +- numpy/_core/src/multiarray/common.h | 2 + .../src/multiarray/textreading/conversions.h | 4 +- numpy/_core/src/npysort/npysort_common.h | 2 +- numpy/_core/src/umath/clip.cpp | 2 +- numpy/random/src/mt19937/randomkit.c | 2 + tools/check_python_h_first.py | 254 ++++++++++++++++++ tools/get_submodule_paths.py | 31 +++ 12 files changed, 304 insertions(+), 12 deletions(-) create mode 100755 tools/check_python_h_first.py create mode 100644 tools/get_submodule_paths.py diff --git a/azure-pipelines.yml b/azure-pipelines.yml index af6e5cf52ac4..89dee2267352 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -56,6 +56,9 @@ stages: python tools/linter.py displayName: 'Run Lint Checks' failOnStderr: true + - script: | + python tools/check_python_h_first.py + displayName: 'Check Python.h is first file included' - job: Linux_Python_311_32bit_full_with_asserts pool: diff --git a/numpy/_core/src/common/binop_override.h b/numpy/_core/src/common/binop_override.h index a6b4747ca560..e17b147c1d0a 100644 --- a/numpy/_core/src/common/binop_override.h +++ b/numpy/_core/src/common/binop_override.h @@ -1,8 +1,8 @@ #ifndef NUMPY_CORE_SRC_COMMON_BINOP_OVERRIDE_H_ #define NUMPY_CORE_SRC_COMMON_BINOP_OVERRIDE_H_ -#include #include +#include #include "numpy/arrayobject.h" #include "get_attr_string.h" diff --git a/numpy/_core/src/common/blas_utils.c b/numpy/_core/src/common/blas_utils.c index 155963891071..409d3818ae0f 100644 --- a/numpy/_core/src/common/blas_utils.c +++ b/numpy/_core/src/common/blas_utils.c @@ -1,3 +1,7 @@ +#include "numpy/npy_math.h" // npy_get_floatstatus_barrier +#include "numpy/numpyconfig.h" // NPY_VISIBILITY_HIDDEN +#include "blas_utils.h" + #include #include #include @@ -6,10 +10,6 @@ #include #endif -#include "numpy/numpyconfig.h" // NPY_VISIBILITY_HIDDEN -#include "numpy/npy_math.h" // npy_get_floatstatus_barrier -#include "blas_utils.h" - #if NPY_BLAS_CHECK_FPE_SUPPORT /* Return whether we're running on macOS 15.4 or later diff --git a/numpy/_core/src/common/blas_utils.h b/numpy/_core/src/common/blas_utils.h index 8c1437f88899..34d6321c2920 100644 --- a/numpy/_core/src/common/blas_utils.h +++ b/numpy/_core/src/common/blas_utils.h @@ -1,7 +1,7 @@ -#include - #include "numpy/numpyconfig.h" // for NPY_VISIBILITY_HIDDEN +#include + /* NPY_BLAS_CHECK_FPE_SUPPORT controls whether we need a runtime check * for floating-point error (FPE) support in BLAS. 
*/ diff --git a/numpy/_core/src/multiarray/array_method.c b/numpy/_core/src/multiarray/array_method.c index 755735ede290..a0a6c3fda7d6 100644 --- a/numpy/_core/src/multiarray/array_method.c +++ b/numpy/_core/src/multiarray/array_method.c @@ -30,8 +30,8 @@ #define _UMATHMODULE #define _MULTIARRAYMODULE -#include #include +#include #include "arrayobject.h" #include "array_coercion.h" #include "array_method.h" diff --git a/numpy/_core/src/multiarray/common.h b/numpy/_core/src/multiarray/common.h index a18f74bda71a..d6b9ad36588f 100644 --- a/numpy/_core/src/multiarray/common.h +++ b/numpy/_core/src/multiarray/common.h @@ -1,6 +1,8 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_COMMON_H_ #define NUMPY_CORE_SRC_MULTIARRAY_COMMON_H_ +#include + #include #include "numpy/npy_common.h" #include "numpy/ndarraytypes.h" diff --git a/numpy/_core/src/multiarray/textreading/conversions.h b/numpy/_core/src/multiarray/textreading/conversions.h index 09f2510413b5..e30b28a9a7af 100644 --- a/numpy/_core/src/multiarray/textreading/conversions.h +++ b/numpy/_core/src/multiarray/textreading/conversions.h @@ -1,12 +1,12 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_TEXTREADING_CONVERSIONS_H_ #define NUMPY_CORE_SRC_MULTIARRAY_TEXTREADING_CONVERSIONS_H_ -#include - #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE #include "numpy/arrayobject.h" +#include + #include "textreading/parser_config.h" NPY_NO_EXPORT int diff --git a/numpy/_core/src/npysort/npysort_common.h b/numpy/_core/src/npysort/npysort_common.h index 0680ae52afe3..8b7e0ef43f88 100644 --- a/numpy/_core/src/npysort/npysort_common.h +++ b/numpy/_core/src/npysort/npysort_common.h @@ -1,8 +1,8 @@ #ifndef __NPY_SORT_COMMON_H__ #define __NPY_SORT_COMMON_H__ -#include #include +#include #include #include "dtypemeta.h" diff --git a/numpy/_core/src/umath/clip.cpp b/numpy/_core/src/umath/clip.cpp index e051692c6d48..127b019ef8ae 100644 --- a/numpy/_core/src/umath/clip.cpp +++ b/numpy/_core/src/umath/clip.cpp @@ -1,7 +1,6 @@ /** * This module provides the inner loops for the clip ufunc */ -#include #define _UMATHMODULE #define _MULTIARRAYMODULE @@ -10,6 +9,7 @@ #define PY_SSIZE_T_CLEAN #include +#include #include "numpy/halffloat.h" #include "numpy/ndarraytypes.h" #include "numpy/npy_common.h" diff --git a/numpy/random/src/mt19937/randomkit.c b/numpy/random/src/mt19937/randomkit.c index 32f40fa49cc1..21d270234c9a 100644 --- a/numpy/random/src/mt19937/randomkit.c +++ b/numpy/random/src/mt19937/randomkit.c @@ -62,6 +62,8 @@ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +#include + /* static char const rcsid[] = "@(#) $Jeannot: randomkit.c,v 1.28 2005/07/21 22:14:09 js Exp $"; */ diff --git a/tools/check_python_h_first.py b/tools/check_python_h_first.py new file mode 100755 index 000000000000..c0d44ad635f4 --- /dev/null +++ b/tools/check_python_h_first.py @@ -0,0 +1,254 @@ +#!/usr/bin/env python +"""Check that Python.h is included before any stdlib headers. + +May be a bit overzealous, but it should get the job done. 
+""" +import argparse +import fnmatch +import os.path +import re +import subprocess +import sys + +from get_submodule_paths import get_submodule_paths + +HEADER_PATTERN = re.compile( + r'^\s*#\s*include\s*[<"]((?:\w+/)*\w+(?:\.h[hp+]{0,2})?)[>"]\s*$' +) + +PYTHON_INCLUDING_HEADERS = [ + "Python.h", + # This isn't all of Python.h, but it is the visibility macros + "pyconfig.h", + "numpy/npy_common.h", + "numpy/npy_math.h", + "numpy/arrayobject.h", + "numpy/ndarrayobject.h", + "numpy/ndarraytypes.h", + "numpy/random/distributions.h", + "npy_sort.h", + "npy_config.h", + "common.h", + "npy_cpu_features.h", + # Boost::Python + "boost/python.hpp", +] +LEAF_HEADERS = [ + "numpy/numpyconfig.h", + "numpy/npy_os.h", + "numpy/npy_cpu.h", + "numpy/utils.h", +] + +C_CPP_EXTENSIONS = (".c", ".h", ".cpp", ".hpp", ".cc", ".hh", ".cxx", ".hxx") +# check against list in diff_files + +PARSER = argparse.ArgumentParser(description=__doc__) +PARSER.add_argument( + "files", + nargs="*", + help="Lint these files or directories; use **/*.c to lint all files\n" + "Expects relative paths", +) + + +def check_python_h_included_first(name_to_check: str) -> int: + """Check that the passed file includes Python.h first if it does at all. + + Perhaps overzealous, but that should work around concerns with + recursion. + + Parameters + ---------- + name_to_check : str + The name of the file to check. + + Returns + ------- + int + The number of headers before Python.h + """ + included_python = False + included_non_python_header = [] + warned_python_construct = False + basename_to_check = os.path.basename(name_to_check) + in_comment = False + includes_headers = False + with open(name_to_check) as in_file: + for i, line in enumerate(in_file, 1): + # Very basic comment parsing + # Assumes /*...*/ comments are on their own lines + if "/*" in line: + if "*/" not in line: + in_comment = True + # else-branch could use regex to remove comment and continue + continue + if in_comment: + if "*/" in line: + in_comment = False + continue + line = line.split("//", 1)[0].strip() + match = HEADER_PATTERN.match(line) + if match: + includes_headers = True + this_header = match.group(1) + if this_header in PYTHON_INCLUDING_HEADERS: + if included_non_python_header and not included_python: + # Headers before python-including header + print( + f"Header before Python.h in file {name_to_check:s}\n" + f"Python.h on line {i:d}, other header(s) on line(s)" + f" {included_non_python_header}", + file=sys.stderr, + ) + # else: # no headers before python-including header + included_python = True + PYTHON_INCLUDING_HEADERS.append(basename_to_check) + if os.path.dirname(name_to_check).endswith("include/numpy"): + PYTHON_INCLUDING_HEADERS.append(f"numpy/{basename_to_check:s}") + # We just found out where Python.h comes in this file + break + elif this_header in LEAF_HEADERS: + # This header is just defines, so it won't include + # the system headers that cause problems + continue + elif not included_python and ( + "numpy/" in this_header + and this_header not in LEAF_HEADERS + or "python" in this_header.lower() + ): + print( + f"Python.h not included before python-including header " + f"in file {name_to_check:s}\n" + f"{this_header:s} on line {i:d}", + file=sys.stderr, + ) + included_python = True + PYTHON_INCLUDING_HEADERS.append(basename_to_check) + elif not included_python and this_header not in LEAF_HEADERS: + included_non_python_header.append(i) + elif ( + not included_python + and not warned_python_construct + and ".h" not in basename_to_check + ) and 
("py::" in line or "PYBIND11_" in line): + print( + "Python-including header not used before python constructs " + f"in file {name_to_check:s}\nConstruct on line {i:d}", + file=sys.stderr, + ) + warned_python_construct = True + if not includes_headers: + LEAF_HEADERS.append(basename_to_check) + return included_python and len(included_non_python_header) + + +def sort_order(path: str) -> tuple[int, str]: + if "include/numpy" in path: + # Want to process numpy/*.h first, to work out which of those + # include Python.h directly + priority = 0x00 + elif "h" in os.path.splitext(path)[1].lower(): + # Then other headers, which tend to include numpy/*.h + priority = 0x10 + else: + # Source files after headers, to give the best chance of + # properly checking whether they include Python.h + priority = 0x20 + if "common" in path: + priority -= 8 + path_basename = os.path.basename(path) + if path_basename.startswith("npy_"): + priority -= 4 + elif path_basename.startswith("npy"): + priority -= 3 + elif path_basename.startswith("np"): + priority -= 2 + if "config" in path_basename: + priority -= 1 + return priority, path + + +def process_files(file_list: list[str]) -> int: + n_out_of_order = 0 + submodule_paths = get_submodule_paths() + root_directory = os.path.dirname(os.path.dirname(__file__)) + for name_to_check in sorted(file_list, key=sort_order): + name_to_check = os.path.join(root_directory, name_to_check) + if any(submodule_path in name_to_check for submodule_path in submodule_paths): + continue + if ".dispatch." in name_to_check: + continue + try: + n_out_of_order += check_python_h_included_first(name_to_check) + except UnicodeDecodeError: + print(f"File {name_to_check:s} not utf-8", sys.stdout) + return n_out_of_order + + +def find_c_cpp_files(root: str) -> list[str]: + + result = [] + + for dirpath, dirnames, filenames in os.walk(root): + # I'm assuming other people have checked boost + for name in ("build", ".git", "boost"): + try: + dirnames.remove(name) + except ValueError: + pass + for name in fnmatch.filter(dirnames, "*.p"): + dirnames.remove(name) + result.extend( + [ + os.path.join(dirpath, name) + for name in filenames + if os.path.splitext(name)[1].lower() in C_CPP_EXTENSIONS + ] + ) + # Check the headers before the source files + result.sort(key=lambda path: "h" in os.path.splitext(path)[1], reverse=True) + return result + + +def diff_files(sha: str) -> list[str]: + """Find the diff since the given SHA. 
+ + Adapted from lint.py + """ + res = subprocess.run( + [ + "git", + "diff", + "--name-only", + "--diff-filter=ACMR", + "-z", + sha, + "--", + # Check against C_CPP_EXTENSIONS + "*.[chCH]", + "*.[ch]pp", + "*.[ch]xx", + "*.cc", + "*.hh", + ], + stdout=subprocess.PIPE, + encoding="utf-8", + ) + res.check_returncode() + return [f for f in res.stdout.split("\0") if f] + + +if __name__ == "__main__": + args = PARSER.parse_args() + + if len(args.files) == 0: + files = find_c_cpp_files("numpy") + else: + files = args.files + if len(files) == 1 and os.path.isdir(files[0]): + files = find_c_cpp_files(files[0]) + + # See which of the headers include Python.h and add them to the list + n_out_of_order = process_files(files) + sys.exit(n_out_of_order) diff --git a/tools/get_submodule_paths.py b/tools/get_submodule_paths.py new file mode 100644 index 000000000000..abab86140712 --- /dev/null +++ b/tools/get_submodule_paths.py @@ -0,0 +1,31 @@ +import glob +import os.path + + +def get_submodule_paths(): + ''' + Get paths to submodules so that we can exclude them from things like + check_test_name.py, check_unicode.py, etc. + ''' + root_directory = os.path.dirname(os.path.dirname(__file__)) + gitmodule_file = os.path.join(root_directory, '.gitmodules') + with open(gitmodule_file) as gitmodules: + data = gitmodules.read().split('\n') + submodule_paths = [datum.split(' = ')[1] for datum in data if + datum.startswith('\tpath = ')] + submodule_paths = [os.path.join(root_directory, path) for path in + submodule_paths] + # vendored with a script rather than via gitmodules + with open( + os.path.join(root_directory, ".gitattributes"), "r" + ) as attr_file: + for line in attr_file: + if "vendored" in line: + pattern = line.split(" ", 1)[0] + submodule_paths.extend(glob.glob(pattern)) + + return submodule_paths + + +if __name__ == "__main__": + print('\n'.join(get_submodule_paths())) From d6b325ca8252db865bd631624b8d0097aa7b4870 Mon Sep 17 00:00:00 2001 From: mattip Date: Thu, 18 Sep 2025 18:46:09 +0300 Subject: [PATCH 0473/1018] DOC: tweak colons (from review) [skip actions][skip azp][skip cirrus] --- doc/source/user/basics.types.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/user/basics.types.rst b/doc/source/user/basics.types.rst index 41c100e519db..8145f6adb93b 100644 --- a/doc/source/user/basics.types.rst +++ b/doc/source/user/basics.types.rst @@ -54,7 +54,7 @@ properties of the type, such as whether it is an integer:: >>> np.issubdtype(d, np.floating) False -To convert the type of an array, use the .astype() method. For example: :: +To convert the type of an array, use the .astype() method. For example:: >>> z.astype(np.float64) #doctest: +NORMALIZE_WHITESPACE array([0., 1., 2.]) @@ -70,7 +70,7 @@ Sometimes the conversion can overflow, for instance when converting a `numpy.int 300 to `numpy.int8`. NumPy follows C casting rules, so that value would overflow and become 44 ``(300 - 256)``. 
If you wish to avoid such overflows, you can specify that the overflow action fail by using ``same_value`` for the ``casting`` argument (see also -:ref:`overflow-errors`): :: +:ref:`overflow-errors`):: >>> z.astype(np.float64, casting="same_value") #doctest: +NORMALIZE_WHITESPACE array([0., 1., 2.]) From 8e392ebe2eda9f9cd904b2baca7a29fd115716e4 Mon Sep 17 00:00:00 2001 From: Aniket Singh Yadav Date: Fri, 19 Sep 2025 03:42:52 +0530 Subject: [PATCH 0474/1018] DOC: Update Meson build examples in usage.rst [skip actions] [skip azp] [skip cirrus] --- doc/source/f2py/index.rst | 67 ++------------------------------------- doc/source/f2py/usage.rst | 66 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 69 insertions(+), 64 deletions(-) diff --git a/doc/source/f2py/index.rst b/doc/source/f2py/index.rst index fb1c573d1269..31840e812379 100644 --- a/doc/source/f2py/index.rst +++ b/doc/source/f2py/index.rst @@ -45,73 +45,12 @@ end matches the NumPy version printed from ``python -m numpy.f2py``, then you can use the shorter version. If not, or if you cannot run ``f2py``, you should replace all calls to ``f2py`` mentioned in this guide with the longer version. -======================= -Using f2py with Meson -======================= - -Meson is a modern build system recommended for building Python extension modules, -especially starting with Python 3.12 and NumPy 2.x. Meson provides a robust and -maintainable way to build Fortran extensions with f2py. - -To build a Fortran extension using f2py and Meson, you can use Meson's `custom_target` -to invoke f2py and generate the extension module. The following minimal example -demonstrates how to do this: - -.. code-block:: meson - - # List your Fortran source files - fortran_sources = files('your_module.f90') - - # Find the Python installation - py = import('python').find_installation() - - # Create a custom target to build the extension with f2py - f2py_wrapper = custom_target( - 'your_module_wrapper', - output: 'your_module.so', - input: fortran_sources, - command: [ - py.full_path(), '-m', 'numpy.f2py', - '-c', '@INPUT@', '-m', 'your_module' - ] - ) - - # Install the built extension to the Python site-packages directory - install_data(f2py_wrapper, install_dir: py.site_packages_dir()) - -For more details and advanced usage, see the Meson build guide in -the user documentation or refer to SciPy's Meson build files for -real-world examples: https://github.com/scipy/scipy/tree/main/meson.build - -========================================== -Building NumPy ufunc Extensions with Meson -========================================== - -To build a NumPy ufunc extension (C API) using Meson, you can use the -following template: - -.. code-block:: meson - - # List your C source files - c_sources = files('your_ufunc_module.c') - - # Find the Python installation - py = import('python').find_installation() - - # Create an extension module - extension_module = py.extension_module( - 'your_ufunc_module', - c_sources, - dependencies: py.dependency(), - install: true - ) - -For more information on writing NumPy ufunc extensions, see the -official NumPy documentation: https://numpy.org/doc/stable/reference/c-api.ufunc.html +For Meson build examples, see :doc:`usage`. .. 
toctree:: :maxdepth: 3 - + + usage f2py-user f2py-reference windows/index diff --git a/doc/source/f2py/usage.rst b/doc/source/f2py/usage.rst index 635455fdb58a..c0fb6c0428df 100644 --- a/doc/source/f2py/usage.rst +++ b/doc/source/f2py/usage.rst @@ -327,3 +327,69 @@ resulting package to work, you need to create a file named ``__init__.py`` (in the same directory as ``add.pyf``). Notice the extension module is defined entirely in terms of the ``add.pyf`` and ``add.f`` files. The conversion of the .pyf file to a .c file is handled by `numpy.distutils`. + +=============================== +Building with Meson (Examples) +=============================== + +Using f2py with Meson +--------------------- + +Meson is a modern build system recommended for building Python extension +modules, especially starting with Python 3.12 and NumPy 2.x. Meson provides +a robust and maintainable way to build Fortran extensions with f2py. + +To build a Fortran extension using f2py and Meson, you can use Meson's +`custom_target` to invoke f2py and generate the extension module. The +following minimal example demonstrates how to do this: + +.. code-block:: meson + + # List your Fortran source files + fortran_sources = files('your_module.f90') + + # Find the Python installation + py = import('python').find_installation() + + # Create a custom target to build the extension with f2py + f2py_wrapper = custom_target( + 'your_module_wrapper', + output: 'your_module.so', + input: fortran_sources, + command: [ + py.full_path(), '-m', 'numpy.f2py', + '-c', '@INPUT@', '-m', 'your_module' + ] + ) + + # Install the built extension to the Python site-packages directory + install_data(f2py_wrapper, install_dir: py.site_packages_dir()) + +For more details and advanced usage, see the Meson build guide in the +user documentation or refer to SciPy's Meson build files for real-world +examples: https://github.com/scipy/scipy/tree/main/meson.build + +Building NumPy ufunc Extensions with Meson +------------------------------------------ + +To build a NumPy ufunc extension (C API) using Meson, you can use the +following template: + +.. code-block:: meson + + # List your C source files + c_sources = files('your_ufunc_module.c') + + # Find the Python installation + py = import('python').find_installation() + + # Create an extension module + extension_module = py.extension_module( + 'your_ufunc_module', + c_sources, + dependencies: py.dependency(), + install: true + ) + +For more information on writing NumPy ufunc extensions, see the official +NumPy documentation: https://numpy.org/doc/stable/reference/c-api.ufunc.html From 0aa8087067270a8ff94a21470c45022cfc942b35 Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Sat, 20 Sep 2025 05:24:54 -0700 Subject: [PATCH 0475/1018] DOC: Correct a typo in Troubleshooting guidelines [skip actions][skip azp][skip cirrus] (#29773) --- doc/source/user/troubleshooting-importerror.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/user/troubleshooting-importerror.rst b/doc/source/user/troubleshooting-importerror.rst index da456dd17e36..68ac4f939525 100644 --- a/doc/source/user/troubleshooting-importerror.rst +++ b/doc/source/user/troubleshooting-importerror.rst @@ -158,7 +158,7 @@ line that isn't inside NumPy to see which package has the incompatibility. Note your NumPy version and the version of the incompatible package to help you find the best solution. 
-There can be various reason for the incompatibility: +There can be various reasons for the incompatibility: * You have recently upgraded NumPy, most likely to NumPy 2, and the other module now also needs to be upgraded. (NumPy 2 was released in June 2024.) From a9bbd2352fc70b097954de6b898fe736b2381cbe Mon Sep 17 00:00:00 2001 From: cibarbia05 Date: Sat, 20 Sep 2025 13:35:43 -0400 Subject: [PATCH 0476/1018] BUG: np.setbufsize should raise ValueError for negative input np.setbufsize previously accepted negative values silently. This commit adds input validation so that a ValueError is raised when a negative buffer size is provided, and adds a regression test to cover this behavior. Closes #29651 --- numpy/_core/_ufunc_config.py | 3 +++ numpy/_core/tests/test_umath.py | 12 ++++++++++++ 2 files changed, 15 insertions(+) diff --git a/numpy/_core/_ufunc_config.py b/numpy/_core/_ufunc_config.py index 24abecd20652..0b1c7ba3743a 100644 --- a/numpy/_core/_ufunc_config.py +++ b/numpy/_core/_ufunc_config.py @@ -187,6 +187,9 @@ def setbufsize(size): 8192 """ + if size < 0: + raise ValueError("buffer size must be non-negative") + old = _get_extobj_dict()["bufsize"] extobj = _make_extobj(bufsize=size) _extobj_contextvar.set(extobj) diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index d8ed56c31b93..9ae0cffd5cc8 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -4708,6 +4708,18 @@ def test_reduceat(): np.setbufsize(ncu.UFUNC_BUFSIZE_DEFAULT) assert_array_almost_equal(h1, h2) +def test_negative_value_raises(): + with pytest.raises(ValueError, match="buffer size must be non-negative"): + np.setbufsize(-5) + + old = np.getbufsize() + try: + prev = np.setbufsize(4096) + assert prev == old + assert np.getbufsize() == 4096 + finally: + np.setbufsize(old) + def test_reduceat_empty(): """Reduceat should work with empty arrays""" indices = np.array([], 'i4') From 0934462b484de5b6d236e685327ceaccebbbff51 Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Sat, 20 Sep 2025 20:59:40 -0700 Subject: [PATCH 0477/1018] DOC: Link [CT] to cross-reference [skip actions][skip azp][skip cirrus] --- numpy/fft/_pocketfft.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/fft/_pocketfft.py b/numpy/fft/_pocketfft.py index c7f2f6a8bc3a..1ce7c76b8636 100644 --- a/numpy/fft/_pocketfft.py +++ b/numpy/fft/_pocketfft.py @@ -124,7 +124,7 @@ def fft(a, n=None, axis=-1, norm=None, out=None): This function computes the one-dimensional *n*-point discrete Fourier Transform (DFT) with the efficient Fast Fourier Transform (FFT) - algorithm [CT]. + algorithm [CT]_. Parameters ---------- From bcc9355a86994362c44550ea431e5c5588fa5b8d Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Sat, 20 Sep 2025 21:03:01 -0700 Subject: [PATCH 0478/1018] DOC: Add two more cross-reference links [skip actions][skip azp][skip cirrus] --- numpy/linalg/_linalg.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index 7e2f1ebf531b..8dde643781b8 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -2073,9 +2073,9 @@ def matrix_rank(A, tol=None, hermitian=False, *, rtol=None): The default threshold to detect rank deficiency is a test on the magnitude of the singular values of `A`. By default, we identify singular values less than ``S.max() * max(M, N) * eps`` as indicating rank deficiency - (with the symbols defined above). This is the algorithm MATLAB uses [1]. 
+ (with the symbols defined above). This is the algorithm MATLAB uses [1]_. It also appears in *Numerical recipes* in the discussion of SVD solutions - for linear least squares [2]. + for linear least squares [2]_. This default threshold is designed to detect rank deficiency accounting for the numerical errors of the SVD computation. Imagine that there From 0a54c50f06a940de0d73eb544eea26f5c819f1cc Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Sat, 20 Sep 2025 21:07:38 -0700 Subject: [PATCH 0479/1018] DOC: Three dots instead of two dots [skip actions][skip azp][skip cirrus] --- numpy/lib/_shape_base_impl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_shape_base_impl.py b/numpy/lib/_shape_base_impl.py index 8b200cd8daa4..d1e55a48d711 100644 --- a/numpy/lib/_shape_base_impl.py +++ b/numpy/lib/_shape_base_impl.py @@ -1110,7 +1110,7 @@ def kron(a, b): ----- The function assumes that the number of dimensions of `a` and `b` are the same, if necessary prepending the smallest with ones. - If ``a.shape = (r0,r1,..,rN)`` and ``b.shape = (s0,s1,...,sN)``, + If ``a.shape = (r0,r1,...,rN)`` and ``b.shape = (s0,s1,...,sN)``, the Kronecker product has shape ``(r0*s0, r1*s1, ..., rN*SN)``. The elements are products of elements from `a` and `b`, organized explicitly by:: From 366855733f44369f89778d113f350944b5760577 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 21 Sep 2025 11:43:55 +0200 Subject: [PATCH 0480/1018] TYP: fix and improve ``{f,i}info`` stubs in ``_core.getlimits`` --- numpy/__init__.pyi | 60 +--------- numpy/_core/getlimits.pyi | 113 ++++++++++++++++++- numpy/typing/tests/data/reveal/getlimits.pyi | 16 +-- 3 files changed, 127 insertions(+), 62 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index caa5b6f5a724..d452bff03fc3 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -121,10 +121,7 @@ from numpy._typing import ( # type: ignore[deprecated] _FloatingCodes, _ComplexFloatingCodes, _InexactCodes, - _NumberCodes, _CharacterCodes, - _FlexibleCodes, - _GenericCodes, # Ufuncs _UFunc_Nin1_Nout1, _UFunc_Nin2_Nout1, @@ -314,6 +311,10 @@ from numpy._core.einsumfunc import ( einsum, einsum_path, ) +from numpy._core.getlimits import ( + finfo, + iinfo, +) from numpy._core.multiarray import ( array, @@ -779,9 +780,7 @@ _ScalarT_co = TypeVar("_ScalarT_co", bound=generic, default=Any, covariant=True) _NumberT = TypeVar("_NumberT", bound=number) _InexactT = TypeVar("_InexactT", bound=inexact) _RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) -_FloatingT_co = TypeVar("_FloatingT_co", bound=floating, default=floating, covariant=True) _IntegerT = TypeVar("_IntegerT", bound=integer) -_IntegerT_co = TypeVar("_IntegerT_co", bound=integer, default=integer, covariant=True) _NonObjectScalarT = TypeVar("_NonObjectScalarT", bound=np.bool | number | flexible | datetime64 | timedelta64) _NBit = TypeVar("_NBit", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] @@ -944,7 +943,7 @@ _CastingKind: TypeAlias = L["no", "equiv", "safe", "same_kind", "same_value", "u _OrderKACF: TypeAlias = L["K", "A", "C", "F"] | None _OrderACF: TypeAlias = L["A", "C", "F"] | None -_OrderCF: TypeAlias = L["C", "F"] | None +_OrderCF: TypeAlias = L["C", "F"] | None # noqa: PYI047 _ModeKind: TypeAlias = L["raise", "wrap", "clip"] _PartitionKind: TypeAlias = L["introselect"] @@ -3577,7 +3576,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # See https://github.com/numpy/numpy-stubs/pull/80 for more details. 
class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @abstractmethod - def __new__(self) -> None: ... + def __new__(cls) -> None: ... def __hash__(self) -> int: ... @overload def __array__(self, dtype: None = None, /) -> ndarray[tuple[()], dtype[Self]]: ... @@ -5793,53 +5792,6 @@ class busdaycalendar: @property def holidays(self) -> NDArray[datetime64]: ... -class finfo(Generic[_FloatingT_co]): - dtype: Final[dtype[_FloatingT_co]] - bits: Final[int] - eps: Final[_FloatingT_co] - epsneg: Final[_FloatingT_co] - iexp: Final[int] - machep: Final[int] - max: Final[_FloatingT_co] - maxexp: Final[int] - min: Final[_FloatingT_co] - minexp: Final[int] - negep: Final[int] - nexp: Final[int] - nmant: Final[int] - precision: Final[int] - resolution: Final[_FloatingT_co] - smallest_subnormal: Final[_FloatingT_co] - @property - def smallest_normal(self) -> _FloatingT_co: ... - @property - def tiny(self) -> _FloatingT_co: ... - @overload - def __new__(cls, dtype: inexact[_NBit1] | _DTypeLike[inexact[_NBit1]]) -> finfo[floating[_NBit1]]: ... - @overload - def __new__(cls, dtype: complex | type[complex]) -> finfo[float64]: ... - @overload - def __new__(cls, dtype: str) -> finfo[floating]: ... - -class iinfo(Generic[_IntegerT_co]): - dtype: Final[dtype[_IntegerT_co]] - kind: Final[LiteralString] - bits: Final[int] - key: Final[LiteralString] - @property - def min(self) -> int: ... - @property - def max(self) -> int: ... - - @overload - def __new__( - cls, dtype: _IntegerT_co | _DTypeLike[_IntegerT_co] - ) -> iinfo[_IntegerT_co]: ... - @overload - def __new__(cls, dtype: int | type[int]) -> iinfo[int_]: ... - @overload - def __new__(cls, dtype: str) -> iinfo[Any]: ... - @final class nditer: def __new__( diff --git a/numpy/_core/getlimits.pyi b/numpy/_core/getlimits.pyi index 9d79b178f4dc..eb832f55bb7e 100644 --- a/numpy/_core/getlimits.pyi +++ b/numpy/_core/getlimits.pyi @@ -1,3 +1,114 @@ -from numpy import finfo, iinfo +from types import GenericAlias +from typing import Final, Generic, Self, overload +from typing_extensions import TypeVar + +import numpy as np +from numpy._typing import ( + _CLongDoubleCodes, + _Complex64Codes, + _Complex128Codes, + _DTypeLike, + _Float16Codes, + _Float32Codes, + _Float64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _IntPCodes, + _LongDoubleCodes, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, +) __all__ = ["finfo", "iinfo"] + +### + +_IntegerT_co = TypeVar("_IntegerT_co", bound=np.integer, default=np.integer, covariant=True) +_FloatingT_co = TypeVar("_FloatingT_co", bound=np.floating, default=np.floating, covariant=True) + +### + +class iinfo(Generic[_IntegerT_co]): + dtype: np.dtype[_IntegerT_co] + bits: Final[int] + kind: Final[str] + key: Final[str] + + @property + def min(self, /) -> int: ... + @property + def max(self, /) -> int: ... + + # + @overload + def __init__(self, /, int_type: _IntegerT_co | _DTypeLike[_IntegerT_co]) -> None: ... + @overload + def __init__(self: iinfo[np.int_], /, int_type: _IntPCodes | type[int] | int) -> None: ... + @overload + def __init__(self: iinfo[np.int8], /, int_type: _Int8Codes) -> None: ... + @overload + def __init__(self: iinfo[np.uint8], /, int_type: _UInt8Codes) -> None: ... + @overload + def __init__(self: iinfo[np.int16], /, int_type: _Int16Codes) -> None: ... + @overload + def __init__(self: iinfo[np.uint16], /, int_type: _UInt16Codes) -> None: ... + @overload + def __init__(self: iinfo[np.int32], /, int_type: _Int32Codes) -> None: ... 
+ @overload + def __init__(self: iinfo[np.uint32], /, int_type: _UInt32Codes) -> None: ... + @overload + def __init__(self: iinfo[np.int64], /, int_type: _Int64Codes) -> None: ... + @overload + def __init__(self: iinfo[np.uint64], /, int_type: _UInt64Codes) -> None: ... + @overload + def __init__(self, /, int_type: str) -> None: ... + + # + @classmethod + def __class_getitem__(cls, item: object, /) -> GenericAlias: ... + +class finfo(Generic[_FloatingT_co]): + dtype: np.dtype[_FloatingT_co] + eps: _FloatingT_co + epsneg: _FloatingT_co + resolution: _FloatingT_co + smallest_subnormal: _FloatingT_co + max: _FloatingT_co + min: _FloatingT_co + + bits: Final[int] + iexp: Final[int] + machep: Final[int] + maxexp: Final[int] + minexp: Final[int] + negep: Final[int] + nexp: Final[int] + nmant: Final[int] + precision: Final[int] + + @property + def smallest_normal(self, /) -> _FloatingT_co: ... + @property + def tiny(self, /) -> _FloatingT_co: ... + + # + @overload + def __new__(cls, dtype: _FloatingT_co | _DTypeLike[_FloatingT_co]) -> Self: ... + @overload + def __new__(cls, dtype: _Float16Codes) -> finfo[np.float16]: ... + @overload + def __new__(cls, dtype: _Float32Codes | _Complex64Codes | _DTypeLike[np.complex64]) -> finfo[np.float32]: ... + @overload + def __new__(cls, dtype: _Float64Codes | _Complex128Codes | type[complex] | complex) -> finfo[np.float64]: ... + @overload + def __new__(cls, dtype: _LongDoubleCodes | _CLongDoubleCodes | _DTypeLike[np.clongdouble]) -> finfo[np.longdouble]: ... + @overload + def __new__(cls, dtype: str) -> finfo: ... + + # + @classmethod + def __class_getitem__(cls, item: object, /) -> GenericAlias: ... diff --git a/numpy/typing/tests/data/reveal/getlimits.pyi b/numpy/typing/tests/data/reveal/getlimits.pyi index 3a1157121750..cc964d753055 100644 --- a/numpy/typing/tests/data/reveal/getlimits.pyi +++ b/numpy/typing/tests/data/reveal/getlimits.pyi @@ -1,11 +1,11 @@ -from typing import Any, LiteralString, assert_type +from typing import assert_type import numpy as np -from numpy._typing import _64Bit f: float f8: np.float64 c8: np.complex64 +c16: np.complex128 i: int i8: np.int64 @@ -15,9 +15,10 @@ finfo_f8: np.finfo[np.float64] iinfo_i8: np.iinfo[np.int64] assert_type(np.finfo(f), np.finfo[np.float64]) -assert_type(np.finfo(f8), np.finfo[np.floating[_64Bit]]) +assert_type(np.finfo(f8), np.finfo[np.float64]) assert_type(np.finfo(c8), np.finfo[np.float32]) -assert_type(np.finfo("f2"), np.finfo[np.floating]) +assert_type(np.finfo(c16), np.finfo[np.float64]) +assert_type(np.finfo("f2"), np.finfo[np.float16]) assert_type(finfo_f8.dtype, np.dtype[np.float64]) assert_type(finfo_f8.bits, int) @@ -41,11 +42,12 @@ assert_type(finfo_f8.smallest_subnormal, np.float64) assert_type(np.iinfo(i), np.iinfo[np.int_]) assert_type(np.iinfo(i8), np.iinfo[np.int64]) assert_type(np.iinfo(u4), np.iinfo[np.uint32]) -assert_type(np.iinfo("i2"), np.iinfo[Any]) +assert_type(np.iinfo("i2"), np.iinfo[np.int16]) +assert_type(np.iinfo("u2"), np.iinfo[np.uint16]) assert_type(iinfo_i8.dtype, np.dtype[np.int64]) -assert_type(iinfo_i8.kind, LiteralString) +assert_type(iinfo_i8.kind, str) assert_type(iinfo_i8.bits, int) -assert_type(iinfo_i8.key, LiteralString) +assert_type(iinfo_i8.key, str) assert_type(iinfo_i8.min, int) assert_type(iinfo_i8.max, int) From ba3dbe46604c0cf50310d1bdbfc3719fb00243ea Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Sun, 21 Sep 2025 12:54:12 -0400 Subject: [PATCH 0481/1018] BLD: Upgrade spin requirement to version 0.15 --- environment.yml | 2 +- 
requirements/build_requirements.txt | 2 +- requirements/ci32_requirements.txt | 2 +- requirements/ci_requirements.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/environment.yml b/environment.yml index 61eed13bfd67..fc171a285a9d 100644 --- a/environment.yml +++ b/environment.yml @@ -16,7 +16,7 @@ dependencies: - ninja - pkg-config - meson-python - - spin==0.13 + - spin==0.15 - ccache # For testing - pytest diff --git a/requirements/build_requirements.txt b/requirements/build_requirements.txt index 8bd2521933d7..1f6eb1435cfc 100644 --- a/requirements/build_requirements.txt +++ b/requirements/build_requirements.txt @@ -1,5 +1,5 @@ meson-python>=0.13.1 Cython>=3.0.6 ninja -spin==0.14 +spin==0.15 build diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index 10ac5f7ecc9f..4cb678d5d047 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,3 @@ -spin==0.14 +spin==0.15 # Keep this in sync with ci_requirements.txt scipy-openblas32==0.3.30.0.1 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index bd2f75ab8c1d..aab147bf8c17 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ -spin==0.14 +spin==0.15 # Keep this in sync with ci32_requirements.txt scipy-openblas32==0.3.30.0.1 scipy-openblas64==0.3.30.0.1 From 30a464b11da232e20d17d4b0dbdf8dc35644e71b Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Sun, 21 Sep 2025 13:45:39 -0400 Subject: [PATCH 0482/1018] CI: Upgrade spin before building numpy in the clang_TSAN job. --- .github/workflows/compiler_sanitizers.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index e91468b85068..2721d77eb4a2 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -90,6 +90,9 @@ jobs: - name: Uninstall pytest-xdist (conflicts with TSAN) run: pip uninstall -y pytest-xdist + - name: Upgrade spin (gh-29777) + run: pip install -U spin + - name: Build NumPy with ThreadSanitizer run: python -m spin build -- -Db_sanitize=thread From b5023dbfe796f6b3a08491c7e154e148014d2e3a Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Mon, 22 Sep 2025 10:17:30 +0300 Subject: [PATCH 0483/1018] remove whitespace --- numpy/_core/_ufunc_config.py | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/_core/_ufunc_config.py b/numpy/_core/_ufunc_config.py index 0b1c7ba3743a..b16147c18ee6 100644 --- a/numpy/_core/_ufunc_config.py +++ b/numpy/_core/_ufunc_config.py @@ -189,7 +189,6 @@ def setbufsize(size): """ if size < 0: raise ValueError("buffer size must be non-negative") - old = _get_extobj_dict()["bufsize"] extobj = _make_extobj(bufsize=size) _extobj_contextvar.set(extobj) From 19f2a720d867a944bc501e93fdeb6bcfb4be510d Mon Sep 17 00:00:00 2001 From: Sergey Fedorov Date: Mon, 22 Sep 2025 16:17:45 +0800 Subject: [PATCH 0484/1018] BUG: Fix pocketfft umath strides for AIX compatibility (#29768) * _pocketfft_umath.cpp: fix types Fixes: https://github.com/numpy/numpy/issues/29758 Credits to @seberg * _pocketfft_umath.cpp: fix unused variable warning * Move npts_in calculation into #ifndef --------- Co-authored-by: Sebastian Berg --- numpy/fft/_pocketfft_umath.cpp | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/numpy/fft/_pocketfft_umath.cpp b/numpy/fft/_pocketfft_umath.cpp index ab8af5aa522e..f616fe9b0bdc 100644 --- 
a/numpy/fft/_pocketfft_umath.cpp
+++ b/numpy/fft/_pocketfft_umath.cpp
@@ -32,7 +32,7 @@ template
 static void
 wrap_legacy_cpp_ufunc(char **args, npy_intp const *dimensions,
-                      ptrdiff_t const *steps, void *func)
+                      npy_intp const *steps, void *func)
 {
     NPY_ALLOW_C_API_DEF
     try {
@@ -86,14 +86,14 @@ copy_output(T buff[], char *out, npy_intp step_out, size_t n)
  */
 template
 static void
-fft_loop(char **args, npy_intp const *dimensions, ptrdiff_t const *steps,
+fft_loop(char **args, npy_intp const *dimensions, npy_intp const *steps,
          void *func)
 {
     char *ip = args[0], *fp = args[1], *op = args[2];
     size_t n_outer = (size_t)dimensions[0];
-    ptrdiff_t si = steps[0], sf = steps[1], so = steps[2];
+    npy_intp si = steps[0], sf = steps[1], so = steps[2];
     size_t nin = (size_t)dimensions[1], nout = (size_t)dimensions[2];
-    ptrdiff_t step_in = steps[3], step_out = steps[4];
+    npy_intp step_in = steps[3], step_out = steps[4];
     bool direction = *((bool *)func); /* pocketfft::FORWARD or BACKWARD */
 
     assert (nout > 0);
@@ -144,9 +144,9 @@ rfft_impl(char **args, npy_intp const *dimensions, npy_intp const *steps,
 {
     char *ip = args[0], *fp = args[1], *op = args[2];
     size_t n_outer = (size_t)dimensions[0];
-    ptrdiff_t si = steps[0], sf = steps[1], so = steps[2];
+    npy_intp si = steps[0], sf = steps[1], so = steps[2];
     size_t nin = (size_t)dimensions[1], nout = (size_t)dimensions[2];
-    ptrdiff_t step_in = steps[3], step_out = steps[4];
+    npy_intp step_in = steps[3], step_out = steps[4];
 
     assert (nout > 0 && nout == npts / 2 + 1);
 
@@ -233,14 +233,13 @@ irfft_loop(char **args, npy_intp const *dimensions, npy_intp const *steps, void
     size_t nin = (size_t)dimensions[1], nout = (size_t)dimensions[2];
     ptrdiff_t step_in = steps[3], step_out = steps[4];
 
-    size_t npts_in = nout / 2 + 1;
-
     assert(nout > 0);
 
 #ifndef POCKETFFT_NO_VECTORS
     /*
      * Call pocketfft directly if vectorization is possible.
      */
+    size_t npts_in = nout / 2 + 1;
     constexpr auto vlen = pocketfft::detail::VLEN<T>::val;
     if (vlen > 1 && n_outer >= vlen && nin >= npts_in && sf == 0) {
         std::vector<size_t> axes = { 1 };
From 055bcb38207d98241c6d081a3a31d1553c913ecc Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Sat, 6 Sep 2025 11:34:49 +0200
Subject: [PATCH 0485/1018] BUG: Fix assert in nditer buffer setup

When using a buffered iteration, there was a new assert to check that
strides were set up nicely to ensure that we are not missing dimension
coalescing.
However, when the strides are set up to track an index, they were still
set to 0 for length-1 dimensions.

I _think_ that is just unnecessary (i.e. the assert is correct to point
it out). But, let's do this first for backporting at least.

Signed-off-by: Sebastian Berg
---
 numpy/_core/src/multiarray/nditer_constr.c | 10 ++++++++--
 numpy/lib/tests/test_index_tricks.py       |  5 +++++
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/numpy/_core/src/multiarray/nditer_constr.c b/numpy/_core/src/multiarray/nditer_constr.c
index 3ed5cf1a0245..a8f13a73ee1e 100644
--- a/numpy/_core/src/multiarray/nditer_constr.c
+++ b/numpy/_core/src/multiarray/nditer_constr.c
@@ -2077,8 +2077,14 @@ npyiter_find_buffering_setup(NpyIter *iter, npy_intp buffersize)
         npy_intp *strides = NAD_STRIDES(axisdata);
 
         for (int iop = 0; iop < nop; iop++) {
-            /* Check that we set things up nicely (if shape is ever 1) */
-            assert((axisdata->shape == 1) ? (prev_strides[iop] == strides[iop]) : 1);
+            /*
+             * Check that we set things up nicely so strides coalesce. Except
+             * for index operands, which currently disrupt coalescing.
+ * NOTE(seberg): presumably `npyiter_compute_index_strides` should + * not set the strides to 0, but this was safer for backporting. + */ + assert((axisdata->shape != 1) || (prev_strides[iop] == strides[iop]) + || (op_itflags[iop] & (NPY_ITER_C_INDEX|NPY_ITER_F_INDEX))); if (op_single_stride_dims[iop] == idim) { /* Best case: the strides still collapse for this operand. */ diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py index 387fdfec28f1..81e47ec3dff2 100644 --- a/numpy/lib/tests/test_index_tricks.py +++ b/numpy/lib/tests/test_index_tricks.py @@ -199,6 +199,11 @@ def test_empty_array_unravel(self): with assert_raises(ValueError): np.unravel_index([1], (2, 1, 0)) + def test_regression_size_1_index(self): + # actually tests the nditer size one index tracking + # regression test for gh-29690 + np.unravel_index(np.array([[1, 0, 1, 0]], dtype=np.uint32), (4,)) + class TestGrid: def test_basic(self): a = mgrid[-1:1:10j] From ef2b624fff9074480744ae9152552b6821fdc187 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 23 Sep 2025 17:06:32 +0000 Subject: [PATCH 0486/1018] MAINT: Bump pypa/cibuildwheel from 3.1.4 to 3.2.0 Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 3.1.4 to 3.2.0. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/c923d83ad9c1bc00211c5041d0c3f73294ff88f6...7c619efba910c04005a835b110b057fc28fd6e93) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-version: 3.2.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index c5f770c9d785..5af18aa14107 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -30,7 +30,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@c923d83ad9c1bc00211c5041d0c3f73294ff88f6 # v3.1.4 + - uses: pypa/cibuildwheel@7c619efba910c04005a835b110b057fc28fd6e93 # v3.2.0 env: CIBW_PLATFORM: pyodide CIBW_BUILD: cp312-* diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 1885577f7a12..8e8c8768cd9c 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -98,7 +98,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@c923d83ad9c1bc00211c5041d0c3f73294ff88f6 # v3.1.4 + uses: pypa/cibuildwheel@7c619efba910c04005a835b110b057fc28fd6e93 # v3.2.0 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From 0e27997fcf31f0e138a110fbab5640b74672b524 Mon Sep 17 00:00:00 2001 From: "Christine P. 
Chai" Date: Tue, 23 Sep 2025 18:40:16 -0700 Subject: [PATCH 0487/1018] DOC: Add cross-reference to Glantz (2002) [skip actions][skip azp][skip cirrus] --- numpy/random/_generator.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index c1b78ce5b97e..b2ac08cd6ebb 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -1521,7 +1521,7 @@ cdef class Generator: Examples -------- - An example from Glantz[1], pp 47-40: + An example from Glantz [1]_, pp 47-40: Two groups, children of diabetics (25 people) and children from people without diabetes (25 controls). Fasting blood glucose was measured, From e456a3b2de3cb6204d367e60edb50fc5dbc3d59a Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Tue, 23 Sep 2025 18:42:31 -0700 Subject: [PATCH 0488/1018] DOC: Improve formatting of random.Generator.logseries [skip actions][skip azp][skip cirrus] --- numpy/random/_generator.pyx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index b2ac08cd6ebb..fa35d43f4f66 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -3670,8 +3670,8 @@ cdef class Generator: The log series distribution is frequently used to represent species richness and occurrence, first proposed by Fisher, Corbet, and - Williams in 1943 [2]. It may also be used to model the numbers of - occupants seen in cars [3]. + Williams in 1943 [2]_. It may also be used to model the numbers of + occupants seen in cars [3]_. References ---------- @@ -3699,7 +3699,7 @@ cdef class Generator: >>> bins = np.arange(-.5, max(s) + .5 ) >>> count, bins, _ = plt.hist(s, bins=bins, label='Sample count') - # plot against distribution + Plot against the distribution: >>> def logseries(k, p): ... return -p**k/(k*np.log(1-p)) From 04a17d8e7edee511fe968598f0bfa5a81d5476ca Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Tue, 23 Sep 2025 18:43:21 -0700 Subject: [PATCH 0489/1018] DOC: Correct a math typo [skip actions][skip azp][skip cirrus] --- numpy/random/_generator.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index fa35d43f4f66..0fabf98147c3 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -3773,7 +3773,7 @@ cdef class Generator: Covariance indicates the level to which two variables vary together. From the multivariate normal distribution, we draw N-dimensional - samples, :math:`X = [x_1, x_2, ... x_N]`. The covariance matrix + samples, :math:`X = [x_1, x_2, ..., x_N]`. The covariance matrix element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance of :math:`x_i` (i.e. its "spread"). From 09abc4fe404daa3c52147199c3d97fb585db7bf4 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Wed, 24 Sep 2025 06:22:39 -0400 Subject: [PATCH 0490/1018] MAINT: Remove xfail and deprecation filter from a test. 
(#29802) --- numpy/_core/tests/test_numeric.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index db5269c507d5..fba8e8abbe6d 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -2835,16 +2835,12 @@ def test_clip_scalar_nan_propagation(self, arr, amin, amax): actual = np.clip(arr, amin, amax) assert_equal(actual, expected) - @pytest.mark.xfail(reason="propagation doesn't match spec") @pytest.mark.parametrize("arr, amin, amax", [ (np.array([1] * 10, dtype='m8'), np.timedelta64('NaT'), np.zeros(10, dtype=np.int32)), ]) - @pytest.mark.filterwarnings("ignore::DeprecationWarning") def test_NaT_propagation(self, arr, amin, amax): - # NOTE: the expected function spec doesn't - # propagate NaT, but clip() now does expected = np.minimum(np.maximum(arr, amin), amax) actual = np.clip(arr, amin, amax) assert_equal(actual, expected) From 43c52d791ffd628a2be8e6ee281e8857caa4c5c3 Mon Sep 17 00:00:00 2001 From: Jake Vanderplas Date: Wed, 24 Sep 2025 05:47:26 -0700 Subject: [PATCH 0491/1018] TST: clarify logic in float_alias_names test (#29796) Co-authored-by: Sebastian Berg --- numpy/_core/tests/test_dtype.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index d9bd17c48434..175680451f3e 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -1687,9 +1687,8 @@ def test_integer_alias_names(self, int_, size): @pytest.mark.parametrize("name", ["Half", "Float", "Double", "CFloat", "CDouble"]) - def test_float_alias_names(self, name): - with pytest.raises(AttributeError): - getattr(numpy.dtypes, name + "DType") is numpy.dtypes.Float16DType + def test_float_alias_names_not_present(self, name): + assert not hasattr(numpy.dtypes, f"{name}DType") def test_scalar_helper_all_dtypes(self): for dtype in np.dtypes.__all__: From fe312b05d2c60a4d79842ff11c56b39b102b876f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 24 Sep 2025 17:05:38 +0000 Subject: [PATCH 0492/1018] MAINT: Bump actions/cache from 4.2.4 to 4.3.0 Bumps [actions/cache](https://github.com/actions/cache) from 4.2.4 to 4.3.0. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/0400d5f644dc74513175e3cd8d07132dd4860809...0057852bfaa89a56745cba8c7296529d2fc39830) --- updated-dependencies: - dependency-name: actions/cache dependency-version: 4.3.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot]
---
 .github/workflows/linux_qemu.yml | 4 ++--
 .github/workflows/macos.yml      | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml
index 53780ae097a3..e0767d81213e 100644
--- a/.github/workflows/linux_qemu.yml
+++ b/.github/workflows/linux_qemu.yml
@@ -108,7 +108,7 @@ jobs:
               sudo apt install -y ninja-build gcc-${TOOLCHAIN_NAME} g++-${TOOLCHAIN_NAME} gfortran-${TOOLCHAIN_NAME}

       - name: Cache docker container
-        uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
+        uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
         id: container-cache
         with:
           path: ~/docker_${{ matrix.BUILD_PROP[1] }}
@@ -213,7 +213,7 @@ jobs:
               sudo apt install -y ninja-build gcc-14-${TOOLCHAIN_NAME} g++-14-${TOOLCHAIN_NAME} gfortran-14-${TOOLCHAIN_NAME}

       - name: Cache docker container
-        uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
+        uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
         id: container-cache
         with:
           path: ~/docker_${{ matrix.BUILD_PROP[1] }}
diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml
index 97ef55fb0228..ef9e86f6ba70 100644
--- a/.github/workflows/macos.yml
+++ b/.github/workflows/macos.yml
@@ -46,7 +46,7 @@ jobs:
         echo "today=$(/bin/date -u '+%Y%m%d')" >> $GITHUB_OUTPUT

     - name: Setup compiler cache
-      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
+      uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
       id: cache-ccache
       with:
         path: ${{ steps.prep-ccache.outputs.dir }}
@@ -70,7 +70,7 @@ jobs:
     # ensure we re-solve once a day (since we don't lock versions). Could be
     # replaced by a conda-lock based approach in the future.
     - name: Cache conda environment
-      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
+      uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
       env:
         # Increase this value to reset cache if environment.yml has not changed
         CACHE_NUMBER: 1

From 869138f8f178f25a44f2d47efbed1f02479198c4 Mon Sep 17 00:00:00 2001
From: Matti Picus
Date: Thu, 25 Sep 2025 14:06:24 +0300
Subject: [PATCH 0493/1018] BLD: refactor to avoid 'unused function' warnings

---
 .../multiarray/lowlevel_strided_loops.c.src  | 104 +++++++++---------
 1 file changed, 53 insertions(+), 51 deletions(-)

diff --git a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src
index 13a126c53850..e75dd37cc465 100644
--- a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src
+++ b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src
@@ -79,57 +79,6 @@ a = (x)[7]; (x)[7] = (x)[8]; (x)[8] = a; \
 }

-/* half-to-double, copied from CPP to allow inlining */
-
-static NPY_GCC_OPT_3
-uint64_t ToDoubleBits(uint16_t h)
-{
-    uint16_t h_exp = (h&0x7c00u);
-    uint64_t d_sgn = ((uint64_t)h&0x8000u) << 48;
-    switch (h_exp) {
-        case 0x0000u: { // 0 or subnormal
-            uint16_t h_sig = (h&0x03ffu);
-            // Signed zero
-            if (h_sig == 0) {
-                return d_sgn;
-            }
-            // Subnormal
-            h_sig <<= 1;
-            while ((h_sig&0x0400u) == 0) {
-                h_sig <<= 1;
-                h_exp++;
-            }
-            uint64_t d_exp = ((uint64_t)(1023 - 15 - h_exp)) << 52;
-            uint64_t d_sig = ((uint64_t)(h_sig&0x03ffu)) << 42;
-            return d_sgn + d_exp + d_sig;
-        }
-        case 0x7c00u: // inf or NaN
-            // All-ones exponent and a copy of the significand
-            return d_sgn + 0x7ff0000000000000ULL + (((uint64_t)(h&0x03ffu)) << 42);
-        default: // normalized
-            // Just need to adjust the exponent and shift
-            return d_sgn + 
(((uint64_t)(h&0x7fffu) + 0xfc000u) << 42); - } -} - -NPY_FINLINE -npy_uint64 _npy_halfbits_to_doublebits(npy_uint16 h){ - /* - * Use npymath versions for all the special cases, only inline the - * x86_64 non-intrinsic case. Someday we will rewrite this in CPP and - * can then explore inlining more - */ - #if defined(NPY_HAVE_AVX512FP16) - return npy_halfbits_to_doublebits(h); - #elif defined(NPY_HAVE_VSX3) && defined(NPY_HAVE_VSX3_HALF_DOUBLE) - return npy_halfbits_to_doublebits(h); - #elif defined(__ARM_FP16_FORMAT_IEEE) - return npy_halfbits_to_doublebits(h); - #else - return (double)(ToDoubleBits(h)); - #endif -} - /************* STRIDED COPYING/SWAPPING SPECIALIZED FUNCTIONS *************/ /**begin repeat @@ -767,6 +716,59 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * typedef npy_half _npy_half; #endif +#if EMULATED_FP16 +/* half-to-double, copied from CPP to allow inlining */ + +static NPY_GCC_OPT_3 +uint64_t ToDoubleBits(uint16_t h) +{ + uint16_t h_exp = (h&0x7c00u); + uint64_t d_sgn = ((uint64_t)h&0x8000u) << 48; + switch (h_exp) { + case 0x0000u: { // 0 or subnormal + uint16_t h_sig = (h&0x03ffu); + // Signed zero + if (h_sig == 0) { + return d_sgn; + } + // Subnormal + h_sig <<= 1; + while ((h_sig&0x0400u) == 0) { + h_sig <<= 1; + h_exp++; + } + uint64_t d_exp = ((uint64_t)(1023 - 15 - h_exp)) << 52; + uint64_t d_sig = ((uint64_t)(h_sig&0x03ffu)) << 42; + return d_sgn + d_exp + d_sig; + } + case 0x7c00u: // inf or NaN + // All-ones exponent and a copy of the significand + return d_sgn + 0x7ff0000000000000ULL + (((uint64_t)(h&0x03ffu)) << 42); + default: // normalized + // Just need to adjust the exponent and shift + return d_sgn + (((uint64_t)(h&0x7fffu) + 0xfc000u) << 42); + } +} + +NPY_FINLINE +npy_uint64 _npy_halfbits_to_doublebits(npy_uint16 h){ + /* + * Use npymath versions for all the special cases, only inline the + * x86_64 non-intrinsic case. Someday we will rewrite this in CPP and + * can then explore inlining more + */ + #if defined(NPY_HAVE_AVX512FP16) + return npy_halfbits_to_doublebits(h); + #elif defined(NPY_HAVE_VSX3) && defined(NPY_HAVE_VSX3_HALF_DOUBLE) + return npy_halfbits_to_doublebits(h); + #elif defined(__ARM_FP16_FORMAT_IEEE) + return npy_halfbits_to_doublebits(h); + #else + return (double)(ToDoubleBits(h)); + #endif +} +#endif + /**begin repeat * * #NAME1 = BOOL, From 92ab40bed3cdc020239e2a0d0ed116cde292d03d Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Thu, 25 Sep 2025 14:32:31 +0200 Subject: [PATCH 0494/1018] BUG: linalg: emit a MemoryError on a malloc failure (#29811) Otherwise, a malloc failure in `init_gesv(...)` etc. is not acted upon, and the python return value is silently wrong. 
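
A Python-level sketch of the intended contract (illustrative only: the size
below is arbitrary, portably forcing the internal workspace malloc to fail is
not practical, and NumPy may refuse the allocation outright, hence the
ValueError in the except clause):

    import numpy as np

    try:
        # ~8 TB of float64; some allocation along the way will fail
        np.linalg.slogdet(np.eye(1_000_000))
    except (MemoryError, ValueError):
        print("allocation failure is reported instead of being swallowed")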
--- numpy/linalg/umath_linalg.cpp | 109 +++++++++++++++++++++++----------- 1 file changed, 73 insertions(+), 36 deletions(-) diff --git a/numpy/linalg/umath_linalg.cpp b/numpy/linalg/umath_linalg.cpp index 1b6850145bc8..4845b1261aca 100644 --- a/numpy/linalg/umath_linalg.cpp +++ b/numpy/linalg/umath_linalg.cpp @@ -448,6 +448,15 @@ set_fp_invalid_or_clear(int error_occurred) } } +static inline void +report_no_memory() +{ + NPY_ALLOW_C_API_DEF + NPY_ALLOW_C_API; + PyErr_NoMemory(); + NPY_DISABLE_C_API; +} + /* ***************************************************************************** ** Some handy constants ** @@ -1199,10 +1208,7 @@ slogdet(char **args, } else { /* TODO: Requires use of new ufunc API to indicate error return */ - NPY_ALLOW_C_API_DEF - NPY_ALLOW_C_API; - PyErr_NoMemory(); - NPY_DISABLE_C_API; + report_no_memory(); } } @@ -1255,10 +1261,7 @@ det(char **args, } else { /* TODO: Requires use of new ufunc API to indicate error return */ - NPY_ALLOW_C_API_DEF - NPY_ALLOW_C_API; - PyErr_NoMemory(); - NPY_DISABLE_C_API; + report_no_memory(); } } @@ -1331,7 +1334,7 @@ init_evd(EIGH_PARAMS_t* params, char JOBZ, char UPLO, mem_buff = (npy_uint8 *)malloc(alloc_size); if (!mem_buff) { - goto error; + goto no_memory; } a = mem_buff; w = mem_buff + safe_N * safe_N * sizeof(typ); @@ -1365,7 +1368,7 @@ init_evd(EIGH_PARAMS_t* params, char JOBZ, char UPLO, mem_buff2 = (npy_uint8 *)malloc(lwork*sizeof(typ) + liwork*sizeof(fortran_int)); if (!mem_buff2) { - goto error; + goto no_memory; } work = mem_buff2; @@ -1378,6 +1381,9 @@ init_evd(EIGH_PARAMS_t* params, char JOBZ, char UPLO, return 1; + no_memory: + report_no_memory(); + error: /* something failed */ memset(params, 0, sizeof(*params)); @@ -1440,7 +1446,7 @@ using fbasetyp = fortran_type_t; mem_buff = (npy_uint8 *)malloc(safe_N * safe_N * sizeof(typ) + safe_N * sizeof(basetyp)); if (!mem_buff) { - goto error; + goto no_memory; } a = mem_buff; w = mem_buff + safe_N * safe_N * sizeof(typ); @@ -1478,7 +1484,7 @@ using fbasetyp = fortran_type_t; lrwork*sizeof(basetyp) + liwork*sizeof(fortran_int)); if (!mem_buff2) { - goto error; + goto no_memory; } work = mem_buff2; @@ -1495,6 +1501,8 @@ using fbasetyp = fortran_type_t; return 1; /* something failed */ +no_memory: + report_no_memory(); error: memset(params, 0, sizeof(*params)); free(mem_buff2); @@ -1733,7 +1741,10 @@ init_gesv(GESV_PARAMS_t *params, fortran_int N, fortran_int NRHS) params->LDB = ld; return 1; + error: + report_no_memory(); + free(mem_buff); memset(params, 0, sizeof(*params)); @@ -1977,6 +1988,8 @@ init_potrf(POTR_PARAMS_t *params, char UPLO, fortran_int N) return 1; error: + report_no_memory(); + free(mem_buff); memset(params, 0, sizeof(*params)); @@ -2175,7 +2188,7 @@ scalar_trait) vlr_size + vrr_size + w_size + vl_size + vr_size); if (!mem_buff) { - goto error; + goto no_memory; } a = mem_buff; @@ -2218,7 +2231,7 @@ scalar_trait) mem_buff2 = (npy_uint8 *)malloc(work_count*sizeof(typ)); if (!mem_buff2) { - goto error; + goto no_memory; } work = mem_buff2; @@ -2226,6 +2239,10 @@ scalar_trait) params->WORK = (typ*)work; return 1; + + no_memory: + report_no_memory(); + error: free(mem_buff2); free(mem_buff); @@ -2392,7 +2409,7 @@ using realtyp = basetype_t; mem_buff = (npy_uint8 *)malloc(total_size); if (!mem_buff) { - goto error; + goto no_memory; } a = mem_buff; @@ -2434,7 +2451,7 @@ using realtyp = basetype_t; mem_buff2 = (npy_uint8 *)malloc(work_count*sizeof(ftyp)); if (!mem_buff2) { - goto error; + goto no_memory; } work = mem_buff2; @@ -2443,6 +2460,9 @@ using realtyp = 
basetype_t; params->WORK = (ftyp*)work; return 1; + + no_memory: + report_no_memory(); error: free(mem_buff2); free(mem_buff); @@ -2754,7 +2774,7 @@ init_gesdd(GESDD_PARAMS_t *params, mem_buff = (npy_uint8 *)malloc(a_size + s_size + u_size + vt_size + iwork_size); if (!mem_buff) { - goto error; + goto no_memory; } a = mem_buff; @@ -2798,7 +2818,7 @@ init_gesdd(GESDD_PARAMS_t *params, mem_buff2 = (npy_uint8 *)malloc(work_size); if (!mem_buff2) { - goto error; + goto no_memory; } work = mem_buff2; @@ -2807,6 +2827,9 @@ init_gesdd(GESDD_PARAMS_t *params, params->WORK = (ftyp*)work; return 1; + + no_memory: + report_no_memory(); error: TRACE_TXT("%s failed init\n", __FUNCTION__); free(mem_buff); @@ -2894,7 +2917,7 @@ using frealtyp = basetype_t; rwork_size + iwork_size); if (!mem_buff) { - goto error; + goto no_memory; } a = mem_buff; @@ -2939,7 +2962,7 @@ using frealtyp = basetype_t; mem_buff2 = (npy_uint8 *)malloc(work_size); if (!mem_buff2) { - goto error; + goto no_memory; } work = mem_buff2; @@ -2948,6 +2971,10 @@ using frealtyp = basetype_t; params->WORK = (ftyp*)work; return 1; + + no_memory: + report_no_memory(); + error: TRACE_TXT("%s failed init\n", __FUNCTION__); free(mem_buff2); @@ -3186,7 +3213,7 @@ using ftyp = fortran_doublereal; mem_buff = (npy_uint8 *)malloc(a_size + tau_size); if (!mem_buff) - goto error; + goto no_memory; a = mem_buff; tau = a + a_size; @@ -3219,13 +3246,17 @@ using ftyp = fortran_doublereal; work_size = (size_t) params->LWORK * sizeof(ftyp); mem_buff2 = (npy_uint8 *)malloc(work_size); if (!mem_buff2) - goto error; + goto no_memory; work = mem_buff2; params->WORK = (ftyp*)work; return 1; + + no_memory: + report_no_memory(); + error: TRACE_TXT("%s failed init\n", __FUNCTION__); free(mem_buff); @@ -3260,7 +3291,7 @@ using ftyp = fortran_doublecomplex; mem_buff = (npy_uint8 *)malloc(a_size + tau_size); if (!mem_buff) - goto error; + goto no_memory; a = mem_buff; tau = a + a_size; @@ -3295,13 +3326,17 @@ using ftyp = fortran_doublecomplex; mem_buff2 = (npy_uint8 *)malloc(work_size); if (!mem_buff2) - goto error; + goto no_memory; work = mem_buff2; params->WORK = (ftyp*)work; return 1; + + no_memory: + report_no_memory(); + error: TRACE_TXT("%s failed init\n", __FUNCTION__); free(mem_buff); @@ -3433,7 +3468,7 @@ using ftyp = fortran_doublereal; mem_buff = (npy_uint8 *)malloc(q_size + tau_size + a_size); if (!mem_buff) - goto error; + goto no_memory; q = mem_buff; tau = q + q_size; @@ -3468,13 +3503,17 @@ using ftyp = fortran_doublereal; mem_buff2 = (npy_uint8 *)malloc(work_size); if (!mem_buff2) - goto error; + goto no_memory; work = mem_buff2; params->WORK = (ftyp*)work; return 1; + + no_memory: + report_no_memory(); + error: TRACE_TXT("%s failed init\n", __FUNCTION__); free(mem_buff); @@ -3512,7 +3551,7 @@ using ftyp=fortran_doublecomplex; mem_buff = (npy_uint8 *)malloc(q_size + tau_size + a_size); if (!mem_buff) - goto error; + goto no_memory; q = mem_buff; tau = q + q_size; @@ -3548,7 +3587,7 @@ using ftyp=fortran_doublecomplex; mem_buff2 = (npy_uint8 *)malloc(work_size); if (!mem_buff2) - goto error; + goto no_memory; work = mem_buff2; @@ -3556,6 +3595,10 @@ using ftyp=fortran_doublecomplex; params->LWORK = work_count; return 1; + + no_memory: + report_no_memory(); + error: TRACE_TXT("%s failed init\n", __FUNCTION__); free(mem_buff); @@ -3898,10 +3941,7 @@ scalar_trait) return 1; no_memory: - NPY_ALLOW_C_API_DEF - NPY_ALLOW_C_API; - PyErr_NoMemory(); - NPY_DISABLE_C_API; + report_no_memory(); error: TRACE_TXT("%s failed init\n", __FUNCTION__); @@ -4034,10 
+4074,7 @@ using frealtyp = basetype_t; return 1; no_memory: - NPY_ALLOW_C_API_DEF - NPY_ALLOW_C_API; - PyErr_NoMemory(); - NPY_DISABLE_C_API; + report_no_memory(); error: TRACE_TXT("%s failed init\n", __FUNCTION__); From fe0affe49b705dc06895e3da17e3b90032fdbc5b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 25 Sep 2025 17:06:28 +0000 Subject: [PATCH 0495/1018] MAINT: Bump github/codeql-action from 3.30.3 to 3.30.4 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.30.3 to 3.30.4. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/192325c86100d080feab897ff886c34abd4c83a3...303c0aef88fc2fe5ff6d63d3b1596bfd83dfa1f9) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.30.4 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 6ea330ea2637..4a64b3ca7276 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3 + uses: github/codeql-action/init@303c0aef88fc2fe5ff6d63d3b1596bfd83dfa1f9 # v3.30.4 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3 + uses: github/codeql-action/autobuild@303c0aef88fc2fe5ff6d63d3b1596bfd83dfa1f9 # v3.30.4 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3 + uses: github/codeql-action/analyze@303c0aef88fc2fe5ff6d63d3b1596bfd83dfa1f9 # v3.30.4 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 5d8f1b91cbae..092f9ef9e696 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning"
-        uses: github/codeql-action/upload-sarif@192325c86100d080feab897ff886c34abd4c83a3 # v2.1.27
+        uses: github/codeql-action/upload-sarif@303c0aef88fc2fe5ff6d63d3b1596bfd83dfa1f9 # v2.1.27
         with:
           sarif_file: results.sarif

From 087615c53726b255e995a5c1e247c7ab76b14365 Mon Sep 17 00:00:00 2001
From: Matti Picus
Date: Fri, 26 Sep 2025 14:35:34 +0300
Subject: [PATCH 0496/1018] ENH: add warning when calling ufunc with 'where'
 and without 'out' (#29813)

* add warning when calling ufunc with 'where' and without 'out'

* DOC: add a release note

Co-authored-by: Sebastian Berg
---
 .../upcoming_changes/29813.new_feature.rst |  6 ++++++
 numpy/_core/src/umath/ufunc_object.c       |  9 +++++++++
 numpy/_core/tests/test_ufunc.py            | 18 ++++++++++++-----
 numpy/_core/tests/test_umath.py            | 20 +++++++++++++------
 4 files changed, 42 insertions(+), 11 deletions(-)
 create mode 100644 doc/release/upcoming_changes/29813.new_feature.rst

diff --git a/doc/release/upcoming_changes/29813.new_feature.rst b/doc/release/upcoming_changes/29813.new_feature.rst
new file mode 100644
index 000000000000..690d7ca88799
--- /dev/null
+++ b/doc/release/upcoming_changes/29813.new_feature.rst
@@ -0,0 +1,6 @@
+Warning emitted when using `where` without `out`
+------------------------------------------------
+Ufuncs called with a ``where`` mask and without an ``out`` argument will now
+emit a warning. This usage tends to trip up users who expect some value in
+output locations where the mask is ``False`` (the ufunc will not touch those
+locations). The warning can be suppressed by using ``out=None``.
diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c
index 75e651c29088..60eb2d32b7c6 100644
--- a/numpy/_core/src/umath/ufunc_object.c
+++ b/numpy/_core/src/umath/ufunc_object.c
@@ -4486,6 +4486,15 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc,
         return override;
     }

+    /* Warn if "where" is used without "out", issue 29561 */
+    if ((where_obj != NULL) && (full_args.out == NULL) && (out_obj == NULL)) {
+        if (PyErr_Warn(PyExc_UserWarning,
+            "'where' used without 'out', expect uninitialized memory in output. 
" + "If this is intentional, use out=None.") < 0) { + goto fail; + } + } + if (outer) { /* Outer uses special preparation of inputs (expand dims) */ PyObject *new_in = prepare_input_arguments_for_outer(full_args.in, ufunc); diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 84e31bd1bc3c..976f9fe2f615 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -1710,9 +1710,6 @@ def test_where_param(self): assert_equal(a, [[0, 27], [14, 5]]) def test_where_param_buffer_output(self): - # This test is temporarily skipped because it requires - # adding masking features to the nditer to work properly - # With casting on output a = np.ones(10, np.int64) b = np.ones(10, np.int64) @@ -1724,12 +1721,12 @@ def test_where_param_alloc(self): # With casting and allocated output a = np.array([1], dtype=np.int64) m = np.array([True], dtype=bool) - assert_equal(np.sqrt(a, where=m), [1]) + assert_equal(np.sqrt(a, where=m, out=None), [1]) # No casting and allocated output a = np.array([1], dtype=np.float64) m = np.array([True], dtype=bool) - assert_equal(np.sqrt(a, where=m), [1]) + assert_equal(np.sqrt(a, where=m, out=None), [1]) def test_where_with_broadcasting(self): # See gh-17198 @@ -1743,6 +1740,17 @@ def test_where_with_broadcasting(self): assert_array_equal((a[where] < b_where), out[where].astype(bool)) assert not out[~where].any() # outside mask, out remains all 0 + def test_where_warns(self): + a = np.arange(7) + mask = a % 2 == 0 + with pytest.warns(UserWarning, match="'where' used without 'out'"): + result1 = np.add(a, a, where=mask) + # Does not warn + result2 = np.add(a, a, where=mask, out=None) + # Sanity check + assert np.all(result1[::2] == [0, 4, 8, 12]) + assert np.all(result2[::2] == [0, 4, 8, 12]) + @staticmethod def identityless_reduce_arrs(): yield np.empty((2, 3, 4), order='C') diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 9ae0cffd5cc8..572302541e47 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -3156,7 +3156,8 @@ def do_test(f_call, f_expected): do_test(lambda a: np.add(0, 0, out=(a,)), lambda a: (0, 0, a)) # Also check the where mask handling: - do_test(lambda a: np.add(a, 0, where=False), lambda a: (a, 0)) + out = np.zeros([1], dtype=float) + do_test(lambda a: np.add(a, 0, where=False, out=None), lambda a: (a, 0)) do_test(lambda a: np.add(0, 0, a, where=False), lambda a: (0, 0, a)) def test_wrap_with_iterable(self): @@ -3713,7 +3714,7 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): kwargs = kwargs.copy() if "out" in kwargs: - kwargs["out"] = self._unwrap(kwargs["out"]) + kwargs["out"] = self._unwrap(kwargs["out"])[0] if kwargs["out"] is NotImplemented: return NotImplemented @@ -3744,21 +3745,28 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): array = np.array([1, 2, 3]) where = np.array([True, False, True]) - expected = ufunc(array, where=where) + out = np.zeros(3, dtype=array.dtype) + expected = ufunc(array, where=where, out=out) with pytest.raises(TypeError): - ufunc(array, where=where.view(OverriddenArrayOld)) + ufunc( + array, + where=where.view(OverriddenArrayOld), + out=out, + ) result_1 = ufunc( array, - where=where.view(OverriddenArrayNew) + where=where.view(OverriddenArrayNew), + out=out, ) assert isinstance(result_1, OverriddenArrayNew) assert np.all(np.array(result_1) == expected, where=where) result_2 = ufunc( array.view(OverriddenArrayNew), - where=where.view(OverriddenArrayNew) + 
where=where.view(OverriddenArrayNew), + out=out.view(OverriddenArrayNew), ) assert isinstance(result_2, OverriddenArrayNew) assert np.all(np.array(result_2) == expected, where=where) From 4078485243a760818ac491f1237afadb6953b15c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 26 Sep 2025 17:06:20 +0000 Subject: [PATCH 0497/1018] MAINT: Bump actions/dependency-review-action from 4.7.3 to 4.8.0 Bumps [actions/dependency-review-action](https://github.com/actions/dependency-review-action) from 4.7.3 to 4.8.0. - [Release notes](https://github.com/actions/dependency-review-action/releases) - [Commits](https://github.com/actions/dependency-review-action/compare/595b5aeba73380359d98a5e087f648dbb0edce1b...56339e523c0409420f6c2c9a2f4292bbb3c07dd3) --- updated-dependencies: - dependency-name: actions/dependency-review-action dependency-version: 4.8.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/dependency-review.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index be1c6f7c4bb1..a592a969eb94 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -19,6 +19,6 @@ jobs: with: persist-credentials: false - name: 'Dependency Review' - uses: actions/dependency-review-action@595b5aeba73380359d98a5e087f648dbb0edce1b # v4.7.3 + uses: actions/dependency-review-action@56339e523c0409420f6c2c9a2f4292bbb3c07dd3 # v4.8.0 with: allow-ghsas: GHSA-cx63-2mw6-8hw5 From 60a340442535663a24951da28fe21684777cde9a Mon Sep 17 00:00:00 2001 From: lvllvl <24905907+lvllvl@users.noreply.github.com> Date: Fri, 26 Sep 2025 13:56:19 -0400 Subject: [PATCH 0498/1018] BUG: resolve invalid grep with env neutral script (#29551) --- tools/ci/check_c_api_usage.py | 265 ++++++++++++++++++++++++++++++++++ tools/ci/check_c_api_usage.sh | 91 ------------ tools/linter.py | 13 +- 3 files changed, 273 insertions(+), 96 deletions(-) create mode 100644 tools/ci/check_c_api_usage.py delete mode 100644 tools/ci/check_c_api_usage.sh diff --git a/tools/ci/check_c_api_usage.py b/tools/ci/check_c_api_usage.py new file mode 100644 index 000000000000..49c317a1259c --- /dev/null +++ b/tools/ci/check_c_api_usage.py @@ -0,0 +1,265 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import argparse +import os +import re +import sys +import tempfile +from concurrent.futures import ThreadPoolExecutor, as_completed +from pathlib import Path +from re import Pattern + +""" +Borrow-ref C API linter (Python version). + +- Recursively scans source files under --root (default: numpy) +- Matches suspicious CPython C-API calls as whole identifiers +- Skips: + - lines with '// noqa: borrowed-ref OK' or + '// noqa: borrowed-ref - manual fix needed' + - line comments (// ...) + - block comments (/* ... */), even when they span lines +- Prints findings and exits 1 if any issues found, else 0 +""" + +def strip_comments(line: str, in_block: bool) -> tuple[str, bool]: + """ + Return (code_without_comments, updated_in_block). + Removes // line comments and /* ... */ block comments (non-nesting, C-style). + """ + i = 0 + out_parts: list[str] = [] + n = len(line) + + while i < n: + if in_block: + end = line.find("*/", i) + if end == -1: + # Entire remainder is inside a block comment. 
+                    return ("".join(out_parts), True)
+                i = end + 2
+                in_block = False
+                continue
+
+            # Not in block: look for next // or /* from current i
+            sl = line.find("//", i)
+            bl = line.find("/*", i)
+
+            if sl != -1 and (bl == -1 or sl < bl):
+                # Line comment starts first: take code up to '//' and stop
+                out_parts.append(line[i:sl])
+                return ("".join(out_parts), in_block)
+
+            if bl != -1:
+                # Block comment starts: take code up to '/*', then enter block
+                out_parts.append(line[i:bl])
+                i = bl + 2
+                in_block = True
+                continue
+
+            # No more comments
+            out_parts.append(line[i:])
+            break
+
+        return ("".join(out_parts), in_block)
+
+def iter_source_files(root: Path, exts: set[str], excludes: set[str]) -> list[Path]:
+    """
+    Return a list of source files under 'root', where filenames end with any of the
+    extensions in 'exts' (e.g., '.c.src', '.c', '.h').
+    Excludes directories whose names are in 'excludes'.
+    """
+    results: list[Path] = []
+
+    for dirpath, dirnames, filenames in os.walk(root):
+        # Prune excluded directories
+        dirnames[:] = [d for d in dirnames if d not in excludes]
+        for fn in filenames:
+            # endswith handles multi-suffix patterns, e.g., .c.src
+            if any(fn.endswith(ext) for ext in exts):
+                results.append(Path(dirpath) / fn)
+    return results
+
+def build_func_rx(funcs: tuple[str, ...]) -> Pattern[str]:
+    # match a suspicious name as a whole identifier followed by '('
+    return re.compile(r"(?<![A-Za-z0-9_])(" + "|".join(funcs) + r")(?=\s*\()")
+
+def scan_file(
+    path: Path, func_rx: Pattern[str], noqa_markers: tuple[str, ...]
+) -> list[tuple[str, int, str, str]]:
+    """
+    Scan a single file.
+    Returns list of (func_name, line_number, path_str, raw_line_str).
+    """
+    hits: list[tuple[str, int, str, str]] = []
+    in_block = False
+    noqa_set = set(noqa_markers)
+
+    try:
+        with path.open("r", encoding="utf-8", errors="ignore") as f:
+            for lineno, raw in enumerate(f, 1):
+                # Skip if approved by noqa markers
+                if any(mark in raw for mark in noqa_set):
+                    continue
+
+                # Remove comments; if nothing remains, skip
+                code, in_block = strip_comments(raw.rstrip("\n"), in_block)
+                if not code.strip():
+                    continue
+
+                # Find all suspicious calls in non-comment code
+                for m in func_rx.finditer(code):
+                    hits.append((m.group(0), lineno, str(path), raw.rstrip("\n")))
+    except FileNotFoundError:
+        # File may have disappeared; ignore gracefully
+        pass
+    return hits
+
+
+def main(argv: list[str] | None = None) -> int:
+    # List of suspicious function calls:
+    suspicious_funcs: tuple[str, ...] = (
+        "PyList_GetItem",
+        "PyDict_GetItem",
+        "PyDict_GetItemWithError",
+        "PyDict_GetItemString",
+        "PyDict_SetDefault",
+        "PyDict_Next",
+        "PyWeakref_GetObject",
+        "PyWeakref_GET_OBJECT",
+        "PyList_GET_ITEM",
+        "_PyDict_GetItemStringWithError",
+        "PySequence_Fast"
+    )
+    func_rx = build_func_rx(suspicious_funcs)
+    noqa_markers = (
+        "noqa: borrowed-ref OK",
+        "noqa: borrowed-ref - manual fix needed"
+    )
+    default_exts = {".c", ".h", ".c.src", ".cpp"}
+    default_excludes = {"pythoncapi-compat"}
+
+    ap = argparse.ArgumentParser(description="Borrow-ref C API linter (Python).")
+    ap.add_argument(
+        "--quiet",
+        action="store_true",
+        help="Suppress normal output; exit status alone indicates result "
+             "(useful for CI).",
+    )
+    ap.add_argument(
+        "-j", "--jobs",
+        type=int,
+        default=0,
+        help="Number of worker threads (0=auto, 1=sequential).",
+    )
+    ap.add_argument(
+        "--root",
+        default="numpy",
+        type=str,
+        help="Root directory to scan (default: numpy)"
+    )
+    ap.add_argument(
+        "--ext",
+        action="append",
+        default=None,
+        help=f"File extension(s) to include (repeatable). Defaults to {default_exts}",
+    )
+    ap.add_argument(
+        "--exclude",
+        action="append",
+        default=None,
+        help=f"Directory name(s) to exclude (repeatable). 
Default: {default_excludes}",
+    )
+    args = ap.parse_args(argv)
+
+    if args.ext:
+        exts = {e if e.startswith(".") else f".{e}" for e in args.ext}
+    else:
+        exts = set(default_exts)
+    excludes = set(args.exclude) if args.exclude else set(default_excludes)
+
+    root = Path(args.root)
+    if not root.exists():
+        print(f"error: root '{root}' does not exist", file=sys.stderr)
+        return 2
+
+    files = sorted(iter_source_files(root, exts, excludes), key=str)
+
+    # Determine concurrency: auto picks a reasonable cap for I/O-bound work
+    if args.jobs is None or args.jobs <= 0:
+        max_workers = min(32, (os.cpu_count() or 1) * 5)
+    else:
+        max_workers = max(1, args.jobs)
+    print(f'Scanning {len(files)} C/C++ source files...\n')
+
+    # Output file (mirrors the shell script's behavior)
+    tmpdir = Path(".tmp")
+    tmpdir.mkdir(exist_ok=True)
+
+    findings = 0
+
+    # Run the scanning in parallel; only the main thread writes the report
+    all_hits: list[tuple[str, int, str, str]] = []
+    if max_workers == 1:
+        for p in files:
+            all_hits.extend(scan_file(p, func_rx, noqa_markers))
+    else:
+        with ThreadPoolExecutor(max_workers=max_workers) as ex:
+            fut_to_file = {ex.submit(scan_file, p, func_rx, noqa_markers): p
+                           for p in files}
+            for fut in as_completed(fut_to_file):
+                try:
+                    all_hits.extend(fut.result())
+                except Exception as e:
+                    print(f'Failed to scan {fut_to_file[fut]}: {e}')
+
+    # Sort for deterministic output: by path, then line number
+    all_hits.sort(key=lambda t: (t[2], t[1]))
+
+    # If there are no hits, the linter passed
+    if not all_hits:
+        if not args.quiet:
+            print("All checks passed! C API borrow-ref linter found no issues.\n")
+        return 0
+
+    # There are some linter failures: create a log file
+    with tempfile.NamedTemporaryFile(
+        prefix="c_api_usage_report.",
+        suffix=".txt",
+        dir=tmpdir,
+        mode="w+",
+        encoding="utf-8",
+        delete=False,
+    ) as out:
+        report_path = Path(out.name)
+        out.write("Running Suspicious C API usage report workflow...\n\n")
+        for func, lineno, pstr, raw in all_hits:
+            findings += 1
+            out.write(f"Found suspicious call to {func} in file: {pstr}\n")
+            out.write(f"  -> {pstr}:{lineno}: {raw}\n")
+            out.write("Recommendation:\n")
+            out.write(
+                "If this use is intentional and safe, add "
+                "'// noqa: borrowed-ref OK' on the same line "
+                "to silence this warning.\n"
+            )
+            out.write(
+                "Otherwise, consider replacing the call "
+                "with a thread-safe API function.\n\n"
+            )
+
+        out.flush()
+        if not args.quiet:
+            out.seek(0)
+            sys.stdout.write(out.read())
+            print(f"Report written to: {report_path}\n\n"
+                  "C API borrow-ref linter FAILED.")
+
+    return 1
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/tools/ci/check_c_api_usage.sh b/tools/ci/check_c_api_usage.sh
deleted file mode 100644
index b04410d7667d..000000000000
--- a/tools/ci/check_c_api_usage.sh
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-# List of suspicious function calls:
-SUSPICIOUS_FUNCS=(
-    "PyList_GetItem"
-    "PyDict_GetItem"
-    "PyDict_GetItemWithError"
-    "PyDict_GetItemString"
-    "PyDict_SetDefault"
-    "PyDict_Next"
-    "PyWeakref_GetObject"
-    "PyWeakref_GET_OBJECT"
-    "PyList_GET_ITEM"
-    "_PyDict_GetItemStringWithError"
-    "PySequence_Fast"
-)
-
-# Find all C/C++ source files in the repo
-ALL_FILES=$(find numpy -type f \( -name "*.c" -o -name "*.h" -o -name "*.c.src" -o -name "*.cpp" \) ! -path "*/pythoncapi-compat/*")
-
-# For debugging: print out file count
-echo "Scanning $(echo "$ALL_FILES" | wc -l) C/C++ source files..." 
- -# Prepare a result file -mkdir -p .tmp -OUTPUT=$(mktemp .tmp/c_api_usage_report.XXXXXX) -echo -e "Running Suspicious C API usage report workflow...\n" > $OUTPUT - -FAIL=0 - -# Scan each changed file -for file in $ALL_FILES; do - - for func in "${SUSPICIOUS_FUNCS[@]}"; do - # -n : show line number - # -P : perl-style boundaries - # (?> "$OUTPUT" - echo " -> $line" >> "$OUTPUT" - echo "Recommendation:" >> "$OUTPUT" - echo "If this use is intentional and safe, add '// noqa: borrowed-ref OK' on the same line to silence this warning." >> "$OUTPUT" - echo "Otherwise, consider replacing $func with a thread-safe API function." >> "$OUTPUT" - echo "" >> "$OUTPUT" - FAIL=1 - done <<< "$matches" - fi - done -done - -if [[ $FAIL -eq 1 ]]; then - echo "C API borrow-ref linter found issues." -else - echo "C API borrow-ref linter found no issues." > $OUTPUT -fi - -cat "$OUTPUT" -exit "$FAIL" diff --git a/tools/linter.py b/tools/linter.py index 4b5115c97a2b..f614be100159 100644 --- a/tools/linter.py +++ b/tools/linter.py @@ -47,15 +47,18 @@ def run_lint(self, fix: bool) -> None: sys.exit(retcode) def run_check_c_api(self) -> tuple[int, str]: - # Running borrowed ref checker + """Run C-API borrowed-ref checker""" print("Running C API borrow-reference linter...") - borrowed_ref_script = os.path.join(self.repository_root, "tools", "ci", - "check_c_api_usage.sh") + borrowed_ref_script = os.path.join( + self.repository_root, "tools", "ci", "check_c_api_usage.py" + ) borrowed_res = subprocess.run( - ["bash", borrowed_ref_script], + [sys.executable, borrowed_ref_script], + cwd=self.repository_root, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - encoding="utf-8", + text=True, + check=False, ) # Exit with non-zero if C API Check fails From dfc14f7adfaaa2084b1ba895d68de31d56854fec Mon Sep 17 00:00:00 2001 From: mattip Date: Sun, 28 Sep 2025 09:29:46 +0300 Subject: [PATCH 0499/1018] ENH: convert PyErr_Warn to PyErr_WarnEx --- numpy/_core/src/common/npy_longdouble.c | 4 ++-- numpy/_core/src/multiarray/arraytypes.c.src | 4 ++-- numpy/_core/src/multiarray/ctors.c | 4 ++-- numpy/_core/src/multiarray/flagsobject.c | 2 +- numpy/_core/src/umath/extobj.c | 2 +- numpy/_core/src/umath/ufunc_object.c | 4 ++-- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/numpy/_core/src/common/npy_longdouble.c b/numpy/_core/src/common/npy_longdouble.c index ce80a9ae2bc3..644af776f9a9 100644 --- a/numpy/_core/src/common/npy_longdouble.c +++ b/numpy/_core/src/common/npy_longdouble.c @@ -144,8 +144,8 @@ npy_longdouble_from_PyLong(PyObject *long_obj) { result = NumPyOS_ascii_strtold(cstr, &end); if (errno == ERANGE) { /* strtold returns INFINITY of the correct sign. 
*/
-        if (PyErr_Warn(PyExc_RuntimeWarning,
-                "overflow encountered in conversion from python long") < 0) {
+        if (PyErr_WarnEx(PyExc_RuntimeWarning,
+                "overflow encountered in conversion from python long", 1) < 0) {
             goto fail;
         }
     }
diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src
index 52c9bdfb6bcc..dd51a937031a 100644
--- a/numpy/_core/src/multiarray/arraytypes.c.src
+++ b/numpy/_core/src/multiarray/arraytypes.c.src
@@ -541,8 +541,8 @@ string_to_long_double(PyObject*op)
     errno = 0;
     temp = NumPyOS_ascii_strtold(s, &end);
     if (errno == ERANGE) {
-        if (PyErr_Warn(PyExc_RuntimeWarning,
-                "overflow encountered in conversion from string") < 0) {
+        if (PyErr_WarnEx(PyExc_RuntimeWarning,
+                "overflow encountered in conversion from string", 1) < 0) {
             Py_XDECREF(b);
             return 0;
         }
diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c
index b7f0dcb521bb..76aa2fca86e7 100644
--- a/numpy/_core/src/multiarray/ctors.c
+++ b/numpy/_core/src/multiarray/ctors.c
@@ -1304,12 +1304,12 @@ _array_from_buffer_3118(PyObject *memoryview)
             return NULL;
         }

-        if (PyErr_Warn(
+        if (PyErr_WarnEx(
                 PyExc_RuntimeWarning,
                 "A builtin ctypes object gave a PEP3118 format "
                 "string that does not match its itemsize, so a "
                 "best-guess will be made of the data type. "
-                "Newer versions of python may behave correctly.") < 0) {
+                "Newer versions of python may behave correctly.", 1) < 0) {
             Py_DECREF(descr);
             return NULL;
         }
diff --git a/numpy/_core/src/multiarray/flagsobject.c b/numpy/_core/src/multiarray/flagsobject.c
index 8257727030c0..2570d3ec5d16 100644
--- a/numpy/_core/src/multiarray/flagsobject.c
+++ b/numpy/_core/src/multiarray/flagsobject.c
@@ -185,7 +185,7 @@ static char *msg = "future versions will not create a writeable "
         PyArrayFlagsObject *self, void *NPY_UNUSED(ignored)) \
     { \
         if (self->flags & NPY_ARRAY_WARN_ON_WRITE) { \
-            if (PyErr_Warn(PyExc_FutureWarning, msg) < 0) {\
+            if (PyErr_WarnEx(PyExc_FutureWarning, msg, 1) < 0) {\
                 return NULL; \
             } \
         }\
diff --git a/numpy/_core/src/umath/extobj.c b/numpy/_core/src/umath/extobj.c
index 755d8665b11d..91b0b4c62d30 100644
--- a/numpy/_core/src/umath/extobj.c
+++ b/numpy/_core/src/umath/extobj.c
@@ -398,7 +398,7 @@ _error_handler(const char *name, int method, PyObject *pyfunc, char *errtype,
     switch(method) {
     case UFUNC_ERR_WARN:
         PyOS_snprintf(msg, sizeof(msg), "%s encountered in %s", errtype, name);
-        if (PyErr_Warn(PyExc_RuntimeWarning, msg) < 0) {
+        if (PyErr_WarnEx(PyExc_RuntimeWarning, msg, 1) < 0) {
             goto fail;
         }
         break;
diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c
index 60eb2d32b7c6..0bdc14e04cde 100644
--- a/numpy/_core/src/umath/ufunc_object.c
+++ b/numpy/_core/src/umath/ufunc_object.c
@@ -4488,9 +4488,9 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc,

     /* Warn if "where" is used without "out", issue 29561 */
     if ((where_obj != NULL) && (full_args.out == NULL) && (out_obj == NULL)) {
-        if (PyErr_Warn(PyExc_UserWarning,
+        if (PyErr_WarnEx(PyExc_UserWarning,
             "'where' used without 'out', expect uninitialized memory in output. 
" - "If this is intentional, use out=None.") < 0) { + "If this is intentional, use out=None.", 1) < 0) { goto fail; } } From d46e685dca68980e599efc9286f967ba82ee5d03 Mon Sep 17 00:00:00 2001 From: mattip Date: Sun, 28 Sep 2025 09:30:55 +0300 Subject: [PATCH 0500/1018] TST: remove xfail, test should pass everywhere --- numpy/_core/tests/test_multiarray.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 943e9642ec3c..c2fe0abb6137 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -1868,11 +1868,9 @@ def _test_cast_from_flexible(self, dtype): def test_cast_from_void(self): self._test_cast_from_flexible(np.void) - @pytest.mark.xfail(reason="See gh-9847") def test_cast_from_unicode(self): self._test_cast_from_flexible(np.str_) - @pytest.mark.xfail(reason="See gh-9847") def test_cast_from_bytes(self): self._test_cast_from_flexible(np.bytes_) From ad746c90ff15e4126f18e3ff85d5c8a06e9ec5ea Mon Sep 17 00:00:00 2001 From: Aniket Singh Yadav Date: Fri, 19 Sep 2025 12:12:04 +0530 Subject: [PATCH 0501/1018] DOC: Update Meson build examples in usage.rst [skip actions][skip azp][skip cirrus] --- doc/source/f2py/index.rst | 3 +-- doc/source/f2py/usage.rst | 7 +++---- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/doc/source/f2py/index.rst b/doc/source/f2py/index.rst index 31840e812379..46f1de0212d6 100644 --- a/doc/source/f2py/index.rst +++ b/doc/source/f2py/index.rst @@ -49,8 +49,7 @@ For Meson build examples, see :doc:`usage`. .. toctree:: :maxdepth: 3 - - usage + f2py-user f2py-reference windows/index diff --git a/doc/source/f2py/usage.rst b/doc/source/f2py/usage.rst index c0fb6c0428df..d6e042863b55 100644 --- a/doc/source/f2py/usage.rst +++ b/doc/source/f2py/usage.rst @@ -328,12 +328,11 @@ resulting package to work, you need to create a file named ``__init__.py`` defined entirely in terms of the ``add.pyf`` and ``add.f`` files. The conversion of the .pyf file to a .c file is handled by `numpy.distutils`. -=============================== Building with Meson (Examples) -=============================== +============================== Using f2py with Meson ---------------------- +~~~~~~~~~~~~~~~~~~~~~ Meson is a modern build system recommended for building Python extension modules, especially starting with Python 3.12 and NumPy 2.x. Meson provides @@ -370,7 +369,7 @@ user documentation or refer to SciPy's Meson build files for real-world examples: https://github.com/scipy/scipy/tree/main/meson.build Building NumPy ufunc Extensions with Meson ------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To build a NumPy ufunc extension (C API) using Meson, you can use the following template: From 69c8ba3d5681d5d6056ee1d13acdfddeeeb001d2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 Sep 2025 20:35:33 +0000 Subject: [PATCH 0502/1018] MAINT: Bump github/codeql-action from 3.30.4 to 3.30.5 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.30.4 to 3.30.5. 
- [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/303c0aef88fc2fe5ff6d63d3b1596bfd83dfa1f9...3599b3baa15b485a2e49ef411a7a4bb2452e7f93) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.30.5 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 4a64b3ca7276..9f015242f94b 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@303c0aef88fc2fe5ff6d63d3b1596bfd83dfa1f9 # v3.30.4 + uses: github/codeql-action/init@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3.30.5 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@303c0aef88fc2fe5ff6d63d3b1596bfd83dfa1f9 # v3.30.4 + uses: github/codeql-action/autobuild@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3.30.5 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@303c0aef88fc2fe5ff6d63d3b1596bfd83dfa1f9 # v3.30.4 + uses: github/codeql-action/analyze@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3.30.5 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 092f9ef9e696..3fb782a66a8c 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@303c0aef88fc2fe5ff6d63d3b1596bfd83dfa1f9 # v2.1.27 + uses: github/codeql-action/upload-sarif@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v2.1.27 with: sarif_file: results.sarif From 9e9ea43efdacaa9e1ea5ac947520ceb427b9863d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 Sep 2025 20:35:38 +0000 Subject: [PATCH 0503/1018] MAINT: Bump int128/hide-comment-action from 1.43.0 to 1.44.0 Bumps [int128/hide-comment-action](https://github.com/int128/hide-comment-action) from 1.43.0 to 1.44.0. - [Release notes](https://github.com/int128/hide-comment-action/releases) - [Commits](https://github.com/int128/hide-comment-action/compare/a30d551065e4231e6d7a671bb5ce884f9ee6417b...9803637eab610cca14ac6f64c42c0d7ffe9327e0) --- updated-dependencies: - dependency-name: int128/hide-comment-action dependency-version: 1.44.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/mypy_primer_comment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mypy_primer_comment.yml b/.github/workflows/mypy_primer_comment.yml index e62f26edd85f..266d7a8f2074 100644 --- a/.github/workflows/mypy_primer_comment.yml +++ b/.github/workflows/mypy_primer_comment.yml @@ -49,7 +49,7 @@ jobs: return parseInt(fs.readFileSync("pr_number.txt", { encoding: "utf8" })) - name: Hide old comments - uses: int128/hide-comment-action@a30d551065e4231e6d7a671bb5ce884f9ee6417b # v1.43.0 + uses: int128/hide-comment-action@9803637eab610cca14ac6f64c42c0d7ffe9327e0 # v1.44.0 with: token: ${{ secrets.GITHUB_TOKEN }} issue-number: ${{ steps.get-pr-number.outputs.result }} From 4a2defe2774943b2a658ac6adcb6181d85e299ea Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 29 Sep 2025 18:35:53 -0600 Subject: [PATCH 0504/1018] TST, WIP: Try updating circleci Python. [skip actions] [skip azp] [skip cirrus] --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 8c2b443f1e84..1760eca727dd 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -9,7 +9,7 @@ _defaults: &defaults docker: # CircleCI maintains a library of pre-built images # documented at https://circleci.com/developer/images/image/cimg/python - - image: cimg/python:3.11.10 + - image: cimg/python:3.11.13 working_directory: ~/repo From e299cbfb40a2a3972db8255d93431dd4fc5f15dd Mon Sep 17 00:00:00 2001 From: mattip Date: Tue, 30 Sep 2025 15:29:58 +0300 Subject: [PATCH 0505/1018] pin pyparsing since 3.3 interacts badly with matplotlib==3.10.6 [skip actions][skip azp][skip cirrusci] --- requirements/doc_requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements/doc_requirements.txt b/requirements/doc_requirements.txt index a6eb6e97b5cf..9d006b961923 100644 --- a/requirements/doc_requirements.txt +++ b/requirements/doc_requirements.txt @@ -6,6 +6,7 @@ sphinx-copybutton sphinx-design scipy matplotlib +pyparsing<3.3 pandas breathe>4.33.0 ipython!=8.1.0 From 90642e37adfb6a44724b334f4a527674b9d562bd Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 1 Oct 2025 03:47:57 +0200 Subject: [PATCH 0506/1018] TYP: Fix ``generic.__new__`` return type (#29842) --- numpy/__init__.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index d452bff03fc3..ddb30065fcfe 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3576,7 +3576,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # See https://github.com/numpy/numpy-stubs/pull/80 for more details. class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @abstractmethod - def __new__(cls) -> None: ... + def __new__(cls) -> Self: ... def __hash__(self) -> int: ... @overload def __array__(self, dtype: None = None, /) -> ndarray[tuple[()], dtype[Self]]: ... 
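
A minimal sketch of why the ``__new__`` return annotation above matters
(hypothetical classes, not NumPy's actual stubs; ``typing.Self`` needs
Python 3.11+). Type checkers take the type of a constructor call from
``__new__``, so ``-> None`` made instantiation look like it returns
``None``, while ``-> Self`` keeps subclass constructors precise:

    from typing import Self

    class Base:
        def __new__(cls) -> Self:
            # with "-> None" here, checkers would type Base() and Sub() as None
            return super().__new__(cls)

    class Sub(Base):
        pass

    obj = Sub()  # inferred as "Sub" thanks to "-> Self"
    assert isinstance(obj, Sub)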
From 7ad8192a64a12231af4aeb5bb030f052ef4df8ab Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 1 Oct 2025 03:49:29 +0200 Subject: [PATCH 0507/1018] TYP: remove unused ``# type: ignore``s (#29843) * TYP: remove unused ``# type: ignore``s * TYP: remove unused ``# type: ignore``s in ``testing._private.utils`` --- numpy/__init__.pyi | 8 ++++---- numpy/testing/_private/utils.pyi | 3 +-- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index ddb30065fcfe..884e0bcbd671 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -5034,11 +5034,11 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rpow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... - def __mod__(self, other: _Float64_co, /) -> float64: ... # type: ignore[override] - def __rmod__(self, other: _Float64_co, /) -> float64: ... # type: ignore[override] + def __mod__(self, other: _Float64_co, /) -> float64: ... + def __rmod__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] - def __divmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... # type: ignore[override] - def __rdivmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... # type: ignore[override] + def __divmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... + def __rdivmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... # type: ignore[misc] half: TypeAlias = float16 single: TypeAlias = float32 diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index a9b773cf2247..e90071a3b450 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -137,8 +137,7 @@ NOGIL_BUILD: Final[bool] = ... class KnownFailureException(Exception): ... class IgnoreException(Exception): ... -# NOTE: `warnings.catch_warnings` is incorrectly defined as invariant in typeshed -class clear_and_catch_warnings(warnings.catch_warnings[_W_co], Generic[_W_co]): # type: ignore[type-var] # pyright: ignore[reportInvalidTypeArguments] +class clear_and_catch_warnings(warnings.catch_warnings[_W_co], Generic[_W_co]): class_modules: ClassVar[tuple[types.ModuleType, ...]] = () modules: Final[set[types.ModuleType]] @overload # record: True From 836d16d6c0d7b3a026b8ab75081aeffa276ee932 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 1 Oct 2025 03:51:50 +0200 Subject: [PATCH 0508/1018] TYP: fix ``testing.assert_warns`` decorator order (#29844) mypy wants it to be the other way around, and reported > @overload should be placed before @deprecated --- numpy/testing/_private/utils.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index e90071a3b450..31c7fab7bdf8 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -358,11 +358,11 @@ def assert_array_max_ulp( ) -> NDArray[Any]: ... # -@deprecated("Please use warnings.catch_warnings or pytest.warns instead") @overload -def assert_warns(warning_class: _WarningSpec) -> _GeneratorContextManager[None]: ... @deprecated("Please use warnings.catch_warnings or pytest.warns instead") +def assert_warns(warning_class: _WarningSpec) -> _GeneratorContextManager[None]: ... @overload +@deprecated("Please use warnings.catch_warnings or pytest.warns instead") def assert_warns(warning_class: _WarningSpec, func: Callable[_Tss, _T], *args: _Tss.args, **kwargs: _Tss.kwargs) -> _T: ... 
 #
From 31592aca6903e78a8579f64276ecb98120de22b9 Mon Sep 17 00:00:00 2001
From: Joren Hammudoglu
Date: Wed, 1 Oct 2025 03:53:00 +0200
Subject: [PATCH 0509/1018] TYP: Add missing ``rtol`` kwarg to ``linalg.pinv``
 (#29845)

Apparently it was added in 2.0.0, but was missing from the stubs.
---
 numpy/linalg/_linalg.pyi | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi
index a63492013ade..8b09762d9536 100644
--- a/numpy/linalg/_linalg.pyi
+++ b/numpy/linalg/_linalg.pyi
@@ -28,6 +28,7 @@ from numpy import (
 )
 from numpy._core.fromnumeric import matrix_transpose
 from numpy._core.numeric import tensordot
+from numpy._globals import _NoValueType
 from numpy._typing import (
     ArrayLike,
     DTypeLike,
@@ -324,20 +325,26 @@ def matrix_rank(
 @overload
 def pinv(
     a: _ArrayLikeInt_co,
-    rcond: _ArrayLikeFloat_co = None,
+    rcond: _ArrayLikeFloat_co | None = None,
     hermitian: bool = False,
+    *,
+    rtol: _ArrayLikeFloat_co | _NoValueType = ...,
 ) -> NDArray[float64]: ...
 @overload
 def pinv(
     a: _ArrayLikeFloat_co,
-    rcond: _ArrayLikeFloat_co = None,
+    rcond: _ArrayLikeFloat_co | None = None,
     hermitian: bool = False,
+    *,
+    rtol: _ArrayLikeFloat_co | _NoValueType = ...,
 ) -> NDArray[floating]: ...
 @overload
 def pinv(
     a: _ArrayLikeComplex_co,
-    rcond: _ArrayLikeFloat_co = None,
+    rcond: _ArrayLikeFloat_co | None = None,
     hermitian: bool = False,
+    *,
+    rtol: _ArrayLikeFloat_co | _NoValueType = ...,
 ) -> NDArray[complexfloating]: ...

 # TODO: Returns a 2-tuple of scalars for 2D arrays and

From f393cf81251f0ae3e400d516ad63e92db4d7016e Mon Sep 17 00:00:00 2001
From: Joren Hammudoglu
Date: Wed, 1 Oct 2025 03:54:37 +0200
Subject: [PATCH 0510/1018] TYP: Fix signatures of ``linalg.matmul`` and
 ``linalg.outer`` (#29846)

This fixes several issues in np.linalg.outer (positional-only params, a
misplaced out=... param, and an orphaned -> _ArrayT), and also fixes
linalg.matmul parameters to be positional-only.

I also snuck in an additional overload that improves the return type if
both input array-likes have the same dtype.
---
 numpy/linalg/_linalg.pyi                  | 52 +++++++++--------------
 numpy/typing/tests/data/reveal/linalg.pyi | 12 +++---
 2 files changed, 25 insertions(+), 39 deletions(-)

diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi
index 8b09762d9536..876384d6cacc 100644
--- a/numpy/linalg/_linalg.pyi
+++ b/numpy/linalg/_linalg.pyi
@@ -79,7 +79,7 @@ __all__ = [
     "vecdot",
 ]

-_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any])
+_NumberT = TypeVar("_NumberT", bound=np.number)

 _ModeKind: TypeAlias = L["reduced", "complete", "r", "raw"]

@@ -183,33 +183,29 @@ def cholesky(a: _ArrayLikeFloat_co, /, *, upper: bool = False) -> NDArray[floati
 def cholesky(a: _ArrayLikeComplex_co, /, *, upper: bool = False) -> NDArray[complexfloating]: ...

 @overload
-def outer(x1: _ArrayLike[Never], x2: _ArrayLike[Never]) -> NDArray[Any]: ...
+def outer(x1: _ArrayLike[Never], x2: _ArrayLike[Never], /) -> NDArray[Any]: ...
 @overload
-def outer(x1: _ArrayLikeBool_co, x2: _ArrayLikeBool_co) -> NDArray[np.bool]: ...
+def outer(x1: _ArrayLikeBool_co, x2: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...
 @overload
-def outer(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co) -> NDArray[unsignedinteger]: ...
+def outer(x1: _ArrayLike[_NumberT], x2: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ...
 @overload
-def outer(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co) -> NDArray[signedinteger]: ...
+def outer(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload -def outer(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co) -> NDArray[floating]: ... +def outer(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload -def outer( - x1: _ArrayLikeComplex_co, - x2: _ArrayLikeComplex_co, -) -> NDArray[complexfloating]: ... +def outer(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload -def outer( - x1: _ArrayLikeTD64_co, - x2: _ArrayLikeTD64_co, - out: None = ..., -) -> NDArray[timedelta64]: ... +def outer(x1: _ArrayLikeComplex_co, x2: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... +@overload +def outer(x1: _ArrayLikeTD64_co, x2: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload -def outer(x1: _ArrayLikeObject_co, x2: _ArrayLikeObject_co) -> NDArray[object_]: ... +def outer(x1: _ArrayLikeObject_co, x2: _ArrayLikeObject_co, /) -> NDArray[object_]: ... @overload def outer( x1: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, x2: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, -) -> _ArrayT: ... + /, +) -> NDArray[Any]: ... @overload def qr(a: _ArrayLikeInt_co, mode: _ModeKind = "reduced") -> QRResult: ... @@ -492,22 +488,12 @@ def cross( ) -> NDArray[complexfloating]: ... @overload -def matmul( - x1: _ArrayLikeInt_co, - x2: _ArrayLikeInt_co, -) -> NDArray[signedinteger]: ... +def matmul(x1: _ArrayLike[_NumberT], x2: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... @overload -def matmul( - x1: _ArrayLikeUInt_co, - x2: _ArrayLikeUInt_co, -) -> NDArray[unsignedinteger]: ... +def matmul(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload -def matmul( - x1: _ArrayLikeFloat_co, - x2: _ArrayLikeFloat_co, -) -> NDArray[floating]: ... +def matmul(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload -def matmul( - x1: _ArrayLikeComplex_co, - x2: _ArrayLikeComplex_co, -) -> NDArray[complexfloating]: ... +def matmul(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... +@overload +def matmul(x1: _ArrayLikeComplex_co, x2: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... 
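A standalone sketch of the ``_NumberT`` overload trick introduced above (``my_matmul`` is a hypothetical stand-in, not a NumPy API): binding the same TypeVar to both parameters lets a checker return the exact dtype when the two operands agree, before falling through to the wider overloads — which is what the updated reveal tests below verify for ``linalg.outer`` and ``linalg.matmul``:

    from typing import Any, TypeVar, overload

    import numpy as np
    from numpy.typing import NDArray

    _NumberT = TypeVar("_NumberT", bound=np.number)

    @overload
    def my_matmul(x1: NDArray[_NumberT], x2: NDArray[_NumberT], /) -> NDArray[_NumberT]: ...
    @overload
    def my_matmul(x1: NDArray[Any], x2: NDArray[Any], /) -> NDArray[Any]: ...
    def my_matmul(x1, x2, /):
        # runtime just defers to the `@` operator
        return x1 @ x2

    a = np.ones((2, 2), dtype=np.float64)
    b = my_matmul(a, a)  # matched by the first overload: NDArray[np.float64]
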
diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 663fb888f012..ef7819f448ca 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -43,9 +43,9 @@ assert_type(np.linalg.cholesky(AR_i8), npt.NDArray[np.float64]) assert_type(np.linalg.cholesky(AR_f8), npt.NDArray[np.floating]) assert_type(np.linalg.cholesky(AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.linalg.outer(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.linalg.outer(AR_f8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.outer(AR_c16, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.outer(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.linalg.outer(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.outer(AR_c16, AR_c16), npt.NDArray[np.complex128]) assert_type(np.linalg.outer(AR_b, AR_b), npt.NDArray[np.bool]) assert_type(np.linalg.outer(AR_O, AR_O), npt.NDArray[np.object_]) assert_type(np.linalg.outer(AR_i8, AR_m), npt.NDArray[np.timedelta64]) @@ -129,6 +129,6 @@ assert_type(np.linalg.cross(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) assert_type(np.linalg.cross(AR_f8, AR_f8), npt.NDArray[np.floating]) assert_type(np.linalg.cross(AR_c16, AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.linalg.matmul(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.linalg.matmul(AR_f8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.matmul(AR_c16, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.matmul(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_c16, AR_c16), npt.NDArray[np.complex128]) From 820b45e29313eda5562f06c1ae9ada3c4b98223e Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 1 Oct 2025 03:56:24 +0200 Subject: [PATCH 0511/1018] TYP: Fix incompatible defaults in ``polyfit``, ``histogram``, and ``histogramdd`` (#29847) The default values weren't assignable to the types of the parameters. --- numpy/lib/_histograms_impl.pyi | 4 ++-- numpy/lib/_polynomial_impl.pyi | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/numpy/lib/_histograms_impl.pyi b/numpy/lib/_histograms_impl.pyi index 7cb70ef21508..72a31dcedc1f 100644 --- a/numpy/lib/_histograms_impl.pyi +++ b/numpy/lib/_histograms_impl.pyi @@ -27,14 +27,14 @@ def histogram( a: ArrayLike, bins: _BinKind | SupportsIndex | ArrayLike = 10, range: tuple[float, float] | None = None, - density: bool = None, + density: bool | None = None, weights: ArrayLike | None = None, ) -> tuple[NDArray[Any], NDArray[Any]]: ... def histogramdd( sample: ArrayLike, bins: SupportsIndex | ArrayLike = 10, - range: Sequence[tuple[float, float]] = None, + range: Sequence[tuple[float, float]] | None = None, density: bool | None = None, weights: ArrayLike | None = None, ) -> tuple[NDArray[Any], tuple[NDArray[Any], ...]]: ... diff --git a/numpy/lib/_polynomial_impl.pyi b/numpy/lib/_polynomial_impl.pyi index 0cdab5f11b5c..9c02a7f867c5 100644 --- a/numpy/lib/_polynomial_impl.pyi +++ b/numpy/lib/_polynomial_impl.pyi @@ -138,7 +138,8 @@ def polyfit( rcond: float | None = None, full: L[False] = False, w: _ArrayLikeFloat_co | None = None, - cov: L[True, "unscaled"] = False, + *, + cov: L[True, "unscaled"], ) -> _2Tup[NDArray[float64]]: ... 
@overload def polyfit( @@ -148,7 +149,8 @@ def polyfit( rcond: float | None = None, full: L[False] = False, w: _ArrayLikeFloat_co | None = None, - cov: L[True, "unscaled"] = False, + *, + cov: L[True, "unscaled"], ) -> _2Tup[NDArray[complex128]]: ... @overload def polyfit( From 69e44403eb217df45c288efa3c941eb667ddd079 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 1 Oct 2025 03:57:42 +0200 Subject: [PATCH 0512/1018] MAINT,TYP: bump `mypy` to `1.18.2` (#29848) https://mypy.readthedocs.io/en/stable/changelog.html#mypy-1-18-2 --- environment.yml | 2 +- requirements/test_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index fc171a285a9d..6eeddb2311ea 100644 --- a/environment.yml +++ b/environment.yml @@ -25,7 +25,7 @@ dependencies: - hypothesis # For type annotations - typing_extensions>=4.5.0 - - mypy=1.18.1 + - mypy=1.18.2 - orjson # makes mypy faster # For building docs - sphinx>=4.5.0 diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 95396afaf5a0..101cb6251251 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -12,7 +12,7 @@ pytest-timeout # For testing types. Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # NOTE: Keep mypy in sync with environment.yml -mypy==1.18.1; platform_python_implementation != "PyPy" +mypy==1.18.2; platform_python_implementation != "PyPy" typing_extensions>=4.5.0 # for optional f2py encoding detection charset-normalizer From 8fecead7a7091ce5e757529f10565412e26cbb88 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 1 Oct 2025 04:12:29 +0200 Subject: [PATCH 0513/1018] CI: Use ``uv`` instead of ``pip`` in the mypy workflow (#29849) Because that's what the cool kids are doing nowadays --- .github/workflows/mypy.yml | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index e2dcdccf868b..c6b7c89b8f5c 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -55,17 +55,21 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0 with: python-version: ${{ matrix.os_python[1] }} + activate-environment: true + cache-dependency-glob: | + requirements/build_requirements.txt + requirements/test_requirements.txt - name: Install dependencies - run: | - pip install -r requirements/build_requirements.txt - # orjson makes mypy faster but the default requirements.txt - # can't install it because orjson doesn't support 32 bit Linux - pip install orjson - pip install -r requirements/test_requirements.txt + # orjson makes mypy faster but the default requirements.txt + # can't install it because orjson doesn't support 32 bit Linux + run: >- + uv pip install + -r requirements/build_requirements.txt + -r requirements/test_requirements.txt + orjson - name: Build run: | spin build -j2 -- -Dallow-noblas=true -Ddisable-optimization=true --vsenv From c111c3c06d0c7bb92aaf56319a8edc9448815424 Mon Sep 17 00:00:00 2001 From: Arthur Lacote Date: Wed, 1 Oct 2025 09:52:03 +0200 Subject: [PATCH 0514/1018] ENH: speedup numpy.quantile when weights are provided (#29837) In the implementation numpy.quantile(..., weights=w, method="inverted_cdf"), the argsort doesn't need to be stable. 
See some elements of proof here: scikit-learn/scikit-learn#32285. Because
it's just an optimization, I don't think this requires new tests. Speedup for
big arrays might be up to ~2x.

I wrote an initial proof in the equivalent PR for scikit-learn, but while
reading the numpy.quantile documentation carefully I found a new, simpler
formulation of the argument.
---
 numpy/lib/_function_base_impl.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py
index 421dee5578ab..81ba8ea6bc3b 100644
--- a/numpy/lib/_function_base_impl.py
+++ b/numpy/lib/_function_base_impl.py
@@ -4895,7 +4895,7 @@ def _quantile(
         weights = np.asanyarray(weights)
         if axis != 0:
             weights = np.moveaxis(weights, axis, destination=0)
-    index_array = np.argsort(arr, axis=0, kind="stable")
+    index_array = np.argsort(arr, axis=0)

     # arr = arr[index_array, ...]  # but this adds trailing dimensions of
     # 1.

From d7f9fa86b75ce50d3b608c5ae8adbfa7fe1cc0ea Mon Sep 17 00:00:00 2001
From: "Christine P. Chai"
Date: Wed, 1 Oct 2025 03:22:57 -0700
Subject: [PATCH 0515/1018] DOC: Add a few missing commas in math operations
 (#29852)

* DOC: Add a missing comma in numpy.cov

[skip actions][skip azp][skip cirrus]

* DOC: Add missing comma in laguerre.py

[skip actions][skip azp][skip cirrus]

* DOC: Also add missing commas in legendre.py

[skip actions][skip azp][skip cirrus]
---
 numpy/lib/_function_base_impl.py | 2 +-
 numpy/polynomial/laguerre.py     | 2 +-
 numpy/polynomial/legendre.py     | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py
index 81ba8ea6bc3b..b8d6926bca88 100644
--- a/numpy/lib/_function_base_impl.py
+++ b/numpy/lib/_function_base_impl.py
@@ -2715,7 +2715,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
     Estimate a covariance matrix, given data and weights.

     Covariance indicates the level to which two variables vary together.
-    If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
+    If we examine N-dimensional samples, :math:`X = [x_1, x_2, ..., x_N]^T`,
     then the covariance matrix element :math:`C_{ij}` is the covariance of
     :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
     of :math:`x_i`.
diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py
index 38eb5a80b200..8d5d5ae67632 100644
--- a/numpy/polynomial/laguerre.py
+++ b/numpy/polynomial/laguerre.py
@@ -1185,7 +1185,7 @@ def lagvander2d(x, y, deg):
     correspond to the elements of a 2-D coefficient array `c` of shape
     (xdeg + 1, ydeg + 1) in the order

-    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+    .. math:: c_{00}, c_{01}, c_{02}, ... , c_{10}, c_{11}, c_{12}, ...

     and ``np.dot(V, c.flat)`` and ``lagval2d(x, y, c)`` will be the same
     up to roundoff. This equivalence is useful both for least squares
diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py
index b43bdfa83034..5fdc5245b9d3 100644
--- a/numpy/polynomial/legendre.py
+++ b/numpy/polynomial/legendre.py
@@ -1164,7 +1164,7 @@ def legvander2d(x, y, deg):
     correspond to the elements of a 2-D coefficient array `c` of shape
     (xdeg + 1, ydeg + 1) in the order

-    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+    .. math:: c_{00}, c_{01}, c_{02}, ... , c_{10}, c_{11}, c_{12}, ...

     and ``np.dot(V, c.flat)`` and ``legval2d(x, y, c)`` will be the same up to
     roundoff. 
This equivalence is useful both for least squares

From e7276da07f6fc69cc2947f4dc67a87472e4b3a8b Mon Sep 17 00:00:00 2001
From: "hpkfft.com"
Date: Wed, 1 Oct 2025 03:31:27 -0700
Subject: [PATCH 0516/1018] DEP: Raise `TypeError` on attempt to convert array
 with `ndim > 0` to scalar (#29841)

This PR removes deprecated functionality, as requested in gh-29835. The
functionality has been deprecated for about 2.5 years (since NumPy 1.25).
---
 .../upcoming_changes/29841.expired.rst        |  6 +++
 numpy/_core/src/multiarray/common.c           | 18 --------
 numpy/_core/tests/test_deprecations.py        | 12 -----
 numpy/_core/tests/test_multiarray.py          | 45 ++++++++++---------
 4 files changed, 31 insertions(+), 50 deletions(-)
 create mode 100644 doc/release/upcoming_changes/29841.expired.rst

diff --git a/doc/release/upcoming_changes/29841.expired.rst b/doc/release/upcoming_changes/29841.expired.rst
new file mode 100644
index 000000000000..34977cec2f70
--- /dev/null
+++ b/doc/release/upcoming_changes/29841.expired.rst
@@ -0,0 +1,6 @@
+Raise `TypeError` on attempt to convert array with `ndim > 0` to scalar
+-----------------------------------------------------------------------
+Conversion of an array with `ndim > 0` to a scalar was deprecated in
+NumPy 1.25. Now, attempting to do so raises `TypeError`.
+Ensure you extract a single element from your array before performing
+this operation.
diff --git a/numpy/_core/src/multiarray/common.c b/numpy/_core/src/multiarray/common.c
index 4d1d9c238418..3bae4a28efba 100644
--- a/numpy/_core/src/multiarray/common.c
+++ b/numpy/_core/src/multiarray/common.c
@@ -449,24 +449,6 @@ check_is_convertible_to_scalar(PyArrayObject *v)
         return 0;
     }

-    /* Remove this if-else block when the deprecation expires */
-    if (PyArray_SIZE(v) == 1) {
-        /* Numpy 1.25.0, 2023-01-02 */
-        if (DEPRECATE(
-                "Conversion of an array with ndim > 0 to a scalar "
-                "is deprecated, and will error in future. "
-                "Ensure you extract a single element from your array "
-                "before performing this operation. 
" - "(Deprecated NumPy 1.25.)") < 0) { - return -1; - } - return 0; - } else { - PyErr_SetString(PyExc_TypeError, - "only length-1 arrays can be converted to Python scalars"); - return -1; - } - PyErr_SetString(PyExc_TypeError, "only 0-dimensional arrays can be converted to Python scalars"); return -1; diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 3c6ed420dbfa..4981f2fd0e30 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -241,18 +241,6 @@ def test_both_passed(self, func): func([0., 1.], 0., interpolation="nearest", method="nearest") -class TestScalarConversion(_DeprecationTestCase): - # 2023-01-02, 1.25.0 - def test_float_conversion(self): - self.assert_deprecated(float, args=(np.array([3.14]),)) - - def test_behaviour(self): - b = np.array([[3.14]]) - c = np.zeros(5) - with pytest.warns(DeprecationWarning): - c[0] = b - - class TestPyIntConversion(_DeprecationTestCase): message = r".*stop allowing conversion of out-of-bound.*" diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index c2fe0abb6137..347c92e48c73 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -3886,21 +3886,10 @@ def test__complex__(self): '?', 'O'] for dt in dtypes: a = np.array(7, dtype=dt) - b = np.array([7], dtype=dt) - c = np.array([[[[[7]]]]], dtype=dt) - msg = f'dtype: {dt}' ap = complex(a) assert_equal(ap, a, msg) - with pytest.warns(DeprecationWarning): - bp = complex(b) - assert_equal(bp, b, msg) - - with pytest.warns(DeprecationWarning): - cp = complex(c) - assert_equal(cp, c, msg) - def test__complex__should_not_work(self): dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8', @@ -3908,7 +3897,11 @@ def test__complex__should_not_work(self): '?', 'O'] for dt in dtypes: a = np.array([1, 2, 3], dtype=dt) + b = np.array([7], dtype=dt) + c = np.array([[[[[7]]]]], dtype=dt) assert_raises(TypeError, complex, a) + assert_raises(TypeError, complex, b) + assert_raises(TypeError, complex, c) dt = np.dtype([('a', 'f8'), ('b', 'i1')]) b = np.array((1.0, 3), dtype=dt) @@ -3921,8 +3914,7 @@ def test__complex__should_not_work(self): assert_raises(TypeError, complex, d) e = np.array(['1+1j'], 'U') - with pytest.warns(DeprecationWarning): - assert_raises(TypeError, complex, e) + assert_raises(TypeError, complex, e) class TestCequenceMethods: def test_array_contains(self): @@ -9244,10 +9236,8 @@ def test_to_int_scalar(self): int_funcs = (int, lambda x: x.__int__()) for int_func in int_funcs: assert_equal(int_func(np.array(0)), 0) - with pytest.warns(DeprecationWarning): - assert_equal(int_func(np.array([1])), 1) - with pytest.warns(DeprecationWarning): - assert_equal(int_func(np.array([[42]])), 42) + assert_raises(TypeError, int_func, np.array([1])) + assert_raises(TypeError, int_func, np.array([[42]])) assert_raises(TypeError, int_func, np.array([1, 2])) # gh-9972 @@ -9260,9 +9250,24 @@ def __int__(self): raise NotImplementedError assert_raises(NotImplementedError, int_func, np.array(NotConvertible())) - with pytest.warns(DeprecationWarning): - assert_raises(NotImplementedError, - int_func, np.array([NotConvertible()])) + assert_raises(TypeError, + int_func, np.array([NotConvertible()])) + + def test_to_float_scalar(self): + float_funcs = (float, lambda x: x.__float__()) + for float_func in float_funcs: + assert_equal(float_func(np.array(0)), 0.0) + assert_equal(float_func(np.array(1.0, np.float64)), 1.0) + assert_raises(TypeError, 
float_func, np.array([2]))
+        assert_raises(TypeError, float_func, np.array([3.14]))
+        assert_raises(TypeError, float_func, np.array([[4.0]]))
+
+        assert_equal(5.0, float_func(np.array('5')))
+        assert_equal(5.1, float_func(np.array('5.1')))
+        assert_equal(6.0, float_func(np.bytes_(b'6')))
+        assert_equal(6.1, float_func(np.bytes_(b'6.1')))
+        assert_equal(7.0, float_func(np.str_('7')))
+        assert_equal(7.1, float_func(np.str_('7.1')))


 class TestWhere:

From f1b4145e7283bb724b509736f28f9c42733b2ed3 Mon Sep 17 00:00:00 2001
From: Sayed Adel
Date: Wed, 1 Oct 2025 22:12:35 +0300
Subject: [PATCH 0517/1018] CI: Fix loongarch64 CI (#29856)

* CI: Fix loongarch64 CI
  by moving to the latest tag of tonistiigi/binfmt which comes with qemu-10
---
 .github/workflows/linux_qemu.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml
index e0767d81213e..c11eca8dc5ab 100644
--- a/.github/workflows/linux_qemu.yml
+++ b/.github/workflows/linux_qemu.yml
@@ -205,7 +205,7 @@ jobs:

       - name: Initialize binfmt_misc for qemu-user-static
         run: |
-          docker run --rm --privileged loongcr.lcpu.dev/multiarch/archlinux --reset -p yes
+          docker run --rm --privileged tonistiigi/binfmt:qemu-v10.0.4-56 --install all

       - name: Install GCC cross-compilers
         run: |

From 17d5deda759e3a3ce79d88966ce69fdb323f623b Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 1 Oct 2025 13:24:33 -0600
Subject: [PATCH 0518/1018] MAINT: Bump ossf/scorecard-action from 2.4.2 to
 2.4.3 (#29854)

Bumps [ossf/scorecard-action](https://github.com/ossf/scorecard-action) from 2.4.2 to 2.4.3.
- [Release notes](https://github.com/ossf/scorecard-action/releases)
- [Changelog](https://github.com/ossf/scorecard-action/blob/main/RELEASE.md)
- [Commits](https://github.com/ossf/scorecard-action/compare/05b42c624433fc40578a4040d5cf5e36ddca8cde...4eaacf0543bb3f2c246792bd56e8cdeffafb205a)

---
updated-dependencies:
- dependency-name: ossf/scorecard-action
  dependency-version: 2.4.3
  dependency-type: direct:production
  update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/scorecards.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 3fb782a66a8c..1855c3c2d54b 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -30,7 +30,7 @@ jobs: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2 + uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a # v2.4.3 with: results_file: results.sarif results_format: sarif From ffe55df43f77e4fee273957fa936f455a70c2d2a Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 2 Oct 2025 10:26:19 +0100 Subject: [PATCH 0519/1018] TYP: Remove ``None`` from definition of ``DTypeLike`` type alias (#29739) Co-authored-by: Joren Hammudoglu --- doc/release/upcoming_changes/29739.change.rst | 15 ++++++ numpy/__init__.pyi | 18 +++---- numpy/_core/_asarray.pyi | 4 +- numpy/_core/fromnumeric.pyi | 40 ++++++++-------- numpy/_core/multiarray.pyi | 10 ++-- numpy/_core/numeric.pyi | 10 ++-- numpy/_core/numerictypes.pyi | 2 +- numpy/_core/records.pyi | 30 ++++++------ numpy/_core/shape_base.pyi | 6 +-- numpy/_typing/_dtype_like.py | 2 +- numpy/_typing/_ufunc.pyi | 48 +++++++++---------- numpy/ctypeslib/_ctypeslib.pyi | 4 +- numpy/lib/_function_base_impl.pyi | 4 +- numpy/lib/_npyio_impl.pyi | 6 +-- numpy/lib/_twodim_base_impl.pyi | 4 +- numpy/matlib.pyi | 2 +- numpy/matrixlib/defmatrix.pyi | 2 +- numpy/random/_generator.pyi | 4 +- numpy/typing/tests/data/pass/arithmetic.py | 2 +- 19 files changed, 114 insertions(+), 99 deletions(-) create mode 100644 doc/release/upcoming_changes/29739.change.rst diff --git a/doc/release/upcoming_changes/29739.change.rst b/doc/release/upcoming_changes/29739.change.rst new file mode 100644 index 000000000000..5d1316a1ba41 --- /dev/null +++ b/doc/release/upcoming_changes/29739.change.rst @@ -0,0 +1,15 @@ +``numpy.typing.DTypeLike`` no longer accepts ``None`` +----------------------------------------------------- +The type alias ``numpy.typing.DTypeLike`` no longer accepts ``None``. Instead of + +.. code-block:: python + + dtype: DTypeLike = None + +it should now be + +.. code-block:: python + + dtype: DTypeLike | None = None + +instead. diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 884e0bcbd671..d29bb196a070 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1553,10 +1553,10 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __rmul__(self, value: SupportsIndex, /) -> dtype: ... - def __gt__(self, other: DTypeLike, /) -> builtins.bool: ... - def __ge__(self, other: DTypeLike, /) -> builtins.bool: ... - def __lt__(self, other: DTypeLike, /) -> builtins.bool: ... - def __le__(self, other: DTypeLike, /) -> builtins.bool: ... + def __gt__(self, other: DTypeLike | None, /) -> builtins.bool: ... + def __ge__(self, other: DTypeLike | None, /) -> builtins.bool: ... + def __lt__(self, other: DTypeLike | None, /) -> builtins.bool: ... + def __le__(self, other: DTypeLike | None, /) -> builtins.bool: ... 
# Explicitly defined `__eq__` and `__ne__` to get around mypy's # `strict_equality` option; even though their signatures are @@ -2054,7 +2054,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __new__( cls, shape: _ShapeLike, - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., buffer: _SupportsBuffer | None = ..., offset: SupportsIndex = ..., strides: _ShapeLike | None = ..., @@ -2558,7 +2558,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def astype( self, - dtype: DTypeLike, + dtype: DTypeLike | None, order: _OrderKACF = ..., casting: _CastingKind = ..., subok: builtins.bool = ..., @@ -3618,7 +3618,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @overload def astype( self, - dtype: DTypeLike, + dtype: DTypeLike | None, order: _OrderKACF = ..., casting: _CastingKind = ..., subok: builtins.bool = ..., @@ -5799,7 +5799,7 @@ class nditer: op: ArrayLike | Sequence[ArrayLike | None], flags: Sequence[_NDIterFlagsKind] | None = ..., op_flags: Sequence[Sequence[_NDIterFlagsOp]] | None = ..., - op_dtypes: DTypeLike | Sequence[DTypeLike] = ..., + op_dtypes: DTypeLike | Sequence[DTypeLike | None] | None = ..., order: _OrderKACF = ..., casting: _CastingKind = ..., op_axes: Sequence[Sequence[SupportsIndex]] | None = ..., @@ -6009,7 +6009,7 @@ class matrix(ndarray[_2DShapeT_co, _DTypeT_co]): def __new__( subtype, # pyright: ignore[reportSelfClsParameterName] data: ArrayLike, - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., copy: builtins.bool = ..., ) -> matrix[_2D, Incomplete]: ... def __array_finalize__(self, obj: object) -> None: ... diff --git a/numpy/_core/_asarray.pyi b/numpy/_core/_asarray.pyi index da5884d49aba..349933323b95 100644 --- a/numpy/_core/_asarray.pyi +++ b/numpy/_core/_asarray.pyi @@ -26,7 +26,7 @@ def require( @overload def require( a: object, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, requirements: _E | Iterable[_RequirementsWithE] | None = None, *, like: _SupportsArrayFunc | None = None @@ -34,7 +34,7 @@ def require( @overload def require( a: object, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, requirements: _Requirements | Iterable[_Requirements] | None = None, *, like: _SupportsArrayFunc | None = None diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 05816088f041..dc8a040e30e8 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -792,7 +792,7 @@ def sum( def sum( a: ArrayLike, axis: _ShapeLike | None = None, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, out: None = None, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., @@ -802,7 +802,7 @@ def sum( def sum( a: ArrayLike, axis: _ShapeLike | None, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., @@ -812,7 +812,7 @@ def sum( def sum( a: ArrayLike, axis: _ShapeLike | None = None, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, *, out: _ArrayT, keepdims: bool | _NoValueType = ..., @@ -930,21 +930,21 @@ def cumsum( def cumsum( a: ArrayLike, axis: SupportsIndex | None = None, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, out: None = None, ) -> NDArray[Any]: ... @overload def cumsum( a: ArrayLike, axis: SupportsIndex | None, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, ) -> _ArrayT: ... 
@overload def cumsum( a: ArrayLike, axis: SupportsIndex | None = None, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, *, out: _ArrayT, ) -> _ArrayT: ... @@ -985,7 +985,7 @@ def cumulative_sum( /, *, axis: SupportsIndex | None = None, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, out: None = None, include_initial: bool = False, ) -> NDArray[Any]: ... @@ -995,7 +995,7 @@ def cumulative_sum( /, *, axis: SupportsIndex | None = None, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, out: _ArrayT, include_initial: bool = False, ) -> _ArrayT: ... @@ -1288,21 +1288,21 @@ def cumprod( def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: SupportsIndex | None = None, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, out: None = None, ) -> NDArray[Any]: ... @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: SupportsIndex | None, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, ) -> _ArrayT: ... @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: SupportsIndex | None = None, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, *, out: _ArrayT, ) -> _ArrayT: ... @@ -1384,7 +1384,7 @@ def cumulative_prod( /, *, axis: SupportsIndex | None = None, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, out: None = None, include_initial: bool = False, ) -> NDArray[Any]: ... @@ -1394,7 +1394,7 @@ def cumulative_prod( /, *, axis: SupportsIndex | None = None, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, out: _ArrayT, include_initial: bool = False, ) -> _ArrayT: ... @@ -1489,7 +1489,7 @@ def mean( def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, keepdims: bool | _NoValueType = ..., *, @@ -1622,7 +1622,7 @@ def std( def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None = None, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, out: None = None, ddof: float = 0, keepdims: bool | _NoValueType = ..., @@ -1635,7 +1635,7 @@ def std( def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., @@ -1648,7 +1648,7 @@ def std( def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None = None, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0, @@ -1714,7 +1714,7 @@ def var( def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None = None, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, out: None = None, ddof: float = 0, keepdims: bool | _NoValueType = ..., @@ -1727,7 +1727,7 @@ def var( def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., @@ -1740,7 +1740,7 @@ def var( def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None = None, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0, diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 5a3fa46bee20..8ab52b6fe74d 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -559,7 +559,7 @@ def concatenate( axis: SupportsIndex | None = ..., *, out: _ArrayT, - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., casting: 
_CastingKind | None = ... ) -> _ArrayT: ... @overload @@ -569,7 +569,7 @@ def concatenate( axis: SupportsIndex | None, out: _ArrayT, *, - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., casting: _CastingKind | None = ... ) -> _ArrayT: ... @@ -605,7 +605,7 @@ def can_cast( def min_scalar_type(a: ArrayLike, /) -> dtype: ... -def result_type(*arrays_and_dtypes: ArrayLike | DTypeLike) -> dtype: ... +def result_type(*arrays_and_dtypes: ArrayLike | DTypeLike | None) -> dtype: ... @overload def dot(a: ArrayLike, b: ArrayLike, out: None = ...) -> Any: ... @@ -941,7 +941,7 @@ def fromiter( @overload def fromiter( iter: Iterable[Any], - dtype: DTypeLike, + dtype: DTypeLike | None, count: SupportsIndex = ..., *, like: _SupportsArrayFunc | None = ..., @@ -1328,7 +1328,7 @@ def nested_iters( axes: Sequence[Sequence[SupportsIndex]], flags: Sequence[_NDIterFlagsKind] | None = ..., op_flags: Sequence[Sequence[_NDIterFlagsOp]] | None = ..., - op_dtypes: DTypeLike | Sequence[DTypeLike] = ..., + op_dtypes: DTypeLike | Sequence[DTypeLike | None] | None = ..., order: _OrderKACF = ..., casting: _CastingKind = ..., buffersize: SupportsIndex = ..., diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index d08eb540743d..f9ce1ce3a4a3 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -1221,19 +1221,19 @@ def indices( @overload def indices( dimensions: Sequence[int], - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., sparse: L[False] = False, ) -> NDArray[Any]: ... @overload def indices( dimensions: Sequence[int], - dtype: DTypeLike, + dtype: DTypeLike | None, sparse: L[True], ) -> tuple[NDArray[Any], ...]: ... @overload def indices( dimensions: Sequence[int], - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., *, sparse: L[True], ) -> tuple[NDArray[Any], ...]: ... @@ -1242,7 +1242,7 @@ def fromfunction( function: Callable[..., _T], shape: Sequence[int], *, - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., like: _SupportsArrayFunc | None = None, **kwargs: Any, ) -> _T: ... @@ -1320,7 +1320,7 @@ def astype( @overload def astype( x: ndarray[_ShapeT, dtype], - dtype: DTypeLike, + dtype: DTypeLike | None, /, *, copy: py_bool = True, diff --git a/numpy/_core/numerictypes.pyi b/numpy/_core/numerictypes.pyi index 0a6ac988163d..beec14079b48 100644 --- a/numpy/_core/numerictypes.pyi +++ b/numpy/_core/numerictypes.pyi @@ -155,7 +155,7 @@ class _TypeCodes(TypedDict): All: L["?bhilqnpBHILQNPefdgFDGSUVOMm"] def isdtype(dtype: dtype | type, kind: DTypeLike | tuple[DTypeLike, ...]) -> py_bool: ... -def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> py_bool: ... +def issubdtype(arg1: DTypeLike | None, arg2: DTypeLike | None) -> py_bool: ... typecodes: Final[_TypeCodes] = ... 
ScalarType: Final[ diff --git a/numpy/_core/records.pyi b/numpy/_core/records.pyi index ead165918478..200d5310f955 100644 --- a/numpy/_core/records.pyi +++ b/numpy/_core/records.pyi @@ -77,7 +77,7 @@ class recarray(np.ndarray[_ShapeT_co, _DTypeT_co]): offset: SupportsIndex = 0, strides: _ShapeLike | None = None, *, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, byteorder: _ByteOrder | None = None, @@ -88,7 +88,7 @@ class recarray(np.ndarray[_ShapeT_co, _DTypeT_co]): def __new__( subtype, shape: _ShapeLike, - dtype: DTypeLike, + dtype: DTypeLike | None, buf: _SupportsBuffer | None = None, offset: SupportsIndex = 0, strides: _ShapeLike | None = None, @@ -115,7 +115,7 @@ class format_parser: def __init__( self, /, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None, titles: str | Sequence[str] | None, aligned: bool = False, @@ -140,7 +140,7 @@ def fromarrays( dtype: None = None, shape: _ShapeLike | None = None, *, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, aligned: bool = False, @@ -164,7 +164,7 @@ def fromrecords( dtype: None = None, shape: _ShapeLike | None = None, *, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, aligned: bool = False, @@ -175,7 +175,7 @@ def fromrecords( @overload def fromstring( datastring: _SupportsBuffer, - dtype: DTypeLike, + dtype: DTypeLike | None, shape: _ShapeLike | None = None, offset: int = 0, formats: None = None, @@ -191,7 +191,7 @@ def fromstring( shape: _ShapeLike | None = None, offset: int = 0, *, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, aligned: bool = False, @@ -202,7 +202,7 @@ def fromstring( @overload def fromfile( fd: StrOrBytesPath | _SupportsReadInto, - dtype: DTypeLike, + dtype: DTypeLike | None, shape: _ShapeLike | None = None, offset: int = 0, formats: None = None, @@ -218,7 +218,7 @@ def fromfile( shape: _ShapeLike | None = None, offset: int = 0, *, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, aligned: bool = False, @@ -243,7 +243,7 @@ def array( @overload def array( obj: ArrayLike, - dtype: DTypeLike, + dtype: DTypeLike | None, shape: _ShapeLike | None = None, offset: int = 0, strides: tuple[int, ...] | None = None, @@ -262,7 +262,7 @@ def array( offset: int = 0, strides: tuple[int, ...] | None = None, *, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, aligned: bool = False, @@ -272,7 +272,7 @@ def array( @overload def array( obj: None, - dtype: DTypeLike, + dtype: DTypeLike | None, shape: _ShapeLike, offset: int = 0, strides: tuple[int, ...] | None = None, @@ -291,7 +291,7 @@ def array( shape: _ShapeLike, offset: int = 0, strides: tuple[int, ...] | None = None, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, aligned: bool = False, @@ -301,7 +301,7 @@ def array( @overload def array( obj: _SupportsReadInto, - dtype: DTypeLike, + dtype: DTypeLike | None, shape: _ShapeLike | None = None, offset: int = 0, strides: tuple[int, ...] 
| None = None, @@ -320,7 +320,7 @@ def array( offset: int = 0, strides: tuple[int, ...] | None = None, *, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, aligned: bool = False, diff --git a/numpy/_core/shape_base.pyi b/numpy/_core/shape_base.pyi index 9a5f8c3b9d60..b4e8b473ed71 100644 --- a/numpy/_core/shape_base.pyi +++ b/numpy/_core/shape_base.pyi @@ -82,7 +82,7 @@ def vstack( def vstack( tup: Sequence[ArrayLike], *, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, casting: _CastingKind = "same_kind" ) -> NDArray[Any]: ... @@ -104,7 +104,7 @@ def hstack( def hstack( tup: Sequence[ArrayLike], *, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, casting: _CastingKind = "same_kind" ) -> NDArray[Any]: ... @@ -132,7 +132,7 @@ def stack( axis: SupportsIndex = 0, out: None = None, *, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, casting: _CastingKind = "same_kind" ) -> NDArray[Any]: ... @overload diff --git a/numpy/_typing/_dtype_like.py b/numpy/_typing/_dtype_like.py index 526fb86dd322..b44dd06e0d7f 100644 --- a/numpy/_typing/_dtype_like.py +++ b/numpy/_typing/_dtype_like.py @@ -97,7 +97,7 @@ def dtype(self) -> _DTypeT_co: ... # Anything that can be coerced into numpy.dtype. # Reference: https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html -DTypeLike: TypeAlias = _DTypeLike[Any] | _VoidDTypeLike | str | None +DTypeLike: TypeAlias = _DTypeLike[Any] | _VoidDTypeLike | str # NOTE: while it is possible to provide the dtype as a dict of # dtype-like objects (e.g. `{'field1': ..., 'field2': ..., ...}`), diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi index 790149d9c7fb..3f58801004d1 100644 --- a/numpy/_typing/_ufunc.pyi +++ b/numpy/_typing/_ufunc.pyi @@ -110,7 +110,7 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _2Tuple[str | None] = ..., ) -> Any: ... @@ -124,7 +124,7 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _2Tuple[str | None] = ..., ) -> NDArray[Any]: ... @@ -138,7 +138,7 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _2Tuple[str | None] = ..., ) -> Any: ... @@ -242,7 +242,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i self, array: ArrayLike, axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., out: NDArray[Any] | EllipsisType | None = ..., keepdims: bool = ..., initial: Any = ..., @@ -253,7 +253,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i self, array: ArrayLike, axis: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., out: NDArray[Any] | EllipsisType | None = ..., ) -> NDArray[Any]: ... 
@@ -262,7 +262,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i array: ArrayLike, indices: _ArrayLikeInt_co, axis: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., out: NDArray[Any] | EllipsisType | None = ..., ) -> NDArray[Any]: ... @@ -353,7 +353,7 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _3Tuple[str | None] = ..., ) -> _2Tuple[Any]: ... @@ -369,7 +369,7 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _3Tuple[str | None] = ..., ) -> _2Tuple[NDArray[Any]]: ... @@ -385,7 +385,7 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _3Tuple[str | None] = ..., ) -> _2Tuple[Any]: ... @@ -428,7 +428,7 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _4Tuple[str | None] = ..., ) -> _2Tuple[Any]: ... @@ -445,7 +445,7 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _4Tuple[str | None] = ..., ) -> _2Tuple[NDArray[Any]]: ... 
@@ -486,7 +486,7 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] *, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _3Tuple[str | None] = ..., axes: list[_2Tuple[SupportsIndex]] = ..., @@ -501,7 +501,7 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] *, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _3Tuple[str | None] = ..., axes: list[_2Tuple[SupportsIndex]] = ..., @@ -672,7 +672,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno /, array: ArrayLike, axis: _ShapeLike | None, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, keepdims: bool = ..., initial: _ScalarLike_co = ..., @@ -684,7 +684,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno /, array: ArrayLike, axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., *, out: _ArrayT | tuple[_ArrayT], keepdims: bool = ..., @@ -697,7 +697,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno /, array: ArrayLike, axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., out: EllipsisType | None = ..., *, keepdims: Literal[True], @@ -710,7 +710,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno /, array: ArrayLike, axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., out: EllipsisType | None = ..., keepdims: bool = ..., initial: _ScalarLike_co = ..., @@ -724,7 +724,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno array: ArrayLike, indices: _ArrayLikeInt_co, axis: SupportsIndex, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, ) -> _ArrayT: ... @overload @@ -734,7 +734,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno array: ArrayLike, indices: _ArrayLikeInt_co, axis: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., *, out: _ArrayT | tuple[_ArrayT], ) -> _ArrayT: ... @@ -745,7 +745,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno array: ArrayLike, indices: _ArrayLikeInt_co, axis: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., out: EllipsisType | None = ..., ) -> NDArray[np.object_]: ... @overload @@ -755,7 +755,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno array: _SupportsArrayUFunc, indices: _ArrayLikeInt_co, axis: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., ) -> Any: ... @@ -765,7 +765,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno /, array: ArrayLike, axis: SupportsIndex, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, ) -> _ArrayT: ... @overload @@ -774,7 +774,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno /, array: ArrayLike, axis: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., *, out: _ArrayT | tuple[_ArrayT], ) -> _ArrayT: ... 
@@ -784,7 +784,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno /, array: ArrayLike, axis: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., out: EllipsisType | None = ..., ) -> NDArray[np.object_]: ... diff --git a/numpy/ctypeslib/_ctypeslib.pyi b/numpy/ctypeslib/_ctypeslib.pyi index ebe9dbf91f04..4ab670037e2b 100644 --- a/numpy/ctypeslib/_ctypeslib.pyi +++ b/numpy/ctypeslib/_ctypeslib.pyi @@ -111,7 +111,7 @@ def ndpointer( ) -> type[_concrete_ndptr[dtype[_ScalarT]]]: ... @overload def ndpointer( - dtype: DTypeLike, + dtype: DTypeLike | None, ndim: int | None = None, *, shape: _ShapeLike, @@ -126,7 +126,7 @@ def ndpointer( ) -> type[_ndptr[dtype[_ScalarT]]]: ... @overload def ndpointer( - dtype: DTypeLike, + dtype: DTypeLike | None, ndim: int | None = None, shape: None = None, flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index bee5da4c58ae..8e58cf342ae0 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -219,7 +219,7 @@ def asarray_chkfinite( @overload def asarray_chkfinite( a: Any, - dtype: DTypeLike, + dtype: DTypeLike | None, order: _OrderKACF = None, ) -> NDArray[Any]: ... @@ -472,7 +472,7 @@ def cov( fweights: ArrayLike | None = None, aweights: ArrayLike | None = None, *, - dtype: DTypeLike, + dtype: DTypeLike | None, ) -> NDArray[Any]: ... # NOTE `bias` and `ddof` are deprecated and ignored diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index 18fce91382ab..cdef1003cff5 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -159,7 +159,7 @@ def loadtxt( @overload def loadtxt( fname: _FName, - dtype: DTypeLike, + dtype: DTypeLike | None, comments: str | Sequence[str] | None = "#", delimiter: str | None = None, converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None, @@ -197,7 +197,7 @@ def fromregex( def fromregex( file: _FNameRead, regexp: str | bytes | Pattern[Any], - dtype: DTypeLike, + dtype: DTypeLike | None, encoding: str | None = None, ) -> NDArray[Any]: ... @@ -262,7 +262,7 @@ def genfromtxt( @overload def genfromtxt( fname: _FName, - dtype: DTypeLike, + dtype: DTypeLike | None, comments: str = "#", delimiter: str | int | Iterable[int] | None = None, skip_header: int = 0, diff --git a/numpy/lib/_twodim_base_impl.pyi b/numpy/lib/_twodim_base_impl.pyi index 2c4de938444f..fba688b26206 100644 --- a/numpy/lib/_twodim_base_impl.pyi +++ b/numpy/lib/_twodim_base_impl.pyi @@ -119,7 +119,7 @@ def eye( N: int, M: int | None = None, k: int = 0, - dtype: DTypeLike = ..., # = float + dtype: DTypeLike | None = ..., # = float order: _OrderCF = "C", *, device: L["cpu"] | None = None, @@ -168,7 +168,7 @@ def tri( N: int, M: int | None = None, k: int = 0, - dtype: DTypeLike = ..., # = float + dtype: DTypeLike | None = ..., # = float *, like: _SupportsArrayFunc | None = None ) -> NDArray[Any]: ... diff --git a/numpy/matlib.pyi b/numpy/matlib.pyi index 93bee1975df3..552e518fe9b9 100644 --- a/numpy/matlib.pyi +++ b/numpy/matlib.pyi @@ -559,7 +559,7 @@ def eye(n: int, M: int | None, k: int, dtype: _DTypeLike[_ScalarT], order: _Orde @overload def eye(n: int, M: int | None = None, k: int = 0, *, dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... @overload -def eye(n: int, M: int | None = None, k: int = 0, dtype: npt.DTypeLike = ..., order: _Order = "C") -> _Matrix[Any]: ... 
+def eye(n: int, M: int | None = None, k: int = 0, dtype: npt.DTypeLike | None = ..., order: _Order = "C") -> _Matrix[Any]: ... # @overload diff --git a/numpy/matrixlib/defmatrix.pyi b/numpy/matrixlib/defmatrix.pyi index 05e4c77303c0..462e8b209d2c 100644 --- a/numpy/matrixlib/defmatrix.pyi +++ b/numpy/matrixlib/defmatrix.pyi @@ -13,5 +13,5 @@ def bmat( ) -> matrix[tuple[int, int], Any]: ... def asmatrix( - data: ArrayLike, dtype: DTypeLike = None + data: ArrayLike, dtype: DTypeLike | None = None ) -> matrix[tuple[int, int], Any]: ... diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index dc78a76eda70..b090663a104f 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -462,7 +462,7 @@ class Generator: low: int, high: int | None = None, size: None = None, - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., endpoint: bool = False, ) -> Any: ... @overload @@ -471,7 +471,7 @@ class Generator: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., endpoint: bool = False, ) -> NDArray[Any]: ... diff --git a/numpy/typing/tests/data/pass/arithmetic.py b/numpy/typing/tests/data/pass/arithmetic.py index 62c978a2fc51..e347ec096e21 100644 --- a/numpy/typing/tests/data/pass/arithmetic.py +++ b/numpy/typing/tests/data/pass/arithmetic.py @@ -29,7 +29,7 @@ class Object: - def __array__(self, dtype: np.typing.DTypeLike = None, + def __array__(self, dtype: np.typing.DTypeLike | None = None, copy: bool | None = None) -> np.ndarray[Any, np.dtype[np.object_]]: ret = np.empty((), dtype=object) ret[()] = self From aed883c3892376b8277855d86153c3530897595b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 2 Oct 2025 17:06:34 +0000 Subject: [PATCH 0520/1018] MAINT: Bump github/codeql-action from 3.30.5 to 3.30.6 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.30.5 to 3.30.6. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/3599b3baa15b485a2e49ef411a7a4bb2452e7f93...64d10c13136e1c5bce3e5fbde8d4906eeaafc885) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.30.6 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 9f015242f94b..0b055370119d 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3.30.5 + uses: github/codeql-action/init@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3.30.6 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
# If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3.30.5 + uses: github/codeql-action/autobuild@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3.30.6 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3.30.5 + uses: github/codeql-action/analyze@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3.30.6 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 1855c3c2d54b..3ea1074c5a9d 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v2.1.27 + uses: github/codeql-action/upload-sarif@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v2.1.27 with: sarif_file: results.sarif From 9a35bbd0274f48bf9aac2f806110734037717a11 Mon Sep 17 00:00:00 2001 From: Britney Whittington Date: Tue, 30 Sep 2025 20:58:15 -0400 Subject: [PATCH 0521/1018] TST: Remove parametrize data races --- numpy/_core/tests/test_nditer.py | 5 +++-- numpy/_core/tests/test_ufunc.py | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index f0e10333808c..6396a19fe97f 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -2895,7 +2895,7 @@ def _is_buffered(iterator): return True return False -@pytest.mark.parametrize("a", +@pytest.mark.parametrize("arrs", [np.zeros((3,), dtype='f8'), np.zeros((9876, 3 * 5), dtype='f8')[::2, :], np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, :], @@ -2904,10 +2904,11 @@ def _is_buffered(iterator): np.zeros((9,), dtype='f8')[::3], np.zeros((9876, 3 * 10), dtype='f8')[::2, ::5], np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, ::-1]]) -def test_iter_writemasked(a): +def test_iter_writemasked(arrs): # Note, the slicing above is to ensure that nditer cannot combine multiple # axes into one. The repetition is just to make things a bit more # interesting. + a = arrs.copy() shape = a.shape reps = shape[-1] // 3 msk = np.empty(shape, dtype=bool) diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 976f9fe2f615..d5f85d4b5c3b 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -1768,10 +1768,11 @@ def identityless_reduce_arrs(): a = a[1:, 1:, 1:] yield a - @pytest.mark.parametrize("a", identityless_reduce_arrs()) + @pytest.mark.parametrize("arrs", identityless_reduce_arrs()) @pytest.mark.parametrize("pos", [(1, 0, 0), (0, 1, 0), (0, 0, 1)]) - def test_identityless_reduction(self, a, pos): + def test_identityless_reduction(self, arrs, pos): # np.minimum.reduce is an identityless reduction + a = arrs.copy() a[...] 
= 1 a[pos] = 0 From f6402e9cf270bf79ad1dc1ba9585c81ff0df3fa3 Mon Sep 17 00:00:00 2001 From: Britney Whittington Date: Thu, 2 Oct 2025 14:26:00 -0400 Subject: [PATCH 0522/1018] TST: Fix roundtrip thread safety --- numpy/lib/tests/test_io.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index b8c8e6cfcac1..b4b0db36ae2a 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -125,8 +125,6 @@ def roundtrip(self, save_func, *args, **kwargs): arr_reloaded = np.load(load_file, **load_kwds) - self.arr = arr - self.arr_reloaded = arr_reloaded finally: if not isinstance(target_file, BytesIO): target_file.close() @@ -135,6 +133,8 @@ def roundtrip(self, save_func, *args, **kwargs): if not isinstance(arr_reloaded, np.lib.npyio.NpzFile): os.remove(target_file.name) + return arr, arr_reloaded + def check_roundtrips(self, a): self.roundtrip(a) self.roundtrip(a, file_on_disk=True) @@ -195,26 +195,26 @@ def test_format_2_0(self): class TestSaveLoad(RoundtripTest): def roundtrip(self, *args, **kwargs): - RoundtripTest.roundtrip(self, np.save, *args, **kwargs) - assert_equal(self.arr[0], self.arr_reloaded) - assert_equal(self.arr[0].dtype, self.arr_reloaded.dtype) - assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc) + arr, arr_reloaded = RoundtripTest.roundtrip(self, np.save, *args, **kwargs) + assert_equal(arr[0], arr_reloaded) + assert_equal(arr[0].dtype, arr_reloaded.dtype) + assert_equal(arr[0].flags.fnc, arr_reloaded.flags.fnc) class TestSavezLoad(RoundtripTest): def roundtrip(self, *args, **kwargs): - RoundtripTest.roundtrip(self, np.savez, *args, **kwargs) + arr, arr_reloaded = RoundtripTest.roundtrip(self, np.savez, *args, **kwargs) try: - for n, arr in enumerate(self.arr): - reloaded = self.arr_reloaded['arr_%d' % n] - assert_equal(arr, reloaded) - assert_equal(arr.dtype, reloaded.dtype) - assert_equal(arr.flags.fnc, reloaded.flags.fnc) + for n, a in enumerate(arr): + reloaded = arr_reloaded['arr_%d' % n] + assert_equal(a, reloaded) + assert_equal(a.dtype, reloaded.dtype) + assert_equal(a.flags.fnc, reloaded.flags.fnc) finally: # delete tempfile, must be done here on windows - if self.arr_reloaded.fid: - self.arr_reloaded.fid.close() - os.remove(self.arr_reloaded.fid.name) + if arr_reloaded.fid: + arr_reloaded.fid.close() + os.remove(arr_reloaded.fid.name) def test_load_non_npy(self): """Test loading non-.npy files and name mapping in .npz.""" From 420967e2b7db85a7849b9fa1822b526a6bf279b0 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Fri, 3 Oct 2025 12:02:47 +0100 Subject: [PATCH 0523/1018] MAINT: Add Cython linter to spin (#29861) Add Cython linter and fix e.g. unused variables. 
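As a rough illustration (not part of the patch), the new lint step can be
driven the same way as the run_cython_lint() helper this patch adds to
tools/linter.py below; the command and flags are taken verbatim from that
diff, and cython-lint is assumed to be installed:

    import subprocess

    # Run cython-lint over the numpy tree, skipping pycodestyle checks,
    # exactly as tools/linter.py does after this patch.
    res = subprocess.run(
        ["cython-lint", "--no-pycodestyle", "numpy"],
        stdout=subprocess.PIPE,
        encoding="utf-8",
    )
    print(res.returncode, res.stdout)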
--- environment.yml | 1 + numpy/_core/tests/examples/cython/checks.pyx | 4 ++-- numpy/random/_common.pyx | 10 +++------- .../cython/extending_distributions.pyx | 1 - numpy/random/_generator.pyx | 11 ++--------- numpy/random/_philox.pyx | 2 -- numpy/random/bit_generator.pyx | 1 - numpy/random/mtrand.pyx | 7 +------ requirements/linter_requirements.txt | 1 + tools/linter.py | 19 +++++++++++++++++++ 10 files changed, 29 insertions(+), 28 deletions(-) diff --git a/environment.yml b/environment.yml index 6eeddb2311ea..3bf6fcbc4319 100644 --- a/environment.yml +++ b/environment.yml @@ -45,6 +45,7 @@ dependencies: # NOTE: breathe 4.33.0 collides with sphinx.ext.graphviz - breathe>4.33.0 # For linting + - cython-lint - ruff=0.12.0 - gitpython # Used in some tests diff --git a/numpy/_core/tests/examples/cython/checks.pyx b/numpy/_core/tests/examples/cython/checks.pyx index 57df05c1e3b5..f0f427d2167f 100644 --- a/numpy/_core/tests/examples/cython/checks.pyx +++ b/numpy/_core/tests/examples/cython/checks.pyx @@ -244,9 +244,9 @@ def npyiter_has_multi_index(it: "nditer"): def test_get_multi_index_iter_next(it: "nditer", cnp.ndarray[cnp.float64_t, ndim=2] arr): cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it) - cdef cnp.NpyIter_GetMultiIndexFunc get_multi_index = \ + cdef cnp.NpyIter_GetMultiIndexFunc _get_multi_index = \ cnp.NpyIter_GetGetMultiIndex(cit, NULL) - cdef cnp.NpyIter_IterNextFunc iternext = \ + cdef cnp.NpyIter_IterNextFunc _iternext = \ cnp.NpyIter_GetIterNext(cit, NULL) return 1 diff --git a/numpy/random/_common.pyx b/numpy/random/_common.pyx index f8420b3951cc..1fc2f7a02e11 100644 --- a/numpy/random/_common.pyx +++ b/numpy/random/_common.pyx @@ -240,7 +240,6 @@ cdef np.ndarray int_to_array(object value, object name, object bits, object uint cdef validate_output_shape(iter_shape, np.ndarray output): cdef np.npy_intp *dims cdef np.npy_intp ndim, i - cdef bint error dims = np.PyArray_DIMS(output) ndim = np.PyArray_NDIM(output) output_shape = tuple((dims[i] for i in range(ndim))) @@ -296,7 +295,7 @@ cdef object double_fill(void *func, bitgen_t *state, object size, object lock, o cdef double out_val cdef double *out_array_data cdef np.ndarray out_array - cdef np.npy_intp i, n + cdef np.npy_intp n if size is None and out is None: with lock: @@ -320,7 +319,7 @@ cdef object float_fill(void *func, bitgen_t *state, object size, object lock, ob cdef float out_val cdef float *out_array_data cdef np.ndarray out_array - cdef np.npy_intp i, n + cdef np.npy_intp n if size is None and out is None: with lock: @@ -427,7 +426,6 @@ cdef int check_array_constraint(np.ndarray val, object name, constraint_type con cdef int check_constraint(double val, object name, constraint_type cons) except -1: - cdef bint is_nan if cons == CONS_NON_NEGATIVE: if not isnan(val) and signbit(val): raise ValueError(f"{name} < 0") @@ -760,7 +758,6 @@ cdef object discrete_broadcast_di(void *func, void *state, object size, object l np.ndarray a_arr, object a_name, constraint_type a_constraint, np.ndarray b_arr, object b_name, constraint_type b_constraint): cdef np.ndarray randoms - cdef int64_t *randoms_data cdef np.broadcast it cdef random_uint_di f = (func) cdef np.npy_intp i, n @@ -777,7 +774,6 @@ cdef object discrete_broadcast_di(void *func, void *state, object size, object l it = np.PyArray_MultiIterNew2(a_arr, b_arr) randoms = np.empty(it.shape, np.int64) - randoms_data = np.PyArray_DATA(randoms) n = np.PyArray_SIZE(randoms) it = np.PyArray_MultiIterNew3(randoms, a_arr, b_arr) @@ -1047,7 +1043,7 @@ cdef object cont_f(void 
*func, bitgen_t *state, object size, object lock, object a, object a_name, constraint_type a_constraint, object out): - cdef np.ndarray a_arr, b_arr, c_arr + cdef np.ndarray a_arr cdef float _a cdef bint is_scalar = True cdef int requirements = np.NPY_ARRAY_ALIGNED | np.NPY_ARRAY_FORCECAST diff --git a/numpy/random/_examples/cython/extending_distributions.pyx b/numpy/random/_examples/cython/extending_distributions.pyx index e1d1ea6c820b..8de722686304 100644 --- a/numpy/random/_examples/cython/extending_distributions.pyx +++ b/numpy/random/_examples/cython/extending_distributions.pyx @@ -92,7 +92,6 @@ def uniforms_ex(bit_generator, Py_ssize_t n, dtype=np.float64): Desired dtype, either 'd' (or 'float64') or 'f' (or 'float32'). The default dtype value is 'd' """ - cdef Py_ssize_t i cdef bitgen_t *rng cdef const char *capsule_name = "BitGenerator" cdef np.ndarray randoms diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index 0fabf98147c3..d9c2730de8ca 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -16,13 +16,11 @@ from numpy.lib.array_utils import normalize_axis_index from .c_distributions cimport * from libc cimport string from libc.math cimport sqrt -from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t, - int32_t, int64_t, INT64_MAX, SIZE_MAX) +from libc.stdint cimport (uint64_t, int64_t, INT64_MAX, SIZE_MAX) from ._bounded_integers cimport (_rand_bool, _rand_int32, _rand_int64, _rand_int16, _rand_int8, _rand_uint64, _rand_uint32, _rand_uint16, _rand_uint8, _gen_mask) from ._pcg64 import PCG64 -from ._mt19937 import MT19937 from numpy.random cimport bitgen_t from ._common cimport (POISSON_LAM_MAX, CONS_POSITIVE, CONS_NONE, CONS_NON_NEGATIVE, CONS_BOUNDED_0_1, CONS_BOUNDED_GT_0_1, @@ -352,7 +350,6 @@ cdef class Generator: [-1.23204345, -1.75224494]]) """ - cdef double temp _dtype = np.dtype(dtype) if _dtype == np.float64: return double_fill(&random_standard_uniform_fill, &self._bitgen, size, self.lock, out) @@ -845,7 +842,7 @@ cdef class Generator: """ - cdef int64_t val, t, loc, size_i, pop_size_i + cdef int64_t val, loc, size_i, pop_size_i cdef int64_t *idx_data cdef np.npy_intp j cdef uint64_t set_size, mask @@ -1080,7 +1077,6 @@ cdef class Generator: >>> plt.show() """ - cdef bint is_scalar = True cdef np.ndarray alow, ahigh, arange cdef double _low, _high, rng cdef object temp @@ -1370,7 +1366,6 @@ cdef class Generator: >>> plt.show() """ - cdef void *func _dtype = np.dtype(dtype) if _dtype == np.float64: return cont(&random_standard_gamma, &self._bitgen, size, self.lock, 1, @@ -2932,7 +2927,6 @@ cdef class Generator: >>> plt.show() """ - cdef bint is_scalar = True cdef double fleft, fmode, fright cdef np.ndarray oleft, omode, oright @@ -3595,7 +3589,6 @@ cdef class Generator: """ cdef double HYPERGEOM_MAX = 10**9 - cdef bint is_scalar = True cdef np.ndarray ongood, onbad, onsample cdef int64_t lngood, lnbad, lnsample diff --git a/numpy/random/_philox.pyx b/numpy/random/_philox.pyx index 5faa281818fd..3f33c7078f83 100644 --- a/numpy/random/_philox.pyx +++ b/numpy/random/_philox.pyx @@ -1,7 +1,5 @@ #cython: binding=True -from cpython.pycapsule cimport PyCapsule_New - import numpy as np cimport numpy as np diff --git a/numpy/random/bit_generator.pyx b/numpy/random/bit_generator.pyx index fbedb0fd5786..d9a733c7a618 100644 --- a/numpy/random/bit_generator.pyx +++ b/numpy/random/bit_generator.pyx @@ -34,7 +34,6 @@ SOFTWARE. 
""" import abc -import sys from itertools import cycle import re from secrets import randbits diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index 719d1d860d3c..2bf6441bb368 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -380,7 +380,6 @@ cdef class RandomState: if len(state) > 3: st['has_gauss'] = state[3] st['gauss'] = state[4] - value = st self._aug_state.gauss = st.get('gauss', 0.0) self._aug_state.has_gauss = st.get('has_gauss', 0) @@ -437,7 +436,6 @@ cdef class RandomState: [-1.23204345, -1.75224494]]) """ - cdef double temp return double_fill(&random_standard_uniform_fill, &self._bitgen, size, self.lock, None) def random(self, size=None): @@ -1159,7 +1157,6 @@ cdef class RandomState: >>> plt.show() """ - cdef bint is_scalar = True cdef np.ndarray alow, ahigh, arange cdef double _low, _high, range cdef object temp @@ -3325,7 +3322,6 @@ cdef class RandomState: >>> plt.show() """ - cdef bint is_scalar = True cdef double fleft, fmode, fright cdef np.ndarray oleft, omode, oright @@ -3947,7 +3943,6 @@ cdef class RandomState: # answer = 0.003 ... pretty unlikely! """ - cdef bint is_scalar = True cdef np.ndarray ongood, onbad, onsample cdef int64_t lngood, lnbad, lnsample @@ -4248,7 +4243,7 @@ cdef class RandomState: # GH10839, ensure double to make tol meaningful cov = cov.astype(np.double) - (u, s, v) = svd(cov) + (_u, s, v) = svd(cov) if check_valid != 'ignore': if check_valid != 'warn' and check_valid != 'raise': diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index 05319a9bdb8a..a28b989fbb03 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,3 +1,4 @@ # keep in sync with `environment.yml` +cython-lint ruff==0.12.0 GitPython>=3.1.30 diff --git a/tools/linter.py b/tools/linter.py index f614be100159..4e9aed85054a 100644 --- a/tools/linter.py +++ b/tools/linter.py @@ -31,6 +31,18 @@ def run_ruff(self, fix: bool) -> tuple[int, str]: ) return res.returncode, res.stdout + def run_cython_lint(self) -> tuple[int, str]: + print("Running cython-lint...") + command = ["cython-lint", "--no-pycodestyle", "numpy"] + + res = subprocess.run( + command, + stdout=subprocess.PIPE, + cwd=self.repository_root, + encoding="utf-8", + ) + return res.returncode, res.stdout + def run_lint(self, fix: bool) -> None: # Ruff Linter @@ -44,6 +56,13 @@ def run_lint(self, fix: bool) -> None: retcode, c_API_errors = self.run_check_c_api() c_API_errors and print(c_API_errors) + if retcode: + sys.exit(retcode) + + # Cython Linter + retcode, cython_errors = self.run_cython_lint() + cython_errors and print(cython_errors) + sys.exit(retcode) def run_check_c_api(self) -> tuple[int, str]: From 538654066cb0f44c8b816c80420f76dcd56f3d9c Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Fri, 3 Oct 2025 13:03:27 +0200 Subject: [PATCH 0524/1018] ENH: Improve performance of numpy scalar __copy__ and __deepcopy__ (#29656) The numpy scalars are immutable (expect for subclasses of np.void), so copy or deepcopy can return the same object. This improves performance, since the current implementation converts to array, copies and converts back to scalar. 
---
 numpy/_core/src/multiarray/scalartypes.c.src | 35 +++++++++++++++++---
 numpy/_core/tests/test_multiarray.py         | 14 ++++++++
 2 files changed, 45 insertions(+), 4 deletions(-)

diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src
index f84c120ad409..ad49de0c231e 100644
--- a/numpy/_core/src/multiarray/scalartypes.c.src
+++ b/numpy/_core/src/multiarray/scalartypes.c.src
@@ -2093,12 +2093,11 @@ gentype_wraparray(PyObject *NPY_UNUSED(scalar), PyObject *args)
 
 /*
  * These gentype_* functions do not take keyword arguments.
- * The proper flag is METH_VARARGS.
+ * The proper flag is METH_VARARGS or METH_NOARGS.
  */
 /**begin repeat
  *
- * #name = tolist, item, __deepcopy__, __copy__,
- *         swapaxes, conj, conjugate, nonzero,
+ * #name = tolist, item, swapaxes, conj, conjugate, nonzero,
  *         fill, transpose#
  */
 static PyObject *
@@ -2108,6 +2107,34 @@ gentype_@name@(PyObject *self, PyObject *args)
 }
 /**end repeat**/
 
+static PyObject *
+gentype___copy__(PyObject *self)
+{
+    // scalars are immutable, so we can return a new reference
+    // the only exceptions are scalars with void dtype
+    if (PyObject_IsInstance(self, (PyObject *)&PyVoidArrType_Type)) {
+        // path via array
+        return gentype_generic_method(self, NULL, NULL, "__copy__");
+    }
+    return Py_NewRef(self);
+}
+
+static PyObject *
+gentype___deepcopy__(PyObject *self, PyObject *args)
+{
+    // note: the signature may need updating, as __deepcopy__ can also receive memo as a keyword
+
+    // scalars are immutable, so we can return a new reference
+    // the only exceptions are scalars with void dtype
+    // if the number of arguments is not 1, we let gentype_generic_method do the
+    // error handling
+    if (PyObject_IsInstance(self, (PyObject *)&PyVoidArrType_Type) || (PyTuple_Size(args)!=1)) {
+        // path via array
+        return gentype_generic_method(self, args, NULL, "__deepcopy__");
+    }
+    return Py_NewRef(self);
+}
+
 static PyObject *
 gentype_byteswap(PyObject *self, PyObject *args, PyObject *kwds)
 {
@@ -2652,7 +2679,7 @@ static PyMethodDef gentype_methods[] = {
     /* for the copy module */
     {"__copy__",
         (PyCFunction)gentype___copy__,
-        METH_VARARGS, NULL},
+        METH_NOARGS, NULL},
     {"__deepcopy__",
         (PyCFunction)gentype___deepcopy__,
         METH_VARARGS, NULL},
diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py
index 347c92e48c73..bf339c07f068 100644
--- a/numpy/_core/tests/test_multiarray.py
+++ b/numpy/_core/tests/test_multiarray.py
@@ -2476,6 +2476,20 @@ def test__deepcopy__(self, dtype):
         with pytest.raises(AssertionError):
             assert_array_equal(a, b)
 
+    def test__deepcopy___void_scalar(self):
+        # see comments in gh-29643
+        value = np.void('Rex', dtype=[('name', 'U10')])
+        value_deepcopy = value.__deepcopy__(None)
+        value[0] = None
+        assert value_deepcopy[0] == 'Rex'
+
+    @pytest.mark.parametrize("sctype", [np.int64, np.float32, np.float64])
+    def test__deepcopy__scalar(self, sctype):
+        # test optimization from gh-29656
+        value = sctype(1.1)
+        value_deepcopy = value.__deepcopy__(None)
+        assert value is value_deepcopy
+
     def test__deepcopy__catches_failure(self):
         class MyObj:
             def __deepcopy__(self, *args, **kwargs):

From 70e5ac69e1a85389f67e85ff2981c441629f4b8d Mon Sep 17 00:00:00 2001
From: Alverok
Date: Fri, 3 Oct 2025 14:54:27 +0530
Subject: [PATCH 0525/1018] DOC: Add warning and examples for
 sliding_window_view

[skip azp][skip actions][skip cirrus]

---
 numpy/lib/_stride_tricks_impl.py | 33 ++++++++++++++++++++++++++++++++
 1 file changed, 33 insertions(+)

diff --git
a/numpy/lib/_stride_tricks_impl.py b/numpy/lib/_stride_tricks_impl.py
index d4780783a638..f6f3b8370a72 100644
--- a/numpy/lib/_stride_tricks_impl.py
+++ b/numpy/lib/_stride_tricks_impl.py
@@ -173,6 +173,14 @@ def sliding_window_view(x, window_shape, axis=None, *,
 
     Notes
     -----
+    .. warning::
+
+        This function creates views with overlapping memory. When
+        ``writeable=True``, writing to the view will modify the original array
+        and may affect multiple view positions. See the examples below and
+        :doc:`this guide `
+        about the difference between copies and views.
+
     For many applications using a sliding window view can be convenient, but
     potentially very slow. Often specialized solutions exist, for example:
 
@@ -297,6 +305,31 @@ def sliding_window_view(x, window_shape, axis=None, *,
     >>> moving_average
     array([1., 2., 3., 4.])
 
+    The two examples below demonstrate the effect of the ``writeable``
+    argument.
+
+    Creating a view with the default ``writeable=False`` and then writing to
+    it raises an error.
+
+    >>> v = sliding_window_view(x, 3)
+    >>> v[0,1] = 10
+    Traceback (most recent call last):
+        ...
+    ValueError: assignment destination is read-only
+
+    Creating a view with ``writeable=True`` and then writing to it changes
+    the original array and multiple view positions.
+
+    >>> x = np.arange(6)  # reset x for the second example
+    >>> v = sliding_window_view(x, 3, writeable=True)
+    >>> v[0,1] = 10
+    >>> x
+    array([ 0, 10,  2,  3,  4,  5])
+    >>> v
+    array([[ 0, 10,  2],
+           [10,  2,  3],
+           [ 2,  3,  4],
+           [ 3,  4,  5]])
+
     Note that a sliding window approach is often **not** optimal (see Notes).
     """
     window_shape = (tuple(window_shape)

From f001a708ef4fade19b85ec59c6fc5f3934e90d6b Mon Sep 17 00:00:00 2001
From: Swayam
Date: Sun, 5 Oct 2025 18:56:44 +0530
Subject: [PATCH 0526/1018] ENH, FEAT: Reorganize finfo and add new constant
 slot (#29836)

Introduce `NPY_DT_get_constant` as a slot and a corresponding function
that fills in a single element pointer for an arbitrary (corresponding)
dtype/descriptor.

Since we do want some integer values for `finfo`, some values are set to
always fill in an `npy_intp` value instead.

The slot assumes that it will be used with the GIL held and on
uninitialized data, since I suspect that this is the typical use-case
(i.e. it is unlikely that we need to fetch a constant deep during a
calculation, especially in a context where the correct type/value isn't
known anyway).

This completely re-organizes and simplifies `finfo` by moving all
definitions to C.
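A small sketch of the resulting split between C and Python (illustrative
only; the float64 values below are the standard IEEE binary64 constants):

    import math
    import numpy as np

    fi = np.finfo(np.float64)
    # Core constants (max, min, eps, smallest_normal, ...) are filled in
    # from the C-level get_constant slot:
    assert fi.eps == 2.0**-52
    # Derived attributes are computed lazily in Python:
    assert fi.machep == int(math.log2(fi.eps))  # -52
    assert fi.epsneg == fi.eps / 2              # eps / radix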
Co-authored-by: Sebastian Berg --- numpy/__init__.py | 3 - numpy/_core/__init__.py | 10 +- numpy/_core/_machar.py | 355 ------------- numpy/_core/_machar.pyi | 55 -- numpy/_core/getlimits.py | 476 ++++-------------- numpy/_core/include/numpy/dtype_api.h | 39 +- numpy/_core/meson.build | 2 - numpy/_core/src/multiarray/arraytypes.c.src | 203 +++++++- numpy/_core/src/multiarray/dtypemeta.c | 41 +- numpy/_core/src/multiarray/dtypemeta.h | 11 +- numpy/_core/src/multiarray/multiarraymodule.c | 105 ++++ numpy/_core/src/multiarray/usertypes.c | 5 +- numpy/_core/tests/test_deprecations.py | 8 - numpy/_core/tests/test_finfo.py | 83 +++ numpy/_core/tests/test_getlimits.py | 54 +- numpy/_core/tests/test_longdouble.py | 3 +- numpy/_core/tests/test_machar.py | 30 -- numpy/_core/tests/test_numeric.py | 4 +- .../typing/tests/data/reveal/arrayterator.pyi | 2 +- 19 files changed, 573 insertions(+), 916 deletions(-) delete mode 100644 numpy/_core/_machar.py delete mode 100644 numpy/_core/_machar.pyi create mode 100644 numpy/_core/tests/test_finfo.py delete mode 100644 numpy/_core/tests/test_machar.py diff --git a/numpy/__init__.py b/numpy/__init__.py index d6702bba4622..7bfc12119694 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -677,9 +677,6 @@ from ._array_api_info import __array_namespace_info__ - # now that numpy core module is imported, can initialize limits - _core.getlimits._register_known_types() - __all__ = list( __numpy_submodules__ | set(_core.__all__) | diff --git a/numpy/_core/__init__.py b/numpy/_core/__init__.py index b0be8d1cbab6..7b53022c3bc3 100644 --- a/numpy/_core/__init__.py +++ b/numpy/_core/__init__.py @@ -106,15 +106,7 @@ from .numerictypes import sctypeDict, sctypes multiarray.set_typeDict(nt.sctypeDict) -from . import ( - _machar, - einsumfunc, - fromnumeric, - function_base, - getlimits, - numeric, - shape_base, -) +from . import einsumfunc, fromnumeric, function_base, getlimits, numeric, shape_base from .einsumfunc import * from .fromnumeric import * from .function_base import * diff --git a/numpy/_core/_machar.py b/numpy/_core/_machar.py deleted file mode 100644 index b49742a15802..000000000000 --- a/numpy/_core/_machar.py +++ /dev/null @@ -1,355 +0,0 @@ -""" -Machine arithmetic - determine the parameters of the -floating-point arithmetic system - -Author: Pearu Peterson, September 2003 - -""" -__all__ = ['MachAr'] - -from ._ufunc_config import errstate -from .fromnumeric import any - -# Need to speed this up...especially for longdouble - -# Deprecated 2021-10-20, NumPy 1.22 -class MachAr: - """ - Diagnosing machine parameters. - - Attributes - ---------- - ibeta : int - Radix in which numbers are represented. - it : int - Number of base-`ibeta` digits in the floating point mantissa M. - machep : int - Exponent of the smallest (most negative) power of `ibeta` that, - added to 1.0, gives something different from 1.0 - eps : float - Floating-point number ``beta**machep`` (floating point precision) - negep : int - Exponent of the smallest power of `ibeta` that, subtracted - from 1.0, gives something different from 1.0. - epsneg : float - Floating-point number ``beta**negep``. - iexp : int - Number of bits in the exponent (including its sign and bias). - minexp : int - Smallest (most negative) power of `ibeta` consistent with there - being no leading zeros in the mantissa. - xmin : float - Floating-point number ``beta**minexp`` (the smallest [in - magnitude] positive floating point number with full precision). 
- maxexp : int - Smallest (positive) power of `ibeta` that causes overflow. - xmax : float - ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude] - usable floating value). - irnd : int - In ``range(6)``, information on what kind of rounding is done - in addition, and on how underflow is handled. - ngrd : int - Number of 'guard digits' used when truncating the product - of two mantissas to fit the representation. - epsilon : float - Same as `eps`. - tiny : float - An alias for `smallest_normal`, kept for backwards compatibility. - huge : float - Same as `xmax`. - precision : float - ``- int(-log10(eps))`` - resolution : float - ``- 10**(-precision)`` - smallest_normal : float - The smallest positive floating point number with 1 as leading bit in - the mantissa following IEEE-754. Same as `xmin`. - smallest_subnormal : float - The smallest positive floating point number with 0 as leading bit in - the mantissa following IEEE-754. - - Parameters - ---------- - float_conv : function, optional - Function that converts an integer or integer array to a float - or float array. Default is `float`. - int_conv : function, optional - Function that converts a float or float array to an integer or - integer array. Default is `int`. - float_to_float : function, optional - Function that converts a float array to float. Default is `float`. - Note that this does not seem to do anything useful in the current - implementation. - float_to_str : function, optional - Function that converts a single float to a string. Default is - ``lambda v:'%24.16e' %v``. - title : str, optional - Title that is printed in the string representation of `MachAr`. - - See Also - -------- - finfo : Machine limits for floating point types. - iinfo : Machine limits for integer types. - - References - ---------- - .. [1] Press, Teukolsky, Vetterling and Flannery, - "Numerical Recipes in C++," 2nd ed, - Cambridge University Press, 2002, p. 31. - - """ - - def __init__(self, float_conv=float, int_conv=int, - float_to_float=float, - float_to_str=lambda v: f'{v:24.16e}', - title='Python floating point number'): - """ - - float_conv - convert integer to float (array) - int_conv - convert float (array) to integer - float_to_float - convert float array to float - float_to_str - convert array float to str - title - description of used floating point numbers - - """ - # We ignore all errors here because we are purposely triggering - # underflow to detect the properties of the running arch. - with errstate(under='ignore'): - self._do_init(float_conv, int_conv, float_to_float, float_to_str, title) - - def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title): - max_iterN = 10000 - msg = "Did not converge after %d tries with %s" - one = float_conv(1) - two = one + one - zero = one - one - - # Do we really need to do this? Aren't they 2 and 2.0? 
- # Determine ibeta and beta - a = one - for _ in range(max_iterN): - a = a + a - temp = a + one - temp1 = temp - a - if any(temp1 - one != zero): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - b = one - for _ in range(max_iterN): - b = b + b - temp = a + b - itemp = int_conv(temp - a) - if any(itemp != 0): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - ibeta = itemp - beta = float_conv(ibeta) - - # Determine it and irnd - it = -1 - b = one - for _ in range(max_iterN): - it = it + 1 - b = b * beta - temp = b + one - temp1 = temp - b - if any(temp1 - one != zero): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - - betah = beta / two - a = one - for _ in range(max_iterN): - a = a + a - temp = a + one - temp1 = temp - a - if any(temp1 - one != zero): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - temp = a + betah - irnd = 0 - if any(temp - a != zero): - irnd = 1 - tempa = a + beta - temp = tempa + betah - if irnd == 0 and any(temp - tempa != zero): - irnd = 2 - - # Determine negep and epsneg - negep = it + 3 - betain = one / beta - a = one - for i in range(negep): - a = a * betain - b = a - for _ in range(max_iterN): - temp = one - a - if any(temp - one != zero): - break - a = a * beta - negep = negep - 1 - # Prevent infinite loop on PPC with gcc 4.0: - if negep < 0: - raise RuntimeError("could not determine machine tolerance " - "for 'negep', locals() -> %s" % (locals())) - else: - raise RuntimeError(msg % (_, one.dtype)) - negep = -negep - epsneg = a - - # Determine machep and eps - machep = - it - 3 - a = b - - for _ in range(max_iterN): - temp = one + a - if any(temp - one != zero): - break - a = a * beta - machep = machep + 1 - else: - raise RuntimeError(msg % (_, one.dtype)) - eps = a - - # Determine ngrd - ngrd = 0 - temp = one + eps - if irnd == 0 and any(temp * one - one != zero): - ngrd = 1 - - # Determine iexp - i = 0 - k = 1 - z = betain - t = one + eps - nxres = 0 - for _ in range(max_iterN): - y = z - z = y * y - a = z * one # Check here for underflow - temp = z * t - if any(a + a == zero) or any(abs(z) >= y): - break - temp1 = temp * betain - if any(temp1 * beta == z): - break - i = i + 1 - k = k + k - else: - raise RuntimeError(msg % (_, one.dtype)) - if ibeta != 10: - iexp = i + 1 - mx = k + k - else: - iexp = 2 - iz = ibeta - while k >= iz: - iz = iz * ibeta - iexp = iexp + 1 - mx = iz + iz - 1 - - # Determine minexp and xmin - for _ in range(max_iterN): - xmin = y - y = y * betain - a = y * one - temp = y * t - if any((a + a) != zero) and any(abs(y) < xmin): - k = k + 1 - temp1 = temp * betain - if any(temp1 * beta == y) and any(temp != y): - nxres = 3 - xmin = y - break - else: - break - else: - raise RuntimeError(msg % (_, one.dtype)) - minexp = -k - - # Determine maxexp, xmax - if mx <= k + k - 3 and ibeta != 10: - mx = mx + mx - iexp = iexp + 1 - maxexp = mx + minexp - irnd = irnd + nxres - if irnd >= 2: - maxexp = maxexp - 2 - i = maxexp + minexp - if ibeta == 2 and not i: - maxexp = maxexp - 1 - if i > 20: - maxexp = maxexp - 1 - if any(a != y): - maxexp = maxexp - 2 - xmax = one - epsneg - if any(xmax * one != xmax): - xmax = one - beta * epsneg - xmax = xmax / (xmin * beta * beta * beta) - i = maxexp + minexp + 3 - for j in range(i): - if ibeta == 2: - xmax = xmax + xmax - else: - xmax = xmax * beta - - smallest_subnormal = abs(xmin / beta ** (it)) - - self.ibeta = ibeta - self.it = it - self.negep = negep - self.epsneg = float_to_float(epsneg) - self._str_epsneg = float_to_str(epsneg) - self.machep = machep - 
self.eps = float_to_float(eps) - self._str_eps = float_to_str(eps) - self.ngrd = ngrd - self.iexp = iexp - self.minexp = minexp - self.xmin = float_to_float(xmin) - self._str_xmin = float_to_str(xmin) - self.maxexp = maxexp - self.xmax = float_to_float(xmax) - self._str_xmax = float_to_str(xmax) - self.irnd = irnd - - self.title = title - # Commonly used parameters - self.epsilon = self.eps - self.tiny = self.xmin - self.huge = self.xmax - self.smallest_normal = self.xmin - self._str_smallest_normal = float_to_str(self.xmin) - self.smallest_subnormal = float_to_float(smallest_subnormal) - self._str_smallest_subnormal = float_to_str(smallest_subnormal) - - import math - self.precision = int(-math.log10(float_to_float(self.eps))) - ten = two + two + two + two + two - resolution = ten ** (-self.precision) - self.resolution = float_to_float(resolution) - self._str_resolution = float_to_str(resolution) - - def __str__(self): - fmt = ( - 'Machine parameters for %(title)s\n' - '---------------------------------------------------------------------\n' - 'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n' - 'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n' - 'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n' - 'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n' - 'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n' - 'smallest_normal=%(smallest_normal)s ' - 'smallest_subnormal=%(smallest_subnormal)s\n' - '---------------------------------------------------------------------\n' - ) - return fmt % self.__dict__ - - -if __name__ == '__main__': - print(MachAr()) diff --git a/numpy/_core/_machar.pyi b/numpy/_core/_machar.pyi deleted file mode 100644 index 02637a17b6a8..000000000000 --- a/numpy/_core/_machar.pyi +++ /dev/null @@ -1,55 +0,0 @@ -from collections.abc import Iterable -from typing import Any, Final, TypeVar, overload - -import numpy as np -from numpy import _CastingKind -from numpy._utils import set_module as set_module - -### - -_T = TypeVar("_T") -_TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, *tuple[Any, ...]]) -_ExceptionT = TypeVar("_ExceptionT", bound=Exception) - -### - -class UFuncTypeError(TypeError): - ufunc: Final[np.ufunc] - def __init__(self, /, ufunc: np.ufunc) -> None: ... - -class _UFuncNoLoopError(UFuncTypeError): - dtypes: tuple[np.dtype, ...] - def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ... - -class _UFuncBinaryResolutionError(_UFuncNoLoopError): - dtypes: tuple[np.dtype, np.dtype] - def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ... - -class _UFuncCastingError(UFuncTypeError): - casting: Final[_CastingKind] - from_: Final[np.dtype] - to: Final[np.dtype] - def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype) -> None: ... - -class _UFuncInputCastingError(_UFuncCastingError): - in_i: Final[int] - def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ... - -class _UFuncOutputCastingError(_UFuncCastingError): - out_i: Final[int] - def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ... - -class _ArrayMemoryError(MemoryError): - shape: tuple[int, ...] - dtype: np.dtype - def __init__(self, /, shape: tuple[int, ...], dtype: np.dtype) -> None: ... - @property - def _total_size(self) -> int: ... - @staticmethod - def _size_to_string(num_bytes: int) -> str: ... 
- -@overload -def _unpack_tuple(tup: tuple[_T]) -> _T: ... -@overload -def _unpack_tuple(tup: _TupleT) -> _TupleT: ... -def _display_as_base(cls: type[_ExceptionT]) -> type[_ExceptionT]: ... diff --git a/numpy/_core/getlimits.py b/numpy/_core/getlimits.py index a3d0086974b1..1317c8b1ea0a 100644 --- a/numpy/_core/getlimits.py +++ b/numpy/_core/getlimits.py @@ -3,15 +3,15 @@ """ __all__ = ['finfo', 'iinfo'] +import math import types import warnings +from functools import cached_property from numpy._utils import set_module from . import numeric, numerictypes as ntypes -from ._machar import MachAr -from .numeric import array, inf, nan -from .umath import exp2, isnan, log10, nextafter +from ._multiarray_umath import _populate_finfo_constants def _fr0(a): @@ -30,96 +30,6 @@ def _fr1(a): return a -class MachArLike: - """ Object to simulate MachAr instance """ - def __init__(self, ftype, *, eps, epsneg, huge, tiny, - ibeta, smallest_subnormal=None, **kwargs): - self.params = _MACHAR_PARAMS[ftype] - self.ftype = ftype - self.title = self.params['title'] - # Parameter types same as for discovered MachAr object. - if not smallest_subnormal: - self._smallest_subnormal = nextafter( - self.ftype(0), self.ftype(1), dtype=self.ftype) - else: - self._smallest_subnormal = smallest_subnormal - self.epsilon = self.eps = self._float_to_float(eps) - self.epsneg = self._float_to_float(epsneg) - self.xmax = self.huge = self._float_to_float(huge) - self.xmin = self._float_to_float(tiny) - self.smallest_normal = self.tiny = self._float_to_float(tiny) - self.ibeta = self.params['itype'](ibeta) - self.__dict__.update(kwargs) - self.precision = int(-log10(self.eps)) - self.resolution = self._float_to_float( - self._float_conv(10) ** (-self.precision)) - self._str_eps = self._float_to_str(self.eps) - self._str_epsneg = self._float_to_str(self.epsneg) - self._str_xmin = self._float_to_str(self.xmin) - self._str_xmax = self._float_to_str(self.xmax) - self._str_resolution = self._float_to_str(self.resolution) - self._str_smallest_normal = self._float_to_str(self.xmin) - - @property - def smallest_subnormal(self): - """Return the value for the smallest subnormal. - - Returns - ------- - smallest_subnormal : float - value for the smallest subnormal. - - Warns - ----- - UserWarning - If the calculated value for the smallest subnormal is zero. - """ - # Check that the calculated value is not zero, in case it raises a - # warning. - value = self._smallest_subnormal - if self.ftype(0) == value: - warnings.warn( - f'The value of the smallest subnormal for {self.ftype} type is zero.', - UserWarning, stacklevel=2) - - return self._float_to_float(value) - - @property - def _str_smallest_subnormal(self): - """Return the string representation of the smallest subnormal.""" - return self._float_to_str(self.smallest_subnormal) - - def _float_to_float(self, value): - """Converts float to float. - - Parameters - ---------- - value : float - value to be converted. - """ - return _fr1(self._float_conv(value)) - - def _float_conv(self, value): - """Converts float to conv. - - Parameters - ---------- - value : float - value to be converted. - """ - return array([value], self.ftype) - - def _float_to_str(self, value): - """Converts float to str. - - Parameters - ---------- - value : float - value to be converted. 
- """ - return self.params['fmt'] % array(_fr0(value)[0], self.ftype) - - _convert_to_float = { ntypes.csingle: ntypes.single, ntypes.complex128: ntypes.float64, @@ -146,240 +56,6 @@ def _float_to_str(self, value): 'fmt': '%12.5e', 'title': _title_fmt.format('half')}} -# Key to identify the floating point type. Key is result of -# -# ftype = np.longdouble # or float64, float32, etc. -# v = (ftype(-1.0) / ftype(10.0)) -# v.view(v.dtype.newbyteorder('<')).tobytes() -# -# Uses division to work around deficiencies in strtold on some platforms. -# See: -# https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure - -_KNOWN_TYPES = {} -def _register_type(machar, bytepat): - _KNOWN_TYPES[bytepat] = machar - - -_float_ma = {} - - -def _register_known_types(): - # Known parameters for float16 - # See docstring of MachAr class for description of parameters. - f16 = ntypes.float16 - float16_ma = MachArLike(f16, - machep=-10, - negep=-11, - minexp=-14, - maxexp=16, - it=10, - iexp=5, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(f16(-10)), - epsneg=exp2(f16(-11)), - huge=f16(65504), - tiny=f16(2 ** -14)) - _register_type(float16_ma, b'f\xae') - _float_ma[16] = float16_ma - - # Known parameters for float32 - f32 = ntypes.float32 - float32_ma = MachArLike(f32, - machep=-23, - negep=-24, - minexp=-126, - maxexp=128, - it=23, - iexp=8, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(f32(-23)), - epsneg=exp2(f32(-24)), - huge=f32((1 - 2 ** -24) * 2**128), - tiny=exp2(f32(-126))) - _register_type(float32_ma, b'\xcd\xcc\xcc\xbd') - _float_ma[32] = float32_ma - - # Known parameters for float64 - f64 = ntypes.float64 - epsneg_f64 = 2.0 ** -53.0 - tiny_f64 = 2.0 ** -1022.0 - float64_ma = MachArLike(f64, - machep=-52, - negep=-53, - minexp=-1022, - maxexp=1024, - it=52, - iexp=11, - ibeta=2, - irnd=5, - ngrd=0, - eps=2.0 ** -52.0, - epsneg=epsneg_f64, - huge=(1.0 - epsneg_f64) / tiny_f64 * f64(4), - tiny=tiny_f64) - _register_type(float64_ma, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf') - _float_ma[64] = float64_ma - - # Known parameters for IEEE 754 128-bit binary float - ld = ntypes.longdouble - epsneg_f128 = exp2(ld(-113)) - tiny_f128 = exp2(ld(-16382)) - # Ignore runtime error when this is not f128 - with numeric.errstate(all='ignore'): - huge_f128 = (ld(1) - epsneg_f128) / tiny_f128 * ld(4) - float128_ma = MachArLike(ld, - machep=-112, - negep=-113, - minexp=-16382, - maxexp=16384, - it=112, - iexp=15, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(ld(-112)), - epsneg=epsneg_f128, - huge=huge_f128, - tiny=tiny_f128) - # IEEE 754 128-bit binary float - _register_type(float128_ma, - b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf') - _float_ma[128] = float128_ma - - # Known parameters for float80 (Intel 80-bit extended precision) - epsneg_f80 = exp2(ld(-64)) - tiny_f80 = exp2(ld(-16382)) - # Ignore runtime error when this is not f80 - with numeric.errstate(all='ignore'): - huge_f80 = (ld(1) - epsneg_f80) / tiny_f80 * ld(4) - float80_ma = MachArLike(ld, - machep=-63, - negep=-64, - minexp=-16382, - maxexp=16384, - it=63, - iexp=15, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(ld(-63)), - epsneg=epsneg_f80, - huge=huge_f80, - tiny=tiny_f80) - # float80, first 10 bytes containing actual storage - _register_type(float80_ma, b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf') - _float_ma[80] = float80_ma - - # Guessed / known parameters for double double; see: - # https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic - # These numbers have the same 
exponent range as float64, but extended - # number of digits in the significand. - huge_dd = nextafter(ld(inf), ld(0), dtype=ld) - # As the smallest_normal in double double is so hard to calculate we set - # it to NaN. - smallest_normal_dd = nan - # Leave the same value for the smallest subnormal as double - smallest_subnormal_dd = ld(nextafter(0., 1.)) - float_dd_ma = MachArLike(ld, - machep=-105, - negep=-106, - minexp=-1022, - maxexp=1024, - it=105, - iexp=11, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(ld(-105)), - epsneg=exp2(ld(-106)), - huge=huge_dd, - tiny=smallest_normal_dd, - smallest_subnormal=smallest_subnormal_dd) - # double double; low, high order (e.g. PPC 64) - _register_type(float_dd_ma, - b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf') - # double double; high, low order (e.g. PPC 64 le) - _register_type(float_dd_ma, - b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<') - _float_ma['dd'] = float_dd_ma - - -def _get_machar(ftype): - """ Get MachAr instance or MachAr-like instance - - Get parameters for floating point type, by first trying signatures of - various known floating point types, then, if none match, attempting to - identify parameters by analysis. - - Parameters - ---------- - ftype : class - Numpy floating point type class (e.g. ``np.float64``) - - Returns - ------- - ma_like : instance of :class:`MachAr` or :class:`MachArLike` - Object giving floating point parameters for `ftype`. - - Warns - ----- - UserWarning - If the binary signature of the float type is not in the dictionary of - known float types. - """ - params = _MACHAR_PARAMS.get(ftype) - if params is None: - raise ValueError(repr(ftype)) - # Detect known / suspected types - # ftype(-1.0) / ftype(10.0) is better than ftype('-0.1') because stold - # may be deficient - key = (ftype(-1.0) / ftype(10.)) - key = key.view(key.dtype.newbyteorder("<")).tobytes() - ma_like = None - if ftype == ntypes.longdouble: - # Could be 80 bit == 10 byte extended precision, where last bytes can - # be random garbage. - # Comparing first 10 bytes to pattern first to avoid branching on the - # random garbage. - ma_like = _KNOWN_TYPES.get(key[:10]) - if ma_like is None: - # see if the full key is known. - ma_like = _KNOWN_TYPES.get(key) - if ma_like is None and len(key) == 16: - # machine limits could be f80 masquerading as np.float128, - # find all keys with length 16 and make new dict, but make the keys - # only 10 bytes long, the last bytes can be random garbage - _kt = {k[:10]: v for k, v in _KNOWN_TYPES.items() if len(k) == 16} - ma_like = _kt.get(key[:10]) - if ma_like is not None: - return ma_like - # Fall back to parameter discovery - warnings.warn( - f'Signature {key} for {ftype} does not match any known type: ' - 'falling back to type probe function.\n' - 'This warnings indicates broken support for the dtype!', - UserWarning, stacklevel=2) - return _discovered_machar(ftype) - - -def _discovered_machar(ftype): - """ Create MachAr instance with found information on float types - - TODO: MachAr should be retired completely ideally. We currently only - ever use it system with broken longdouble (valgrind, WSL). 
- """ - params = _MACHAR_PARAMS[ftype] - return MachAr(lambda v: array([v], ftype), - lambda v: _fr0(v.astype(params['itype']))[0], - lambda v: array(_fr0(v)[0], ftype), - lambda v: params['fmt'] % array(_fr0(v)[0], ftype), - params['title']) - @set_module('numpy') class finfo: @@ -548,75 +224,105 @@ def __new__(cls, dtype): def _init(self, dtype): self.dtype = numeric.dtype(dtype) - machar = _get_machar(dtype) - - for word in ['precision', 'iexp', - 'maxexp', 'minexp', 'negep', - 'machep']: - setattr(self, word, getattr(machar, word)) - for word in ['resolution', 'epsneg', 'smallest_subnormal']: - setattr(self, word, getattr(machar, word).flat[0]) self.bits = self.dtype.itemsize * 8 - self.max = machar.huge.flat[0] - self.min = -self.max - self.eps = machar.eps.flat[0] - self.nexp = machar.iexp - self.nmant = machar.it - self._machar = machar - self._str_tiny = machar._str_xmin.strip() - self._str_max = machar._str_xmax.strip() - self._str_epsneg = machar._str_epsneg.strip() - self._str_eps = machar._str_eps.strip() - self._str_resolution = machar._str_resolution.strip() - self._str_smallest_normal = machar._str_smallest_normal.strip() - self._str_smallest_subnormal = machar._str_smallest_subnormal.strip() + self._fmt = None + self._repr = None + _populate_finfo_constants(self, self.dtype) return self + @cached_property + def epsneg(self): + # Assume typical floating point logic. Could also use nextafter. + return self.eps / self._radix + + @cached_property + def resolution(self): + return self.dtype.type(10)**-self.precision + + @cached_property + def machep(self): + return int(math.log2(self.eps)) + + @cached_property + def negep(self): + return int(math.log2(self.epsneg)) + + @cached_property + def nexp(self): + # considering all ones (inf/nan) and all zeros (subnormal/zero) + return math.ceil(math.log2(self.maxexp - self.minexp + 2)) + + @cached_property + def iexp(self): + # Calculate exponent bits from it's range: + return math.ceil(math.log2(self.maxexp - self.minexp)) + def __str__(self): + if self._fmt is not None: + return self._fmt + + def get_str(name, pad=None): + if (val := getattr(self, name)) is None: + return "" + if pad is not None: + s = str(val).ljust(pad) + return str(val) + + precision = get_str("precision", 3) + machep = get_str("machep", 6) + negep = get_str("negep", 6) + minexp = get_str("minexp", 6) + maxexp = get_str("maxexp", 6) + resolution = get_str("resolution") + eps = get_str("eps") + epsneg = get_str("epsneg") + tiny = get_str("tiny") + smallest_normal = get_str("smallest_normal") + smallest_subnormal = get_str("smallest_subnormal") + nexp = get_str("nexp", 6) + max_ = get_str("max") + if hasattr(self, "min") and hasattr(self, "max") and -self.min == self.max: + min_ = "-max" + else: + min_ = get_str("min") + fmt = ( - 'Machine parameters for %(dtype)s\n' - '---------------------------------------------------------------\n' - 'precision = %(precision)3s resolution = %(_str_resolution)s\n' - 'machep = %(machep)6s eps = %(_str_eps)s\n' - 'negep = %(negep)6s epsneg = %(_str_epsneg)s\n' - 'minexp = %(minexp)6s tiny = %(_str_tiny)s\n' - 'maxexp = %(maxexp)6s max = %(_str_max)s\n' - 'nexp = %(nexp)6s min = -max\n' - 'smallest_normal = %(_str_smallest_normal)s ' - 'smallest_subnormal = %(_str_smallest_subnormal)s\n' - '---------------------------------------------------------------\n' - ) - return fmt % self.__dict__ + f'Machine parameters for {self.dtype}\n' + f'---------------------------------------------------------------\n' + f'precision = {precision} 
+            f'machep = {machep}   eps =        {eps}\n'
+            f'negep =  {negep}   epsneg =     {epsneg}\n'
+            f'minexp = {minexp}   tiny =       {tiny}\n'
+            f'maxexp = {maxexp}   max =        {max_}\n'
+            f'nexp =   {nexp}   min =        {min_}\n'
+            f'smallest_normal = {smallest_normal}   '
+            f'smallest_subnormal = {smallest_subnormal}\n'
+            f'---------------------------------------------------------------\n'
+        )
+        self._fmt = fmt
+        return fmt
 
     def __repr__(self):
+        if self._repr is not None:
+            return self._repr
+
         c = self.__class__.__name__
-        d = self.__dict__.copy()
-        d['klass'] = c
-        return (("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s,"
-                 " max=%(_str_max)s, dtype=%(dtype)s)") % d)
 
-    @property
-    def smallest_normal(self):
-        """Return the value for the smallest normal.
+        # Use precision+1 digits in exponential notation
+        fmt_str = _MACHAR_PARAMS.get(self.dtype.type, {}).get('fmt', '%s')
+        if fmt_str != '%s' and hasattr(self, 'max') and hasattr(self, 'min'):
+            max_str = (fmt_str % self.max).strip()
+            min_str = (fmt_str % self.min).strip()
+        else:
+            max_str = str(self.max)
+            min_str = str(self.min)
 
-        Returns
-        -------
-        smallest_normal : float
-            Value for the smallest normal.
+        resolution_str = str(self.resolution)
 
-        Warns
-        -----
-        UserWarning
-            If the calculated value for the smallest normal is requested for
-            double-double.
-        """
-        # This check is necessary because the value for smallest_normal is
-        # platform dependent for longdouble types.
-        if isnan(self._machar.smallest_normal.flat[0]):
-            warnings.warn(
-                'The value of smallest normal is undefined for double double',
-                UserWarning, stacklevel=2)
-        return self._machar.smallest_normal.flat[0]
+        repr_str = (f"{c}(resolution={resolution_str}, min={min_str},"
+                    f" max={max_str}, dtype={self.dtype})")
+        self._repr = repr_str
+        return repr_str
 
     @property
     def tiny(self):
diff --git a/numpy/_core/include/numpy/dtype_api.h b/numpy/_core/include/numpy/dtype_api.h
index 8c3ff720b372..cc61a826c103 100644
--- a/numpy/_core/include/numpy/dtype_api.h
+++ b/numpy/_core/include/numpy/dtype_api.h
@@ -379,6 +379,7 @@ typedef int (PyArrayMethod_PromoterFunction)(PyObject *ufunc,
 #define NPY_DT_get_clear_loop 9
 #define NPY_DT_get_fill_zero_loop 10
 #define NPY_DT_finalize_descr 11
+#define NPY_DT_get_constant 12
 
 // These PyArray_ArrFunc slots will be deprecated and replaced eventually
 // getitem and setitem can be defined as a performance optimization;
@@ -389,7 +390,7 @@ typedef int (PyArrayMethod_PromoterFunction)(PyObject *ufunc,
 // used to separate dtype slots from arrfuncs slots
 // intended only for internal use but defined here for clarity
 
-#define _NPY_DT_ARRFUNCS_OFFSET (1 << 10)
+#define _NPY_DT_ARRFUNCS_OFFSET (1 << 11)
 
 // Cast is disabled
 // #define NPY_DT_PyArray_ArrFuncs_cast 0 + _NPY_DT_ARRFUNCS_OFFSET
@@ -479,6 +480,42 @@ typedef PyArray_Descr *(PyArrayDTypeMeta_EnsureCanonical)(PyArray_Descr *dtype);
  */
 typedef PyArray_Descr *(PyArrayDTypeMeta_FinalizeDescriptor)(PyArray_Descr *dtype);
 
+/*
+ * Constants that can be queried and used e.g. by reduce identity defaults.
+ * These are also used to expose .finfo and .iinfo for example.
+ */ +/* Numerical constants */ +#define NPY_CONSTANT_zero 1 +#define NPY_CONSTANT_one 2 +#define NPY_CONSTANT_all_bits_set 3 +#define NPY_CONSTANT_maximum_finite 4 +#define NPY_CONSTANT_minimum_finite 5 +#define NPY_CONSTANT_inf 6 +#define NPY_CONSTANT_ninf 7 +#define NPY_CONSTANT_nan 8 +#define NPY_CONSTANT_finfo_radix 9 +#define NPY_CONSTANT_finfo_eps 10 +#define NPY_CONSTANT_finfo_smallest_normal 11 +#define NPY_CONSTANT_finfo_smallest_subnormal 12 +/* Constants that are always of integer type, value is `npy_intp/Py_ssize_t` */ +#define NPY_CONSTANT_finfo_nmant (1 << 16) + 0 +#define NPY_CONSTANT_finfo_min_exp (1 << 16) + 1 +#define NPY_CONSTANT_finfo_max_exp (1 << 16) + 2 +#define NPY_CONSTANT_finfo_decimal_digits (1 << 16) + 3 + +/* It may make sense to continue with other constants here, e.g. pi, etc? */ + +/* + * Function to get a constant value for the dtype. Data may be unaligned, the + * function is always called with the GIL held. + * + * @param descr The dtype instance (i.e. self) + * @param ID The ID of the constant to get. + * @param data Pointer to the data to be written too, may be unaligned. + * @returns 1 on success, 0 if the constant is not available, or -1 with an error set. + */ +typedef int (PyArrayDTypeMeta_GetConstant)(PyArray_Descr *descr, int ID, void *data); + /* * TODO: These two functions are currently only used for experimental DType * API support. Their relation should be "reversed": NumPy should diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index e05f25da39ca..0d8c513d4fde 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1390,8 +1390,6 @@ python_sources = [ '_exceptions.pyi', '_internal.py', '_internal.pyi', - '_machar.py', - '_machar.pyi', '_methods.py', '_methods.pyi', '_simd.pyi', diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src index dd51a937031a..0a063f6ba07a 100644 --- a/numpy/_core/src/multiarray/arraytypes.c.src +++ b/numpy/_core/src/multiarray/arraytypes.c.src @@ -3,6 +3,7 @@ #include #include #include +#include #include #define NPY_NO_DEPRECATED_API NPY_API_VERSION @@ -15,6 +16,7 @@ #include "npy_pycompat.h" #include "numpy/npy_math.h" #include "numpy/halffloat.h" +#include "numpy/dtype_api.h" #include "npy_config.h" #include "npy_sort.h" @@ -4318,10 +4320,202 @@ PyArray_DescrFromType(int type) /* ***************************************************************************** - ** SETUP TYPE INFO ** + ** NEWSTYLE TYPE METHODS ** ***************************************************************************** */ +static int +BOOL_get_constant(PyArray_Descr *descr, int constant_id, void *ptr) +{ + switch (constant_id) { + case NPY_CONSTANT_zero: + case NPY_CONSTANT_minimum_finite: + *(npy_bool *)ptr = NPY_FALSE; + return 1; + case NPY_CONSTANT_one: + case NPY_CONSTANT_maximum_finite: + *(npy_bool *)ptr = NPY_TRUE; + return 1; + default: + return 0; + } +} + +/**begin repeat + * #NAME = BYTE, UBYTE, SHORT, USHORT, INT, UINT, + * LONG, ULONG, LONGLONG, ULONGLONG# + * #type = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint, + * npy_long, npy_ulong, npy_longlong, npy_ulonglong# + * #IS_UNSIGNED = 0, 1, 0, 1, 0, 1, + * 0, 1, 0, 1# + * #MIN = NPY_MIN_BYTE, 0, NPY_MIN_SHORT, 0, NPY_MIN_INT, 0, + * NPY_MIN_LONG, 0, NPY_MIN_LONGLONG, 0# + */ +static int +@NAME@_get_constant(PyArray_Descr *descr, int constant_id, void *ptr) +{ + @type@ val; + switch (constant_id) { + case NPY_CONSTANT_zero: + val = 0; + break; + case NPY_CONSTANT_one: + val = 1; + break; + 
+        case NPY_CONSTANT_minimum_finite:
+#if @IS_UNSIGNED@
+            val = 0;
+#else
+            val = @MIN@;
+#endif
+            break;
+        case NPY_CONSTANT_maximum_finite:
+            val = NPY_MAX_@NAME@;
+            break;
+        default:
+            return 0;
+    }
+    @NAME@_copyswap(ptr, &val, !PyArray_ISNBO(descr->byteorder), NULL);
+    return 1;
+}
+/**end repeat**/
+
+/*
+ * Keep the HALF macros consistent with standard C.
+ * Reference: https://en.cppreference.com/w/c/types/limits.html
+ */
+#define HALF_MAX 31743  /* Bit pattern for 65504.0 */
+#define HALF_MIN 1024  /* Bit pattern for smallest positive normal: 2^-14 */
+#define HALF_NEG_MAX 64511  /* Bit pattern for -65504.0 */
+#define HALF_EPSILON 5120
+#define HALF_TRUE_MIN 0x0001  /* Bit pattern for smallest positive subnormal: 2^-24 */
+#define HALF_MAX_EXP 16
+#define HALF_MIN_EXP -13
+#define HALF_MANT_DIG 11  /* 10 + 1 (implicit) */
+#define HALF_DIG 3
+
+/*
+ * On PPC64 systems the IBM double-double format is a pair of IEEE binary64
+ * values (not a true IEEE quad). We derive epsilon from the usual interval
+ * definition: the difference between 1.0 and the next representable
+ * floating-point number larger than 1.0. The ~106 bits of mantissa precision
+ * (53+53) give an epsilon of 2^-105, but glibc returns 2^-1074 (DBL_TRUE_MIN).
+ */
+#if defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE) || \
+    defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE)
+    #undef LDBL_EPSILON
+    #define LDBL_EPSILON 0x1p-105L  /* 2^-105 */
+#endif
+
+/*
+ * Define *_TRUE_MIN macros for smallest subnormal values if not available.
+ * Use nextafter(0, 1) to get the smallest positive representable value.
+ */
+#ifndef FLT_TRUE_MIN
+    #define FLT_TRUE_MIN npy_nextafterf(0.0f, 1.0f)
+#endif
+#ifndef DBL_TRUE_MIN
+    #define DBL_TRUE_MIN npy_nextafter(0.0, 1.0)
+#endif
+#ifndef LDBL_TRUE_MIN
+    #define LDBL_TRUE_MIN npy_nextafterl(0.0L, 1.0L)
+#endif

+/**begin repeat
+ * #NAME = HALF, FLOAT, DOUBLE, LONGDOUBLE#
+ * #ABB = HALF, FLT, DBL, LDBL#
+ * #type = npy_half, npy_float, npy_double, npy_longdouble#
+ * #RADIX = 16384, 2, 2, 2#
+ * #NEG_MAX = HALF_NEG_MAX, -FLT_MAX, -DBL_MAX, -LDBL_MAX#
+ */
+static int
+@NAME@_get_constant(PyArray_Descr *descr, int constant_id, void *ptr)
+{
+    @type@ val;
+    switch (constant_id) {
+        case NPY_CONSTANT_zero:
+            val = 0;
+            break;
+        case NPY_CONSTANT_one:
+            val = 1;
+            break;
+        case NPY_CONSTANT_minimum_finite:
+            val = @NEG_MAX@;
+            break;
+        case NPY_CONSTANT_maximum_finite:
+        #if defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE) || \
+            defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE)
+            /* For IBM double-double, use nextafter(inf, 0) to get the true
+             * maximum representable value (matches old MachArLike behavior) */
+            if (sizeof(@type@) == sizeof(npy_longdouble)) {
+                val = npy_nextafterl((@type@)NPY_INFINITY, (@type@)0.0L);
+                break;
+            }
+        #endif
+            val = @ABB@_MAX;
+            break;
+        case NPY_CONSTANT_inf:
+            val = (@type@)NPY_INFINITYF;
+            break;
+        case NPY_CONSTANT_nan:
+            val = (@type@)NPY_NANF;
+            break;
+        case NPY_CONSTANT_finfo_radix:
+            val = @RADIX@;
+            break;
+        case NPY_CONSTANT_finfo_eps:
+            val = @ABB@_EPSILON;
+            break;
+        case NPY_CONSTANT_finfo_smallest_normal:
+            val = @ABB@_MIN;
+            break;
+        case NPY_CONSTANT_finfo_smallest_subnormal:
+            val = @ABB@_TRUE_MIN;
+            break;
+        case NPY_CONSTANT_finfo_nmant:
+            *(npy_intp *)ptr = @ABB@_MANT_DIG - 1;
+            return 1;
+        case NPY_CONSTANT_finfo_min_exp:
+            /*
+             * Definition: the minimum negative integer such that FLT_RADIX
+             * raised to one less than that power is a normalized float,
+             * double or long double, respectively.
+             * Reference: https://en.cppreference.com/w/c/types/limits.html
+             */
+            *(npy_intp *)ptr = @ABB@_MIN_EXP - 1;
+            return 1;
+        case NPY_CONSTANT_finfo_max_exp:
+            *(npy_intp *)ptr = @ABB@_MAX_EXP;
+            return 1;
+        case NPY_CONSTANT_finfo_decimal_digits:
+            *(npy_intp *)ptr = @ABB@_DIG;
+            return 1;
+        default:
+            return 0;
+    }
+    @NAME@_copyswap(ptr, &val, !PyArray_ISNBO(descr->byteorder), NULL);
+    return 1;
+}
+/**end repeat**/
+
+
+/**begin repeat
+ * #NAME = CFLOAT, CDOUBLE, CLONGDOUBLE,
+ *         OBJECT, STRING, UNICODE, VOID,
+ *         DATETIME, TIMEDELTA#
+ */
+static int
+@NAME@_get_constant(PyArray_Descr *descr, int constant_id, void *ptr)
+{
+    // TODO: We currently don't use this, but we could soon, e.g. for the
+    // reduction identity/initial value, so these should be implemented.
+    return 0;
+}
+/**end repeat**/
+
+/*
+ *****************************************************************************
+ **                         SETUP TYPE INFO                                 **
+ *****************************************************************************
+ */
 
 /*
  * This function is called during numpy module initialization,
@@ -4331,6 +4525,7 @@ NPY_NO_EXPORT int
 set_typeinfo(PyObject *dict)
 {
     PyObject *infodict = NULL;
+    PyArray_DTypeMeta *dtypemeta;  // borrowed
     int i;
 
     _PyArray_LegacyDescr *dtype;
@@ -4385,7 +4580,7 @@ set_typeinfo(PyObject *dict)
      * PyArray_ComplexAbstractDType*3,
      * PyArrayDescr_Type*6 #
      */
-    if (dtypemeta_wrap_legacy_descriptor(
+    dtypemeta = dtypemeta_wrap_legacy_descriptor(
             _builtin_descrs[NPY_@NAME@],
             &_Py@Name@_ArrFuncs,
             (PyTypeObject *)&@scls@,
@@ -4395,9 +4590,11 @@ set_typeinfo(PyObject *dict)
 #else
             NULL
 #endif
-            ) < 0) {
+            );
+    if (dtypemeta == NULL) {
         return -1;
     }
+    NPY_DT_SLOTS(dtypemeta)->get_constant = @NAME@_get_constant;
 
 /**end repeat**/
 
diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c
index 293d7dfee1b3..3f569cc3ccec 100644
--- a/numpy/_core/src/multiarray/dtypemeta.c
+++ b/numpy/_core/src/multiarray/dtypemeta.c
@@ -118,6 +118,17 @@ use_new_as_default(PyArray_DTypeMeta *self)
 }
 
 
+/*
+ * By default no constants are defined; users should override this, but
+ * having a default allows us to use it for legacy user dtypes.
+ */
+static int
+default_get_constant(PyArray_Descr *descr, int constant_id, void *data)
+{
+    return 0;
+}
+
+
 static int
 legacy_setitem_using_DType(PyObject *obj, void *data, void *arr)
 {
@@ -195,6 +206,7 @@ dtypemeta_initialize_struct_from_spec(
     NPY_DT_SLOTS(DType)->get_clear_loop = NULL;
     NPY_DT_SLOTS(DType)->get_fill_zero_loop = NULL;
     NPY_DT_SLOTS(DType)->finalize_descr = NULL;
+    NPY_DT_SLOTS(DType)->get_constant = default_get_constant;
     NPY_DT_SLOTS(DType)->f = default_funcs;
 
     PyType_Slot *spec_slot = spec->slots;
@@ -1068,9 +1080,9 @@ object_common_dtype(
  * Some may have more aliases, as `intp` is not its own thing,
  * as of writing this, these are not added here.
  *
- * @returns 0 on success, -1 on failure.
+ * @returns A borrowed reference to the new DType, or NULL on failure.
*/ -NPY_NO_EXPORT int +NPY_NO_EXPORT PyArray_DTypeMeta * dtypemeta_wrap_legacy_descriptor( _PyArray_LegacyDescr *descr, PyArray_ArrFuncs *arr_funcs, PyTypeObject *dtype_super_class, const char *name, const char *alias) @@ -1097,19 +1109,20 @@ dtypemeta_wrap_legacy_descriptor( "that of an existing dtype (with the assumption it is just " "copied over and can be replaced).", descr->typeobj, Py_TYPE(descr)); - return -1; + return NULL; } NPY_DType_Slots *dt_slots = PyMem_Malloc(sizeof(NPY_DType_Slots)); if (dt_slots == NULL) { - return -1; + return NULL; } memset(dt_slots, '\0', sizeof(NPY_DType_Slots)); + dt_slots->get_constant = default_get_constant; PyArray_DTypeMeta *dtype_class = PyMem_Malloc(sizeof(PyArray_DTypeMeta)); if (dtype_class == NULL) { PyMem_Free(dt_slots); - return -1; + return NULL; } /* @@ -1148,12 +1161,12 @@ dtypemeta_wrap_legacy_descriptor( /* Let python finish the initialization */ if (PyType_Ready((PyTypeObject *)dtype_class) < 0) { Py_DECREF(dtype_class); - return -1; + return NULL; } dt_slots->castingimpls = PyDict_New(); if (dt_slots->castingimpls == NULL) { Py_DECREF(dtype_class); - return -1; + return NULL; } /* @@ -1169,13 +1182,15 @@ dtypemeta_wrap_legacy_descriptor( /* Set default functions (correct for most dtypes, override below) */ dt_slots->default_descr = nonparametric_default_descr; dt_slots->discover_descr_from_pyobject = ( - nonparametric_discover_descr_from_pyobject); + nonparametric_discover_descr_from_pyobject); dt_slots->is_known_scalar_type = python_builtins_are_known_scalar_types; dt_slots->common_dtype = default_builtin_common_dtype; dt_slots->common_instance = NULL; dt_slots->ensure_canonical = ensure_native_byteorder; dt_slots->get_fill_zero_loop = NULL; dt_slots->finalize_descr = NULL; + dt_slots->setitem = NULL; + dt_slots->getitem = NULL; if (PyTypeNum_ISSIGNED(dtype_class->type_num)) { /* Convert our scalars (raise on too large unsigned and NaN, etc.) */ @@ -1233,7 +1248,7 @@ dtypemeta_wrap_legacy_descriptor( if (_PyArray_MapPyTypeToDType(dtype_class, descr->typeobj, PyTypeNum_ISUSERDEF(dtype_class->type_num)) < 0) { Py_DECREF(dtype_class); - return -1; + return NULL; } /* Finally, replace the current class of the descr */ @@ -1243,23 +1258,23 @@ dtypemeta_wrap_legacy_descriptor( if (!PyTypeNum_ISUSERDEF(descr->type_num)) { if (npy_cache_import_runtime("numpy.dtypes", "_add_dtype_helper", &npy_runtime_imports._add_dtype_helper) == -1) { - return -1; + return NULL; } if (PyObject_CallFunction( npy_runtime_imports._add_dtype_helper, "Os", (PyObject *)dtype_class, alias) == NULL) { - return -1; + return NULL; } } else { // ensure the within dtype cast is populated for legacy user dtypes if (PyArray_GetCastingImpl(dtype_class, dtype_class) == NULL) { - return -1; + return NULL; } } - return 0; + return dtype_class; } diff --git a/numpy/_core/src/multiarray/dtypemeta.h b/numpy/_core/src/multiarray/dtypemeta.h index a8b78e3f7518..202116a4cc91 100644 --- a/numpy/_core/src/multiarray/dtypemeta.h +++ b/numpy/_core/src/multiarray/dtypemeta.h @@ -67,6 +67,11 @@ typedef struct { * parameters, if any, as the operand dtype. */ PyArrayDTypeMeta_FinalizeDescriptor *finalize_descr; + /* + * Function to fetch constants. Always defined, but may return "undefined" + * for all values. 
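+     * (A return of 0 from the slot means the requested constant is
+     * undefined; 1 means it was written to the output pointer.  A minimal
+     * caller sketch, assuming a native-byte-order double descriptor:
+     *
+     *     double eps;
+     *     if (NPY_DT_CALL_get_constant(
+     *             descr, NPY_CONSTANT_finfo_eps, &eps) == 1) {
+     *         // eps now holds DBL_EPSILON
+     *     }
+     *
+     * For integer-valued constants such as NPY_CONSTANT_finfo_nmant, the
+     * output pointer must point to an npy_intp instead.)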
+ */ + PyArrayDTypeMeta_GetConstant *get_constant; /* * The casting implementation (ArrayMethod) to convert between two * instances of this DType, stored explicitly for fast access: @@ -89,7 +94,7 @@ typedef struct { // This must be updated if new slots before within_dtype_castingimpl // are added -#define NPY_NUM_DTYPE_SLOTS 11 +#define NPY_NUM_DTYPE_SLOTS 12 #define NPY_NUM_DTYPE_PYARRAY_ARRFUNCS_SLOTS 22 #define NPY_DT_MAX_ARRFUNCS_SLOT \ NPY_NUM_DTYPE_PYARRAY_ARRFUNCS_SLOTS + _NPY_DT_ARRFUNCS_OFFSET @@ -124,6 +129,8 @@ typedef struct { NPY_DT_SLOTS(NPY_DTYPE(descr))->getitem(descr, data_ptr) #define NPY_DT_CALL_setitem(descr, value, data_ptr) \ NPY_DT_SLOTS(NPY_DTYPE(descr))->setitem(descr, value, data_ptr) +#define NPY_DT_CALL_get_constant(descr, constant_id, data_ptr) \ + NPY_DT_SLOTS(NPY_DTYPE(descr))->get_constant(descr, constant_id, data_ptr) /* @@ -153,7 +160,7 @@ NPY_NO_EXPORT int python_builtins_are_known_scalar_types( PyArray_DTypeMeta *cls, PyTypeObject *pytype); -NPY_NO_EXPORT int +NPY_NO_EXPORT PyArray_DTypeMeta * dtypemeta_wrap_legacy_descriptor( _PyArray_LegacyDescr *descr, PyArray_ArrFuncs *arr_funcs, PyTypeObject *dtype_super_class, const char *name, const char *alias); diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index a7fdf3efba17..4ab3f5bae02c 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -4327,6 +4327,109 @@ normalize_axis_index(PyObject *NPY_UNUSED(self), } +static PyObject * +_populate_finfo_constants(PyObject *NPY_UNUSED(self), PyObject *args) +{ + if (PyTuple_Size(args) != 2) { + PyErr_SetString(PyExc_TypeError, "Expected 2 arguments"); + return NULL; + } + PyObject *finfo = PyTuple_GetItem(args, 0); + if (finfo == NULL || finfo == Py_None) { + PyErr_SetString(PyExc_TypeError, "First argument cannot be None"); + return NULL; + } + PyArray_Descr *descr = (PyArray_Descr *)PyTuple_GetItem(args, 1); + if (!PyArray_DescrCheck(descr)) { + PyErr_SetString(PyExc_TypeError, "Second argument must be a dtype"); + return NULL; + } + + static const struct { + char *name; + int id; + npy_bool is_int; + } finfo_constants[] = { + {"max", NPY_CONSTANT_maximum_finite, 0}, + {"min", NPY_CONSTANT_minimum_finite, 0}, + {"_radix", NPY_CONSTANT_finfo_radix, 0}, + {"eps", NPY_CONSTANT_finfo_eps, 0}, + {"smallest_normal", NPY_CONSTANT_finfo_smallest_normal, 0}, + {"smallest_subnormal", NPY_CONSTANT_finfo_smallest_subnormal, 0}, + {"nmant", NPY_CONSTANT_finfo_nmant, 1}, + {"minexp", NPY_CONSTANT_finfo_min_exp, 1}, + {"maxexp", NPY_CONSTANT_finfo_max_exp, 1}, + {"precision", NPY_CONSTANT_finfo_decimal_digits, 1}, + }; + static const int n_finfo_constants = sizeof(finfo_constants) / sizeof(finfo_constants[0]); + + int n_float_constants = 0; + for (int i = 0; i < n_finfo_constants; i++) { + if (!finfo_constants[i].is_int) { + n_float_constants++; + } + } + + PyArrayObject *buffer_array = NULL; + char *buffer_data = NULL; + npy_intp dims[1] = {n_float_constants}; + + Py_INCREF(descr); + buffer_array = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, + descr, 1, dims, NULL, NULL, 0, NULL); + if (buffer_array == NULL) { + return NULL; + } + buffer_data = PyArray_BYTES(buffer_array); + npy_intp elsize = PyArray_DESCR(buffer_array)->elsize; + + for (int i = 0; i < n_finfo_constants; i++) + { + PyObject *value_obj; + if (!finfo_constants[i].is_int) { + int res = NPY_DT_CALL_get_constant(descr, + finfo_constants[i].id, buffer_data); + if (res < 0) { + goto fail; + } 
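+            /*
+             * res == 0 means this dtype leaves the constant undefined;
+             * skip setting the attribute but still advance past the
+             * reserved slot in the buffer.
+             */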
+ if (res == 0) { + buffer_data += elsize; // Move to next element + continue; + } + // Return as 0-d array item to preserve numpy scalar type + value_obj = PyArray_ToScalar(buffer_data, buffer_array); + buffer_data += elsize; // Move to next element + } + else { + npy_intp int_value; + int res = NPY_DT_CALL_get_constant(descr, finfo_constants[i].id, &int_value); + if (res < 0) { + goto fail; + } + if (res == 0) { + continue; + } + value_obj = PyLong_FromSsize_t(int_value); + } + if (value_obj == NULL) { + goto fail; + } + int res = PyObject_SetAttrString(finfo, finfo_constants[i].name, value_obj); + Py_DECREF(value_obj); + if (res < 0) { + goto fail; + } + } + + Py_DECREF(buffer_array); + Py_RETURN_NONE; + fail: + Py_XDECREF(buffer_array); + return NULL; +} + + + static PyObject * _set_numpy_warn_if_no_mem_policy(PyObject *NPY_UNUSED(self), PyObject *arg) { @@ -4556,6 +4659,8 @@ static struct PyMethodDef array_module_methods[] = { METH_VARARGS | METH_KEYWORDS, NULL}, {"_load_from_filelike", (PyCFunction)_load_from_filelike, METH_FASTCALL | METH_KEYWORDS, NULL}, + {"_populate_finfo_constants", (PyCFunction)_populate_finfo_constants, + METH_VARARGS, NULL}, /* from umath */ {"frompyfunc", (PyCFunction) ufunc_frompyfunc, diff --git a/numpy/_core/src/multiarray/usertypes.c b/numpy/_core/src/multiarray/usertypes.c index 9d026f32044f..71c95a8ae39c 100644 --- a/numpy/_core/src/multiarray/usertypes.c +++ b/numpy/_core/src/multiarray/usertypes.c @@ -306,8 +306,9 @@ PyArray_RegisterDataType(PyArray_DescrProto *descr_proto) descr->type_num = typenum; /* update prototype to notice duplicate registration */ descr_proto->type_num = typenum; - if (dtypemeta_wrap_legacy_descriptor( - descr, descr_proto->f, &PyArrayDescr_Type, name, NULL) < 0) { + PyArray_DTypeMeta *wrapped_dtype = dtypemeta_wrap_legacy_descriptor( + descr, descr_proto->f, &PyArrayDescr_Type, name, NULL); + if (wrapped_dtype == NULL) { descr->type_num = -1; NPY_NUMUSERTYPES--; /* Override the type, it might be wrong and then decref crashes */ diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 4981f2fd0e30..27e98a563930 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -215,14 +215,6 @@ def test_not_deprecated(self, name: str) -> None: self.assert_not_deprecated(lambda: getattr(self.ctypes, name)) -class TestMachAr(_DeprecationTestCase): - # Deprecated 2022-11-22, NumPy 1.25 - warning_cls = DeprecationWarning - - def test_deprecated_module(self): - self.assert_deprecated(lambda: np._core.MachAr) - - class TestQuantileInterpolationDeprecation(_DeprecationTestCase): # Deprecated 2021-11-08, NumPy 1.22 @pytest.mark.parametrize("func", diff --git a/numpy/_core/tests/test_finfo.py b/numpy/_core/tests/test_finfo.py new file mode 100644 index 000000000000..572490c1eb08 --- /dev/null +++ b/numpy/_core/tests/test_finfo.py @@ -0,0 +1,83 @@ +import pytest + +import numpy as np +from numpy import exp2, log10 +from numpy._core import numerictypes as ntypes + + +class MachArLike: + """Minimal class to simulate machine arithmetic parameters.""" + def __init__(self, dtype, machep, negep, minexp, maxexp, nmant, iexp): + self.dtype = dtype + self.machep = machep + self.negep = negep + self.minexp = minexp + self.maxexp = maxexp + self.nmant = nmant + self.iexp = iexp + self.eps = exp2(dtype(-nmant)) + self.epsneg = exp2(dtype(negep)) + self.precision = int(-log10(self.eps)) + self.resolution = dtype(10) ** (-self.precision) + + +@pytest.fixture +def 
float16_ma(): + """Machine arithmetic parameters for float16.""" + f16 = ntypes.float16 + return MachArLike(f16, + machep=-10, + negep=-11, + minexp=-14, + maxexp=16, + nmant=10, + iexp=5) + + +@pytest.fixture +def float32_ma(): + """Machine arithmetic parameters for float32.""" + f32 = ntypes.float32 + return MachArLike(f32, + machep=-23, + negep=-24, + minexp=-126, + maxexp=128, + nmant=23, + iexp=8) + + +@pytest.fixture +def float64_ma(): + """Machine arithmetic parameters for float64.""" + f64 = ntypes.float64 + return MachArLike(f64, + machep=-52, + negep=-53, + minexp=-1022, + maxexp=1024, + nmant=52, + iexp=11) + + +@pytest.mark.parametrize("dtype,ma_fixture", [ + (np.half, "float16_ma"), + (np.float32, "float32_ma"), + (np.float64, "float64_ma"), +]) +@pytest.mark.parametrize("prop", [ + 'machep', 'negep', 'minexp', 'maxexp', 'nmant', 'iexp', + 'eps', 'epsneg', 'precision', 'resolution' +]) +def test_finfo_properties(dtype, ma_fixture, prop, request): + """Test that finfo properties match expected machine arithmetic values.""" + ma = request.getfixturevalue(ma_fixture) + finfo = np.finfo(dtype) + + actual = getattr(finfo, prop) + expected = getattr(ma, prop) + + assert actual == expected, ( + f"finfo({dtype}) property '{prop}' mismatch: " + f"expected {expected}, got {actual}" + ) diff --git a/numpy/_core/tests/test_getlimits.py b/numpy/_core/tests/test_getlimits.py index 721c6ac6cdf9..4e911b89e89f 100644 --- a/numpy/_core/tests/test_getlimits.py +++ b/numpy/_core/tests/test_getlimits.py @@ -9,7 +9,6 @@ import numpy as np from numpy import double, half, longdouble, single from numpy._core import finfo, iinfo -from numpy._core.getlimits import _discovered_machar, _float_ma from numpy.testing import assert_, assert_equal, assert_raises ################################################## @@ -139,53 +138,20 @@ def test_instances(): finfo(np.int64(1)) -def assert_ma_equal(discovered, ma_like): - # Check MachAr-like objects same as calculated MachAr instances - for key, value in discovered.__dict__.items(): - assert_equal(value, getattr(ma_like, key)) - if hasattr(value, 'shape'): - assert_equal(value.shape, getattr(ma_like, key).shape) - assert_equal(value.dtype, getattr(ma_like, key).dtype) - - -def test_known_types(): - # Test we are correctly compiling parameters for known types - for ftype, ma_like in ((np.float16, _float_ma[16]), - (np.float32, _float_ma[32]), - (np.float64, _float_ma[64])): - assert_ma_equal(_discovered_machar(ftype), ma_like) - # Suppress warning for broken discovery of double double on PPC - with np.errstate(all='ignore'): - ld_ma = _discovered_machar(np.longdouble) - bytes = np.dtype(np.longdouble).itemsize - if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16): - # 80-bit extended precision - assert_ma_equal(ld_ma, _float_ma[80]) - elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16: - # IEE 754 128-bit - assert_ma_equal(ld_ma, _float_ma[128]) - - def test_subnormal_warning(): """Test that the subnormal is zero warning is not being raised.""" - with np.errstate(all='ignore'): - ld_ma = _discovered_machar(np.longdouble) - bytes = np.dtype(np.longdouble).itemsize with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') - if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16): - # 80-bit extended precision - ld_ma.smallest_subnormal - assert len(w) == 0 - elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16: - # IEE 754 128-bit - ld_ma.smallest_subnormal - assert len(w) == 0 - else: - # Double double 
- ld_ma.smallest_subnormal - # This test may fail on some platforms - assert len(w) == 0 + # Test for common float types + for dtype in [np.float16, np.float32, np.float64]: + f = finfo(dtype) + _ = f.smallest_subnormal + # Also test longdouble + with np.errstate(all='ignore'): + fld = finfo(np.longdouble) + _ = fld.smallest_subnormal + # Check no warnings were raised + assert len(w) == 0 def test_plausible_finfo(): diff --git a/numpy/_core/tests/test_longdouble.py b/numpy/_core/tests/test_longdouble.py index f7edd9774573..a7aa9145711a 100644 --- a/numpy/_core/tests/test_longdouble.py +++ b/numpy/_core/tests/test_longdouble.py @@ -291,7 +291,8 @@ def test_array_repr(): b = np.array([1], dtype=np.longdouble) if not np.all(a != b): raise ValueError("precision loss creating arrays") - assert_(repr(a) != repr(b)) + with np.printoptions(precision=LD_INFO.precision + 1): + assert_(repr(a) != repr(b)) # # Locale tests: scalar types formatting should be independent of the locale diff --git a/numpy/_core/tests/test_machar.py b/numpy/_core/tests/test_machar.py deleted file mode 100644 index 2d772dd51233..000000000000 --- a/numpy/_core/tests/test_machar.py +++ /dev/null @@ -1,30 +0,0 @@ -""" -Test machar. Given recent changes to hardcode type data, we might want to get -rid of both MachAr and this test at some point. - -""" -import numpy._core.numerictypes as ntypes -from numpy import array, errstate -from numpy._core._machar import MachAr - - -class TestMachAr: - def _run_machar_highprec(self): - # Instantiate MachAr instance with high enough precision to cause - # underflow - try: - hiprec = ntypes.float96 - MachAr(lambda v: array(v, hiprec)) - except AttributeError: - # Fixme, this needs to raise a 'skip' exception. - "Skipping test: no ntypes.float96 available on this platform." - - def test_underlow(self): - # Regression test for #759: - # instantiating MachAr for dtype = np.float96 raises spurious warning. - with errstate(all='raise'): - try: - self._run_machar_highprec() - except FloatingPointError as e: - msg = f"Caught {e} exception, should not have been raised." 
- raise AssertionError(msg) diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index fba8e8abbe6d..751c66584275 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -1000,7 +1000,7 @@ def test_floating_exceptions(self, typecode): if np.dtype(ftype).kind == 'f': # Get some extreme values for the type fi = np.finfo(ftype) - ft_tiny = fi._machar.tiny + ft_tiny = fi.tiny ft_max = fi.max ft_eps = fi.eps underflow = 'underflow' @@ -1009,7 +1009,7 @@ def test_floating_exceptions(self, typecode): # 'c', complex, corresponding real dtype rtype = type(ftype(0).real) fi = np.finfo(rtype) - ft_tiny = ftype(fi._machar.tiny) + ft_tiny = ftype(fi.tiny) ft_max = ftype(fi.max) ft_eps = ftype(fi.eps) # The complex types raise different exceptions diff --git a/numpy/typing/tests/data/reveal/arrayterator.pyi b/numpy/typing/tests/data/reveal/arrayterator.pyi index 470160c24de3..85eeff4add08 100644 --- a/numpy/typing/tests/data/reveal/arrayterator.pyi +++ b/numpy/typing/tests/data/reveal/arrayterator.pyi @@ -13,7 +13,7 @@ assert_type(ar_iter.start, list[int]) assert_type(ar_iter.stop, list[int]) assert_type(ar_iter.step, list[int]) assert_type(ar_iter.shape, tuple[Any, ...]) -assert_type(ar_iter.flat, Generator[np.int64, None, None]) +assert_type(ar_iter.flat, Generator[np.int64]) assert_type(ar_iter.__array__(), npt.NDArray[np.int64]) From b5b2d3d3d527319befcb2656f7c6ce5ff311019b Mon Sep 17 00:00:00 2001 From: Sayed Awad Date: Mon, 6 Oct 2025 03:11:17 +0300 Subject: [PATCH 0527/1018] MAINT: Fix MSVC warnings and add CI check with allowlist (#29868) --- .github/check-warnings/action.yml | 38 +++++++++++++++++++ .../check-warnings/msvc-allowed-warnings.txt | 31 +++++++++++++++ .github/workflows/windows.yml | 9 ++++- numpy/_core/src/common/npy_config.h | 5 +++ numpy/_core/src/multiarray/arraytypes.c.src | 2 +- numpy/_core/src/multiarray/item_selection.c | 27 ++++++------- .../multiarray/lowlevel_strided_loops.c.src | 16 ++++---- numpy/_core/src/multiarray/nditer_constr.c | 6 +-- .../multiarray/stringdtype/static_string.c | 7 ++-- .../src/umath/loops_autovec.dispatch.c.src | 2 +- .../src/umath/loops_logical.dispatch.cpp | 4 +- .../src/umath/loops_minmax.dispatch.c.src | 2 +- numpy/_core/src/umath/scalarmath.c.src | 16 +++++--- numpy/_core/src/umath/string_buffer.h | 6 +++ numpy/linalg/lapack_lite/f2c.c | 2 +- .../random/src/distributions/distributions.c | 10 ++--- numpy/random/src/distributions/logfactorial.c | 2 +- .../random/src/legacy/legacy-distributions.c | 2 +- 18 files changed, 141 insertions(+), 46 deletions(-) create mode 100644 .github/check-warnings/action.yml create mode 100644 .github/check-warnings/msvc-allowed-warnings.txt diff --git a/.github/check-warnings/action.yml b/.github/check-warnings/action.yml new file mode 100644 index 000000000000..f3f6778e229b --- /dev/null +++ b/.github/check-warnings/action.yml @@ -0,0 +1,38 @@ +name: "Check Warnings" +description: "Filter build warnings against an allowlist" + +inputs: + log-file: + description: "Path to build log file" + required: true + allowlist: + description: "Path to allowed warnings regex file" + required: true + warning-regex: + description: "Regex to extract warnings from the log" + required: true + +runs: + using: "composite" + steps: + - name: Extract warnings + shell: bash + run: | + echo "Extracting warnings from ${{ inputs.log-file }} using regex: ${{ inputs['warning-regex'] }}" + grep -E "${{ inputs['warning-regex'] }}" "${{ inputs.log-file }}" | tee warnings.log || 
true + + if [ ! -s warnings.log ]; then + echo "No warnings found." + exit 0 + fi + + echo "Filtering against allowlist ${{ inputs.allowlist }}" + grep -v -F -f "${{ inputs.allowlist }}" warnings.log | tee disallowed.log || true + + if [ -s disallowed.log ]; then + echo "::error::Disallowed warnings detected:" + cat disallowed.log + exit 1 + else + echo "All warnings are allowed." + fi diff --git a/.github/check-warnings/msvc-allowed-warnings.txt b/.github/check-warnings/msvc-allowed-warnings.txt new file mode 100644 index 000000000000..7d2c149629ec --- /dev/null +++ b/.github/check-warnings/msvc-allowed-warnings.txt @@ -0,0 +1,31 @@ +../numpy/linalg/lapack_lite/f2c.c(194): warning C4244: 'return': conversion from 'double' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_c_lapack.c(230): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_c_lapack.c(250): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_c_lapack.c(1530): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(1625): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(1645): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(2865): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(2882): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(2894): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_config.c(1368): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c.c(194): warning C4244: 'return': conversion from 'double' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_c_lapack.c(230): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_c_lapack.c(250): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_c_lapack.c(1530): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(1625): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(1645): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(2865): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(2882): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(2894): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +numpy/random/_generator.cp311-win_arm64.pyd.p/numpy/random/_generator.pyx.c(26966): warning C4244: 'function': conversion from 'int64_t' to 'double' +numpy/random/_generator.cp311-win_arm64.pyd.p/numpy/random/_generator.pyx.c(39070): warning C4244: 'function': conversion from 'int64_t' to 'double' +numpy/random/_generator.cp311-win32.pyd.p/numpy/random/_generator.pyx.c(26966): warning C4244: 'function': conversion from 'int64_t' to 'double' 
+numpy/random/_generator.cp311-win32.pyd.p/numpy/random/_generator.pyx.c(39070): warning C4244: 'function': conversion from 'int64_t' to 'double'
+cl : Command line warning D9025 : overriding '/arch:SSE2' with '/arch:AVX2'
+cl : Command line warning D9025 : overriding '/arch:SSE2' with '/arch:AVX512'
+cl : Command line warning D9025 : overriding '/arch:SSE2' with '/arch:AVX2'
+D:\a\numpy\numpy\numpy\_core\src\npysort\x86-simd-sort/src/avx512-16bit-qsort.hpp(52): warning C4309: 'argument': truncation of constant value
+D:\a\numpy\numpy\numpy\_core\src\npysort\x86-simd-sort/src/avx512-16bit-qsort.hpp(53): warning C4309: 'argument': truncation of constant value
+D:\a\numpy\numpy\numpy\_core\src\npysort\x86-simd-sort/src/avx512-16bit-qsort.hpp(65): warning C4309: 'argument': truncation of constant value
+D:\a\numpy\numpy\numpy\_core\src\npysort\x86-simd-sort/src/avx512-16bit-qsort.hpp(139): warning C4556:
+
diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml
index 2cfb98e67aba..b86543189941 100644
--- a/.github/workflows/windows.yml
+++ b/.github/workflows/windows.yml
@@ -96,7 +96,14 @@ jobs:
 
       - name: Build and install
         run: |
-          python -m pip install . -v -Ccompile-args="-j2" -Csetup-args="-Dallow-noblas=true"
+          python -m pip install . -v -Ccompile-args="-j2" -Csetup-args="-Dallow-noblas=true" 2>&1 | tee build.log
+
+      - name: Check warnings
+        uses: ./.github/check-warnings
+        with:
+          log-file: ./build.log
+          allowlist: ./.github/check-warnings/msvc-allowed-warnings.txt
+          warning-regex: "warning C|Command line warning"
 
       - name: Install test dependencies
         run: |
diff --git a/numpy/_core/src/common/npy_config.h b/numpy/_core/src/common/npy_config.h
index 82641a85509e..ccb81ca7110b 100644
--- a/numpy/_core/src/common/npy_config.h
+++ b/numpy/_core/src/common/npy_config.h
@@ -1,6 +1,11 @@
 #ifndef NUMPY_CORE_SRC_COMMON_NPY_CONFIG_H_
 #define NUMPY_CORE_SRC_COMMON_NPY_CONFIG_H_
 
+#if defined(_MSC_VER)
+// Suppress warning C4146: unary minus on an unsigned type is valid (it wraps around)
+#pragma warning(disable:4146)
+#endif
+
 #include "config.h"
 #include "npy_cpu_dispatch.h" // brings NPY_HAVE_[CPU features]
 #include "numpy/numpyconfig.h"
diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src
index 0a063f6ba07a..2828b9543fbd 100644
--- a/numpy/_core/src/multiarray/arraytypes.c.src
+++ b/numpy/_core/src/multiarray/arraytypes.c.src
@@ -1353,7 +1353,7 @@ static void
 
     while (n--) {
         @type1@ t = (@type1@)*ip++;
-        *op++ = t;
+        *op++ = (@type2@)t;
 #if @steps@ == 2 /* complex type */
         *op++ = 0;
 #endif
diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c
index 9ec70acde6dd..5e622ed59e08 100644
--- a/numpy/_core/src/multiarray/item_selection.c
+++ b/numpy/_core/src/multiarray/item_selection.c
@@ -625,25 +625,26 @@ npy_fastputmask(
         npy_intp ni, npy_intp nv, npy_intp chunk)
 {
     if (chunk == 1) {
-        return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk);
+        npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk);
     }
-    if (chunk == 2) {
-        return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk);
+    else if (chunk == 2) {
+        npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk);
     }
-    if (chunk == 4) {
-        return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk);
+    else if (chunk == 4) {
+        npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk);
     }
-    if (chunk == 8) {
-        return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk);
+    else if (chunk == 8) {
+        npy_fastputmask_impl(dest, src, mask_data,
ni, nv, chunk); } - if (chunk == 16) { - return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + else if (chunk == 16) { + npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); } - if (chunk == 32) { - return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + else if (chunk == 32) { + npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + } + else { + npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); } - - return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); } diff --git a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src index e75dd37cc465..368d299d5d01 100644 --- a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src +++ b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src @@ -764,7 +764,7 @@ npy_uint64 _npy_halfbits_to_doublebits(npy_uint16 h){ #elif defined(__ARM_FP16_FORMAT_IEEE) return npy_halfbits_to_doublebits(h); #else - return (double)(ToDoubleBits(h)); + return ToDoubleBits(h); #endif } #endif @@ -825,12 +825,12 @@ npy_uint64 _npy_halfbits_to_doublebits(npy_uint16 h){ * #type2max = 0, * UCHAR_MAX, USHRT_MAX, UINT_MAX, ULONG_MAX, ULLONG_MAX, * SCHAR_MAX, SHRT_MAX, INT_MAX, LONG_MAX, LLONG_MAX, - * 65500, FLT_MAX, DBL_MAX, LDBL_MAX, + * 65500.0f, FLT_MAX, DBL_MAX, LDBL_MAX, * FLT_MAX, DBL_MAX, LDBL_MAX# * #type2min = 0, * 0, 0, 0, 0, 0, * SCHAR_MIN, SHRT_MIN, INT_MIN, LONG_MIN, LLONG_MIN, - * -65500, -FLT_MAX, -DBL_MAX, -LDBL_MAX, + * -65500.0f, -FLT_MAX, -DBL_MAX, -LDBL_MAX, * -FLT_MAX, -DBL_MAX, -LDBL_MAX# * #is_bool2 = 1, 0*17# * #is_emu_half2 = 0*11, EMULATED_FP16, 0*6# @@ -848,7 +848,10 @@ npy_uint64 _npy_halfbits_to_doublebits(npy_uint16 h){ #if !(NPY_USE_UNALIGNED_ACCESS && !@aligned@) -/* For emulated half types, don't use actual double/float types in conversion */ +/* + * For emulated half types, don't use actual double/float types in conversion + * except for *_check_same_value_*(), follow _ROUND_TRIP and _TO_RTYPE1. + */ #if @is_emu_half1@ || @is_emu_half2@ # if @is_float1@ @@ -896,13 +899,12 @@ npy_uint64 _npy_halfbits_to_doublebits(npy_uint16 h){ #elif @is_emu_half2@ # define _TO_RTYPE1(x) (@rtype1@)(x) - # if @is_float1@ # define _CONVERT_FN(x) npy_floatbits_to_halfbits(x) -# define _ROUND_TRIP(x) (@rtype1@)npy_halfbits_to_floatbits(_CONVERT_FN(x)) +# define _ROUND_TRIP(x) npy_half_to_float(npy_float_to_half(x)) # elif @is_double1@ # define _CONVERT_FN(x) npy_doublebits_to_halfbits(x) -# define _ROUND_TRIP(x) (@rtype1@)_npy_halfbits_to_doublebits(_CONVERT_FN(x)) +# define _ROUND_TRIP(x) npy_half_to_double(npy_double_to_half(x)) # elif @is_emu_half1@ # define _CONVERT_FN(x) (x) # define _ROUND_TRIP(x) (x) diff --git a/numpy/_core/src/multiarray/nditer_constr.c b/numpy/_core/src/multiarray/nditer_constr.c index a8f13a73ee1e..ffe37e80c9be 100644 --- a/numpy/_core/src/multiarray/nditer_constr.c +++ b/numpy/_core/src/multiarray/nditer_constr.c @@ -2126,11 +2126,11 @@ npyiter_find_buffering_setup(NpyIter *iter, npy_intp buffersize) break; /* Avoid a zero coresize. */ } - double bufsize = size; - if (bufsize > maximum_size && + double bufsize = (double)size; + if (size > maximum_size && (cost > 1 || !(itflags & NPY_ITFLAG_GROWINNER))) { /* If we need buffering, limit size in cost calculation. 
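             * (i.e. costs are compared as if at most `maximum_size` elements
             * were buffered for this dimension.)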
*/
-            bufsize = maximum_size;
+            bufsize = (double)maximum_size;
         }
 
         NPY_IT_DBG_PRINT("    dim=%d, n_buffered=%d, cost=%g @bufsize=%g (prev scaled cost=%g)\n",
diff --git a/numpy/_core/src/multiarray/stringdtype/static_string.c b/numpy/_core/src/multiarray/stringdtype/static_string.c
index 89b53bcb8538..f5c2025e183a 100644
--- a/numpy/_core/src/multiarray/stringdtype/static_string.c
+++ b/numpy/_core/src/multiarray/stringdtype/static_string.c
@@ -177,14 +177,15 @@ arena_malloc(npy_string_arena *arena, npy_string_realloc_func r, size_t size)
     }
     else if (((ARENA_EXPAND_FACTOR * arena->size) - arena->cursor) >
              string_storage_size) {
-        newsize = ARENA_EXPAND_FACTOR * arena->size;
+        newsize = (size_t)(ARENA_EXPAND_FACTOR * arena->size);
     }
     else {
         newsize = arena->size + string_storage_size;
     }
-    if ((arena->cursor + size) >= newsize) {
+    // Check whether there is enough room for both the payload and its header
+    if ((arena->cursor + string_storage_size) > newsize) {
         // need extra room beyond the expansion factor, leave some padding
-        newsize = ARENA_EXPAND_FACTOR * (arena->cursor + size);
+        newsize = (size_t)(ARENA_EXPAND_FACTOR * (arena->cursor + string_storage_size));
     }
     // passing a NULL buffer to realloc is the same as malloc
     char *newbuf = r(arena->buffer, newsize);
diff --git a/numpy/_core/src/umath/loops_autovec.dispatch.c.src b/numpy/_core/src/umath/loops_autovec.dispatch.c.src
index 983fa1b5eb80..a8fb2084a241 100644
--- a/numpy/_core/src/umath/loops_autovec.dispatch.c.src
+++ b/numpy/_core/src/umath/loops_autovec.dispatch.c.src
@@ -51,7 +51,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_square)
 NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_reciprocal)
 (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data))
 {
-    UNARY_LOOP_FAST(@type@, @type@, *out = 1.0 / in);
+    UNARY_LOOP_FAST(@type@, @type@, *out = (@type@)(1.0 / in));
 }
 
 /**begin repeat1
diff --git a/numpy/_core/src/umath/loops_logical.dispatch.cpp b/numpy/_core/src/umath/loops_logical.dispatch.cpp
index c05584f467aa..5c1834cc29e2 100644
--- a/numpy/_core/src/umath/loops_logical.dispatch.cpp
+++ b/numpy/_core/src/umath/loops_logical.dispatch.cpp
@@ -61,12 +61,12 @@ HWY_INLINE HWY_ATTR Vec simd_logical_or_u8(Vec a, Vec
     return byte_to_true(r);
 }
 
-HWY_INLINE HWY_ATTR npy_bool simd_any_u8(Vec v)
+HWY_INLINE HWY_ATTR bool simd_any_u8(Vec v)
 {
     return hn::ReduceMax(_Tag(), v) != 0;
 }
 
-HWY_INLINE HWY_ATTR npy_bool simd_all_u8(Vec v)
+HWY_INLINE HWY_ATTR bool simd_all_u8(Vec v)
 {
     return hn::ReduceMin(_Tag(), v) != 0;
 }
diff --git a/numpy/_core/src/umath/loops_minmax.dispatch.c.src b/numpy/_core/src/umath/loops_minmax.dispatch.c.src
index c11f391f9159..a33297ca83d5 100644
--- a/numpy/_core/src/umath/loops_minmax.dispatch.c.src
+++ b/numpy/_core/src/umath/loops_minmax.dispatch.c.src
@@ -123,7 +123,7 @@ simd_reduce_c_@intrin@_@sfx@(const npyv_lanetype_@sfx@ *ip, npyv_lanetype_@sfx@
     npyv_@sfx@ acc = npyv_setall_@sfx@(op1[0]);
     for (; len >= wstep; len -= wstep, ip += wstep) {
     #ifdef NPY_HAVE_SSE2
-        NPY_PREFETCH(ip + wstep, 0, 3);
+        NPY_PREFETCH((const char*)(ip + wstep), 0, 3);
     #endif
         npyv_@sfx@ v0 = npyv_load_@sfx@(ip + vstep * 0);
         npyv_@sfx@ v1 = npyv_load_@sfx@(ip + vstep * 1);
diff --git a/numpy/_core/src/umath/scalarmath.c.src b/numpy/_core/src/umath/scalarmath.c.src
index a565eee8f939..3c57d6866c56 100644
--- a/numpy/_core/src/umath/scalarmath.c.src
+++ b/numpy/_core/src/umath/scalarmath.c.src
@@ -826,6 +826,10 @@ typedef enum {
  *         npy_long, npy_ulong, npy_longlong, npy_ulonglong,
  *         npy_half, npy_float, npy_double,
npy_longdouble, * npy_cfloat, npy_cdouble, npy_clongdouble# + * #scalar_type = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint, + * npy_long, npy_ulong, npy_longlong, npy_ulonglong, + * npy_half, npy_float, npy_double, npy_longdouble, + * npy_float, npy_double, npy_longdouble# * #c = x*14, f, , l# */ @@ -846,10 +850,10 @@ typedef enum { *result = npy_float_to_half((float)(value)) #elif defined(IS_CFLOAT) || defined(IS_CDOUBLE) || defined(IS_CLONGDOUBLE) #define CONVERT_TO_RESULT(value) \ - npy_csetreal@c@(result, value); \ + npy_csetreal@c@(result, ((@scalar_type@)(value))); \ npy_csetimag@c@(result, 0) #else - #define CONVERT_TO_RESULT(value) *result = value + #define CONVERT_TO_RESULT(value) *result = ((@type@)(value)) #endif @@ -1386,7 +1390,7 @@ static PyObject * case DEFER_TO_OTHER_KNOWN_SCALAR: Py_RETURN_NOTIMPLEMENTED; case CONVERSION_SUCCESS: - other_val = other_val_conv; /* Need a float value */ + other_val = (double)other_val_conv; /* Need a float value */ break; /* successfully extracted value we can proceed */ case OTHER_IS_UNKNOWN_OBJECT: case PROMOTION_REQUIRED: @@ -1405,12 +1409,12 @@ static PyObject * npy_clear_floatstatus_barrier((char*)&arg1); if (is_forward) { - arg1 = PyArrayScalar_VAL(a, @Name@); + arg1 = (double)PyArrayScalar_VAL(a, @Name@); arg2 = other_val; } else { arg1 = other_val; - arg2 = PyArrayScalar_VAL(b, @Name@); + arg2 = (double)PyArrayScalar_VAL(b, @Name@); } /* Note that arguments are already float64, so we can just divide */ @@ -1831,7 +1835,7 @@ static PyObject * } return @func@(@to_ctype@(npy_creal@n@(PyArrayScalar_VAL(obj, @Name@)))); #else - return @func@(@to_ctype@(PyArrayScalar_VAL(obj, @Name@))); + return @func@((double)(@to_ctype@(PyArrayScalar_VAL(obj, @Name@)))); #endif } /**end repeat**/ diff --git a/numpy/_core/src/umath/string_buffer.h b/numpy/_core/src/umath/string_buffer.h index dafedcbc03ff..1e7bea49a365 100644 --- a/numpy/_core/src/umath/string_buffer.h +++ b/numpy/_core/src/umath/string_buffer.h @@ -17,6 +17,12 @@ #define CHECK_OVERFLOW(index) if (buf + (index) >= after) return 0 #define MSB(val) ((val) >> 7 & 1) +#ifdef _MSC_VER +// MSVC sometimes complains (C4715: "not all control paths return a value") +// on switch statements over enum classes, even though all enum values are covered. +// This warning is suppressed here to avoid invasive changes. +# pragma warning(disable:4715) +#endif enum class ENCODING { ASCII, UTF32, UTF8 diff --git a/numpy/linalg/lapack_lite/f2c.c b/numpy/linalg/lapack_lite/f2c.c index 47e4d5729b83..9afac89e61d1 100644 --- a/numpy/linalg/lapack_lite/f2c.c +++ b/numpy/linalg/lapack_lite/f2c.c @@ -191,7 +191,7 @@ integer i_dnnt(x) doublereal *x; integer i_dnnt(doublereal *x) #endif { -return( (*x)>=0 ? +return (integer)( (*x)>=0 ? 
floor(*x + .5) : -floor(.5 - *x) ); } diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c index 2b7c69481064..8102fee72323 100644 --- a/numpy/random/src/distributions/distributions.c +++ b/numpy/random/src/distributions/distributions.c @@ -597,7 +597,7 @@ static RAND_INT_TYPE random_poisson_ptrs(bitgen_t *bitgen_state, double lam) { /* log(V) == log(0.0) ok here */ /* if U==0.0 so that us==0.0, log is ok since always returns */ if ((log(V) + log(invalpha) - log(a / (us * us) + b)) <= - (-lam + k * loglam - random_loggam(k + 1))) { + (-lam + (double)k * loglam - random_loggam((double)k + 1))) { return k; } } @@ -733,10 +733,10 @@ RAND_INT_TYPE random_binomial_btpe(bitgen_t *bitgen_state, RAND_INT_TYPE n, if (A > (t + rho)) goto Step10; - x1 = y + 1; - f1 = m + 1; - z = n + 1 - m; - w = n - y + 1; + x1 = (double)y + 1; + f1 = (double)m + 1; + z = (double)n + 1 - (double)m; + w = (double)n - (double)y + 1; x2 = x1 * x1; f2 = f1 * f1; z2 = z * z; diff --git a/numpy/random/src/distributions/logfactorial.c b/numpy/random/src/distributions/logfactorial.c index 1305164699fa..337ec1a98db5 100644 --- a/numpy/random/src/distributions/logfactorial.c +++ b/numpy/random/src/distributions/logfactorial.c @@ -154,5 +154,5 @@ double logfactorial(int64_t k) * was within 2 ULP of the best 64 bit floating point value for * k up to 10000000.) */ - return (k + 0.5)*log(k) - k + (halfln2pi + (1.0/k)*(1/12.0 - 1/(360.0*k*k))); + return (k + 0.5)*log((double)k) - k + (halfln2pi + (1.0/k)*(1/12.0 - 1/(360.0*k*k))); } diff --git a/numpy/random/src/legacy/legacy-distributions.c b/numpy/random/src/legacy/legacy-distributions.c index 385c4c239a57..e84bd19fdaee 100644 --- a/numpy/random/src/legacy/legacy-distributions.c +++ b/numpy/random/src/legacy/legacy-distributions.c @@ -469,7 +469,7 @@ int64_t legacy_random_geometric(bitgen_t *bitgen_state, double p) { void legacy_random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n, RAND_INT_TYPE *mnix, double *pix, npy_intp d, binomial_t *binomial) { - return random_multinomial(bitgen_state, n, mnix, pix, d, binomial); + random_multinomial(bitgen_state, n, mnix, pix, d, binomial); } double legacy_vonmises(bitgen_t *bitgen_state, double mu, double kappa) { From 891fa0d236fb2e9ed6de67f41f62e1713c5204fa Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Sun, 5 Oct 2025 21:54:29 -0400 Subject: [PATCH 0528/1018] DOC: Remove unused arrays from the structured dtype ufunc example. (#29882) [skip actions][skip azp][skip cirrus] --- doc/source/user/c-info.ufunc-tutorial.rst | 6 ------ 1 file changed, 6 deletions(-) diff --git a/doc/source/user/c-info.ufunc-tutorial.rst b/doc/source/user/c-info.ufunc-tutorial.rst index 76e8af63462f..8d8b267a6bd1 100644 --- a/doc/source/user/c-info.ufunc-tutorial.rst +++ b/doc/source/user/c-info.ufunc-tutorial.rst @@ -865,12 +865,6 @@ The C file is given below. } } - /* This a pointer to the above function */ - PyUFuncGenericFunction funcs[1] = {&add_uint64_triplet}; - - /* These are the input and return dtypes of add_uint64_triplet. 
*/
-    static const char types[3] = {NPY_UINT64, NPY_UINT64, NPY_UINT64};
-
     static struct PyModuleDef moduledef = {
         PyModuleDef_HEAD_INIT,
         "struct_ufunc_test",

From 3c7801da49f4c59fd088a17b81ddee2cee419872 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Mon, 6 Oct 2025 09:26:19 +0200
Subject: [PATCH 0529/1018] MAINT: Rewrite setitem to use the new API (mostly) (#29880)

This has a mild speed advantage: `PyArray_Pack` had a hack in place to
ensure that we have an "array" to pass on, and that hack is now only
needed in extreme cases (i.e. user dtypes).  The speed gain is slight,
but mostly this is a cleanup I wanted to make.
It would be nice to change `getitem` as well, but `getitem` is trickier
due to structured dtypes.

I wanted to simply remove `->setitem` at first, but one (brutal!) test
failed, and I reconsidered: someone might be calling `descr->setitem`
without an array, and that would break without an explicit definition
kept around.

The 0-D unpacking path should be unimportant these days (it only applies
to _nested_ arrays, so the tests don't even notice it) but I kept it
around for now anyway.

Signed-off-by: Sebastian Berg

* Small style/docs fixup based on review

Signed-off-by: Sebastian Berg

---------

Signed-off-by: Sebastian Berg
---
 numpy/_core/src/multiarray/array_coercion.c |  15 +-
 numpy/_core/src/multiarray/arraytypes.c.src | 276 ++++++++++----------
 numpy/_core/src/multiarray/arraytypes.h.src |   2 +-
 numpy/_core/src/multiarray/common.c         |  24 ++
 numpy/_core/src/multiarray/common.h         |  11 +
 numpy/_core/src/multiarray/dtypemeta.c      |  25 +-
 numpy/_core/src/multiarray/dtypemeta.h      |   3 +-
 numpy/_core/src/multiarray/scalarapi.c      |  11 +-
 numpy/_core/src/umath/_scaled_float_dtype.c |   7 +-
 numpy/_core/src/umath/scalarmath.c.src      |   8 +-
 numpy/_core/tests/test_array_coercion.py    |   5 -
 11 files changed, 207 insertions(+), 180 deletions(-)

diff --git a/numpy/_core/src/multiarray/array_coercion.c b/numpy/_core/src/multiarray/array_coercion.c
index 8271fb6812d1..2de639611bf6 100644
--- a/numpy/_core/src/multiarray/array_coercion.c
+++ b/numpy/_core/src/multiarray/array_coercion.c
@@ -479,20 +479,13 @@ npy_cast_raw_scalar_item(
 NPY_NO_EXPORT int
 PyArray_Pack(PyArray_Descr *descr, void *item, PyObject *value)
 {
-    PyArrayObject_fields arr_fields = {
-            .flags = NPY_ARRAY_WRITEABLE,  /* assume array is not behaved. */
-    };
-    Py_SET_TYPE(&arr_fields, &PyArray_Type);
-    Py_SET_REFCNT(&arr_fields, 1);
-
     if (NPY_UNLIKELY(descr->type_num == NPY_OBJECT)) {
         /*
          * We always have store objects directly, casting will lose some
          * type information. Any other dtype discards the type information.
          * TODO: For a Categorical[object] this path may be necessary?
*/ - arr_fields.descr = descr; - return PyDataType_GetArrFuncs(descr)->setitem(value, item, &arr_fields); + return NPY_DT_CALL_setitem(descr, value, item); } /* discover_dtype_from_pyobject includes a check for is_known_scalar_type */ @@ -527,8 +520,7 @@ PyArray_Pack(PyArray_Descr *descr, void *item, PyObject *value) if (DType == NPY_DTYPE(descr) || DType == (PyArray_DTypeMeta *)Py_None) { /* We can set the element directly (or at least will try to) */ Py_XDECREF(DType); - arr_fields.descr = descr; - return PyDataType_GetArrFuncs(descr)->setitem(value, item, &arr_fields); + return NPY_DT_CALL_setitem(descr, value, item); } PyArray_Descr *tmp_descr; tmp_descr = NPY_DT_CALL_discover_descr_from_pyobject(DType, value); @@ -546,8 +538,7 @@ PyArray_Pack(PyArray_Descr *descr, void *item, PyObject *value) if (PyDataType_FLAGCHK(tmp_descr, NPY_NEEDS_INIT)) { memset(data, 0, tmp_descr->elsize); } - arr_fields.descr = tmp_descr; - if (PyDataType_GetArrFuncs(tmp_descr)->setitem(value, data, &arr_fields) < 0) { + if (NPY_DT_CALL_setitem(tmp_descr, value, data) < 0) { PyObject_Free(data); Py_DECREF(tmp_descr); return -1; diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src index 2828b9543fbd..da00596e7e24 100644 --- a/numpy/_core/src/multiarray/arraytypes.c.src +++ b/numpy/_core/src/multiarray/arraytypes.c.src @@ -46,6 +46,20 @@ #include "umathmodule.h" #include "npy_static_data.h" +/**begin repeat + * #NAME = BOOL, + * BYTE, UBYTE, SHORT, USHORT, INT, UINT, + * LONG, ULONG, LONGLONG, ULONGLONG, + * HALF, FLOAT, DOUBLE, LONGDOUBLE, + * CFLOAT, CDOUBLE, CLONGDOUBLE, + * DATETIME, TIMEDELTA# + */ +static inline void +@NAME@_copyswap(void *dst, void *src, int swap, void *arr); + +/**end repeat**/ + + /* * Define a stack allocated dummy array with only the minimum information set: * 1. The descr, the main field interesting here. @@ -98,7 +112,7 @@ MyPyFloat_AsDouble(PyObject *obj) } num = PyNumber_Float(obj); if (num == NULL) { - return NPY_NAN; + return -1; } ret = PyFloat_AS_DOUBLE(num); Py_DECREF(num); @@ -127,7 +141,7 @@ MyPyFloat_AsHalf(PyObject *obj) npy_half res = npy_double_to_half(d_val); if (NPY_UNLIKELY(npy_half_isinf(res) && !npy_isinf(d_val))) { if (PyUFunc_GiveFloatingpointErrors("cast", NPY_FPE_OVERFLOW) < 0) { - return npy_double_to_half(-1.); + return -1; // exception return as integer } } return res; @@ -139,10 +153,16 @@ MyPyFloat_FromHalf(npy_half h) return PyFloat_FromDouble(npy_half_to_double(h)); } -/* Handle case of assigning from an array scalar in setitem */ +/* + * Handle case of assigning from an array scalar in setitem. + * NOTE/TODO(seberg): This was important, but is now only used + * for *nested* 0-D arrays which makes it dubious whether it should + * remain used. + * (At the point of writing, I did not want to worry about BC though.) 
+ */ static int -convert_to_scalar_and_retry(PyObject *op, void *ov, void *vap, - int (*setitem)(PyObject *op, void *ov, void *vap)) +convert_to_scalar_and_retry(PyArray_Descr *descr, PyObject *op, char *ov, + int (*setitem)(PyArray_Descr *descr, PyObject *op, char *ov)) { PyObject *temp; @@ -153,7 +173,7 @@ convert_to_scalar_and_retry(PyObject *op, void *ov, void *vap, return -1; } else { - int res = setitem(temp, ov, vap); + int res = setitem(descr, temp, ov); Py_DECREF(temp); return res; } @@ -326,9 +346,8 @@ static PyObject * } NPY_NO_EXPORT int -@TYPE@_setitem(PyObject *op, void *ov, void *vap) +@TYPE@_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { - PyArrayObject *ap = vap; @type@ temp; /* ensures alignment */ #if @is_int@ @@ -366,28 +385,23 @@ NPY_NO_EXPORT int } else { temp = (@type@)@func2@(op); - } - if (PyErr_Occurred()) { - PyObject *type, *value, *traceback; - PyErr_Fetch(&type, &value, &traceback); - if (PySequence_NoString_Check(op)) { - PyErr_SetString(PyExc_ValueError, - "setting an array element with a sequence."); - npy_PyErr_ChainExceptionsCause(type, value, traceback); - } - else { - PyErr_Restore(type, value, traceback); + if (temp == (@type@)-1 && PyErr_Occurred()) { + PyObject *type, *value, *traceback; + PyErr_Fetch(&type, &value, &traceback); + if (PySequence_NoString_Check(op)) { + PyErr_SetString(PyExc_ValueError, + "setting an array element with a sequence."); + npy_PyErr_ChainExceptionsCause(type, value, traceback); + } + else { + PyErr_Restore(type, value, traceback); + } + return -1; } - return -1; - } - if (ap == NULL || PyArray_ISBEHAVED(ap)) { - assert(npy_is_aligned(ov, NPY_ALIGNOF(@type@))); - *((@type@ *)ov)=temp; - } - else { - PyDataType_GetArrFuncs(PyArray_DESCR(ap))->copyswap(ov, &temp, PyArray_ISBYTESWAPPED(ap), - ap); } + // Support descr == NULL for some scalarmath paths. 
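+    // Note: the fixed-width @TYPE@_copyswap implementations both store and
+    // (when requested) byte-swap the value, and they ignore their array
+    // argument, so passing NULL for it is safe here.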
+ @TYPE@_copyswap( + ov, &temp, descr != NULL && PyDataType_ISBYTESWAPPED(descr), NULL); return 0; } @@ -433,19 +447,17 @@ static PyObject * * #suffix = f, , l# */ NPY_NO_EXPORT int -@NAME@_setitem(PyObject *op, void *ov, void *vap) +@NAME@_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { - PyArrayObject *ap = vap; Py_complex oop; @type@ temp; - if (PyArray_IsZeroDim(op)) { - return convert_to_scalar_and_retry(op, ov, vap, @NAME@_setitem); - } - if (PyArray_IsScalar(op, @kind@)){ temp = PyArrayScalar_VAL(op, @kind@); } + else if (PyArray_IsZeroDim(op)) { + return convert_to_scalar_and_retry(descr, op, ov, @NAME@_setitem); + } else { if (op == Py_None) { oop.real = NPY_NAN; @@ -504,10 +516,8 @@ NPY_NO_EXPORT int #endif } - memcpy(ov, &temp, NPY_SIZEOF_@NAME@); - if (ap != NULL && PyArray_ISBYTESWAPPED(ap)) { - byte_swap_vector(ov, 2, sizeof(@ftype@)); - } + @NAME@_copyswap( + ov, &temp, descr != NULL && PyDataType_ISBYTESWAPPED(descr), NULL); return 0; } @@ -589,19 +599,17 @@ LONGDOUBLE_getitem(void *ip, void *ap) } NPY_NO_EXPORT int -LONGDOUBLE_setitem(PyObject *op, void *ov, void *vap) +LONGDOUBLE_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { - PyArrayObject *ap = vap; /* ensure alignment */ npy_longdouble temp; - if (PyArray_IsZeroDim(op)) { - return convert_to_scalar_and_retry(op, ov, vap, LONGDOUBLE_setitem); - } - if (PyArray_IsScalar(op, LongDouble)) { temp = PyArrayScalar_VAL(op, LongDouble); } + else if (PyArray_IsZeroDim(op)) { + return convert_to_scalar_and_retry(descr, op, ov, LONGDOUBLE_setitem); + } else { /* In case something funny happened in PyArray_IsScalar */ if (PyErr_Occurred()) { @@ -612,13 +620,9 @@ LONGDOUBLE_setitem(PyObject *op, void *ov, void *vap) if (PyErr_Occurred()) { return -1; } - if (ap == NULL || PyArray_ISBEHAVED(ap)) { - *((npy_longdouble *)ov) = temp; - } - else { - copy_and_swap(ov, &temp, PyArray_ITEMSIZE(ap), 1, 0, - PyArray_ISBYTESWAPPED(ap)); - } + // Support descr == NULL for scalarmath paths + LONGDOUBLE_copyswap( + ov, &temp, descr != NULL && PyDataType_ISBYTESWAPPED(descr), NULL); return 0; } @@ -664,12 +668,10 @@ UNICODE_getitem(void *ip, void *vap) } static int -UNICODE_setitem(PyObject *op, void *ov, void *vap) +UNICODE_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { - PyArrayObject *ap = vap; - if (PyArray_IsZeroDim(op)) { - return convert_to_scalar_and_retry(op, ov, vap, UNICODE_setitem); + return convert_to_scalar_and_retry(descr, op, ov, UNICODE_setitem); } if (PySequence_NoString_Check(op)) { @@ -691,7 +693,7 @@ UNICODE_setitem(PyObject *op, void *ov, void *vap) } /* truncate if needed */ - Py_ssize_t max_len = PyArray_ITEMSIZE(ap) >> 2; + Py_ssize_t max_len = descr->elsize >> 2; Py_ssize_t actual_len = PyUnicode_GetLength(temp); if (actual_len < 0) { Py_DECREF(temp); @@ -708,7 +710,8 @@ UNICODE_setitem(PyObject *op, void *ov, void *vap) Py_ssize_t num_bytes = actual_len * 4; char *buffer; - if (!PyArray_ISALIGNED(ap)) { + int aligned = npy_is_aligned(ov, NPY_ALIGNOF(Py_UCS4)); + if (!aligned) { buffer = PyArray_malloc(num_bytes); if (buffer == NULL) { Py_DECREF(temp); @@ -725,16 +728,16 @@ UNICODE_setitem(PyObject *op, void *ov, void *vap) return -1; } - if (!PyArray_ISALIGNED(ap)) { + if (!aligned) { memcpy(ov, buffer, num_bytes); PyArray_free(buffer); } /* Fill in the rest of the space with 0 */ - if (PyArray_ITEMSIZE(ap) > num_bytes) { - memset((char*)ov + num_bytes, 0, (PyArray_ITEMSIZE(ap) - num_bytes)); + if (descr->elsize > num_bytes) { + memset((char*)ov + num_bytes, 0, (descr->elsize - num_bytes)); } - 
if (PyArray_ISBYTESWAPPED(ap)) { + if (PyDataType_ISBYTESWAPPED(descr)) { byte_swap_vector(ov, actual_len, 4); } Py_DECREF(temp); @@ -762,15 +765,14 @@ STRING_getitem(void *ip, void *vap) } static int -STRING_setitem(PyObject *op, void *ov, void *vap) +STRING_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { - PyArrayObject *ap = vap; char *ptr; Py_ssize_t len; PyObject *temp = NULL; if (PyArray_IsZeroDim(op)) { - return convert_to_scalar_and_retry(op, ov, vap, STRING_setitem); + return convert_to_scalar_and_retry(descr, op, ov, STRING_setitem); } if (PySequence_NoString_Check(op)) { @@ -808,13 +810,13 @@ STRING_setitem(PyObject *op, void *ov, void *vap) Py_DECREF(temp); return -1; } - memcpy(ov, ptr, PyArray_MIN(PyArray_ITEMSIZE(ap),len)); + memcpy(ov, ptr, PyArray_MIN(descr->elsize, len)); /* * If string length is smaller than room in array * Then fill the rest of the element size with NULL */ - if (PyArray_ITEMSIZE(ap) > len) { - memset((char *)ov + len, 0, (PyArray_ITEMSIZE(ap) - len)); + if (descr->elsize > len) { + memset((char *)ov + len, 0, (descr->elsize - len)); } Py_DECREF(temp); return 0; @@ -841,7 +843,7 @@ OBJECT_getitem(void *ip, void *NPY_UNUSED(ap)) static int -OBJECT_setitem(PyObject *op, void *ov, void *NPY_UNUSED(ap)) +OBJECT_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { PyObject *obj; @@ -867,11 +869,9 @@ VOID_getitem(void *input, void *vap) _PyArray_LegacyDescr *descr = (_PyArray_LegacyDescr *)PyArray_DESCR(vap); if (PyDataType_HASFIELDS(descr)) { - PyObject *key; PyObject *names; int i, n; PyObject *ret; - PyObject *tup; PyArrayObject_fields dummy_fields = get_dummy_stack_array(ap); PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; @@ -882,9 +882,7 @@ VOID_getitem(void *input, void *vap) for (i = 0; i < n; i++) { npy_intp offset; PyArray_Descr *new; - key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(descr->fields, key); // noqa: borrowed-ref OK - if (_unpack_field(tup, &new, &offset) < 0) { + if (_unpack_field_index(descr, i, &new, &offset) < 0) { Py_DECREF(ret); return NULL; } @@ -969,14 +967,10 @@ NPY_NO_EXPORT int _setup_field(int i, _PyArray_LegacyDescr *descr, PyArrayObject *arr, npy_intp *offset_p, char *dstdata) { - PyObject *key; - PyObject *tup; PyArray_Descr *new; npy_intp offset; - key = PyTuple_GET_ITEM(descr->names, i); - tup = PyDict_GetItem(descr->fields, key); // noqa: borrowed-ref OK - if (_unpack_field(tup, &new, &offset) < 0) { + if (_unpack_field_index(descr, i, &new, &offset) < 0) { return -1; } @@ -1029,18 +1023,17 @@ _copy_and_return_void_setitem(_PyArray_LegacyDescr *dstdescr, char *dstdata, } static int -VOID_setitem(PyObject *op, void *input, void *vap) +VOID_setitem(PyArray_Descr *descr_, PyObject *op, char *ip) { - char *ip = input; - PyArrayObject *ap = vap; - int itemsize = PyArray_ITEMSIZE(ap); + _PyArray_LegacyDescr *descr = (_PyArray_LegacyDescr *)descr_; + int itemsize = descr->elsize; int res; - _PyArray_LegacyDescr *descr = (_PyArray_LegacyDescr *)PyArray_DESCR(ap); if (PyDataType_HASFIELDS(descr)) { PyObject *errmsg; npy_int i; npy_intp offset; + PyArray_Descr *field_descr; int failed = 0; /* If op is 0d-ndarray or numpy scalar, directly get dtype & data ptr */ @@ -1073,23 +1066,18 @@ VOID_setitem(PyObject *op, void *input, void *vap) return -1; } - PyArrayObject_fields dummy_fields = get_dummy_stack_array(ap); - PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; - for (i = 0; i < names_size; i++) { - PyObject *item; - - if (_setup_field(i, descr, dummy_arr, &offset, ip) == -1) { + PyObject 
*item = PyTuple_GetItem(op, i); + if (item == NULL) { failed = 1; break; } - item = PyTuple_GetItem(op, i); - if (item == NULL) { + if (_unpack_field_index(descr, i, &field_descr, &offset) < 0) { failed = 1; break; } /* use setitem to set this field */ - if (PyArray_SETITEM(dummy_arr, ip + offset, item) < 0) { + if (NPY_DT_CALL_setitem(field_descr, item, ip + offset) < 0) { failed = 1; break; } @@ -1099,17 +1087,13 @@ VOID_setitem(PyObject *op, void *input, void *vap) /* Otherwise must be non-void scalar. Try to assign to each field */ npy_intp names_size = PyTuple_GET_SIZE(descr->names); - PyArrayObject_fields dummy_fields = get_dummy_stack_array(ap); - PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; - for (i = 0; i < names_size; i++) { - /* temporarily make ap have only this field */ - if (_setup_field(i, descr, dummy_arr, &offset, ip) == -1) { + if (_unpack_field_index(descr, i, &field_descr, &offset) < 0) { failed = 1; break; } /* use setitem to set this field */ - if (PyArray_SETITEM(dummy_arr, ip + offset, op) < 0) { + if (NPY_DT_CALL_setitem(field_descr, op, ip + offset) < 0) { failed = 1; break; } @@ -1139,7 +1123,7 @@ VOID_setitem(PyObject *op, void *input, void *vap) PyArrayObject *ret = (PyArrayObject *)PyArray_NewFromDescrAndBase( &PyArray_Type, descr->subarray->base, shape.len, shape.ptr, NULL, ip, - PyArray_FLAGS(ap), NULL, NULL); + NPY_ARRAY_WRITEABLE, NULL, NULL); npy_free_cache_dim_obj(shape); if (!ret) { return -1; @@ -1217,15 +1201,14 @@ TIMEDELTA_getitem(void *ip, void *vap) } static int -DATETIME_setitem(PyObject *op, void *ov, void *vap) +DATETIME_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { - PyArrayObject *ap = vap; /* ensure alignment */ npy_datetime temp = 0; PyArray_DatetimeMetaData *meta = NULL; /* Get the datetime units metadata */ - meta = get_datetime_metadata_from_dtype(PyArray_DESCR(ap)); + meta = get_datetime_metadata_from_dtype(descr); if (meta == NULL) { return -1; } @@ -1237,27 +1220,20 @@ DATETIME_setitem(PyObject *op, void *ov, void *vap) } /* Copy the value into the output */ - if (ap == NULL || PyArray_ISBEHAVED(ap)) { - *((npy_datetime *)ov)=temp; - } - else { - PyDataType_GetArrFuncs(PyArray_DESCR(ap))->copyswap(ov, &temp, PyArray_ISBYTESWAPPED(ap), - ap); - } - + DATETIME_copyswap( + ov, &temp, descr != NULL && PyDataType_ISBYTESWAPPED(descr), NULL); return 0; } static int -TIMEDELTA_setitem(PyObject *op, void *ov, void *vap) +TIMEDELTA_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { - PyArrayObject *ap = vap; /* ensure alignment */ npy_timedelta temp = 0; PyArray_DatetimeMetaData *meta = NULL; /* Get the datetime units metadata */ - meta = get_datetime_metadata_from_dtype(PyArray_DESCR(ap)); + meta = get_datetime_metadata_from_dtype(descr); if (meta == NULL) { return -1; } @@ -1268,19 +1244,39 @@ TIMEDELTA_setitem(PyObject *op, void *ov, void *vap) return -1; } - /* Copy the value into the output */ - if (ap == NULL || PyArray_ISBEHAVED(ap)) { - *((npy_timedelta *)ov)=temp; - } - else { - PyDataType_GetArrFuncs(PyArray_DESCR(ap))->copyswap(ov, &temp, PyArray_ISBYTESWAPPED(ap), - ap); - } - + TIMEDELTA_copyswap( + ov, &temp, descr != NULL && PyDataType_ISBYTESWAPPED(descr), NULL); return 0; } +/**begin repeat + * + * #NAME = BOOL, + * BYTE, UBYTE, SHORT, USHORT, INT, UINT, + * LONG, ULONG, LONGLONG, ULONGLONG, + * HALF, FLOAT, DOUBLE, LONGDOUBLE, + * CFLOAT, CDOUBLE, CLONGDOUBLE, + * OBJECT, STRING, UNICODE, VOID, + * DATETIME, TIMEDELTA# + */ + +/* + * Legacy fallback setitem, should be deprecated, but if anyone 
calls
+ * our setitem *without* an array (or steals it for their dtype),
+ * they might still need it. A future NumPy 3 should probably drop it
+ * entirely, though.
+ */
+static int
+@NAME@_legacy_setitem(PyObject *value, void *data, void *vap)
+{
+    // Most builtins traditionally allow descr to be NULL, so assume it's OK
+    PyArray_Descr *descr = vap == NULL ? NULL : PyArray_DESCR((PyArrayObject *)vap);
+    return @NAME@_setitem(descr, value, data);
+}
+
+/**end repeat**/
+
+
 /*
  *****************************************************************************
  **                       TYPE TO TYPE CONVERSIONS                          **
@@ -1536,7 +1532,7 @@ static void
         if (temp == NULL) {
             return;
         }
-        if (@to@_setitem(temp, op, aop)) {
+        if (@to@_setitem(PyArray_DESCR(aop), temp, (char *)op)) {
             Py_DECREF(temp);
             return;
         }
@@ -1584,7 +1580,7 @@ static void
             Py_INCREF(Py_False);
             temp = Py_False;
         }
-        if (@to@_setitem(temp, op, aop)) {
+        if (@to@_setitem(PyArray_DESCR(aop), temp, (char *)op)) {
             Py_DECREF(temp);
             return;
         }
@@ -1949,7 +1945,7 @@ _basic_copy(void *dst, void *src, int elsize) {
 *           npy_half, npy_float, npy_double, npy_longdouble,
 *           npy_datetime, npy_timedelta#
 */
-static void
+static inline void
@fname@_copyswapn (void *dst, npy_intp dstride, void *src, npy_intp sstride,
                   npy_intp n, int swap, void *NPY_UNUSED(arr))
{
@@ -1960,7 +1956,7 @@ static void
     }
 }

-static void
+static inline void
@fname@_copyswap (void *dst, void *src, int swap, void *NPY_UNUSED(arr))
{
    /* copy first if needed */
@@ -2042,7 +2038,7 @@ static void
    /* ignore swap */
}

-static void
+static inline void
@fname@_copyswap (void *dst, void *src, int NPY_UNUSED(swap),
                  void *NPY_UNUSED(arr))
{
@@ -2075,7 +2071,7 @@ static void
    }
}

-static void
+static inline void
@fname@_copyswap (void *dst, void *src, int swap, void *NPY_UNUSED(arr))
{
    /* copy first if needed */
@@ -2220,7 +2216,7 @@ OBJECT_copyswapn(PyObject **dst, npy_intp dstride, PyObject **src,
    return;
}

-static void
+static inline void
OBJECT_copyswap(PyObject **dst, PyObject **src, int NPY_UNUSED(swap),
                void *NPY_UNUSED(arr))
{
@@ -2451,7 +2447,7 @@ UNICODE_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride,
}


-static void
+static inline void
STRING_copyswap(char *dst, char *src, int NPY_UNUSED(swap), PyArrayObject *arr)
{
    assert(arr != NULL);
@@ -2462,7 +2458,7 @@ STRING_copyswap(char *dst, char *src, int NPY_UNUSED(swap), PyArrayObject *arr)
    _basic_copy(dst, src, PyArray_ITEMSIZE(arr));
}

-static void
+static inline void
UNICODE_copyswap (char *dst, char *src, int swap, PyArrayObject *arr)
{
    int itemsize;
@@ -3018,9 +3014,8 @@ UNICODE_compare(npy_ucs4 *ip1, npy_ucs4 *ip2,
static int
VOID_compare(char *ip1, char *ip2, PyArrayObject *ap)
{
-    PyArray_Descr *descr;
-    PyObject *names, *key;
-    PyObject *tup;
+    _PyArray_LegacyDescr *descr;
+    PyObject *names;
     PyArrayObject_fields dummy_struct;
     PyArrayObject *dummy = (PyArrayObject *)&dummy_struct;
     char *nip1, *nip2;
@@ -3033,18 +3028,16 @@ VOID_compare(char *ip1, char *ip2, PyArrayObject *ap)
     if (mem_handler == NULL) {
         goto finish;
     }
-    descr = PyArray_DESCR(ap);
+    descr = (_PyArray_LegacyDescr *)PyArray_DESCR(ap);
     /*
      * Compare on the first-field. If equal, then
      * compare on the second-field, etc.
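     * (fields are compared in the order given by descr->names)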
*/ - names = PyDataType_NAMES(descr); + names = descr->names; for (i = 0; i < PyTuple_GET_SIZE(names); i++) { PyArray_Descr *new; npy_intp offset; - key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(PyDataType_FIELDS(descr), key); // noqa: borrowed-ref OK - if (_unpack_field(tup, &new, &offset) < 0) { + if (_unpack_field_index(descr, i, &new, &offset) < 0) { goto finish; } /* Set the fields needed by compare or copyswap */ @@ -4029,7 +4022,7 @@ static PyArray_ArrFuncs _Py@NAME@_ArrFuncs = { @from@_to_VOID }, @from@_getitem, - @from@_setitem, + @from@_legacy_setitem, (PyArray_CopySwapNFunc*)@from@_copyswapn, (PyArray_CopySwapFunc*)@from@_copyswap, (PyArray_CompareFunc*)@from@_compare, @@ -4148,7 +4141,7 @@ static PyArray_ArrFuncs _Py@NAME@_ArrFuncs = { @from@_to_VOID }, @from@_getitem, - @from@_setitem, + @from@_legacy_setitem, (PyArray_CopySwapNFunc*)@from@_copyswapn, (PyArray_CopySwapFunc*)@from@_copyswap, (PyArray_CompareFunc*)@from@_compare, @@ -4594,6 +4587,7 @@ set_typeinfo(PyObject *dict) if (dtypemeta == NULL) { return -1; } + NPY_DT_SLOTS(dtypemeta)->setitem = @NAME@_setitem; NPY_DT_SLOTS(dtypemeta)->get_constant = @NAME@_get_constant; /**end repeat**/ diff --git a/numpy/_core/src/multiarray/arraytypes.h.src b/numpy/_core/src/multiarray/arraytypes.h.src index ca8dbeaa67eb..59dc836a2de5 100644 --- a/numpy/_core/src/multiarray/arraytypes.h.src +++ b/numpy/_core/src/multiarray/arraytypes.h.src @@ -44,7 +44,7 @@ small_correlate(const char * d_, npy_intp dstride, */ NPY_NO_EXPORT int -@TYPE@_setitem(PyObject *obj, void *data_ptr, void *arr); +@TYPE@_setitem(PyArray_Descr *descr, PyObject *obj, char *data_ptr); /**end repeat**/ diff --git a/numpy/_core/src/multiarray/common.c b/numpy/_core/src/multiarray/common.c index 3bae4a28efba..2e9bcbf29e8f 100644 --- a/numpy/_core/src/multiarray/common.c +++ b/numpy/_core/src/multiarray/common.c @@ -328,6 +328,30 @@ _unpack_field(PyObject *value, PyArray_Descr **descr, npy_intp *offset) return 0; } + +/** + * Unpack a field from a structured dtype. The field index must be valid. + * + * @param descr The dtype to unpack. + * @param index The index of the field to unpack. + * @param odescr will be set to the field's dtype + * @param offset will be set to the field's offset + * + * @return -1 on failure, 0 on success. + */ + NPY_NO_EXPORT int + _unpack_field_index( + _PyArray_LegacyDescr *descr, + npy_intp index, + PyArray_Descr **odescr, + npy_intp *offset) + { + PyObject *key = PyTuple_GET_ITEM(descr->names, index); + PyObject *tup = PyDict_GetItem(descr->fields, key); // noqa: borrowed-ref OK + return _unpack_field(tup, odescr, offset); + } + + /* * check whether arrays with datatype dtype might have object fields. This will * only happen for structured dtypes (which may have hidden objects even if the diff --git a/numpy/_core/src/multiarray/common.h b/numpy/_core/src/multiarray/common.h index d6b9ad36588f..db7bc64733db 100644 --- a/numpy/_core/src/multiarray/common.h +++ b/numpy/_core/src/multiarray/common.h @@ -65,12 +65,23 @@ convert_shape_to_string(npy_intp n, npy_intp const *vals, char *ending); NPY_NO_EXPORT void dot_alignment_error(PyArrayObject *a, int i, PyArrayObject *b, int j); + /** * unpack tuple of PyDataType_FIELDS(dtype) (descr, offset, title[not-needed]) */ NPY_NO_EXPORT int _unpack_field(PyObject *value, PyArray_Descr **descr, npy_intp *offset); +/** + * Unpack a field from a structured dtype by index. 
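+ * Unlike _unpack_field, the field is addressed by position: the lookup of
+ * the (descr, offset) pair happens internally and the index must be valid.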
+ */
+NPY_NO_EXPORT int
+_unpack_field_index(
+    _PyArray_LegacyDescr *descr,
+    npy_intp index,
+    PyArray_Descr **odescr,
+    npy_intp *offset);
+
 /*
  * check whether arrays with datatype dtype might have object fields. This will
  * only happen for structured dtypes (which may have hidden objects even if the
diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c
index 3f569cc3ccec..692c7cfc2e0a 100644
--- a/numpy/_core/src/multiarray/dtypemeta.c
+++ b/numpy/_core/src/multiarray/dtypemeta.c
@@ -129,6 +129,20 @@ default_get_constant(PyArray_Descr *descr, int constant_id, void *data)
 }


+static int
+legacy_fallback_setitem(PyArray_Descr *descr, PyObject *value, char *data)
+{
+    PyArrayObject_fields arr_fields = {
+        .flags = NPY_ARRAY_WRITEABLE,  /* assume array is not behaved. */
+        .descr = descr,
+    };
+    Py_SET_TYPE(&arr_fields, &PyArray_Type);
+    Py_SET_REFCNT(&arr_fields, 1);
+
+    return PyDataType_GetArrFuncs(descr)->setitem(value, data, &arr_fields);
+}
+
+
 static int
 legacy_setitem_using_DType(PyObject *obj, void *data, void *arr)
 {
@@ -138,9 +152,7 @@ legacy_setitem_using_DType(PyObject *obj, void *data, void *arr)
                 "supported for basic NumPy DTypes.");
         return -1;
     }
-    PyArrayDTypeMeta_SetItem *setitem;
-    setitem = NPY_DT_SLOTS(NPY_DTYPE(PyArray_DESCR(arr)))->setitem;
-    return setitem(PyArray_DESCR(arr), obj, data);
+    return NPY_DT_CALL_setitem(PyArray_DESCR(arr), obj, data);
 }


@@ -1189,7 +1201,12 @@ dtypemeta_wrap_legacy_descriptor(
     dt_slots->ensure_canonical = ensure_native_byteorder;
     dt_slots->get_fill_zero_loop = NULL;
     dt_slots->finalize_descr = NULL;
-    dt_slots->setitem = NULL;
+    // May be overwritten, but if not, provide a fallback via the array struct hack.
+    // `getitem` is trickier because of structured dtypes returning views.
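+    // Rough sketch of the resulting dispatch (illustrative, not verbatim):
+    //   PyArray_SETITEM -> NPY_DT_CALL_setitem -> dt_slots->setitem, where
+    //   legacy_fallback_setitem wraps the old arrfuncs setitem by faking a
+    //   minimal array struct around the descriptor (see above).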
+ if (dt_slots->f.setitem == NULL) { + dt_slots->f.setitem = legacy_setitem_using_DType; + } + dt_slots->setitem = legacy_fallback_setitem; dt_slots->getitem = NULL; if (PyTypeNum_ISSIGNED(dtype_class->type_num)) { diff --git a/numpy/_core/src/multiarray/dtypemeta.h b/numpy/_core/src/multiarray/dtypemeta.h index 202116a4cc91..d633af4e7b84 100644 --- a/numpy/_core/src/multiarray/dtypemeta.h +++ b/numpy/_core/src/multiarray/dtypemeta.h @@ -288,8 +288,7 @@ PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr) static inline int PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v) { - return PyDataType_GetArrFuncs(((PyArrayObject_fields *)arr)->descr)->setitem( - v, itemptr, arr); + return NPY_DT_CALL_setitem(PyArray_DESCR(arr), v, itemptr); } // Like PyArray_DESCR_REPLACE, but calls ensure_canonical instead of DescrNew diff --git a/numpy/_core/src/multiarray/scalarapi.c b/numpy/_core/src/multiarray/scalarapi.c index e133b46d008a..7d65d972998d 100644 --- a/numpy/_core/src/multiarray/scalarapi.c +++ b/numpy/_core/src/multiarray/scalarapi.c @@ -519,15 +519,10 @@ PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base) if (buff == NULL) { return PyErr_NoMemory(); } - /* copyswap needs an array object, but only actually cares about the - * dtype - */ - PyArrayObject_fields dummy_arr; - if (base == NULL) { - dummy_arr.descr = descr; - base = (PyObject *)&dummy_arr; + memcpy(buff, data, itemsize); + if (swap) { + byte_swap_vector(buff, itemsize / 4, 4); } - copyswap(buff, data, swap, base); /* truncation occurs here */ PyObject *u = PyUnicode_FromKindAndData(PyUnicode_4BYTE_KIND, buff, itemsize / 4); diff --git a/numpy/_core/src/umath/_scaled_float_dtype.c b/numpy/_core/src/umath/_scaled_float_dtype.c index fbdbbb8d2375..06f18b5c7259 100644 --- a/numpy/_core/src/umath/_scaled_float_dtype.c +++ b/numpy/_core/src/umath/_scaled_float_dtype.c @@ -107,7 +107,7 @@ sfloat_getitem(char *data, PyArrayObject *arr) static int -sfloat_setitem(PyObject *obj, char *data, PyArrayObject *arr) +sfloat_setitem(PyArray_Descr *descr_, PyObject *obj, char *data) { if (!PyFloat_CheckExact(obj)) { PyErr_SetString(PyExc_NotImplementedError, @@ -115,7 +115,7 @@ sfloat_setitem(PyObject *obj, char *data, PyArrayObject *arr) return -1; } - PyArray_SFloatDescr *descr = (PyArray_SFloatDescr *)PyArray_DESCR(arr); + PyArray_SFloatDescr *descr = (PyArray_SFloatDescr *)descr_; double value = PyFloat_AsDouble(obj); value /= descr->scaling; @@ -131,9 +131,10 @@ NPY_DType_Slots sfloat_slots = { .default_descr = &sfloat_default_descr, .common_dtype = &sfloat_common_dtype, .common_instance = &sfloat_common_instance, + .setitem = &sfloat_setitem, .f = { .getitem = (PyArray_GetItemFunc *)&sfloat_getitem, - .setitem = (PyArray_SetItemFunc *)&sfloat_setitem, + .setitem = NULL, } }; diff --git a/numpy/_core/src/umath/scalarmath.c.src b/numpy/_core/src/umath/scalarmath.c.src index 3c57d6866c56..e2d7c22f5deb 100644 --- a/numpy/_core/src/umath/scalarmath.c.src +++ b/numpy/_core/src/umath/scalarmath.c.src @@ -1246,7 +1246,7 @@ static PyObject * */ return PyGenericArrType_Type.tp_as_number->nb_@oper@(a,b); case CONVERT_PYSCALAR: - if (@NAME@_setitem(other, (char *)&other_val, NULL) < 0) { + if (@NAME@_setitem(NULL, other, (char *)&other_val) < 0) { return NULL; } break; @@ -1397,7 +1397,7 @@ static PyObject * return PyGenericArrType_Type.tp_as_number->nb_true_divide(a,b); case CONVERT_PYSCALAR: /* This is the special behavior, convert to float64 directly */ - if (DOUBLE_setitem(other, (char *)&other_val, NULL) < 0) { + 
if (DOUBLE_setitem(NULL, other, (char *)&other_val) < 0) {
                 return NULL;
             }
             break;
@@ -1516,7 +1516,7 @@ static PyObject *
         case PROMOTION_REQUIRED:
             return PyGenericArrType_Type.tp_as_number->nb_power(a, b, modulo);
         case CONVERT_PYSCALAR:
-            if (@NAME@_setitem(other, (char *)&other_val, NULL) < 0) {
+            if (@NAME@_setitem(NULL, other, (char *)&other_val) < 0) {
                 return NULL;
             }
             break;
@@ -1924,7 +1924,7 @@ static PyObject*
         case PROMOTION_REQUIRED:
             return PyGenericArrType_Type.tp_richcompare(self, other, cmp_op);
         case CONVERT_PYSCALAR:
-            if (@NAME@_setitem(other, (char *)&arg2, NULL) < 0) {
+            if (@NAME@_setitem(NULL, other, (char *)&arg2) < 0) {
                 return NULL;
             }
             break;
diff --git a/numpy/_core/tests/test_array_coercion.py b/numpy/_core/tests/test_array_coercion.py
index a3939daa8904..658c672d5f99 100644
--- a/numpy/_core/tests/test_array_coercion.py
+++ b/numpy/_core/tests/test_array_coercion.py
@@ -266,11 +266,6 @@ def test_scalar_coercion(self, scalar):
         # Ensure we have a full-precision number if available
         scalar = type(scalar)((scalar * 2)**0.5)

-        if type(scalar) is rational:
-            # Rational generally fails due to a missing cast. In the future
-            # object casts should automatically be defined based on `setitem`.
-            pytest.xfail("Rational to object cast is undefined currently.")
-
         # Use casting from object:
         arr = np.array(scalar, dtype=object).astype(scalar.dtype)

From f2dcdb7d3fc972bab47dea530d755bdab055068f Mon Sep 17 00:00:00 2001
From: "hpkfft.com"
Date: Mon, 6 Oct 2025 04:07:07 -0700
Subject: [PATCH 0530/1018] PERF: Intern strings used to build global tuples.
 (#29875)

Intern strings of dlpack kwname tuples. Otherwise they may not be interned,
and typical argument parsing (including ours) misses the parsing fast paths,
which makes things significantly slower. Our kwarg parsing assumes that
kwargs are practically always interned strings. That assumption holds in
practice, except in unusual cases like this one.
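For context, a minimal Python sketch of the property the fast path relies on
(illustrative only; `sys.intern` is the standard CPython interning API):

    >>> import sys
    >>> kw = "max_version"                    # literals are typically interned
    >>> built = "".join(["max_", "version"])  # equal value, built at runtime
    >>> kw == built, kw is built              # the identity fast path fails
    (True, False)
    >>> sys.intern(built) is sys.intern(kw)   # interning restores identity
    True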
--- numpy/_core/src/multiarray/npy_static_data.c | 10 ++++++++-- numpy/_core/src/multiarray/npy_static_data.h | 3 +++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/multiarray/npy_static_data.c b/numpy/_core/src/multiarray/npy_static_data.c index 62e1fd3c1b15..9fd321a375a2 100644 --- a/numpy/_core/src/multiarray/npy_static_data.c +++ b/numpy/_core/src/multiarray/npy_static_data.c @@ -64,6 +64,9 @@ intern_strings(void) INTERN_STRING(pyvals_name, "UFUNC_PYVALS_NAME"); INTERN_STRING(legacy, "legacy"); INTERN_STRING(__doc__, "__doc__"); + INTERN_STRING(copy, "copy"); + INTERN_STRING(dl_device, "dl_device"); + INTERN_STRING(max_version, "max_version"); return 0; } @@ -169,7 +172,8 @@ initialize_static_globals(void) return -1; } - npy_static_pydata.kwnames_is_copy = Py_BuildValue("(s)", "copy"); + npy_static_pydata.kwnames_is_copy = + Py_BuildValue("(O)", npy_interned_str.copy); if (npy_static_pydata.kwnames_is_copy == NULL) { return -1; } @@ -185,7 +189,9 @@ initialize_static_globals(void) } npy_static_pydata.dl_call_kwnames = - Py_BuildValue("(sss)", "dl_device", "copy", "max_version"); + Py_BuildValue("(OOO)", npy_interned_str.dl_device, + npy_interned_str.copy, + npy_interned_str.max_version); if (npy_static_pydata.dl_call_kwnames == NULL) { return -1; } diff --git a/numpy/_core/src/multiarray/npy_static_data.h b/numpy/_core/src/multiarray/npy_static_data.h index 287dc80e4c1f..a6901c858374 100644 --- a/numpy/_core/src/multiarray/npy_static_data.h +++ b/numpy/_core/src/multiarray/npy_static_data.h @@ -43,6 +43,9 @@ typedef struct npy_interned_str_struct { PyObject *pyvals_name; PyObject *legacy; PyObject *__doc__; + PyObject *copy; + PyObject *dl_device; + PyObject *max_version; } npy_interned_str_struct; /* From a07a8e7d892be2292333331b9b8362b20f609152 Mon Sep 17 00:00:00 2001 From: Sandro Date: Thu, 25 Sep 2025 20:13:33 +0200 Subject: [PATCH 0531/1018] DOC: Update docstring for `count_nonzero` The return type of the function was changed in 2.3.0, but the docstring remained the same (gh-28774). --- numpy/_core/numeric.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index ec2cbf6dd7fc..b050b81834c6 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -523,11 +523,11 @@ def count_nonzero(a, axis=None, *, keepdims=False): -------- >>> import numpy as np >>> np.count_nonzero(np.eye(4)) - 4 + np.int64(4) >>> a = np.array([[0, 1, 7, 0], ... 
[3, 0, 2, 19]]) >>> np.count_nonzero(a) - 5 + np.int64(5) >>> np.count_nonzero(a, axis=0) array([1, 1, 2, 1]) >>> np.count_nonzero(a, axis=1) From aa45f1aecd106cbbe1b834b7f2622aee9273a18c Mon Sep 17 00:00:00 2001 From: Maanas Arora Date: Tue, 7 Oct 2025 04:49:15 -0400 Subject: [PATCH 0532/1018] ENH, API: New sorting slots for DType API (#29737) * ENH, API: New sorting slots for DType API * DOC: Add sorting context structures and functions to DType API documentation * DOC: Fix type signature syntax * DOC: Update data pointer type in sorting function signatures * DOC: Fix incorrect argsort function name * DOC: Fix formatting of function signature for PyArray_SortFuncWithContext * ENH: Initialize moving to an arraymethod table for user dtype sorts * ENH: Initialize PyArrayMethod_Context to zero in sorting functions * ENH: Add stable sort option to context in user sorts * DOC: Remove deprecated references from DType API sorting documentation * ENH: Refactor sorting parameters to use NPY_SORTKIND in PyArrayMethod_SortParameters * ENH: Remove unnecessary blank line in NPY_DType_Slots structure * ENH: Simplify axis check in PyArray_ArgSort * REF: Simplify PyArray_(Arg)Sort loop discovery logic * BUG: Add missing breaks in loop discovery in sorts * BUG: Simplify context initialization in PyArray_Sort and PyArray_ArgSort * BUG: Fix dangling pointer to context in PyArray_(Arg)Sort * BUG: Fix dangling context pointer in PyArray_(Arg)Sort * REF: Update sorting implementation to use new generalized array method system * ENH: Use dtype slots for sort and argsort arraymethods * STYLE: Add back blank line in methods.h * REF: Simplify conditional logic in PyArray_Sort and PyArray_ArgSort * BUG: Add error handling for strided loop retrieval in PyArray_(Arg)Sort * ENH: Fill out context and strides in sort strided loop calls * ENH: Add correct number of descriptors and strides in sorting loops * BUG: Correct descriptors and strides in argsort implementation * ENH: If-case parameters field in PyArrayMethod_Context * REF: Initialize PyArrayMethod_Context structure directly for sorting functions * ENH: Pass auxdata to dtype sorts * ENH: Update stride calculation for sorting to use item size * ENH: Refactor context initialization and descriptor resolution in sorting functions * ENH: Implement sorting and argsorting methods for scaled float dtype * DOC: Add comment to clarify that we ignore `view_offset` * ENH: Free auxdata and decref descriptors in sorting functions * DOC: Add comments to clarify that method flags are ignored in sorting functions * ENH: Add scaled float tests for sorting and argsorting with scaled and non-contiguous arrays * TST: Update scaled float argsort test to stride array * STYLE: Fix formatting in sfloat test_sort * BUG: Update sorting functions to check for sort method in cleanup instead of strided loop * REF: Fix scope for sort contexts * REF: Initialize sorting context only if taken * DOC: Add parameters member to PyArrayMethod_Context for loop parameterization * BUG: Move declaration of loop_descrs array in PyArray_ArgSort * DOC: Add comment clarifying arraymethod context parameters * DOC: Clarify description of parameters member in PyArrayMethod_Context * REF: Use existing variable to simplify NPY_DTYPE macro * BUG: Add missing output decref in PyArray_ArgSort * BUG: Add assertions to validate parameters in sorting functions of scaled float dtype * REF: Simplify argument passing by removing unnecessary dimensions and strides in casting functions * BUG: Fix castinfo changes * TST: 
assert exact stride values in sfloat sorts Co-authored-by: Sebastian Berg * TST: assert exact stride values in sfloat sorts Co-authored-by: Sebastian Berg * DOC: move versionchanged in ArrayMethod_Context Co-authored-by: Sebastian Berg * BUG: Fix stride initialization in _new_sortlike and ensure proper decref in PyArray_ArgSort * BUG: Correct dimension and stride initializations in sorting * BUG: Fix strides initialization in sorting Co-authored-by: Sebastian Berg * DOC: clarify function level in parameters for PyArrayMethod_Context Co-authored-by: Sebastian Berg * REF: initialize slots in-line for sfloat sorts * BUG: Remove incorrect dimension assertions in sorting loops * BUG: Correct size to sizeof * STYLE: revert newline deletion * STYLE: remove trailing whitespace * STYLE: revert delete newline * BUG: Fix incorrect stride assignment in _new_sortlike function * STYLE: Remove unnecessary whitespace * DOC: Improve comment Co-authored-by: Sebastian Berg * REF: Remove first branch in resolve_descriptors for sfloat sorts * TST: Add reverse sorting test for non-aligned arrays in sfloat sort tests * DOC: Add TODO comments for future registration method in sfloat_init_sort * ENH: Perform descriptor handling in sorting array method for byte order * DOC: Remove redundant versionchanged note in types-and-structures.rst * TST: Add stable and unstable sorting tests for sfloat arrays * ENH: Update stride assertions in sfloat_default_sort_loop for improved data validation * ENH: Add stride assertions in sfloat_default_argsort_loop for improved data validation * ENH: Change descriptor handling in sfloat_sort_resolve_descriptors and sfloat_argsort_resolve_descriptors for improved byte order management * ENH: Manage reference counting for argsort method in sfloat_init_sort to prevent memory leaks * ENH: Add assertion in sfloat_sort_resolve_descriptors to validate descriptor consistency * ENH: Add assertion in sfloat_argsort_resolve_descriptors to validate second descriptor type * ENH: Move error handling to sfloat_sort_get_loop and sfloat_argsort_get_loop for unsupported sort kinds * ENH: Manage reference counting for sort method in sfloat_init_sort to prevent memory leaks * REF: Clarify stride calculation in PyArray_Sort and PyArray_ArgSort * ENH: Add method flag handling in sorting methods to check for GIL * REF: Simplify descr element size retrieval in PyArray_Sort and PyArray_ArgSort * BUG: Replace NULL return with goto fail in PyArray_ArgSort for error handling * REF: Assert instead of handling non-native byteorder in sfloat sorts * BUG: Initialize return value to NULL in PyArray_ArgSort for error handling * REF: Remove descr-specific thread release in favor of pyapi flag in sorting methods * BUG: Fix needcopy logic in _new_sortlike and _new_argsortlike functions * BUG: Remove unnecessary NULL check for odescr in PyArray_ArgSort --------- Co-authored-by: Sebastian Berg --- .../reference/c-api/types-and-structures.rst | 10 + numpy/_core/include/numpy/dtype_api.h | 11 + numpy/_core/src/multiarray/dtypemeta.h | 6 + numpy/_core/src/multiarray/item_selection.c | 327 +++++++++++++----- numpy/_core/src/umath/_scaled_float_dtype.c | 234 +++++++++++++ numpy/_core/tests/test_custom_dtypes.py | 72 ++++ 6 files changed, 579 insertions(+), 81 deletions(-) diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst index fb8efb9c8766..49704913037b 100644 --- a/doc/source/reference/c-api/types-and-structures.rst +++ 
b/doc/source/reference/c-api/types-and-structures.rst @@ -728,6 +728,7 @@ PyArrayMethod_Context and PyArrayMethod_Spec PyObject *caller; struct PyArrayMethodObject_tag *method; PyArray_Descr *const *descriptors; + void *parameters; } PyArrayMethod_Context .. c:member:: PyObject *caller @@ -744,6 +745,15 @@ PyArrayMethod_Context and PyArrayMethod_Spec An array of descriptors for the ufunc loop, filled in by ``resolve_descriptors``. The length of the array is ``nin`` + ``nout``. + .. c:member:: void *parameters + + A pointer to a structure containing any runtime parameters needed by the + loop. This is ``NULL`` if no parameters are needed. The type of the + struct is specific to the registered function. + + .. versionchanged:: NumPy 2.4 + The `parameters` member was added in NumPy 2.4. + .. c:type:: PyArrayMethod_Spec A struct used to register an ArrayMethod with NumPy. We use the slots diff --git a/numpy/_core/include/numpy/dtype_api.h b/numpy/_core/include/numpy/dtype_api.h index cc61a826c103..2222ff342253 100644 --- a/numpy/_core/include/numpy/dtype_api.h +++ b/numpy/_core/include/numpy/dtype_api.h @@ -119,6 +119,13 @@ typedef struct PyArrayMethod_Context_tag { * NPY_ARRAYMETHOD_CONTEXT_FLAGS */ uint64_t flags; + + /* + * Optional run-time parameters to pass to the loop (currently used in sorting). + * Fixed parameters are expected to be passed via auxdata. + */ + void *parameters; + /* Structure may grow (this is harmless for DType authors) */ #endif } PyArrayMethod_Context; @@ -526,4 +533,8 @@ typedef int (PyArrayDTypeMeta_GetConstant)(PyArray_Descr *descr, int ID, void *d typedef int(PyArrayDTypeMeta_SetItem)(PyArray_Descr *, PyObject *, char *); typedef PyObject *(PyArrayDTypeMeta_GetItem)(PyArray_Descr *, char *); +typedef struct { + NPY_SORTKIND flags; +} PyArrayMethod_SortParameters; + #endif /* NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_ */ diff --git a/numpy/_core/src/multiarray/dtypemeta.h b/numpy/_core/src/multiarray/dtypemeta.h index d633af4e7b84..bf0acb48b899 100644 --- a/numpy/_core/src/multiarray/dtypemeta.h +++ b/numpy/_core/src/multiarray/dtypemeta.h @@ -90,6 +90,12 @@ typedef struct { * dtype instance for backward compatibility. (Keep this at end) */ PyArray_ArrFuncs f; + + /* + * Hidden slots for the sort and argsort arraymethods. 
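+     * Looked up by PyArray_Sort/PyArray_ArgSort before falling back to the
+     * legacy arrfuncs sort tables; currently set directly, as no public
+     * registration API exists yet (see the TODOs in _scaled_float_dtype.c).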
+ */ + PyArrayMethodObject *sort_meth; + PyArrayMethodObject *argsort_meth; } NPY_DType_Slots; // This must be updated if new slots before within_dtype_castingimpl diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index 5e622ed59e08..a50b8c49c3fa 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -25,6 +25,7 @@ #include "lowlevel_strided_loops.h" #include "array_assign.h" #include "refcount.h" +#include "methods.h" #include "npy_sort.h" #include "npy_partition.h" @@ -1194,6 +1195,8 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, */ static int _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, + PyArrayMethod_StridedLoop *strided_loop, PyArrayMethod_Context *context, + NpyAuxData *auxdata, NPY_ARRAYMETHOD_FLAGS *method_flags, PyArray_PartitionFunc *part, npy_intp const *kth, npy_intp nkth) { npy_intp N = PyArray_DIM(op, axis); @@ -1201,8 +1204,8 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, npy_intp astride = PyArray_STRIDE(op, axis); int swap = PyArray_ISBYTESWAPPED(op); int is_aligned = IsAligned(op); - int needcopy = !is_aligned || swap || astride != elsize; - int needs_api = PyDataType_FLAGCHK(PyArray_DESCR(op), NPY_NEEDS_PYAPI); + int needcopy = 0; + int needs_api; char *buffer = NULL; @@ -1223,6 +1226,13 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, if (N <= 1 || PyArray_SIZE(op) == 0) { return 0; } + + if (method_flags != NULL) { + needs_api = *method_flags & NPY_METH_REQUIRES_PYAPI; + } + else { + needs_api = PyDataType_FLAGCHK(PyArray_DESCR(op), NPY_NEEDS_PYAPI); + } PyObject *mem_handler = PyDataMem_GetHandler(); if (mem_handler == NULL) { @@ -1235,6 +1245,26 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, } size = it->size; + if (strided_loop != NULL) { + // Descriptors have already been resolved + odescr = context->descriptors[0]; + Py_INCREF(odescr); + } + else { + if (swap) { + odescr = PyArray_DescrNewByteorder(descr, NPY_SWAP); + } + else { + odescr = descr; + Py_INCREF(odescr); + } + } + + needcopy = !is_aligned || astride != elsize; + if (!PyArray_EquivTypes(descr, odescr)) { + needcopy = 1; + } + if (needcopy) { buffer = PyDataMem_UserNEW(N * elsize, mem_handler); if (buffer == NULL) { @@ -1245,14 +1275,6 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, memset(buffer, 0, N * elsize); } - if (swap) { - odescr = PyArray_DescrNewByteorder(descr, NPY_SWAP); - } - else { - odescr = descr; - Py_INCREF(odescr); - } - NPY_ARRAYMETHOD_FLAGS to_transfer_flags; if (PyArray_GetDTypeTransferFunction( @@ -1270,7 +1292,9 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, } } - NPY_BEGIN_THREADS_DESCR(descr); + if (!needs_api) { + NPY_BEGIN_THREADS; + } while (size--) { char *bufptr = it->dataptr; @@ -1295,7 +1319,14 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, */ if (part == NULL) { - ret = sort(bufptr, N, op); + if (strided_loop != NULL) { + char *const data[2] = {bufptr, bufptr}; + npy_intp strides[2] = {elsize, elsize}; + ret = strided_loop(context, data, &N, strides, NULL); + } + else { + ret = sort(bufptr, N, op); + } if (needs_api && PyErr_Occurred()) { ret = -1; } @@ -1333,7 +1364,9 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, } fail: - NPY_END_THREADS_DESCR(descr); + if (!needs_api) { + NPY_END_THREADS; + } /* cleanup internal buffer */ if (needcopy) { 
PyArray_ClearBuffer(odescr, buffer, elsize, N, 1); @@ -1361,16 +1394,17 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, static PyObject* _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, - PyArray_ArgPartitionFunc *argpart, - npy_intp const *kth, npy_intp nkth) + PyArrayMethod_StridedLoop *strided_loop, PyArrayMethod_Context *context, + NpyAuxData *auxdata, NPY_ARRAYMETHOD_FLAGS *method_flags, + PyArray_ArgPartitionFunc *argpart, npy_intp const *kth, npy_intp nkth) { npy_intp N = PyArray_DIM(op, axis); npy_intp elsize = (npy_intp)PyArray_ITEMSIZE(op); npy_intp astride = PyArray_STRIDE(op, axis); int swap = PyArray_ISBYTESWAPPED(op); int is_aligned = IsAligned(op); - int needcopy = !is_aligned || swap || astride != elsize; - int needs_api = PyDataType_FLAGCHK(PyArray_DESCR(op), NPY_NEEDS_PYAPI); + int needcopy = 0; + int needs_api; int needidxbuffer; char *valbuffer = NULL; @@ -1407,6 +1441,13 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, rstride = PyArray_STRIDE(rop, axis); needidxbuffer = rstride != sizeof(npy_intp); + if (method_flags != NULL) { + needs_api = *method_flags & NPY_METH_REQUIRES_PYAPI; + } + else { + needs_api = PyDataType_FLAGCHK(PyArray_DESCR(op), NPY_NEEDS_PYAPI); + } + /* Check if there is any argsorting to do */ if (N <= 1 || PyArray_SIZE(op) == 0) { Py_DECREF(mem_handler); @@ -1422,6 +1463,26 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, } size = it->size; + if (strided_loop != NULL) { + // Descriptors have already been resolved + odescr = context->descriptors[0]; + Py_INCREF(odescr); + } + else { + if (swap) { + odescr = PyArray_DescrNewByteorder(descr, NPY_SWAP); + } + else { + odescr = descr; + Py_INCREF(odescr); + } + } + + needcopy = !is_aligned || astride != elsize; + if (!PyArray_EquivTypes(descr, odescr)) { + needcopy = 1; + } + if (needcopy) { valbuffer = PyDataMem_UserNEW(N * elsize, mem_handler); if (valbuffer == NULL) { @@ -1432,14 +1493,6 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, memset(valbuffer, 0, N * elsize); } - if (swap) { - odescr = PyArray_DescrNewByteorder(descr, NPY_SWAP); - } - else { - odescr = descr; - Py_INCREF(odescr); - } - if (PyArray_GetDTypeTransferFunction( is_aligned, astride, elsize, descr, odescr, 0, &cast_info, &transfer_flags) != NPY_SUCCEED) { @@ -1456,7 +1509,9 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, } } - NPY_BEGIN_THREADS_DESCR(descr); + if (!needs_api) { + NPY_BEGIN_THREADS; + } while (size--) { char *valptr = it->dataptr; @@ -1485,7 +1540,14 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, } if (argpart == NULL) { - ret = argsort(valptr, idxptr, N, op); + if (strided_loop != NULL) { + char *const data[2] = {valptr, (char *)idxptr}; + npy_intp strides[2] = {elsize, sizeof(npy_intp)}; + ret = strided_loop(context, data, &N, strides, NULL); + } + else { + ret = argsort(valptr, idxptr, N, op); + } /* Object comparisons may raise an exception */ if (needs_api && PyErr_Occurred()) { ret = -1; @@ -1525,7 +1587,9 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, } fail: - NPY_END_THREADS_DESCR(descr); + if (!needs_api) { + NPY_END_THREADS; + } /* cleanup internal buffers */ if (needcopy) { PyArray_ClearBuffer(odescr, valbuffer, elsize, N, 1); @@ -1654,7 +1718,7 @@ PyArray_Partition(PyArrayObject *op, PyArrayObject * ktharray, int axis, return -1; } - ret = _new_sortlike(op, axis, sort, 
part, + ret = _new_sortlike(op, axis, sort, NULL, NULL, NULL, NULL, part, PyArray_DATA(kthrvl), PyArray_SIZE(kthrvl)); Py_DECREF(kthrvl); @@ -1710,7 +1774,7 @@ PyArray_ArgPartition(PyArrayObject *op, PyArrayObject *ktharray, int axis, return NULL; } - ret = _new_argsortlike(op2, axis, argsort, argpart, + ret = _new_argsortlike(op2, axis, argsort, NULL, NULL, NULL, NULL, argpart, PyArray_DATA(kthrvl), PyArray_SIZE(kthrvl)); Py_DECREF(kthrvl); @@ -3072,9 +3136,19 @@ static PyArray_SortFunc* const generic_sort_table[] = {npy_quicksort, NPY_NO_EXPORT int PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND flags) { + PyArrayMethodObject *sort_method = NULL; + PyArrayMethod_StridedLoop *strided_loop = NULL; + PyArrayMethod_SortParameters sort_params = {.flags = flags}; + PyArrayMethod_Context context = {0}; + PyArray_Descr *loop_descrs[2]; + NpyAuxData *auxdata = NULL; + NPY_ARRAYMETHOD_FLAGS *method_flags = NULL; + PyArray_SortFunc **sort_table = NULL; PyArray_SortFunc *sort = NULL; + int ret; + if (check_and_adjust_axis(&axis, PyArray_NDIM(op)) < 0) { return -1; } @@ -3086,43 +3160,84 @@ PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND flags) // Zero the NPY_HEAPSORT bit, maps NPY_HEAPSORT to NPY_QUICKSORT flags &= ~_NPY_SORT_HEAPSORT; - sort_table = PyDataType_GetArrFuncs(PyArray_DESCR(op))->sort; - switch (flags) { - case NPY_SORT_DEFAULT: - sort = sort_table[NPY_QUICKSORT]; - break; - case NPY_SORT_STABLE: - sort = sort_table[NPY_STABLESORT]; - break; - default: - break; - } - - // Look for appropriate generic function if no type specific version - if (sort == NULL) { - if (!PyDataType_GetArrFuncs(PyArray_DESCR(op))->compare) { - PyErr_SetString(PyExc_TypeError, - "type does not have compare function"); + // Look for type specific functions + sort_method = NPY_DT_SLOTS(NPY_DTYPE(PyArray_DESCR(op)))->sort_meth; + if (sort_method != NULL) { + PyArray_Descr *descr = PyArray_DESCR(op); + PyArray_DTypeMeta *dt = NPY_DTYPE(descr); + + PyArray_DTypeMeta *dtypes[2] = {dt, dt}; + PyArray_Descr *given_descrs[2] = {descr, descr}; + // Sort cannot be a view, so view_offset is unused + npy_intp view_offset = 0; + + if (sort_method->resolve_descriptors( + sort_method, dtypes, given_descrs, loop_descrs, &view_offset) < 0) { + PyErr_SetString(PyExc_RuntimeError, + "unable to resolve descriptors for sort"); return -1; } + context.descriptors = loop_descrs; + context.parameters = &sort_params; + + // Arrays are always contiguous for sorting + npy_intp strides[2] = {loop_descrs[0]->elsize, loop_descrs[1]->elsize}; + + if (sort_method->get_strided_loop( + &context, 1, 0, strides, &strided_loop, &auxdata, method_flags) < 0) { + ret = -1; + goto fail; + } + } + else { + sort_table = PyDataType_GetArrFuncs(PyArray_DESCR(op))->sort; switch (flags) { case NPY_SORT_DEFAULT: - sort = generic_sort_table[NPY_QUICKSORT]; + sort = sort_table[NPY_QUICKSORT]; break; case NPY_SORT_STABLE: - sort = generic_sort_table[NPY_STABLESORT]; + sort = sort_table[NPY_STABLESORT]; break; default: break; } + + // Look for appropriate generic function if no type specific version + if (sort == NULL) { + if (!PyDataType_GetArrFuncs(PyArray_DESCR(op))->compare) { + PyErr_SetString(PyExc_TypeError, + "type does not have compare function"); + return -1; + } + switch (flags) { + case NPY_SORT_DEFAULT: + sort = generic_sort_table[NPY_QUICKSORT]; + break; + case NPY_SORT_STABLE: + sort = generic_sort_table[NPY_STABLESORT]; + break; + default: + break; + } + } + + if (sort == NULL) { + PyErr_SetString(PyExc_TypeError, + "no current sort 
function meets the requirements"); + return -1; + } } - if (sort == NULL) { - PyErr_SetString(PyExc_TypeError, - "no current sort function meets the requirements"); - return -1; + ret = _new_sortlike(op, axis, sort, strided_loop, + &context, auxdata, method_flags, NULL, NULL, 0); + +fail: + if (sort_method != NULL) { + NPY_AUXDATA_FREE(auxdata); + Py_DECREF(context.descriptors[0]); + Py_DECREF(context.descriptors[1]); } - return _new_sortlike(op, axis, sort, NULL, NULL, 0); + return ret; } /* Table of generic argsort function for use by PyArray_ArgSortEx */ @@ -3138,6 +3253,14 @@ PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND flags) { PyArrayObject *op2; PyObject *ret; + PyArrayMethodObject *argsort_method = NULL; + PyArrayMethod_StridedLoop *strided_loop = NULL; + PyArrayMethod_SortParameters sort_params = {.flags = flags}; + PyArrayMethod_Context context = {0}; + PyArray_Descr *loop_descrs[2]; + NpyAuxData *auxdata = NULL; + NPY_ARRAYMETHOD_FLAGS *method_flags = NULL; + PyArray_ArgSortFunc **argsort_table = NULL; PyArray_ArgSortFunc *argsort = NULL; @@ -3145,51 +3268,93 @@ PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND flags) flags &= ~_NPY_SORT_HEAPSORT; // Look for type specific functions - argsort_table = PyDataType_GetArrFuncs(PyArray_DESCR(op))->argsort; - switch (flags) { - case NPY_SORT_DEFAULT: - argsort = argsort_table[NPY_QUICKSORT]; - break; - case NPY_SORT_STABLE: - argsort = argsort_table[NPY_STABLESORT]; - break; - default: - break; - } - - // Look for generic function if no type specific version - if (argsort == NULL) { - if (!PyDataType_GetArrFuncs(PyArray_DESCR(op))->compare) { - PyErr_SetString(PyExc_TypeError, - "type does not have compare function"); + argsort_method = NPY_DT_SLOTS(NPY_DTYPE(PyArray_DESCR(op)))->argsort_meth; + if (argsort_method != NULL) { + PyArray_Descr *descr = PyArray_DESCR(op); + PyArray_Descr *odescr = PyArray_DescrFromType(NPY_INTP); + PyArray_DTypeMeta *dt = NPY_DTYPE(descr); + PyArray_DTypeMeta *odt = NPY_DTYPE(odescr); + + PyArray_DTypeMeta *dtypes[2] = {dt, odt}; + PyArray_Descr *given_descrs[2] = {descr, odescr}; + // we can ignore the view_offset for sorting + npy_intp view_offset = 0; + + int resolve_ret = argsort_method->resolve_descriptors( + argsort_method, dtypes, given_descrs, loop_descrs, &view_offset); + Py_DECREF(odescr); + if (resolve_ret < 0) { + PyErr_SetString(PyExc_RuntimeError, + "unable to resolve descriptors for argsort"); return NULL; } + context.descriptors = loop_descrs; + context.parameters = &sort_params; + + // Arrays are always contiguous for sorting + npy_intp strides[2] = {loop_descrs[0]->elsize, loop_descrs[1]->elsize}; + + if (argsort_method->get_strided_loop( + &context, 1, 0, strides, &strided_loop, &auxdata, method_flags) < 0) { + ret = NULL; + goto fail; + } + } + else { + argsort_table = PyDataType_GetArrFuncs(PyArray_DESCR(op))->argsort; switch (flags) { case NPY_SORT_DEFAULT: - argsort = generic_argsort_table[NPY_QUICKSORT]; + argsort = argsort_table[NPY_QUICKSORT]; break; case NPY_SORT_STABLE: - argsort = generic_argsort_table[NPY_STABLESORT]; + argsort = argsort_table[NPY_STABLESORT]; break; default: break; } - } - if (argsort == NULL) { - PyErr_SetString(PyExc_TypeError, - "no current argsort function meets the requirements"); - return NULL; + // Look for generic function if no type specific version + if (argsort == NULL) { + if (!PyDataType_GetArrFuncs(PyArray_DESCR(op))->compare) { + PyErr_SetString(PyExc_TypeError, + "type does not have compare function"); + return NULL; + } 
+ switch (flags) { + case NPY_SORT_DEFAULT: + argsort = generic_argsort_table[NPY_QUICKSORT]; + break; + case NPY_SORT_STABLE: + argsort = generic_argsort_table[NPY_STABLESORT]; + break; + default: + break; + } + } + + if (argsort == NULL) { + PyErr_SetString(PyExc_TypeError, + "no current argsort function meets the requirements"); + return NULL; + } } op2 = (PyArrayObject *)PyArray_CheckAxis(op, &axis, 0); if (op2 == NULL) { - return NULL; + ret = NULL; + goto fail; } - ret = _new_argsortlike(op2, axis, argsort, NULL, NULL, 0); - + ret = _new_argsortlike(op2, axis, argsort, strided_loop, + &context, auxdata, method_flags, NULL, NULL, 0); Py_DECREF(op2); + +fail: + if (argsort_method != NULL) { + NPY_AUXDATA_FREE(auxdata); + Py_DECREF(context.descriptors[0]); + Py_DECREF(context.descriptors[1]); + } return ret; } diff --git a/numpy/_core/src/umath/_scaled_float_dtype.c b/numpy/_core/src/umath/_scaled_float_dtype.c index 06f18b5c7259..463ccbffae0b 100644 --- a/numpy/_core/src/umath/_scaled_float_dtype.c +++ b/numpy/_core/src/umath/_scaled_float_dtype.c @@ -21,6 +21,7 @@ #include "array_method.h" #include "common.h" #include "numpy/npy_math.h" +#include "npy_sort.h" #include "convert_datatype.h" #include "dtypemeta.h" #include "dispatching.h" @@ -862,6 +863,235 @@ sfloat_init_ufuncs(void) { } +NPY_NO_EXPORT int +sfloat_stable_sort_loop( + PyArrayMethod_Context *context, + char *const *data, + const npy_intp *dimensions, + const npy_intp *strides, + NpyAuxData *NPY_UNUSED(auxdata)) +{ + assert(data[0] == data[1]); + assert(strides[0] == sizeof(npy_float64) && strides[1] == sizeof(npy_float64)); + PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; + assert(parameters->flags == NPY_SORT_STABLE); + + npy_intp N = dimensions[0]; + char *in = data[0]; + + return timsort_double(in, N, NULL); +} + + +NPY_NO_EXPORT int +sfloat_default_sort_loop( + PyArrayMethod_Context *context, + char *const *data, + const npy_intp *dimensions, + const npy_intp *strides, + NpyAuxData *NPY_UNUSED(auxdata)) +{ + assert(data[0] == data[1]); + assert(strides[0] == sizeof(npy_float64) && strides[1] == sizeof(npy_float64)); + PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; + assert(parameters->flags == NPY_SORT_DEFAULT); + + npy_intp N = dimensions[0]; + char *in = data[0]; + + return quicksort_double(in, N, NULL); +} + + +NPY_NO_EXPORT int +sfloat_sort_get_loop( + PyArrayMethod_Context *context, + int aligned, int move_references, + const npy_intp *strides, + PyArrayMethod_StridedLoop **out_loop, + NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags) +{ + PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; + + if (PyDataType_FLAGCHK(context->descriptors[0], NPY_NEEDS_PYAPI)) { + *flags |= NPY_METH_REQUIRES_PYAPI; + } + + if (parameters->flags == NPY_SORT_STABLE) { + *out_loop = (PyArrayMethod_StridedLoop *)sfloat_stable_sort_loop; + } + else if (parameters->flags == NPY_SORT_DEFAULT) { + *out_loop = (PyArrayMethod_StridedLoop *)sfloat_default_sort_loop; + } + else { + PyErr_SetString(PyExc_RuntimeError, "unsupported sort kind"); + return -1; + } + return 0; +} + + +static NPY_CASTING +sfloat_sort_resolve_descriptors( + PyArrayMethodObject *NPY_UNUSED(self), + PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), + PyArray_Descr *given_descrs[2], + PyArray_Descr *loop_descrs[2], + npy_intp *view_offset) +{ + assert(!(given_descrs[1] != given_descrs[0] && given_descrs[1] != NULL)); + 
assert(PyArray_IsNativeByteOrder(given_descrs[0]->byteorder)); + + loop_descrs[0] = given_descrs[0]; + Py_INCREF(loop_descrs[0]); + loop_descrs[1] = loop_descrs[0]; + Py_INCREF(loop_descrs[1]); + + return NPY_NO_CASTING; +} + + +NPY_NO_EXPORT int +sfloat_stable_argsort_loop( + PyArrayMethod_Context *context, + char *const *data, + const npy_intp *dimensions, + const npy_intp *strides, + NpyAuxData *NPY_UNUSED(auxdata)) +{ + PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; + assert(parameters->flags == NPY_SORT_STABLE); + assert(strides[0] == sizeof(npy_float64)); + assert(strides[1] == sizeof(npy_intp)); + + npy_intp N = dimensions[0]; + char *in = data[0]; + npy_intp *out = (npy_intp *)data[1]; + + return atimsort_double(in, out, N, NULL); +} + + +NPY_NO_EXPORT int +sfloat_default_argsort_loop( + PyArrayMethod_Context *context, + char *const *data, + const npy_intp *dimensions, + const npy_intp *strides, + NpyAuxData *NPY_UNUSED(auxdata)) +{ + PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; + assert(parameters->flags == NPY_SORT_DEFAULT); + assert(strides[0] == sizeof(npy_float64)); + assert(strides[1] == sizeof(npy_intp)); + + npy_intp N = dimensions[0]; + char *in = data[0]; + npy_intp *out = (npy_intp *)data[1]; + + return aquicksort_double(in, out, N, NULL); +} + + +NPY_NO_EXPORT int +sfloat_argsort_get_loop( + PyArrayMethod_Context *context, + int aligned, int move_references, + const npy_intp *strides, + PyArrayMethod_StridedLoop **out_loop, + NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags) +{ + PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; + + if (PyDataType_FLAGCHK(context->descriptors[0], NPY_NEEDS_PYAPI)) { + *flags |= NPY_METH_REQUIRES_PYAPI; + } + + if (parameters->flags == NPY_SORT_STABLE) { + *out_loop = (PyArrayMethod_StridedLoop *)sfloat_stable_argsort_loop; + } + else if (parameters->flags == NPY_SORT_DEFAULT) { + *out_loop = (PyArrayMethod_StridedLoop *)sfloat_default_argsort_loop; + } + else { + PyErr_SetString(PyExc_RuntimeError, "unsupported sort kind"); + return -1; + } + return 0; +} + + +NPY_NO_EXPORT NPY_CASTING +sfloat_argsort_resolve_descriptors( + PyArrayMethodObject *NPY_UNUSED(self), + PyArray_DTypeMeta *dtypes[2], + PyArray_Descr *given_descrs[2], + PyArray_Descr *loop_descrs[2], + npy_intp *view_offset) +{ + assert(given_descrs[1] == NULL || given_descrs[1]->type_num == NPY_INTP); + assert(PyArray_IsNativeByteOrder(given_descrs[0]->byteorder)); + + loop_descrs[0] = given_descrs[0]; + Py_INCREF(loop_descrs[0]); + loop_descrs[1] = PyArray_DescrFromType(NPY_INTP); + if (loop_descrs[1] == NULL) { + return -1; + } + return NPY_NO_CASTING; +} + + +static int +sfloat_init_sort(void) +{ + PyArray_DTypeMeta *dtypes[2] = {&PyArray_SFloatDType, &PyArray_SFloatDType}; + PyType_Slot slots[3] = { + {NPY_METH_resolve_descriptors, &sfloat_sort_resolve_descriptors}, + {NPY_METH_get_loop, &sfloat_sort_get_loop}, + {0, NULL} + }; + PyArrayMethod_Spec spec = { + .nin = 1, + .nout = 1, + .dtypes = dtypes, + .slots = slots, + }; + spec.name = "sfloat_sort"; + spec.casting = NPY_NO_CASTING; + spec.flags = NPY_METH_NO_FLOATINGPOINT_ERRORS; + + PyBoundArrayMethodObject *sort_meth = PyArrayMethod_FromSpec_int(&spec, 0); + if (sort_meth == NULL) { + return -1; + } + // TODO: once registration method is in place, use it instead of setting hidden slot + NPY_DT_SLOTS(&PyArray_SFloatDType)->sort_meth = sort_meth->method; + 
Py_INCREF(sort_meth->method); + Py_DECREF(sort_meth); + + spec.name = "sfloat_argsort"; + dtypes[1] = &PyArray_IntpDType; + + slots[0].slot = NPY_METH_resolve_descriptors; + slots[0].pfunc = &sfloat_argsort_resolve_descriptors; + slots[1].slot = NPY_METH_get_loop; + slots[1].pfunc = &sfloat_argsort_get_loop; + + // TODO: once registration method is in place, use it instead of setting hidden slot + PyBoundArrayMethodObject *argsort_meth = PyArrayMethod_FromSpec_int(&spec, 0); + if (argsort_meth == NULL) { + return -1; + } + NPY_DT_SLOTS(&PyArray_SFloatDType)->argsort_meth = argsort_meth->method; + Py_INCREF(argsort_meth->method); + Py_DECREF(argsort_meth); + + return 0; +} + /* * Python entry point, exported via `umathmodule.h` and `multiarraymodule.c`. * TODO: Should be moved when the necessary API is not internal anymore. @@ -898,6 +1128,10 @@ get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) return NULL; } + if (sfloat_init_sort() < 0) { + return NULL; + } + npy_thread_unsafe_state.get_sfloat_dtype_initialized = NPY_TRUE; return (PyObject *)&PyArray_SFloatDType; } diff --git a/numpy/_core/tests/test_custom_dtypes.py b/numpy/_core/tests/test_custom_dtypes.py index 4d2082a949b7..f03e35dca9f2 100644 --- a/numpy/_core/tests/test_custom_dtypes.py +++ b/numpy/_core/tests/test_custom_dtypes.py @@ -231,6 +231,78 @@ def test_wrapped_and_wrapped_reductions(self): expected = np.hypot.reduce(float_equiv, keepdims=True) assert res.view(np.float64) * 2 == expected + def test_sort(self): + a = self._get_array(1.) + a = a[::-1] # reverse it + + a.sort() + assert_array_equal(a.view(np.float64), [1., 2., 3.]) + + a = self._get_array(1.) + a = a[::-1] # reverse it + + sorted_a = np.sort(a) + assert_array_equal(sorted_a.view(np.float64), [1., 2., 3.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [3., 2., 1.]) + + a = self._get_array(0.5) # different factor + a = a[::2][::-1] # non-contiguous + sorted_a = np.sort(a) + assert_array_equal(sorted_a.view(np.float64), [2., 6.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 2.]) + + a = self._get_array(0.5, aligned=False) + a = a[::-1] # reverse it + sorted_a = np.sort(a) + assert_array_equal(sorted_a.view(np.float64), [2., 4., 6.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + sorted_a = np.sort(a, stable=True) + assert_array_equal(sorted_a.view(np.float64), [2., 4., 6.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + sorted_a = np.sort(a, stable=False) + assert_array_equal(sorted_a.view(np.float64), [2., 4., 6.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + def test_argsort(self): + a = self._get_array(1.) 
+ a = a[::-1] # reverse it + + indices = np.argsort(a) + assert_array_equal(indices, [2, 1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [3., 2., 1.]) + + a = self._get_array(0.5) + a = a[::2][::-1] # reverse it + indices = np.argsort(a) + assert_array_equal(indices, [1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 2.]) + + a = self._get_array(0.5, aligned=False) + a = a[::-1] # reverse it + indices = np.argsort(a) + assert_array_equal(indices, [2, 1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + sorted_indices = np.argsort(a, stable=True) + assert_array_equal(sorted_indices, [2, 1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + sorted_indices = np.argsort(a, stable=False) + assert_array_equal(sorted_indices, [2, 1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + def test_astype_class(self): # Very simple test that we accept `.astype()` also on the class. # ScaledFloat always returns the default descriptor, but it does From 30f0891b64e1bfae47e7414ecc96f57d99208586 Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Wed, 8 Oct 2025 04:08:51 +1100 Subject: [PATCH 0533/1018] CI: macos-13 --> macos-15-intel (#29886) --- .github/workflows/macos.yml | 9 ++------- .github/workflows/wheels.yml | 8 ++------ 2 files changed, 4 insertions(+), 13 deletions(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index ef9e86f6ba70..868dac6dcbbc 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -22,7 +22,7 @@ jobs: name: macOS x86-64 conda # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' - runs-on: macos-13 + runs-on: macos-15-intel strategy: fail-fast: false matrix: @@ -113,7 +113,7 @@ jobs: fail-fast: false matrix: build_runner: - - [ macos-13, "macos_x86_64" ] + - [ macos-15-intel, "macos_x86_64" ] - [ macos-14, "macos_arm64" ] version: ["3.11", "3.14t-dev"] @@ -128,11 +128,6 @@ jobs: with: python-version: ${{ matrix.version }} - - uses: maxim-lobanov/setup-xcode@60606e260d2fc5762a71e64e74b2174e8ea3c8bd # v1.6.0 - if: ${{ matrix.build_runner[0] == 'macos-13' }} - with: - xcode-version: '14.3' - - name: Install dependencies run: | pip install -r requirements/build_requirements.txt diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 8e8c8768cd9c..5294c34cbb63 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -34,7 +34,7 @@ jobs: - [ubuntu-22.04, musllinux_x86_64, ""] - [ubuntu-22.04-arm, manylinux_aarch64, ""] - [ubuntu-22.04-arm, musllinux_aarch64, ""] - - [macos-13, macosx_x86_64, openblas] + - [macos-15-intel, macosx_x86_64, openblas] - [macos-14, macosx_arm64, openblas] - [windows-2022, win_amd64, ""] - [windows-11-arm, win_arm64, ""] @@ -72,7 +72,7 @@ jobs: echo "CIBW_ENVIRONMENT_WINDOWS=PKG_CONFIG_PATH=$CIBW" >> $env:GITHUB_ENV - name: Setup macOS - if: matrix.buildplat[0] == 'macos-13' || matrix.buildplat[0] == 'macos-14' + if: matrix.buildplat[0] == 'macos-15-intel' || matrix.buildplat[0] == 'macos-14' run: | # Needed due to https://github.com/actions/runner-images/issues/3371 # Supported versions: https://github.com/actions/runner-images/blob/main/images/macos/macos-14-arm64-Readme.md @@ -84,10 +84,6 @@ jobs: # only target Sonoma onwards CIBW="MACOSX_DEPLOYMENT_TARGET=14.0 INSTALL_OPENBLAS=false RUNNER_OS=macOS" echo "CIBW_ENVIRONMENT_MACOS=$CIBW" >> "$GITHUB_ENV" - - # the 
macos-13 image that's used for building the x86_64 wheel can't test - # a wheel with deployment target >= 14 without further work - echo "CIBW_TEST_SKIP=*-macosx_x86_64" >> "$GITHUB_ENV" else # macosx_x86_64 with OpenBLAS # if INSTALL_OPENBLAS isn't specified then scipy-openblas is automatically installed From 128d684bdc8d756c63341baab0fd7e4df3fbc09c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Oct 2025 12:51:49 -0600 Subject: [PATCH 0534/1018] MAINT: Bump github/codeql-action from 3.30.6 to 4.30.7 (#29891) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.30.6 to 4.30.7. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/64d10c13136e1c5bce3e5fbde8d4906eeaafc885...e296a935590eb16afc0c0108289f68c87e2a89a5) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.30.7 dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 0b055370119d..d6719c246c0d 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3.30.6 + uses: github/codeql-action/init@e296a935590eb16afc0c0108289f68c87e2a89a5 # v4.30.7 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3.30.6 + uses: github/codeql-action/autobuild@e296a935590eb16afc0c0108289f68c87e2a89a5 # v4.30.7 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3.30.6 + uses: github/codeql-action/analyze@e296a935590eb16afc0c0108289f68c87e2a89a5 # v4.30.7 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 3ea1074c5a9d..6fb0391abde5 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v2.1.27 + uses: github/codeql-action/upload-sarif@e296a935590eb16afc0c0108289f68c87e2a89a5 # v2.1.27 with: sarif_file: results.sarif From dbeb2514431d660ffaa97412958446577e7e0bd0 Mon Sep 17 00:00:00 2001 From: Inessa Pawson Date: Tue, 7 Oct 2025 21:35:46 -0400 Subject: [PATCH 0535/1018] Add Plausible analytics to the NumPy documentation --- doc/source/conf.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/source/conf.py b/doc/source/conf.py index eba0bd014fb0..0204818aa094 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -290,6 +290,10 @@ def setup(app): "json_url": "https://numpy.org/doc/_static/versions.json", }, "show_version_warning_banner": True, + "analytics": [ + "plausible_analytics_domain": "numpy.org/doc/stable/", + "plausible_analytics_url": "https://views.scientific-python.org/js/script.file-downloads.hash.outbound-links.js", + ] } html_title = f"{project} v{version} Manual" From 2a9315a84ffc5a79f9a56f749386321acb78d864 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Oct 2025 13:26:35 -0600 Subject: [PATCH 0536/1018] MAINT: Bump astral-sh/setup-uv from 6.8.0 to 7.0.0 (#29895) Bumps [astral-sh/setup-uv](https://github.com/astral-sh/setup-uv) from 6.8.0 to 7.0.0. - [Release notes](https://github.com/astral-sh/setup-uv/releases) - [Commits](https://github.com/astral-sh/setup-uv/compare/d0cc045d04ccac9d8b7881df0226f9e82c39688e...eb1897b8dc4b5d5bfe39a428a8f2304605e0983c) --- updated-dependencies: - dependency-name: astral-sh/setup-uv dependency-version: 7.0.0 dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/mypy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index c6b7c89b8f5c..8045d6bb3969 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -55,7 +55,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0 + - uses: astral-sh/setup-uv@eb1897b8dc4b5d5bfe39a428a8f2304605e0983c # v7.0.0 with: python-version: ${{ matrix.os_python[1] }} activate-environment: true From 4ddd342682e0ae8c28abf8f5509ed4baf93c424e Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 9 Oct 2025 01:50:34 +0200 Subject: [PATCH 0537/1018] BUG: Fixup float16 conversion error path and add tests (#29897) I had modified the code to not purely rely on PyErr_Occurred() for error propagation, but missed this conversion call. This also applied to float conversions actually. 
Signed-off-by: Sebastian Berg
---
 numpy/_core/src/multiarray/arraytypes.c.src |  6 ++++++
 numpy/_core/tests/test_array_coercion.py    | 21 +++++++++++++++++++++
 numpy/_core/tests/test_half.py              | 15 +++++++++++++++
 3 files changed, 42 insertions(+)

diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src
index da00596e7e24..d67bdd046c6d 100644
--- a/numpy/_core/src/multiarray/arraytypes.c.src
+++ b/numpy/_core/src/multiarray/arraytypes.c.src
@@ -124,6 +124,9 @@ static float
 MyPyFloat_AsFloat(PyObject *obj)
 {
     double d_val = MyPyFloat_AsDouble(obj);
+    if (error_converting(d_val)) {
+        return -1;
+    }
     float res = (float)d_val;
     if (NPY_UNLIKELY(npy_isinf(res) && !npy_isinf(d_val))) {
         if (PyUFunc_GiveFloatingpointErrors("cast", NPY_FPE_OVERFLOW) < 0) {
@@ -138,6 +141,9 @@ static npy_half
 MyPyFloat_AsHalf(PyObject *obj)
 {
     double d_val = MyPyFloat_AsDouble(obj);
+    if (error_converting(d_val)) {
+        return -1;
+    }
     npy_half res = npy_double_to_half(d_val);
     if (NPY_UNLIKELY(npy_half_isinf(res) && !npy_isinf(d_val))) {
         if (PyUFunc_GiveFloatingpointErrors("cast", NPY_FPE_OVERFLOW) < 0) {
diff --git a/numpy/_core/tests/test_array_coercion.py b/numpy/_core/tests/test_array_coercion.py
index 658c672d5f99..9b2a7a5bab85 100644
--- a/numpy/_core/tests/test_array_coercion.py
+++ b/numpy/_core/tests/test_array_coercion.py
@@ -904,3 +904,24 @@ def test_empty_string():
     assert_array_equal(res, b"")
     assert res.shape == (2, 10)
     assert res.dtype == "S1"
+
+
+@pytest.mark.parametrize("dtype", ["S", "U", object])
+@pytest.mark.parametrize("res_dt,huge_val",
+        [("float16", "1e30"), ("float32", "1e200")])
+def test_string_to_float_coercion_errors(dtype, res_dt, huge_val):
+    # This test primarily tests setitem
+    val = np.array(["3M"], dtype=dtype)[0]  # use the scalar
+
+    with pytest.raises(ValueError):
+        np.array(val, dtype=res_dt)
+
+    val = np.array([huge_val], dtype=dtype)[0]  # use the scalar
+
+    with np.errstate(all="warn"):
+        with pytest.warns(RuntimeWarning):
+            np.array(val, dtype=res_dt)
+
+    with np.errstate(all="raise"):
+        with pytest.raises(FloatingPointError):
+            np.array(val, dtype=res_dt)
diff --git a/numpy/_core/tests/test_half.py b/numpy/_core/tests/test_half.py
index ef13c5fbcde6..3ced5b466a44 100644
--- a/numpy/_core/tests/test_half.py
+++ b/numpy/_core/tests/test_half.py
@@ -93,6 +93,21 @@ def test_half_conversion_to_string(self, string_dt):
         arr = np.ones(3, dtype=np.float16).astype(string_dt)
         assert arr.dtype == expected_dt

+    @pytest.mark.parametrize("dtype", ["S", "U", object])
+    def test_to_half_cast_error(self, dtype):
+        arr = np.array(["3M"], dtype=dtype)
+        with pytest.raises(ValueError):
+            arr.astype(np.float16)
+
+        arr = np.array(["23490349034"], dtype=dtype)
+        with np.errstate(all="warn"):
+            with pytest.warns(RuntimeWarning):
+                arr.astype(np.float16)
+
+        with np.errstate(all="raise"):
+            with pytest.raises(FloatingPointError):
+                arr.astype(np.float16)
+
     @pytest.mark.parametrize("string_dt", ["S", "U"])
     def test_half_conversion_from_string(self, string_dt):
         string = np.array("3.1416", dtype=string_dt)

From 1ac04e0f7f64f672f8c2d2a04a82a2baaefa09eb Mon Sep 17 00:00:00 2001
From: Joren Hammudoglu
Date: Thu, 9 Oct 2025 03:02:14 +0200
Subject: [PATCH 0538/1018] TYP: add missing `__slots__` (#29901)

---
 numpy/__init__.pyi               |  2 ++
 numpy/exceptions.pyi             |  2 ++
 numpy/lib/_index_tricks_impl.pyi | 12 ++++++++++++
 numpy/lib/mixins.pyi             |  2 ++
 numpy/ma/extras.pyi              |  4 ++++
 5 files changed, 22 insertions(+)

diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index
d29bb196a070..aed762195a76 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -5736,6 +5736,8 @@ pow = power true_divide = divide class errstate: + __slots__ = "_all", "_call", "_divide", "_invalid", "_over", "_token", "_under" + def __init__( self, *, diff --git a/numpy/exceptions.pyi b/numpy/exceptions.pyi index 9ed50927d070..9bcc097dfc0f 100644 --- a/numpy/exceptions.pyi +++ b/numpy/exceptions.pyi @@ -17,6 +17,8 @@ class TooHardError(RuntimeError): ... class DTypePromotionError(TypeError): ... class AxisError(ValueError, IndexError): + __slots__ = "_msg", "axis", "ndim" + axis: int | None ndim: int | None @overload diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index c9ee8a5b0bb7..b624cfff4481 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -106,6 +106,8 @@ class ndindex: def ndincr(self, /) -> None: ... class nd_grid(Generic[_BoolT_co]): + __slots__ = ("sparse",) + sparse: _BoolT_co def __init__(self, sparse: _BoolT_co = ...) -> None: ... @overload @@ -115,10 +117,14 @@ class nd_grid(Generic[_BoolT_co]): @final class MGridClass(nd_grid[L[False]]): + __slots__ = () + def __init__(self) -> None: ... @final class OGridClass(nd_grid[L[True]]): + __slots__ = () + def __init__(self) -> None: ... class AxisConcatenator(Generic[_AxisT_co, _MatrixT_co, _NDMinT_co, _Trans1DT_co]): @@ -155,13 +161,19 @@ class AxisConcatenator(Generic[_AxisT_co, _MatrixT_co, _NDMinT_co, _Trans1DT_co] @final class RClass(AxisConcatenator[L[0], L[False], L[1], L[-1]]): + __slots__ = () + def __init__(self, /) -> None: ... @final class CClass(AxisConcatenator[L[-1], L[False], L[2], L[0]]): + __slots__ = () + def __init__(self, /) -> None: ... class IndexExpression(Generic[_BoolT_co]): + __slots__ = ("maketuple",) + maketuple: _BoolT_co def __init__(self, maketuple: _BoolT_co) -> None: ... @overload diff --git a/numpy/lib/mixins.pyi b/numpy/lib/mixins.pyi index f23c58fa6586..1572f6bee289 100644 --- a/numpy/lib/mixins.pyi +++ b/numpy/lib/mixins.pyi @@ -13,6 +13,8 @@ __all__ = ["NDArrayOperatorsMixin"] # As such, only little type safety can be provided here. class NDArrayOperatorsMixin(ABC): + __slots__ = () + @abstractmethod def __array_ufunc__( self, diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 5a7b2b399ea8..034b309af080 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -110,12 +110,16 @@ def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): ... def corrcoef(x, y=None, rowvar=True, bias=..., allow_masked=True, ddof=...): ... class MAxisConcatenator(AxisConcatenator): + __slots__ = () + @staticmethod def concatenate(arrays: Incomplete, axis: int = 0) -> Incomplete: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] @classmethod def makemat(cls, arr: Incomplete) -> Incomplete: ... # type: ignore[override] # pyright: ignore[reportIncompatibleVariableOverride] class mr_class(MAxisConcatenator): + __slots__ = () + def __init__(self) -> None: ... mr_: mr_class From 8e30aa91b17d1cb466a2341aeba1b039d1cf6592 Mon Sep 17 00:00:00 2001 From: Swayam Date: Thu, 9 Oct 2025 15:00:37 +0530 Subject: [PATCH 0539/1018] BUG: Ensure backwards compatibility for patching finfo (#29899) Ensure attributes are settable. 
JAX uses this to subclass and then patch its own finfo objects.

---------

Co-authored-by: Sebastian Berg
---
 numpy/_core/getlimits.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/numpy/_core/getlimits.py b/numpy/_core/getlimits.py
index 1317c8b1ea0a..b8b6f69cd5b7 100644
--- a/numpy/_core/getlimits.py
+++ b/numpy/_core/getlimits.py
@@ -258,11 +258,11 @@ def iexp(self):
         return math.ceil(math.log2(self.maxexp - self.minexp))

     def __str__(self):
-        if self._fmt is not None:
-            return self._fmt
+        if (fmt := getattr(self, "_fmt", None)) is not None:
+            return fmt

         def get_str(name, pad=None):
-            if (val := getattr(self, name)) is None:
+            if (val := getattr(self, name, None)) is None:
                 return ""
             if pad is not None:
                 s = str(val).ljust(pad)
@@ -303,8 +303,8 @@ def get_str(name, pad=None):
         return fmt

     def __repr__(self):
-        if self._repr is not None:
-            return self._repr
+        if (repr_str := getattr(self, "_repr", None)) is not None:
+            return repr_str

         c = self.__class__.__name__
@@ -324,7 +324,7 @@ def __repr__(self):
         self._repr = repr_str
         return repr_str

-    @property
+    @cached_property
    def tiny(self):
        """Return the value for tiny, alias of smallest_normal.

From 50ddf5be5ecade9e59dfdfbaed6a04a417cb4759 Mon Sep 17 00:00:00 2001
From: Matti Picus
Date: Thu, 9 Oct 2025 13:26:24 +0300
Subject: [PATCH 0540/1018] BLD: do not use matplotlib 3.10.6 [skip azp][skip
 actions][skip cirrus] (#29906)

---
 requirements/doc_requirements.txt | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/requirements/doc_requirements.txt b/requirements/doc_requirements.txt
index 9d006b961923..b8f5cb2bd8fd 100644
--- a/requirements/doc_requirements.txt
+++ b/requirements/doc_requirements.txt
@@ -5,8 +5,7 @@ pydata-sphinx-theme>=0.15.2
 sphinx-copybutton
 sphinx-design
 scipy
-matplotlib
-pyparsing<3.3
+matplotlib!=3.10.6
 pandas
 breathe>4.33.0
 ipython!=8.1.0

From 5fe21fbae8125c032b4e23606be5b3a11d25dd4a Mon Sep 17 00:00:00 2001
From: "hpkfft.com"
Date: Thu, 9 Oct 2025 04:26:35 -0700
Subject: [PATCH 0541/1018] ENH: Set DLPack tensor `shape` and `strides` to
 NULL iff `ndim == 0`. (#29872)

This assumes a slight shift in the dlpack interpretation:
https://github.com/dmlc/dlpack/issues/177

---

DLPack allows, but does not require, DLTensor.strides to be NULL if
PyArray_IS_C_CONTIGUOUS. Previously, in this case, NumPy set the strides
pointer to NULL. Now, NumPy will supply a valid pointer to correctly
initialized strides when ndim > 0.

This change can improve performance for downstream users. For example,
nanobind can be used to import a NumPy array using the DLPack protocol
to make the array available to C++ code. If DLTensor.strides is NULL,
nanobind allocates and initializes a strides array. This redundant work
is now avoided since NumPy provides a pointer to the strides, which it
allocated and initialized anyway.

If ndim == 0, this PR sets both shape and strides to NULL. Previously,
strides was set to NULL (since PyArray_IS_C_CONTIGUOUS), but shape
pointed past the end of allocated memory. For security and debuggability,
NULL is preferred.
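An illustrative round trip of the 0-d case (not part of this patch; it
uses only public NumPy API):

    import numpy as np

    x = np.array(3.0)       # ndim == 0, so shape and strides are now NULL
    y = np.from_dlpack(x)   # a consumer must not dereference either pointer
    assert y.shape == () and y == x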
---
 numpy/_core/src/multiarray/dlpack.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/numpy/_core/src/multiarray/dlpack.c b/numpy/_core/src/multiarray/dlpack.c
index ac37a04c30c6..62cd137daa7c 100644
--- a/numpy/_core/src/multiarray/dlpack.c
+++ b/numpy/_core/src/multiarray/dlpack.c
@@ -285,10 +285,6 @@ fill_dl_tensor_information(
     }

     dl_tensor->ndim = ndim;
-    if (PyArray_IS_C_CONTIGUOUS(self)) {
-        /* No need to pass strides, so just NULL it again */
-        dl_tensor->strides = NULL;
-    }
     dl_tensor->byte_offset = 0;

     return 0;
@@ -351,9 +347,8 @@ create_dlpack_capsule(
         dl_tensor = &managed->dl_tensor;
     }

-    dl_tensor->shape = (int64_t *)((char *)ptr + offset);
-    /* Note that strides may be set to NULL later if C-contiguous */
-    dl_tensor->strides = dl_tensor->shape + ndim;
+    dl_tensor->shape = (ndim > 0) ? (int64_t *)((char *)ptr + offset) : NULL;
+    dl_tensor->strides = (ndim > 0) ? dl_tensor->shape + ndim : NULL;

     if (fill_dl_tensor_information(dl_tensor, self, result_device) < 0) {
         PyMem_Free(ptr);

From 7b80c99ef668f49ddc0bbb94029454e81c83371d Mon Sep 17 00:00:00 2001
From: William Pursell
Date: Thu, 9 Oct 2025 05:32:44 -0600
Subject: [PATCH 0542/1018] BLD: Add missing (#29704)

This is required for the vendored-meson build on macOS with some versions
of clang.

---
 numpy/_core/src/multiarray/unique.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/numpy/_core/src/multiarray/unique.cpp b/numpy/_core/src/multiarray/unique.cpp
index d31fed6f6908..979fb8fae0a4 100644
--- a/numpy/_core/src/multiarray/unique.cpp
+++ b/numpy/_core/src/multiarray/unique.cpp
@@ -9,6 +9,7 @@
 #include
 #include
 #include
+#include
 #include

 #include "numpy/arrayobject.h"

From 1361720c6d6e7429a4bfa1d0712fd0306ca449d3 Mon Sep 17 00:00:00 2001
From: Anik Chand
Date: Thu, 9 Oct 2025 20:57:50 +0530
Subject: [PATCH 0543/1018] DEP: add release note for linalg/fft deprecation
 finalization

---
 doc/release/upcoming_changes/29000.expiring.rst | 10 ++++++++++
 1 file changed, 10 insertions(+)
 create mode 100644 doc/release/upcoming_changes/29000.expiring.rst

diff --git a/doc/release/upcoming_changes/29000.expiring.rst b/doc/release/upcoming_changes/29000.expiring.rst
new file mode 100644
index 000000000000..2353f38a7083
--- /dev/null
+++ b/doc/release/upcoming_changes/29000.expiring.rst
@@ -0,0 +1,10 @@
+Deprecated features removed in 2.2.0
+====================================
+
+The following deprecated functions and classes have been removed:
+
+* :mod:`numpy.linalg.linalg`
+  Previously deprecated, use :mod:`numpy.linalg` instead.
+
+* :mod:`numpy.fft.helper`
+  Previously deprecated, use :mod:`numpy.fft` instead.
\ No newline at end of file

From 53b0d99f9c01bc9837bda4b4fa6c2d8752c09c76 Mon Sep 17 00:00:00 2001
From: Sayed Awad
Date: Thu, 9 Oct 2025 18:40:11 +0300
Subject: [PATCH 0544/1018] BUG: Fix float16-sort failures on 32-bit x86 MSVC
 (#29908)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The failures are triggered when the Intel x86 sort AVX‑512 kernels for
16‑bit are enabled at build time and the CPU/OS also supports them. A
quick look at `zmm_vector::ge(reg_t, reg_t)` suggests that it does not
generate the correct instructions.

This patch does not actually fix the underlying bug; instead, it
disables these kernels on 32‑bit MSVC builds as a stop‑gap, since the
issue requires further investigation and an upstream fix.
Note: Newer NumPy releases may drop AVX‑512 support on 32‑bit entirely,
for all compilers, and enable at most AVX2, as part of gh-28896.

---
 numpy/_core/meson.build | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build
index 0d8c513d4fde..35a792c29909 100644
--- a/numpy/_core/meson.build
+++ b/numpy/_core/meson.build
@@ -867,7 +867,9 @@ foreach gen_mtargets : [
   [
     'x86_simd_qsort_16bit.dispatch.h',
     'src/npysort/x86_simd_qsort_16bit.dispatch.cpp',
-    use_intel_sort ? [AVX512_SPR, AVX512_ICL] : []
+    # Do not enable AVX-512 on MSVC 32-bit (x86): it’s buggy there;
+    # Ref: NumPy issue numpy/numpy#29808
+    use_intel_sort and not (cc.get_id() == 'msvc' and cpu_family == 'x86') ? [AVX512_SPR, AVX512_ICL] : []
   ],
   [
     'highway_qsort.dispatch.h',

From e89ec589000e471f04c71b676c866baec05ecd7d Mon Sep 17 00:00:00 2001
From: Sandeep Gupta
Date: Thu, 9 Oct 2025 22:42:35 +0530
Subject: [PATCH 0545/1018] BUG: Fix INT_MIN % -1 to return 0 for all signed
 integer types (#29893)

* BUG: Fix INT_MIN % -1 to return 0 for all signed integer types

- Explicitly check for INT_MIN % -1 in scalar tail loops for fmod and
  remainder kernels.
- Set result to 0 to avoid undefined behavior and match NumPy/Python
  expectations.
- Ensures correct, portable behavior on all platforms (e.g., PPC64LE).

* Apply suggestion from @seberg

---------

Co-authored-by: Sebastian Berg
---
 numpy/_core/src/umath/loops_modulo.dispatch.c.src | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/numpy/_core/src/umath/loops_modulo.dispatch.c.src b/numpy/_core/src/umath/loops_modulo.dispatch.c.src
index 032cc3344060..4645fe14a487 100644
--- a/numpy/_core/src/umath/loops_modulo.dispatch.c.src
+++ b/numpy/_core/src/umath/loops_modulo.dispatch.c.src
@@ -490,12 +490,16 @@ vsx4_simd_@func@_by_scalar_contig_@sfx@(char **args, npy_intp len)
 #else /* fmod and remainder */
     for (; len > 0; --len, ++src1, ++dst1) {
         const npyv_lanetype_@sfx@ a = *src1;
-        *dst1 = a % scalar;
+        if (NPY_UNLIKELY(a == NPY_MIN_INT@len@ && scalar == -1)) {
+            *dst1 = 0;
+        } else {
+            *dst1 = a % scalar;
 #if @id@ == 1 /* remainder */
-        if (!((a > 0) == (scalar > 0) || *dst1 == 0)) {
-            *dst1 += scalar;
-        }
+            if (!((a > 0) == (scalar > 0) || *dst1 == 0)) {
+                *dst1 += scalar;
+            }
 #endif
+        }
     }
 #endif
     npyv_cleanup();

From e7d96553eb02a9093a08f5f0913e0ab72042ef0f Mon Sep 17 00:00:00 2001
From: ANIK CHAND <161185149+anikchand461@users.noreply.github.com>
Date: Thu, 9 Oct 2025 23:10:22 +0530
Subject: [PATCH 0546/1018] DEP: remove linalg.py (finalize deprecation)

---
 numpy/linalg/linalg.py | 16 ----------------
 1 file changed, 16 deletions(-)

diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index 81c80d0fd690..8b137891791f 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -1,17 +1 @@
-def __getattr__(attr_name):
-    import warnings
-    from numpy.linalg import _linalg
-    ret = getattr(_linalg, attr_name, None)
-    if ret is None:
-        raise AttributeError(
-            f"module 'numpy.linalg.linalg' has no attribute {attr_name}")
-    warnings.warn(
-        "The numpy.linalg.linalg has been made private and renamed to "
-        "numpy.linalg._linalg. All public functions exported by it are "
-        f"available from numpy.linalg. Please use numpy.linalg.{attr_name} "
-        "instead.",
-        DeprecationWarning,
-        stacklevel=3
-    )
-    return ret

From 41ba30f4556f60975068864aef93df24bbcf1a74 Mon Sep 17 00:00:00 2001
From: ANIK CHAND <161185149+anikchand461@users.noreply.github.com>
Date: Thu, 9 Oct 2025 23:11:49 +0530
Subject: [PATCH 0547/1018] DEP: remove fft/helper.py (finalize deprecation)

---
 numpy/fft/helper.py | 16 ----------------
 1 file changed, 16 deletions(-)

diff --git a/numpy/fft/helper.py b/numpy/fft/helper.py
index 08d5662c6d17..8b137891791f 100644
--- a/numpy/fft/helper.py
+++ b/numpy/fft/helper.py
@@ -1,17 +1 @@
-def __getattr__(attr_name):
-    import warnings
-    from numpy.fft import _helper
-    ret = getattr(_helper, attr_name, None)
-    if ret is None:
-        raise AttributeError(
-            f"module 'numpy.fft.helper' has no attribute {attr_name}")
-    warnings.warn(
-        "The numpy.fft.helper has been made private and renamed to "
-        "numpy.fft._helper. All four functions exported by it (i.e. fftshift, "
-        "ifftshift, fftfreq, rfftfreq) are available from numpy.fft. "
-        f"Please use numpy.fft.{attr_name} instead.",
-        DeprecationWarning,
-        stacklevel=3
-    )
-    return ret

From eba3d75e401bb1090b6ec389fd81aa65706a467a Mon Sep 17 00:00:00 2001
From: Joren Hammudoglu
Date: Thu, 9 Oct 2025 19:53:38 +0200
Subject: [PATCH 0548/1018] TYP: parameters with missing default value (#29905)

---
 numpy/__init__.pyi                | 74 +++----
 numpy/_core/defchararray.pyi      | 30 +--
 numpy/_core/multiarray.pyi        | 74 +++----
 numpy/_pytesttester.pyi           | 14 +-
 numpy/_typing/_ufunc.pyi          |  4 +-
 numpy/exceptions.pyi              |  4 +-
 numpy/fft/_pocketfft.pyi          |  8 +-
 numpy/lib/_datasource.pyi         |  6 +-
 numpy/lib/_function_base_impl.pyi |  4 +-
 numpy/lib/_index_tricks_impl.pyi  | 10 +-
 numpy/lib/_npyio_impl.pyi         |  6 +-
 numpy/lib/_stride_tricks_impl.pyi |  2 +-
 numpy/lib/_twodim_base_impl.pyi   |  4 +-
 numpy/linalg/_linalg.pyi          |  8 +-
 numpy/polynomial/_polybase.pyi    | 10 +-
 numpy/polynomial/_polytypes.pyi   |  6 +-
 numpy/polynomial/chebyshev.pyi    |  6 +-
 numpy/random/_generator.pyi       | 313 ++++++++++++++---------------
 numpy/random/_mt19937.pyi         |  2 +-
 numpy/random/_pcg64.pyi           |  4 +-
 numpy/random/_philox.pyi          |  2 +-
 numpy/random/mtrand.pyi           | 318 +++++++++++++++---------------
 numpy/testing/_private/utils.pyi  |  8 +-
 23 files changed, 459 insertions(+), 458 deletions(-)

diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index aed762195a76..7d0bd028c826 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -1641,11 +1641,11 @@ class flatiter(Generic[_ArrayT_co]):
         value: Any,
     ) -> None: ...
     @overload
-    def __array__(self: flatiter[ndarray[_1DShapeT, _DTypeT]], dtype: None = ..., /) -> ndarray[_1DShapeT, _DTypeT]: ...
+    def __array__(self: flatiter[ndarray[_1DShapeT, _DTypeT]], dtype: None = None, /) -> ndarray[_1DShapeT, _DTypeT]: ...
     @overload
     def __array__(self: flatiter[ndarray[_1DShapeT, Any]], dtype: _DTypeT, /) -> ndarray[_1DShapeT, _DTypeT]: ...
     @overload
-    def __array__(self: flatiter[ndarray[Any, _DTypeT]], dtype: None = ..., /) -> ndarray[_AnyShape, _DTypeT]: ...
+    def __array__(self: flatiter[ndarray[Any, _DTypeT]], dtype: None = None, /) -> ndarray[_AnyShape, _DTypeT]: ...
     @overload
     def __array__(self, dtype: _DTypeT, /) -> ndarray[_AnyShape, _DTypeT]: ...

@@ -2346,9 +2346,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     # 1D + 1D returns a scalar;
     # all other with at least 1 non-0D array return an ndarray.
     @overload
-    def dot(self, b: _ScalarLike_co, out: None = ...) -> NDArray[Any]: ...
+    def dot(self, b: _ScalarLike_co, out: None = None) -> NDArray[Any]: ...
@overload - def dot(self, b: ArrayLike, out: None = ...) -> Any: ... + def dot(self, b: ArrayLike, out: None = None) -> Any: ... @overload def dot(self, b: ArrayLike, out: _ArrayT) -> _ArrayT: ... @@ -2391,7 +2391,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., dtype: DTypeLike | None = ..., - out: None = ..., + out: None = None, ) -> Any: ... @overload def trace( @@ -2418,7 +2418,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): self: NDArray[_ScalarT], indices: _IntLike_co, axis: SupportsIndex | None = ..., - out: None = ..., + out: None = None, mode: _ModeKind = ..., ) -> _ScalarT: ... @overload @@ -2426,7 +2426,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): self, indices: _ArrayLikeInt_co, axis: SupportsIndex | None = ..., - out: None = ..., + out: None = None, mode: _ModeKind = ..., ) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload @@ -3660,7 +3660,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): self, indices: _IntLike_co, axis: SupportsIndex | None = ..., - out: None = ..., + out: None = None, mode: _ModeKind = ..., ) -> Self: ... @overload @@ -3668,7 +3668,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): self, indices: _ArrayLikeInt_co, axis: SupportsIndex | None = ..., - out: None = ..., + out: None = None, mode: _ModeKind = ..., ) -> NDArray[Self]: ... @overload @@ -5742,11 +5742,11 @@ class errstate: self, *, call: _ErrCall = ..., - all: _ErrKind | None = ..., - divide: _ErrKind | None = ..., - over: _ErrKind | None = ..., - under: _ErrKind | None = ..., - invalid: _ErrKind | None = ..., + all: _ErrKind | None = None, + divide: _ErrKind | None = None, + over: _ErrKind | None = None, + under: _ErrKind | None = None, + invalid: _ErrKind | None = None, ) -> None: ... def __enter__(self) -> None: ... def __exit__( @@ -5877,37 +5877,37 @@ class memmap(ndarray[_ShapeT_co, _DTypeT_co]): subtype, filename: StrOrBytesPath | _SupportsFileMethodsRW, dtype: type[uint8] = ..., - mode: _MemMapModeKind = ..., - offset: int = ..., - shape: int | tuple[int, ...] | None = ..., - order: _OrderKACF = ..., + mode: _MemMapModeKind = "r+", + offset: int = 0, + shape: int | tuple[int, ...] | None = None, + order: _OrderKACF = "C", ) -> memmap[Any, dtype[uint8]]: ... @overload def __new__( subtype, filename: StrOrBytesPath | _SupportsFileMethodsRW, dtype: _DTypeLike[_ScalarT], - mode: _MemMapModeKind = ..., - offset: int = ..., - shape: int | tuple[int, ...] | None = ..., - order: _OrderKACF = ..., + mode: _MemMapModeKind = "r+", + offset: int = 0, + shape: int | tuple[int, ...] | None = None, + order: _OrderKACF = "C", ) -> memmap[Any, dtype[_ScalarT]]: ... @overload def __new__( subtype, filename: StrOrBytesPath | _SupportsFileMethodsRW, dtype: DTypeLike, - mode: _MemMapModeKind = ..., - offset: int = ..., - shape: int | tuple[int, ...] | None = ..., - order: _OrderKACF = ..., + mode: _MemMapModeKind = "r+", + offset: int = 0, + shape: int | tuple[int, ...] | None = None, + order: _OrderKACF = "C", ) -> memmap[Any, dtype]: ... def __array_finalize__(self, obj: object) -> None: ... def __array_wrap__( self, array: memmap[_ShapeT_co, _DTypeT_co], - context: tuple[ufunc, tuple[Any, ...], int] | None = ..., - return_scalar: builtins.bool = ..., + context: tuple[ufunc, tuple[Any, ...], int] | None = None, + return_scalar: builtins.bool = False, ) -> Any: ... def flush(self) -> None: ... 
@@ -5923,11 +5923,11 @@ class vectorize: def __init__( self, pyfunc: Callable[..., Any], - otypes: str | Iterable[DTypeLike] | None = ..., - doc: str | None = ..., - excluded: Iterable[int | str] | None = ..., - cache: builtins.bool = ..., - signature: str | None = ..., + otypes: str | Iterable[DTypeLike] | None = None, + doc: str | None = None, + excluded: Iterable[int | str] | None = None, + cache: builtins.bool = False, + signature: str | None = None, ) -> None: ... def __call__(self, *args: Any, **kwargs: Any) -> Any: ... @@ -5980,8 +5980,8 @@ class poly1d: def __init__( self, c_or_r: ArrayLike, - r: builtins.bool = ..., - variable: str | None = ..., + r: builtins.bool = False, + variable: str | None = None, ) -> None: ... def __len__(self) -> int: ... def __neg__(self) -> poly1d: ... @@ -6011,8 +6011,8 @@ class matrix(ndarray[_2DShapeT_co, _DTypeT_co]): def __new__( subtype, # pyright: ignore[reportSelfClsParameterName] data: ArrayLike, - dtype: DTypeLike | None = ..., - copy: builtins.bool = ..., + dtype: DTypeLike | None = None, + copy: builtins.bool = True, ) -> matrix[_2D, Incomplete]: ... def __array_finalize__(self, obj: object) -> None: ... diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 9b86c2a33e35..815d1397b39c 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -106,12 +106,12 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): def __new__( subtype, shape: _ShapeLike, - itemsize: SupportsIndex | SupportsInt = ..., - unicode: L[False] = ..., - buffer: _SupportsBuffer = ..., - offset: SupportsIndex = ..., - strides: _ShapeLike = ..., - order: _OrderKACF = ..., + itemsize: SupportsIndex | SupportsInt = 1, + unicode: L[False] = False, + buffer: _SupportsBuffer | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, + order: _OrderKACF = "C", ) -> _CharArray[bytes_]: ... @overload def __new__( @@ -119,22 +119,22 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): shape: _ShapeLike, itemsize: SupportsIndex | SupportsInt, unicode: L[True], - buffer: _SupportsBuffer = ..., - offset: SupportsIndex = ..., - strides: _ShapeLike = ..., - order: _OrderKACF = ..., + buffer: _SupportsBuffer | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, + order: _OrderKACF = "C", ) -> _CharArray[str_]: ... @overload def __new__( subtype, shape: _ShapeLike, - itemsize: SupportsIndex | SupportsInt = ..., + itemsize: SupportsIndex | SupportsInt = 1, *, unicode: L[True], - buffer: _SupportsBuffer = ..., - offset: SupportsIndex = ..., - strides: _ShapeLike = ..., - order: _OrderKACF = ..., + buffer: _SupportsBuffer | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, + order: _OrderKACF = "C", ) -> _CharArray[str_]: ... def __array_finalize__(self, obj: object) -> None: ... diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 8ab52b6fe74d..b90453d66d02 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -249,7 +249,7 @@ class _ConstructorEmpty(Protocol): self, /, shape: SupportsIndex, - dtype: None = ..., + dtype: None = None, order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], ) -> _Array1D[float64]: ... @@ -287,7 +287,7 @@ class _ConstructorEmpty(Protocol): self, /, shape: _AnyShapeT, - dtype: None = ..., + dtype: None = None, order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], ) -> _Array[_AnyShapeT, float64]: ... 
@@ -324,7 +324,7 @@ class _ConstructorEmpty(Protocol): def __call__( self, /, shape: _ShapeLike, - dtype: None = ..., + dtype: None = None, order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], ) -> NDArray[float64]: ... @@ -400,7 +400,7 @@ empty: Final[_ConstructorEmpty] @overload def empty_like( prototype: _ArrayT, - dtype: None = ..., + dtype: None = None, order: _OrderKACF = ..., subok: bool = ..., shape: _ShapeLike | None = ..., @@ -410,7 +410,7 @@ def empty_like( @overload def empty_like( prototype: _ArrayLike[_ScalarT], - dtype: None = ..., + dtype: None = None, order: _OrderKACF = ..., subok: bool = ..., shape: _ShapeLike | None = ..., @@ -527,9 +527,9 @@ def concatenate( # type: ignore[misc] arrays: _ArrayLike[_ScalarT], /, axis: SupportsIndex | None = ..., - out: None = ..., + out: None = None, *, - dtype: None = ..., + dtype: None = None, casting: _CastingKind | None = ... ) -> NDArray[_ScalarT]: ... @overload @@ -537,7 +537,7 @@ def concatenate( # type: ignore[misc] arrays: SupportsLenAndGetItem[ArrayLike], /, axis: SupportsIndex | None = ..., - out: None = ..., + out: None = None, *, dtype: _DTypeLike[_ScalarT], casting: _CastingKind | None = ... @@ -547,7 +547,7 @@ def concatenate( # type: ignore[misc] arrays: SupportsLenAndGetItem[ArrayLike], /, axis: SupportsIndex | None = ..., - out: None = ..., + out: None = None, *, dtype: DTypeLike | None = None, casting: _CastingKind | None = ... @@ -608,7 +608,7 @@ def min_scalar_type(a: ArrayLike, /) -> dtype: ... def result_type(*arrays_and_dtypes: ArrayLike | DTypeLike | None) -> dtype: ... @overload -def dot(a: ArrayLike, b: ArrayLike, out: None = ...) -> Any: ... +def dot(a: ArrayLike, b: ArrayLike, out: None = None) -> Any: ... @overload def dot(a: ArrayLike, b: ArrayLike, out: _ArrayT) -> _ArrayT: ... @@ -682,7 +682,7 @@ def may_share_memory( @overload def asarray( a: _ArrayLike[_ScalarT], - dtype: None = ..., + dtype: None = None, order: _OrderKACF = ..., *, device: L["cpu"] | None = ..., @@ -713,7 +713,7 @@ def asarray( @overload def asanyarray( a: _ArrayT, # Preserve subclass-information - dtype: None = ..., + dtype: None = None, order: _OrderKACF = ..., *, device: L["cpu"] | None = ..., @@ -723,7 +723,7 @@ def asanyarray( @overload def asanyarray( a: _ArrayLike[_ScalarT], - dtype: None = ..., + dtype: None = None, order: _OrderKACF = ..., *, device: L["cpu"] | None = ..., @@ -754,7 +754,7 @@ def asanyarray( @overload def ascontiguousarray( a: _ArrayLike[_ScalarT], - dtype: None = ..., + dtype: None = None, *, like: _SupportsArrayFunc | None = ..., ) -> NDArray[_ScalarT]: ... @@ -776,7 +776,7 @@ def ascontiguousarray( @overload def asfortranarray( a: _ArrayLike[_ScalarT], - dtype: None = ..., + dtype: None = None, *, like: _SupportsArrayFunc | None = ..., ) -> NDArray[_ScalarT]: ... @@ -801,7 +801,7 @@ def promote_types(__type1: DTypeLike, __type2: DTypeLike) -> dtype: ... @overload def fromstring( string: str | bytes, - dtype: None = ..., + dtype: None = None, count: SupportsIndex = ..., *, sep: str, @@ -832,7 +832,7 @@ def frompyfunc( # type: ignore[overload-overlap] nin: L[1], nout: L[1], *, - identity: None = ..., + identity: None = None, ) -> _PyFunc_Nin1_Nout1[_ReturnType, None]: ... @overload def frompyfunc( # type: ignore[overload-overlap] @@ -848,7 +848,7 @@ def frompyfunc( # type: ignore[overload-overlap] nin: L[2], nout: L[1], *, - identity: None = ..., + identity: None = None, ) -> _PyFunc_Nin2_Nout1[_ReturnType, None]: ... 
@overload def frompyfunc( # type: ignore[overload-overlap] @@ -864,7 +864,7 @@ def frompyfunc( # type: ignore[overload-overlap] nin: _Nin, nout: L[1], *, - identity: None = ..., + identity: None = None, ) -> _PyFunc_Nin3P_Nout1[_ReturnType, None, _Nin]: ... @overload def frompyfunc( # type: ignore[overload-overlap] @@ -880,7 +880,7 @@ def frompyfunc( nin: _Nin, nout: _Nout, *, - identity: None = ..., + identity: None = None, ) -> _PyFunc_Nin1P_Nout2P[_ReturnType, None, _Nin, _Nout]: ... @overload def frompyfunc( @@ -902,7 +902,7 @@ def frompyfunc( @overload def fromfile( file: StrOrBytesPath | _SupportsFileMethods, - dtype: None = ..., + dtype: None = None, count: SupportsIndex = ..., sep: str = ..., offset: SupportsIndex = ..., @@ -950,7 +950,7 @@ def fromiter( @overload def frombuffer( buffer: _SupportsBuffer, - dtype: None = ..., + dtype: None = None, count: SupportsIndex = ..., offset: SupportsIndex = ..., *, @@ -979,7 +979,7 @@ def frombuffer( def arange( # type: ignore[misc] stop: _IntLike_co, /, *, - dtype: None = ..., + dtype: None = None, device: L["cpu"] | None = ..., like: _SupportsArrayFunc | None = ..., ) -> _Array1D[signedinteger]: ... @@ -988,7 +988,7 @@ def arange( # type: ignore[misc] start: _IntLike_co, stop: _IntLike_co, step: _IntLike_co = ..., - dtype: None = ..., + dtype: None = None, *, device: L["cpu"] | None = ..., like: _SupportsArrayFunc | None = ..., @@ -997,7 +997,7 @@ def arange( # type: ignore[misc] def arange( # type: ignore[misc] stop: _FloatLike_co, /, *, - dtype: None = ..., + dtype: None = None, device: L["cpu"] | None = ..., like: _SupportsArrayFunc | None = ..., ) -> _Array1D[floating]: ... @@ -1006,7 +1006,7 @@ def arange( # type: ignore[misc] start: _FloatLike_co, stop: _FloatLike_co, step: _FloatLike_co = ..., - dtype: None = ..., + dtype: None = None, *, device: L["cpu"] | None = ..., like: _SupportsArrayFunc | None = ..., @@ -1015,7 +1015,7 @@ def arange( # type: ignore[misc] def arange( stop: _TD64Like_co, /, *, - dtype: None = ..., + dtype: None = None, device: L["cpu"] | None = ..., like: _SupportsArrayFunc | None = ..., ) -> _Array1D[timedelta64]: ... @@ -1024,7 +1024,7 @@ def arange( start: _TD64Like_co, stop: _TD64Like_co, step: _TD64Like_co = ..., - dtype: None = ..., + dtype: None = None, *, device: L["cpu"] | None = ..., like: _SupportsArrayFunc | None = ..., @@ -1034,7 +1034,7 @@ def arange( # both start and stop must always be specified for datetime64 start: datetime64, stop: datetime64, step: datetime64 = ..., - dtype: None = ..., + dtype: None = None, *, device: L["cpu"] | None = ..., like: _SupportsArrayFunc | None = ..., @@ -1090,7 +1090,7 @@ def busday_count( # type: ignore[misc] weekmask: ArrayLike = ..., holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., busdaycal: busdaycalendar | None = ..., - out: None = ..., + out: None = None, ) -> int_: ... @overload def busday_count( # type: ignore[misc] @@ -1099,7 +1099,7 @@ def busday_count( # type: ignore[misc] weekmask: ArrayLike = ..., holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., busdaycal: busdaycalendar | None = ..., - out: None = ..., + out: None = None, ) -> NDArray[int_]: ... @overload def busday_count( @@ -1130,7 +1130,7 @@ def busday_offset( # type: ignore[misc] weekmask: ArrayLike = ..., holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., busdaycal: busdaycalendar | None = ..., - out: None = ..., + out: None = None, ) -> datetime64: ... 
@overload def busday_offset( # type: ignore[misc] @@ -1140,7 +1140,7 @@ def busday_offset( # type: ignore[misc] weekmask: ArrayLike = ..., holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., busdaycal: busdaycalendar | None = ..., - out: None = ..., + out: None = None, ) -> NDArray[datetime64]: ... @overload def busday_offset( # type: ignore[misc] @@ -1171,7 +1171,7 @@ def busday_offset( # type: ignore[misc] weekmask: ArrayLike = ..., holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., busdaycal: busdaycalendar | None = ..., - out: None = ..., + out: None = None, ) -> datetime64: ... @overload def busday_offset( # type: ignore[misc] @@ -1181,7 +1181,7 @@ def busday_offset( # type: ignore[misc] weekmask: ArrayLike = ..., holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., busdaycal: busdaycalendar | None = ..., - out: None = ..., + out: None = None, ) -> NDArray[datetime64]: ... @overload def busday_offset( @@ -1211,7 +1211,7 @@ def is_busday( # type: ignore[misc] weekmask: ArrayLike = ..., holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., busdaycal: busdaycalendar | None = ..., - out: None = ..., + out: None = None, ) -> np.bool: ... @overload def is_busday( # type: ignore[misc] @@ -1219,7 +1219,7 @@ def is_busday( # type: ignore[misc] weekmask: ArrayLike = ..., holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., busdaycal: busdaycalendar | None = ..., - out: None = ..., + out: None = None, ) -> NDArray[np.bool]: ... @overload def is_busday( diff --git a/numpy/_pytesttester.pyi b/numpy/_pytesttester.pyi index a12abb1c1a10..bd71239314b4 100644 --- a/numpy/_pytesttester.pyi +++ b/numpy/_pytesttester.pyi @@ -8,11 +8,11 @@ class PytestTester: def __init__(self, module_name: str) -> None: ... def __call__( self, - label: L["fast", "full"] = ..., - verbose: int = ..., - extra_argv: Iterable[str] | None = ..., - doctests: L[False] = ..., - coverage: bool = ..., - durations: int = ..., - tests: Iterable[str] | None = ..., + label: L["fast", "full"] = "fast", + verbose: int = 1, + extra_argv: Iterable[str] | None = None, + doctests: L[False] = False, + coverage: bool = False, + durations: int = -1, + tests: Iterable[str] | None = None, ) -> bool: ... diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi index 3f58801004d1..bcb423e58110 100644 --- a/numpy/_typing/_ufunc.pyi +++ b/numpy/_typing/_ufunc.pyi @@ -346,7 +346,7 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i self, x1: _ScalarLike_co, out1: EllipsisType | None = ..., - out2: None = ..., + out2: None = None, /, *, out: EllipsisType | None = ..., @@ -421,7 +421,7 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i x1: _ScalarLike_co, x2: _ScalarLike_co, out1: EllipsisType | None = ..., - out2: None = ..., + out2: None = None, /, *, out: EllipsisType | None = ..., diff --git a/numpy/exceptions.pyi b/numpy/exceptions.pyi index 9bcc097dfc0f..4cc4eff5d321 100644 --- a/numpy/exceptions.pyi +++ b/numpy/exceptions.pyi @@ -22,6 +22,6 @@ class AxisError(ValueError, IndexError): axis: int | None ndim: int | None @overload - def __init__(self, axis: str, ndim: None = ..., msg_prefix: None = ...) -> None: ... + def __init__(self, axis: str, ndim: None = None, msg_prefix: None = None) -> None: ... @overload - def __init__(self, axis: int, ndim: int, msg_prefix: str | None = ...) -> None: ... + def __init__(self, axis: int, ndim: int, msg_prefix: str | None = None) -> None: ... 
diff --git a/numpy/fft/_pocketfft.pyi b/numpy/fft/_pocketfft.pyi index 81064bb174fe..3234c64ed169 100644 --- a/numpy/fft/_pocketfft.pyi +++ b/numpy/fft/_pocketfft.pyi @@ -107,7 +107,7 @@ def irfftn( def fft2( a: ArrayLike, s: Sequence[int] | None = None, - axes: Sequence[int] | None = ..., # = (-2, -1) + axes: Sequence[int] | None = (-2, -1), norm: _NormKind = None, out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... @@ -115,7 +115,7 @@ def fft2( def ifft2( a: ArrayLike, s: Sequence[int] | None = None, - axes: Sequence[int] | None = ..., # = (-2, -1) + axes: Sequence[int] | None = (-2, -1), norm: _NormKind = None, out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... @@ -123,7 +123,7 @@ def ifft2( def rfft2( a: ArrayLike, s: Sequence[int] | None = None, - axes: Sequence[int] | None = ..., # = (-2, -1) + axes: Sequence[int] | None = (-2, -1), norm: _NormKind = None, out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... @@ -131,7 +131,7 @@ def rfft2( def irfft2( a: ArrayLike, s: Sequence[int] | None = None, - axes: Sequence[int] | None = ..., # = (-2, -1) + axes: Sequence[int] | None = (-2, -1), norm: _NormKind = None, out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... diff --git a/numpy/lib/_datasource.pyi b/numpy/lib/_datasource.pyi index ad52b7f67af0..dba0434a5fab 100644 --- a/numpy/lib/_datasource.pyi +++ b/numpy/lib/_datasource.pyi @@ -8,7 +8,7 @@ _Mode: TypeAlias = OpenBinaryMode | OpenTextMode # exported in numpy.lib.nppyio class DataSource: - def __init__(self, /, destpath: Path | str | None = ...) -> None: ... + def __init__(self, /, destpath: Path | str | None = ".") -> None: ... def __del__(self, /) -> None: ... def abspath(self, /, path: str) -> str: ... def exists(self, /, path: str) -> bool: ... @@ -18,13 +18,13 @@ class DataSource: def open(self, /, path: str, mode: _Mode = "r", encoding: str | None = None, newline: str | None = None) -> IO[Any]: ... class Repository(DataSource): - def __init__(self, /, baseurl: str, destpath: str | None = ...) -> None: ... + def __init__(self, /, baseurl: str, destpath: str | None = ".") -> None: ... def listdir(self, /) -> list[str]: ... def open( path: str, mode: _Mode = "r", - destpath: str | None = ..., + destpath: str | None = ".", encoding: str | None = None, newline: str | None = None, ) -> IO[Any]: ... diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 8e58cf342ae0..9c2e2b1e28b0 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -123,13 +123,13 @@ class _TrimZerosSequence(Protocol[_T_co]): def rot90( m: _ArrayLike[_ScalarT], k: int = 1, - axes: tuple[int, int] = ..., + axes: tuple[int, int] = (0, 1), ) -> NDArray[_ScalarT]: ... @overload def rot90( m: ArrayLike, k: int = 1, - axes: tuple[int, int] = ..., + axes: tuple[int, int] = (0, 1), ) -> NDArray[Any]: ... @overload diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index b624cfff4481..3c3074b1c1a4 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -109,7 +109,7 @@ class nd_grid(Generic[_BoolT_co]): __slots__ = ("sparse",) sparse: _BoolT_co - def __init__(self, sparse: _BoolT_co = ...) -> None: ... + def __init__(self, sparse: _BoolT_co = ...) -> None: ... # stubdefaulter: ignore[missing-default] @overload def __getitem__(self: nd_grid[L[False]], key: slice | Sequence[slice]) -> NDArray[Incomplete]: ... 
@overload @@ -141,10 +141,10 @@ class AxisConcatenator(Generic[_AxisT_co, _MatrixT_co, _NDMinT_co, _Trans1DT_co] def __init__( self, /, - axis: _AxisT_co = ..., - matrix: _MatrixT_co = ..., - ndmin: _NDMinT_co = ..., - trans1d: _Trans1DT_co = ..., + axis: _AxisT_co = 0, + matrix: _MatrixT_co = False, + ndmin: _NDMinT_co = 1, + trans1d: _Trans1DT_co = -1, ) -> None: ... # TODO(jorenham): annotate this diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index cdef1003cff5..8a4dfa27ed9b 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -215,7 +215,7 @@ def genfromtxt( usecols: Sequence[int] | None = None, names: L[True] | str | Collection[str] | None = None, excludelist: Sequence[str] | None = None, - deletechars: str = ..., + deletechars: str = " !#$%&'()*+,-./:;<=>?@[\\]^{|}~", replace_space: str = "_", autostrip: bool = False, case_sensitive: bool | L["upper", "lower"] = True, @@ -244,7 +244,7 @@ def genfromtxt( usecols: Sequence[int] | None = None, names: L[True] | str | Collection[str] | None = None, excludelist: Sequence[str] | None = None, - deletechars: str = ..., + deletechars: str = " !#$%&'()*+,-./:;<=>?@[\\]^{|}~", replace_space: str = "_", autostrip: bool = False, case_sensitive: bool | L["upper", "lower"] = True, @@ -273,7 +273,7 @@ def genfromtxt( usecols: Sequence[int] | None = None, names: L[True] | str | Collection[str] | None = None, excludelist: Sequence[str] | None = None, - deletechars: str = ..., + deletechars: str = " !#$%&'()*+,-./:;<=>?@[\\]^{|}~", replace_space: str = "_", autostrip: bool = False, case_sensitive: bool | L["upper", "lower"] = True, diff --git a/numpy/lib/_stride_tricks_impl.pyi b/numpy/lib/_stride_tricks_impl.pyi index 51d377e06a18..94651dff36ed 100644 --- a/numpy/lib/_stride_tricks_impl.pyi +++ b/numpy/lib/_stride_tricks_impl.pyi @@ -14,7 +14,7 @@ class DummyArray: def __init__( self, interface: dict[str, Any], - base: NDArray[Any] | None = ..., + base: NDArray[Any] | None = None, ) -> None: ... @overload diff --git a/numpy/lib/_twodim_base_impl.pyi b/numpy/lib/_twodim_base_impl.pyi index fba688b26206..7396fa1b0370 100644 --- a/numpy/lib/_twodim_base_impl.pyi +++ b/numpy/lib/_twodim_base_impl.pyi @@ -86,7 +86,7 @@ def eye( N: int, M: int | None = None, k: int = 0, - dtype: None = ..., # = float + dtype: None = ..., # = float # stubdefaulter: ignore[missing-default] order: _OrderCF = "C", *, device: L["cpu"] | None = None, @@ -141,7 +141,7 @@ def tri( N: int, M: int | None = None, k: int = 0, - dtype: None = ..., # = float + dtype: None = ..., # = float # stubdefaulter: ignore[missing-default] *, like: _SupportsArrayFunc | None = None ) -> NDArray[float64]: ... diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 876384d6cacc..4b8ac3da0ee2 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -460,7 +460,7 @@ def cross( x2: _ArrayLikeUInt_co, /, *, - axis: int = ..., + axis: int = -1, ) -> NDArray[unsignedinteger]: ... @overload def cross( @@ -468,7 +468,7 @@ def cross( x2: _ArrayLikeInt_co, /, *, - axis: int = ..., + axis: int = -1, ) -> NDArray[signedinteger]: ... @overload def cross( @@ -476,7 +476,7 @@ def cross( x2: _ArrayLikeFloat_co, /, *, - axis: int = ..., + axis: int = -1, ) -> NDArray[floating]: ... @overload def cross( @@ -484,7 +484,7 @@ def cross( x2: _ArrayLikeComplex_co, /, *, - axis: int = ..., + axis: int = -1, ) -> NDArray[complexfloating]: ... 
@overload diff --git a/numpy/polynomial/_polybase.pyi b/numpy/polynomial/_polybase.pyi index e82e441e4c64..d922a08ffa9d 100644 --- a/numpy/polynomial/_polybase.pyi +++ b/numpy/polynomial/_polybase.pyi @@ -72,9 +72,9 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC): self, /, coef: _SeriesLikeCoef_co, - domain: _SeriesLikeCoef_co | None = ..., - window: _SeriesLikeCoef_co | None = ..., - symbol: str = ..., + domain: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", ) -> None: ... @overload @@ -183,7 +183,7 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC): self, /, m: SupportsIndex = 1, - k: _CoefLike_co | _SeriesLikeCoef_co = ..., # = [] + k: _CoefLike_co | _SeriesLikeCoef_co = [], lbnd: _CoefLike_co | None = None, ) -> Self: ... @@ -246,7 +246,7 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC): def fromroots( cls, roots: _ArrayLikeCoef_co, - domain: _SeriesLikeCoef_co | None = ..., # = [] + domain: _SeriesLikeCoef_co | None = [], window: _SeriesLikeCoef_co | None = None, symbol: str = "x", ) -> Self: ... diff --git a/numpy/polynomial/_polytypes.pyi b/numpy/polynomial/_polytypes.pyi index 241a65be2fa2..7b003c5742c6 100644 --- a/numpy/polynomial/_polytypes.pyi +++ b/numpy/polynomial/_polytypes.pyi @@ -729,7 +729,7 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): y: _ArrayLikeFloat_co, deg: int | _SeriesLikeInt_co, rcond: float | None = ..., - full: Literal[False] = ..., + full: Literal[False] = False, w: _SeriesLikeFloat_co | None = ..., ) -> _FloatArray: ... @overload @@ -764,7 +764,7 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): y: _ArrayLikeComplex_co, deg: int | _SeriesLikeInt_co, rcond: float | None = ..., - full: Literal[False] = ..., + full: Literal[False] = False, w: _SeriesLikeFloat_co | None = ..., ) -> _ComplexArray: ... @overload @@ -799,7 +799,7 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): y: _ArrayLikeCoef_co, deg: int | _SeriesLikeInt_co, rcond: float | None = ..., - full: Literal[False] = ..., + full: Literal[False] = False, w: _SeriesLikeFloat_co | None = ..., ) -> _ObjectArray: ... @overload diff --git a/numpy/polynomial/chebyshev.pyi b/numpy/polynomial/chebyshev.pyi index 30db7dfb9e6b..71f8021bccc3 100644 --- a/numpy/polynomial/chebyshev.pyi +++ b/numpy/polynomial/chebyshev.pyi @@ -128,13 +128,13 @@ _RT = TypeVar("_RT", bound=np.number | np.bool | np.object_) def chebinterpolate( func: np.ufunc, deg: _IntLike_co, - args: tuple[()] = ..., + args: tuple[()] = (), ) -> npt.NDArray[np.float64 | np.complex128 | np.object_]: ... @overload def chebinterpolate( func: Callable[[npt.NDArray[np.float64]], _RT], deg: _IntLike_co, - args: tuple[()] = ..., + args: tuple[()] = (), ) -> npt.NDArray[_RT]: ... @overload def chebinterpolate( @@ -151,7 +151,7 @@ class Chebyshev(ABCPolyBase[L["T"]]): func: Callable[[npt.NDArray[np.float64]], _CoefSeries], deg: _IntLike_co, domain: _SeriesLikeCoef_co | None = None, - args: tuple[()] = ..., + args: tuple[()] = (), ) -> Self: ... @overload @classmethod diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index b090663a104f..b81f5d322416 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -68,134 +68,134 @@ class Generator: @overload def standard_normal( # type: ignore[misc] self, - size: None = ..., + size: None = None, dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., - out: None = ..., + out: None = None, ) -> float: ... 
@overload def standard_normal( # type: ignore[misc] self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def standard_normal( # type: ignore[misc] self, *, - out: NDArray[float64] = ..., + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload def standard_normal( # type: ignore[misc] self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, dtype: _DTypeLikeFloat32 = ..., - out: NDArray[float32] | None = ..., + out: NDArray[float32] | None = None, ) -> NDArray[float32]: ... @overload def standard_normal( # type: ignore[misc] self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, dtype: _DTypeLikeFloat64 = ..., - out: NDArray[float64] | None = ..., + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload - def permutation(self, x: int, axis: int = ...) -> NDArray[int64]: ... + def permutation(self, x: int, axis: int = 0) -> NDArray[int64]: ... @overload - def permutation(self, x: ArrayLike, axis: int = ...) -> NDArray[Any]: ... + def permutation(self, x: ArrayLike, axis: int = 0) -> NDArray[Any]: ... @overload def standard_exponential( # type: ignore[misc] self, - size: None = ..., + size: None = None, dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., - method: Literal["zig", "inv"] = ..., - out: None = ..., + method: Literal["zig", "inv"] = "zig", + out: None = None, ) -> float: ... @overload def standard_exponential( self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def standard_exponential( self, *, - out: NDArray[float64] = ..., + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload def standard_exponential( self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, *, - method: Literal["zig", "inv"] = ..., - out: NDArray[float64] | None = ..., + method: Literal["zig", "inv"] = "zig", + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload def standard_exponential( self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, dtype: _DTypeLikeFloat32 = ..., - method: Literal["zig", "inv"] = ..., - out: NDArray[float32] | None = ..., + method: Literal["zig", "inv"] = "zig", + out: NDArray[float32] | None = None, ) -> NDArray[float32]: ... @overload def standard_exponential( self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, dtype: _DTypeLikeFloat64 = ..., - method: Literal["zig", "inv"] = ..., - out: NDArray[float64] | None = ..., + method: Literal["zig", "inv"] = "zig", + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload def random( # type: ignore[misc] self, - size: None = ..., + size: None = None, dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., - out: None = ..., + out: None = None, ) -> float: ... @overload def random( self, *, - out: NDArray[float64] = ..., + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload def random( self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, *, - out: NDArray[float64] | None = ..., + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload def random( self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, dtype: _DTypeLikeFloat32 = ..., - out: NDArray[float32] | None = ..., + out: NDArray[float32] | None = None, ) -> NDArray[float32]: ... 
@overload def random( self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, dtype: _DTypeLikeFloat64 = ..., - out: NDArray[float64] | None = ..., + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload def beta( self, a: _FloatLike_co, b: _FloatLike_co, - size: None = ..., + size: None = None, ) -> float: ... # type: ignore[misc] @overload def beta( self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def exponential(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc] + def exponential(self, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... # type: ignore[misc] @overload - def exponential(self, scale: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ...) -> NDArray[float64]: ... + def exponential(self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None) -> NDArray[float64]: ... # @overload @@ -481,135 +481,136 @@ class Generator: def choice( self, a: int, - size: None = ..., - replace: bool = ..., - p: _ArrayLikeFloat_co | None = ..., - axis: int = ..., - shuffle: bool = ..., + size: None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, ) -> int: ... @overload def choice( self, a: int, - size: _ShapeLike = ..., - replace: bool = ..., - p: _ArrayLikeFloat_co | None = ..., - axis: int = ..., - shuffle: bool = ..., + size: _ShapeLike | None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, ) -> NDArray[int64]: ... @overload def choice( self, a: ArrayLike, - size: None = ..., - replace: bool = ..., - p: _ArrayLikeFloat_co | None = ..., - axis: int = ..., - shuffle: bool = ..., + size: None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, ) -> Any: ... @overload def choice( self, a: ArrayLike, - size: _ShapeLike = ..., - replace: bool = ..., - p: _ArrayLikeFloat_co | None = ..., - axis: int = ..., - shuffle: bool = ..., + size: _ShapeLike | None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, ) -> NDArray[Any]: ... @overload def uniform( self, - low: _FloatLike_co = ..., - high: _FloatLike_co = ..., - size: None = ..., + low: _FloatLike_co = 0.0, + high: _FloatLike_co = 1.0, + size: None = None, ) -> float: ... # type: ignore[misc] @overload def uniform( self, - low: _ArrayLikeFloat_co = ..., - high: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + low: _ArrayLikeFloat_co = 0.0, + high: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def normal( self, - loc: _FloatLike_co = ..., - scale: _FloatLike_co = ..., - size: None = ..., + loc: _FloatLike_co = 0.0, + scale: _FloatLike_co = 1.0, + size: None = None, ) -> float: ... # type: ignore[misc] @overload def normal( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def standard_gamma( # type: ignore[misc] self, shape: _FloatLike_co, - size: None = ..., + size: None = None, dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., - out: None = ..., + out: None = None, ) -> float: ... 
@overload def standard_gamma( self, shape: _ArrayLikeFloat_co, - size: _ShapeLike | None = ..., + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def standard_gamma( self, shape: _ArrayLikeFloat_co, *, - out: NDArray[float64] = ..., + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload def standard_gamma( self, shape: _ArrayLikeFloat_co, - size: _ShapeLike | None = ..., + size: _ShapeLike | None = None, dtype: _DTypeLikeFloat32 = ..., - out: NDArray[float32] | None = ..., + out: NDArray[float32] | None = None, ) -> NDArray[float32]: ... @overload def standard_gamma( self, shape: _ArrayLikeFloat_co, - size: _ShapeLike | None = ..., + size: _ShapeLike | None = None, dtype: _DTypeLikeFloat64 = ..., - out: NDArray[float64] | None = ..., + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload def gamma( - self, shape: _FloatLike_co, scale: _FloatLike_co = ..., size: None = ... + self, shape: _FloatLike_co, scale: _FloatLike_co = 1.0, size: None = None ) -> float: ... # type: ignore[misc] @overload def gamma( self, shape: _ArrayLikeFloat_co, - scale: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def f( - self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = ... + self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = None ) -> float: ... # type: ignore[misc] @overload def f( self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload def noncentral_f( self, dfnum: _FloatLike_co, dfden: _FloatLike_co, - nonc: _FloatLike_co, size: None = ... + nonc: _FloatLike_co, + size: None = None, ) -> float: ... # type: ignore[misc] @overload def noncentral_f( @@ -617,140 +618,140 @@ class Generator: dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, - size: _ShapeLike | None = ..., + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def chisquare(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + def chisquare(self, df: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] @overload def chisquare( - self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload def noncentral_chisquare( - self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = ... + self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = None ) -> float: ... # type: ignore[misc] @overload def noncentral_chisquare( self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def standard_t(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + def standard_t(self, df: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] @overload def standard_t( - self, df: _ArrayLikeFloat_co, size: None = ... + self, df: _ArrayLikeFloat_co, size: None = None ) -> NDArray[float64]: ... @overload def standard_t( - self, df: _ArrayLikeFloat_co, size: _ShapeLike = ... + self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload def vonmises( - self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = ... + self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = None ) -> float: ... 
# type: ignore[misc] @overload def vonmises( self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def pareto(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + def pareto(self, a: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] @overload def pareto( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def weibull(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + def weibull(self, a: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] @overload def weibull( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def power(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + def power(self, a: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] @overload def power( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc] + def standard_cauchy(self, size: None = None) -> float: ... # type: ignore[misc] @overload - def standard_cauchy(self, size: _ShapeLike = ...) -> NDArray[float64]: ... + def standard_cauchy(self, size: _ShapeLike | None = None) -> NDArray[float64]: ... @overload def laplace( self, - loc: _FloatLike_co = ..., - scale: _FloatLike_co = ..., - size: None = ..., + loc: _FloatLike_co = 0.0, + scale: _FloatLike_co = 1.0, + size: None = None, ) -> float: ... # type: ignore[misc] @overload def laplace( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def gumbel( self, - loc: _FloatLike_co = ..., - scale: _FloatLike_co = ..., - size: None = ..., + loc: _FloatLike_co = 0.0, + scale: _FloatLike_co = 1.0, + size: None = None, ) -> float: ... # type: ignore[misc] @overload def gumbel( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def logistic( self, - loc: _FloatLike_co = ..., - scale: _FloatLike_co = ..., - size: None = ..., + loc: _FloatLike_co = 0.0, + scale: _FloatLike_co = 1.0, + size: None = None, ) -> float: ... # type: ignore[misc] @overload def logistic( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def lognormal( self, - mean: _FloatLike_co = ..., - sigma: _FloatLike_co = ..., - size: None = ..., + mean: _FloatLike_co = 0.0, + sigma: _FloatLike_co = 1.0, + size: None = None, ) -> float: ... 
# type: ignore[misc] @overload def lognormal( self, - mean: _ArrayLikeFloat_co = ..., - sigma: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + mean: _ArrayLikeFloat_co = 0.0, + sigma: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def rayleigh(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc] + def rayleigh(self, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... # type: ignore[misc] @overload def rayleigh( - self, scale: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ... + self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload def wald( - self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = ... + self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = None ) -> float: ... # type: ignore[misc] @overload def wald( self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload def triangular( @@ -758,7 +759,7 @@ class Generator: left: _FloatLike_co, mode: _FloatLike_co, right: _FloatLike_co, - size: None = ..., + size: None = None, ) -> float: ... # type: ignore[misc] @overload def triangular( @@ -766,46 +767,46 @@ class Generator: left: _ArrayLikeFloat_co, mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, - size: _ShapeLike | None = ..., + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def binomial(self, n: int, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + def binomial(self, n: int, p: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc] @overload def binomial( - self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[int64]: ... @overload def negative_binomial( - self, n: _FloatLike_co, p: _FloatLike_co, size: None = ... + self, n: _FloatLike_co, p: _FloatLike_co, size: None = None ) -> int: ... # type: ignore[misc] @overload def negative_binomial( self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[int64]: ... @overload - def poisson(self, lam: _FloatLike_co = ..., size: None = ...) -> int: ... # type: ignore[misc] + def poisson(self, lam: _FloatLike_co = 1.0, size: None = None) -> int: ... # type: ignore[misc] @overload def poisson( - self, lam: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ... + self, lam: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None ) -> NDArray[int64]: ... @overload - def zipf(self, a: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + def zipf(self, a: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc] @overload def zipf( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[int64]: ... @overload - def geometric(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + def geometric(self, p: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc] @overload def geometric( - self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[int64]: ... @overload def hypergeometric( - self, ngood: int, nbad: int, nsample: int, size: None = ... 
+ self, ngood: int, nbad: int, nsample: int, size: None = None ) -> int: ... # type: ignore[misc] @overload def hypergeometric( @@ -813,44 +814,44 @@ class Generator: ngood: _ArrayLikeInt_co, nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, - size: _ShapeLike | None = ..., + size: _ShapeLike | None = None, ) -> NDArray[int64]: ... @overload - def logseries(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + def logseries(self, p: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc] @overload def logseries( - self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[int64]: ... def multivariate_normal( self, mean: _ArrayLikeFloat_co, cov: _ArrayLikeFloat_co, - size: _ShapeLike | None = ..., - check_valid: Literal["warn", "raise", "ignore"] = ..., - tol: float = ..., + size: _ShapeLike | None = None, + check_valid: Literal["warn", "raise", "ignore"] = "warn", + tol: float = 1e-8, *, - method: Literal["svd", "eigh", "cholesky"] = ..., + method: Literal["svd", "eigh", "cholesky"] = "svd", ) -> NDArray[float64]: ... def multinomial( self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[int64]: ... def multivariate_hypergeometric( self, colors: _ArrayLikeInt_co, nsample: int, - size: _ShapeLike | None = ..., - method: Literal["marginals", "count"] = ..., + size: _ShapeLike | None = None, + method: Literal["marginals", "count"] = "marginals", ) -> NDArray[int64]: ... def dirichlet( - self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... def permuted( - self, x: ArrayLike, *, axis: int | None = ..., out: NDArray[Any] | None = ... + self, x: ArrayLike, *, axis: int | None = None, out: NDArray[Any] | None = None ) -> NDArray[Any]: ... - def shuffle(self, x: ArrayLike, axis: int = ...) -> None: ... + def shuffle(self, x: ArrayLike, axis: int = 0) -> None: ... def default_rng( - seed: _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator | RandomState | None = ... + seed: _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator | RandomState | None = None ) -> Generator: ... diff --git a/numpy/random/_mt19937.pyi b/numpy/random/_mt19937.pyi index 70b2506da7af..99e10677c3a2 100644 --- a/numpy/random/_mt19937.pyi +++ b/numpy/random/_mt19937.pyi @@ -18,7 +18,7 @@ class _MT19937State(TypedDict): class MT19937(BitGenerator): def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... def _legacy_seeding(self, seed: _ArrayLikeInt_co) -> None: ... - def jumped(self, jumps: int = ...) -> MT19937: ... + def jumped(self, jumps: int = 1) -> MT19937: ... @property def state(self) -> _MT19937State: ... @state.setter diff --git a/numpy/random/_pcg64.pyi b/numpy/random/_pcg64.pyi index 5dc7bb66321b..0326781fd43a 100644 --- a/numpy/random/_pcg64.pyi +++ b/numpy/random/_pcg64.pyi @@ -17,7 +17,7 @@ class _PCG64State(TypedDict): class PCG64(BitGenerator): def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... - def jumped(self, jumps: int = ...) -> PCG64: ... + def jumped(self, jumps: int = 1) -> PCG64: ... @property def state( self, @@ -31,7 +31,7 @@ class PCG64(BitGenerator): class PCG64DXSM(BitGenerator): def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... - def jumped(self, jumps: int = ...) -> PCG64DXSM: ... 
+ def jumped(self, jumps: int = 1) -> PCG64DXSM: ... @property def state( self, diff --git a/numpy/random/_philox.pyi b/numpy/random/_philox.pyi index d8895bba67cf..353a0adb4861 100644 --- a/numpy/random/_philox.pyi +++ b/numpy/random/_philox.pyi @@ -35,5 +35,5 @@ class Philox(BitGenerator): self, value: _PhiloxState, ) -> None: ... - def jumped(self, jumps: int = ...) -> Philox: ... + def jumped(self, jumps: int = 1) -> Philox: ... def advance(self, delta: int) -> Philox: ... diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index 54bb1462fb5f..3221048a8af7 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -50,45 +50,45 @@ class RandomState: def __getstate__(self) -> dict[str, Any]: ... def __setstate__(self, state: dict[str, Any]) -> None: ... def __reduce__(self) -> tuple[Callable[[BitGenerator], RandomState], tuple[BitGenerator], dict[str, Any]]: ... # noqa: E501 - def seed(self, seed: _ArrayLikeFloat_co | None = ...) -> None: ... + def seed(self, seed: _ArrayLikeFloat_co | None = None) -> None: ... @overload - def get_state(self, legacy: Literal[False] = ...) -> dict[str, Any]: ... + def get_state(self, legacy: Literal[False] = False) -> dict[str, Any]: ... @overload def get_state( - self, legacy: Literal[True] = ... + self, legacy: Literal[True] = True ) -> dict[str, Any] | tuple[str, NDArray[uint32], int, int, float]: ... def set_state( self, state: dict[str, Any] | tuple[str, NDArray[uint32], int, int, float] ) -> None: ... @overload - def random_sample(self, size: None = ...) -> float: ... # type: ignore[misc] + def random_sample(self, size: None = None) -> float: ... # type: ignore[misc] @overload def random_sample(self, size: _ShapeLike) -> NDArray[float64]: ... @overload - def random(self, size: None = ...) -> float: ... # type: ignore[misc] + def random(self, size: None = None) -> float: ... # type: ignore[misc] @overload def random(self, size: _ShapeLike) -> NDArray[float64]: ... @overload - def beta(self, a: float, b: float, size: None = ...) -> float: ... # type: ignore[misc] + def beta(self, a: float, b: float, size: None = None) -> float: ... # type: ignore[misc] @overload def beta( self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def exponential(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + def exponential(self, scale: float = 1.0, size: None = None) -> float: ... # type: ignore[misc] @overload def exponential( - self, scale: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ... + self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def standard_exponential(self, size: None = ...) -> float: ... # type: ignore[misc] + def standard_exponential(self, size: None = None) -> float: ... # type: ignore[misc] @overload def standard_exponential(self, size: _ShapeLike) -> NDArray[float64]: ... @overload - def tomaxint(self, size: None = ...) -> int: ... # type: ignore[misc] + def tomaxint(self, size: None = None) -> int: ... # type: ignore[misc] @overload # Generates long values, but stores it in a 64bit int: def tomaxint(self, size: _ShapeLike) -> NDArray[int64]: ... @@ -96,222 +96,222 @@ class RandomState: def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, ) -> int: ... 
@overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: type[bool] = ..., ) -> bool: ... @overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: type[np.bool] = ..., ) -> np.bool: ... @overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: type[int] = ..., ) -> int: ... @overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., # noqa: E501 ) -> uint8: ... @overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., # noqa: E501 ) -> uint16: ... @overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., # noqa: E501 ) -> uint32: ... @overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., # noqa: E501 ) -> uint: ... @overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., # noqa: E501 ) -> ulong: ... @overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., # noqa: E501 ) -> uint64: ... @overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., # noqa: E501 ) -> int8: ... @overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., # noqa: E501 ) -> int16: ... @overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., # noqa: E501 ) -> int32: ... @overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[int_] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., # noqa: E501 ) -> int_: ... @overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[long] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., # noqa: E501 ) -> long: ... 
@overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., # noqa: E501 ) -> int64: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, ) -> NDArray[long]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: _DTypeLikeBool = ..., ) -> NDArray[np.bool]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., # noqa: E501 ) -> NDArray[int8]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., # noqa: E501 ) -> NDArray[int16]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., # noqa: E501 ) -> NDArray[int32]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] | None = ..., # noqa: E501 ) -> NDArray[int64]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., # noqa: E501 ) -> NDArray[uint8]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., # noqa: E501 ) -> NDArray[uint16]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., # noqa: E501 ) -> NDArray[uint32]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., # noqa: E501 ) -> NDArray[uint64]: ... 
@overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[long] | type[int] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., # noqa: E501 ) -> NDArray[long]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., # noqa: E501 ) -> NDArray[ulong]: ... def bytes(self, length: int) -> builtins.bytes: ... @@ -319,44 +319,44 @@ class RandomState: def choice( self, a: int, - size: None = ..., - replace: bool = ..., - p: _ArrayLikeFloat_co | None = ..., + size: None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, ) -> int: ... @overload def choice( self, a: int, - size: _ShapeLike = ..., - replace: bool = ..., - p: _ArrayLikeFloat_co | None = ..., + size: _ShapeLike | None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, ) -> NDArray[long]: ... @overload def choice( self, a: ArrayLike, - size: None = ..., - replace: bool = ..., - p: _ArrayLikeFloat_co | None = ..., + size: None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, ) -> Any: ... @overload def choice( self, a: ArrayLike, - size: _ShapeLike = ..., - replace: bool = ..., - p: _ArrayLikeFloat_co | None = ..., + size: _ShapeLike | None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, ) -> NDArray[Any]: ... @overload def uniform( - self, low: float = ..., high: float = ..., size: None = ... + self, low: float = 0.0, high: float = 1.0, size: None = None ) -> float: ... # type: ignore[misc] @overload def uniform( self, - low: _ArrayLikeFloat_co = ..., - high: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + low: _ArrayLikeFloat_co = 0.0, + high: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def rand(self) -> float: ... @@ -368,65 +368,65 @@ class RandomState: def randn(self, *args: int) -> NDArray[float64]: ... @overload def random_integers( - self, low: int, high: int | None = ..., size: None = ... + self, low: int, high: int | None = None, size: None = None ) -> int: ... # type: ignore[misc] @overload def random_integers( self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, ) -> NDArray[long]: ... @overload - def standard_normal(self, size: None = ...) -> float: ... # type: ignore[misc] + def standard_normal(self, size: None = None) -> float: ... # type: ignore[misc] @overload def standard_normal( # type: ignore[misc] - self, size: _ShapeLike = ... + self, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload def normal( - self, loc: float = ..., scale: float = ..., size: None = ... + self, loc: float = 0.0, scale: float = 1.0, size: None = None ) -> float: ... # type: ignore[misc] @overload def normal( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... 
@overload def standard_gamma( # type: ignore[misc] self, shape: float, - size: None = ..., + size: None = None, ) -> float: ... @overload def standard_gamma( self, shape: _ArrayLikeFloat_co, - size: _ShapeLike | None = ..., + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + def gamma(self, shape: float, scale: float = 1.0, size: None = None) -> float: ... # type: ignore[misc] @overload def gamma( self, shape: _ArrayLikeFloat_co, - scale: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def f(self, dfnum: float, dfden: float, size: None = ...) -> float: ... # type: ignore[misc] + def f(self, dfnum: float, dfden: float, size: None = None) -> float: ... # type: ignore[misc] @overload def f( self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload def noncentral_f( - self, dfnum: float, dfden: float, nonc: float, size: None = ... + self, dfnum: float, dfden: float, nonc: float, size: None = None ) -> float: ... # type: ignore[misc] @overload def noncentral_f( @@ -434,128 +434,128 @@ class RandomState: dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, - size: _ShapeLike | None = ..., + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def chisquare(self, df: float, size: None = ...) -> float: ... # type: ignore[misc] + def chisquare(self, df: float, size: None = None) -> float: ... # type: ignore[misc] @overload def chisquare( - self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload def noncentral_chisquare( - self, df: float, nonc: float, size: None = ... + self, df: float, nonc: float, size: None = None ) -> float: ... # type: ignore[misc] @overload def noncentral_chisquare( self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def standard_t(self, df: float, size: None = ...) -> float: ... # type: ignore[misc] + def standard_t(self, df: float, size: None = None) -> float: ... # type: ignore[misc] @overload def standard_t( - self, df: _ArrayLikeFloat_co, size: None = ... + self, df: _ArrayLikeFloat_co, size: None = None ) -> NDArray[float64]: ... @overload def standard_t( - self, df: _ArrayLikeFloat_co, size: _ShapeLike = ... + self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def vonmises(self, mu: float, kappa: float, size: None = ...) -> float: ... # type: ignore[misc] + def vonmises(self, mu: float, kappa: float, size: None = None) -> float: ... # type: ignore[misc] @overload def vonmises( self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def pareto(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + def pareto(self, a: float, size: None = None) -> float: ... # type: ignore[misc] @overload def pareto( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def weibull(self, a: float, size: None = ...) 
-> float: ... # type: ignore[misc] + def weibull(self, a: float, size: None = None) -> float: ... # type: ignore[misc] @overload def weibull( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def power(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + def power(self, a: float, size: None = None) -> float: ... # type: ignore[misc] @overload def power( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc] + def standard_cauchy(self, size: None = None) -> float: ... # type: ignore[misc] @overload - def standard_cauchy(self, size: _ShapeLike = ...) -> NDArray[float64]: ... + def standard_cauchy(self, size: _ShapeLike | None = None) -> NDArray[float64]: ... @overload def laplace( - self, loc: float = ..., scale: float = ..., size: None = ... + self, loc: float = 0.0, scale: float = 1.0, size: None = None ) -> float: ... # type: ignore[misc] @overload def laplace( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def gumbel( - self, loc: float = ..., scale: float = ..., size: None = ... + self, loc: float = 0.0, scale: float = 1.0, size: None = None ) -> float: ... # type: ignore[misc] @overload def gumbel( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def logistic( - self, loc: float = ..., scale: float = ..., size: None = ... + self, loc: float = 0.0, scale: float = 1.0, size: None = None ) -> float: ... # type: ignore[misc] @overload def logistic( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def lognormal( - self, mean: float = ..., sigma: float = ..., size: None = ... + self, mean: float = 0.0, sigma: float = 1.0, size: None = None ) -> float: ... # type: ignore[misc] @overload def lognormal( self, - mean: _ArrayLikeFloat_co = ..., - sigma: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + mean: _ArrayLikeFloat_co = 0.0, + sigma: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def rayleigh(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + def rayleigh(self, scale: float = 1.0, size: None = None) -> float: ... # type: ignore[misc] @overload def rayleigh( - self, scale: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ... + self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def wald(self, mean: float, scale: float, size: None = ...) -> float: ... # type: ignore[misc] + def wald(self, mean: float, scale: float, size: None = None) -> float: ... # type: ignore[misc] @overload def wald( self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[float64]: ... 
@overload def triangular( - self, left: float, mode: float, right: float, size: None = ... + self, left: float, mode: float, right: float, size: None = None ) -> float: ... # type: ignore[misc] @overload def triangular( @@ -563,50 +563,50 @@ class RandomState: left: _ArrayLikeFloat_co, mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, - size: _ShapeLike | None = ..., + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def binomial( - self, n: int, p: float, size: None = ... + self, n: int, p: float, size: None = None ) -> int: ... # type: ignore[misc] @overload def binomial( - self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[long]: ... @overload def negative_binomial( - self, n: float, p: float, size: None = ... + self, n: float, p: float, size: None = None ) -> int: ... # type: ignore[misc] @overload def negative_binomial( self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[long]: ... @overload def poisson( - self, lam: float = ..., size: None = ... + self, lam: float = 1.0, size: None = None ) -> int: ... # type: ignore[misc] @overload def poisson( - self, lam: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ... + self, lam: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None ) -> NDArray[long]: ... @overload - def zipf(self, a: float, size: None = ...) -> int: ... # type: ignore[misc] + def zipf(self, a: float, size: None = None) -> int: ... # type: ignore[misc] @overload def zipf( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[long]: ... @overload - def geometric(self, p: float, size: None = ...) -> int: ... # type: ignore[misc] + def geometric(self, p: float, size: None = None) -> int: ... # type: ignore[misc] @overload def geometric( - self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[long]: ... @overload def hypergeometric( - self, ngood: int, nbad: int, nsample: int, size: None = ... + self, ngood: int, nbad: int, nsample: int, size: None = None ) -> int: ... # type: ignore[misc] @overload def hypergeometric( @@ -614,29 +614,29 @@ class RandomState: ngood: _ArrayLikeInt_co, nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, - size: _ShapeLike | None = ..., + size: _ShapeLike | None = None, ) -> NDArray[long]: ... @overload - def logseries(self, p: float, size: None = ...) -> int: ... # type: ignore[misc] + def logseries(self, p: float, size: None = None) -> int: ... # type: ignore[misc] @overload def logseries( - self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[long]: ... def multivariate_normal( self, mean: _ArrayLikeFloat_co, cov: _ArrayLikeFloat_co, - size: _ShapeLike | None = ..., - check_valid: Literal["warn", "raise", "ignore"] = ..., - tol: float = ..., + size: _ShapeLike | None = None, + check_valid: Literal["warn", "raise", "ignore"] = "warn", + tol: float = 1e-8, ) -> NDArray[float64]: ... def multinomial( self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[long]: ... def dirichlet( - self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = ... 
+ self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... def shuffle(self, x: ArrayLike) -> None: ... @overload diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index 31c7fab7bdf8..69572aaa4c84 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -162,14 +162,14 @@ class suppress_warnings: # Contrary to runtime we can't do `os.name` checks while type checking, # only `sys.platform` checks if sys.platform == "win32" or sys.platform == "cygwin": - def memusage(processName: str = ..., instance: int = ...) -> int: ... + def memusage(processName: str = "python", instance: int = 0) -> int: ... elif sys.platform == "linux": - def memusage(_proc_pid_stat: StrOrBytesPath = ...) -> int | None: ... + def memusage(_proc_pid_stat: StrOrBytesPath | None = None) -> int | None: ... else: def memusage() -> NoReturn: ... if sys.platform == "linux": - def jiffies(_proc_pid_stat: StrOrBytesPath = ..., _load_time: list[float] = []) -> int: ... + def jiffies(_proc_pid_stat: StrOrBytesPath | None = None, _load_time: list[float] = []) -> int: ... else: def jiffies(_load_time: list[float] = []) -> int: ... @@ -179,7 +179,7 @@ def build_err_msg( err_msg: object, header: str = "Items are not equal:", verbose: bool = True, - names: Sequence[str] = ..., # = ('ACTUAL', 'DESIRED') + names: Sequence[str] = ("ACTUAL", "DESIRED"), # = ('ACTUAL', 'DESIRED') precision: SupportsIndex | None = 8, ) -> str: ... From 4206c664671bcaef276ba8dfc86a0df34d29271d Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 9 Oct 2025 19:54:48 +0200 Subject: [PATCH 0549/1018] MAINT: bump ``ruff`` from ``0.12.0`` to ``0.14.0`` (#29904) * MAINT: bump ``ruff`` to ``0.14.0`` * STY: fix new ``ruff`` errors * MAINT: ignored ruff rule ``PD901`` has been removed * TST: expect more specific f2py exceptions, resolving ``B017`` --- environment.yml | 2 +- numpy/f2py/tests/test_return_integer.py | 4 ++-- numpy/f2py/tests/test_return_real.py | 4 ++-- numpy/tests/test_configtool.py | 1 - requirements/linter_requirements.txt | 2 +- ruff.toml | 2 -- 6 files changed, 6 insertions(+), 9 deletions(-) diff --git a/environment.yml b/environment.yml index 3bf6fcbc4319..c5ee0c381bb3 100644 --- a/environment.yml +++ b/environment.yml @@ -46,7 +46,7 @@ dependencies: - breathe>4.33.0 # For linting - cython-lint - - ruff=0.12.0 + - ruff=0.14.0 - gitpython # Used in some tests - cffi diff --git a/numpy/f2py/tests/test_return_integer.py b/numpy/f2py/tests/test_return_integer.py index 13a9f862f311..50309d5dadaf 100644 --- a/numpy/f2py/tests/test_return_integer.py +++ b/numpy/f2py/tests/test_return_integer.py @@ -29,8 +29,8 @@ def check_function(self, t, tname): pytest.raises(IndexError, t, []) pytest.raises(IndexError, t, ()) - pytest.raises(Exception, t, t) - pytest.raises(Exception, t, {}) + pytest.raises(TypeError, t, t) + pytest.raises(TypeError, t, {}) if tname in ["t8", "s8"]: pytest.raises(OverflowError, t, 100000000000000000000000) diff --git a/numpy/f2py/tests/test_return_real.py b/numpy/f2py/tests/test_return_real.py index c871ed3d4fc2..4339657aa013 100644 --- a/numpy/f2py/tests/test_return_real.py +++ b/numpy/f2py/tests/test_return_real.py @@ -39,8 +39,8 @@ def check_function(self, t, tname): pytest.raises(IndexError, t, []) pytest.raises(IndexError, t, ()) - pytest.raises(Exception, t, t) - pytest.raises(Exception, t, {}) + pytest.raises(TypeError, t, t) + pytest.raises(TypeError, t, {}) try: r = t(10**400) diff --git a/numpy/tests/test_configtool.py 
b/numpy/tests/test_configtool.py index c4e9a9551c0c..917bbf55712f 100644 --- a/numpy/tests/test_configtool.py +++ b/numpy/tests/test_configtool.py @@ -1,4 +1,3 @@ -import importlib import importlib.metadata import os import pathlib diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index a28b989fbb03..da6bac6f7b84 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,4 +1,4 @@ # keep in sync with `environment.yml` cython-lint -ruff==0.12.0 +ruff==0.14.0 GitPython>=3.1.30 diff --git a/ruff.toml b/ruff.toml index e0f0dd98872e..8b4d8358ba4a 100644 --- a/ruff.toml +++ b/ruff.toml @@ -54,8 +54,6 @@ ignore = [ "ISC002", # Implicitly concatenated string literals over multiple lines # flake8-pie "PIE790", # Unnecessary `pass` statement - # pandas-vet - "PD901", # Avoid using the generic variable name `df` for DataFrames # perflint "PERF401", # Use a list comprehension to create a transformed list # pycodestyle/error From a6f6f057c0f962b2d6b20de248a36aec6e97b6f7 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 9 Oct 2025 19:57:52 +0200 Subject: [PATCH 0550/1018] TYP: fix incorrect ``ma.sort`` arg default for ``stable`` (#29903) --- numpy/ma/core.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 9adab779776a..c9c4ab152030 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -2381,7 +2381,7 @@ def sort( endwith: bool | None = True, fill_value: _ScalarLike_co | None = None, *, - stable: Literal[False] | None = False, + stable: Literal[False] | None = None, ) -> _ArrayT: ... @overload def sort( @@ -2392,7 +2392,7 @@ def sort( endwith: bool | None = True, fill_value: _ScalarLike_co | None = None, *, - stable: Literal[False] | None = False, + stable: Literal[False] | None = None, ) -> NDArray[Any]: ... @overload def compressed(x: _ArrayLike[_ScalarT_co]) -> _Array1D[_ScalarT_co]: ... From 30e4cfcbfb1acb05b963ce6234b0c84ec9200115 Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Thu, 9 Oct 2025 11:19:28 -0700 Subject: [PATCH 0551/1018] DOC: Correct typos [skip actions][skip azp][skip cirrus] --- doc/source/dev/development_advanced_debugging.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/dev/development_advanced_debugging.rst b/doc/source/dev/development_advanced_debugging.rst index 07c80314da1b..125f469f88de 100644 --- a/doc/source/dev/development_advanced_debugging.rst +++ b/doc/source/dev/development_advanced_debugging.rst @@ -170,7 +170,7 @@ Valgrind helps: Python allocators.) Even though using valgrind for memory leak detection is slow and less sensitive -it can be a convenient: you can run most programs with valgrind without +it can be convenient: you can run most programs with valgrind without modification. Things to be aware of: @@ -233,7 +233,7 @@ The NumPy developers often use both ``gdb`` and ``lldb`` to debug Numpy. As a rule of thumb, ``gdb`` is often easier to use on Linux while ``lldb`` is easier to use on a Mac environment. They have disjoint user interfaces, so you will need to learn how to use whichever one you land on. The ``gdb`` to ``lldb`` `command map -`_ is a convnient reference for how to +`_ is a convenient reference for how to accomplish common recipes in both debuggers. From 98ae47d77ed2542cdf5a8419a26cd69f9992b4c3 Mon Sep 17 00:00:00 2001 From: "Christine P. 
Chai" Date: Thu, 9 Oct 2025 11:27:02 -0700
Subject: [PATCH 0552/1018] DOC: Add URL to valgrind tool [skip actions][skip azp][skip cirrus]

---
 doc/source/dev/development_advanced_debugging.rst | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/doc/source/dev/development_advanced_debugging.rst b/doc/source/dev/development_advanced_debugging.rst
index 125f469f88de..9696639c1835 100644
--- a/doc/source/dev/development_advanced_debugging.rst
+++ b/doc/source/dev/development_advanced_debugging.rst
@@ -36,7 +36,8 @@ However, you can ensure that we can track down such issues more easily:
   consider creating an additional simpler test as well.
   This can be helpful, because often it is only easy to find which test
   triggers an issue and not which line of the test.
-* Never use ``np.empty`` if data is read/used. ``valgrind`` will notice this
+* Never use ``np.empty`` if data is read/used.
+  `Valgrind `_ will notice this
   and report an error. When you do not care about values, you can generate
   random values instead.

@@ -131,7 +132,8 @@ to mark them, but expect some false positives.
 ``valgrind``
 ============

-Valgrind is a powerful tool to find certain memory access problems and should
+`Valgrind `_ is a powerful tool
+to find certain memory access problems and should
 be run on complicated C code. Basic use of ``valgrind`` usually requires no
 more than::

From f0649aa6873d7136ed2989e0a45591b9f91c441c Mon Sep 17 00:00:00 2001
From: ANIK CHAND <161185149+anikchand461@users.noreply.github.com>
Date: Fri, 10 Oct 2025 02:35:19 +0530
Subject: [PATCH 0553/1018] DEP: remove helper.pyi (finalize deprecation)

---
 numpy/fft/helper.pyi | 19 -------------------
 1 file changed, 19 deletions(-)

diff --git a/numpy/fft/helper.pyi b/numpy/fft/helper.pyi
index 5147652172a6..8b137891791f 100644
--- a/numpy/fft/helper.pyi
+++ b/numpy/fft/helper.pyi
@@ -1,20 +1 @@
-from typing import Any, Literal as L
-from typing_extensions import deprecated
-import numpy as np
-from numpy._typing import ArrayLike, NDArray, _ShapeLike
-
-from ._helper import integer_types as integer_types
-
-__all__ = ["fftfreq", "fftshift", "ifftshift", "rfftfreq"]
-
-###
-
-@deprecated("Please use `numpy.fft.fftshift` instead.")
-def fftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ...
-@deprecated("Please use `numpy.fft.ifftshift` instead.")
-def ifftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ...
-@deprecated("Please use `numpy.fft.fftfreq` instead.")
-def fftfreq(n: int | np.integer, d: ArrayLike = 1.0, device: L["cpu"] | None = None) -> NDArray[Any]: ...
-@deprecated("Please use `numpy.fft.rfftfreq` instead.")
-def rfftfreq(n: int | np.integer, d: ArrayLike = 1.0, device: L["cpu"] | None = None) -> NDArray[Any]: ...
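The removed ``numpy.fft.helper`` stubs above only re-exported public ``numpy.fft``
functions (their own deprecation messages say as much), so migrating is a
one-for-one import swap. A minimal sketch:

    import numpy as np

    # Supported spellings, per the deprecation messages in the removed stubs.
    freqs = np.fft.fftfreq(8, d=0.1)      # replaces numpy.fft.helper.fftfreq
    shifted = np.fft.fftshift(freqs)      # replaces numpy.fft.helper.fftshift
    restored = np.fft.ifftshift(shifted)  # replaces numpy.fft.helper.ifftshift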
From f618698988260e4839d47433694f59f093a17838 Mon Sep 17 00:00:00 2001 From: ANIK CHAND <161185149+anikchand461@users.noreply.github.com> Date: Fri, 10 Oct 2025 02:36:09 +0530 Subject: [PATCH 0554/1018] DEP: remove linalg.pyi (finalize deprecation) --- numpy/linalg/linalg.pyi | 68 ----------------------------------------- 1 file changed, 68 deletions(-) diff --git a/numpy/linalg/linalg.pyi b/numpy/linalg/linalg.pyi index dbe9becfb8d5..8b137891791f 100644 --- a/numpy/linalg/linalg.pyi +++ b/numpy/linalg/linalg.pyi @@ -1,69 +1 @@ -from ._linalg import ( - LinAlgError, - cholesky, - cond, - cross, - det, - diagonal, - eig, - eigh, - eigvals, - eigvalsh, - inv, - lstsq, - matmul, - matrix_norm, - matrix_power, - matrix_rank, - matrix_transpose, - multi_dot, - norm, - outer, - pinv, - qr, - slogdet, - solve, - svd, - svdvals, - tensordot, - tensorinv, - tensorsolve, - trace, - vecdot, - vector_norm, -) -__all__ = [ - "LinAlgError", - "cholesky", - "cond", - "cross", - "det", - "diagonal", - "eig", - "eigh", - "eigvals", - "eigvalsh", - "inv", - "lstsq", - "matmul", - "matrix_norm", - "matrix_power", - "matrix_rank", - "matrix_transpose", - "multi_dot", - "norm", - "outer", - "pinv", - "qr", - "slogdet", - "solve", - "svd", - "svdvals", - "tensordot", - "tensorinv", - "tensorsolve", - "trace", - "vecdot", - "vector_norm", -] From f7a1b8fb404cdaebe63814e8f5eeeef873b7065d Mon Sep 17 00:00:00 2001 From: ANIK CHAND <161185149+anikchand461@users.noreply.github.com> Date: Fri, 10 Oct 2025 03:31:17 +0530 Subject: [PATCH 0555/1018] __init__.pyi removed --- numpy/linalg/__init__.pyi | 70 --------------------------------------- 1 file changed, 70 deletions(-) diff --git a/numpy/linalg/__init__.pyi b/numpy/linalg/__init__.pyi index 53c115f7bd65..8b137891791f 100644 --- a/numpy/linalg/__init__.pyi +++ b/numpy/linalg/__init__.pyi @@ -1,71 +1 @@ -from . import _linalg as _linalg, _umath_linalg as _umath_linalg, linalg as linalg -from ._linalg import ( - cholesky, - cond, - cross, - det, - diagonal, - eig, - eigh, - eigvals, - eigvalsh, - inv, - lstsq, - matmul, - matrix_norm, - matrix_power, - matrix_rank, - matrix_transpose, - multi_dot, - norm, - outer, - pinv, - qr, - slogdet, - solve, - svd, - svdvals, - tensordot, - tensorinv, - tensorsolve, - trace, - vecdot, - vector_norm, -) -__all__ = [ - "LinAlgError", - "cholesky", - "cond", - "cross", - "det", - "diagonal", - "eig", - "eigh", - "eigvals", - "eigvalsh", - "inv", - "lstsq", - "matmul", - "matrix_norm", - "matrix_power", - "matrix_rank", - "matrix_transpose", - "multi_dot", - "norm", - "outer", - "pinv", - "qr", - "slogdet", - "solve", - "svd", - "svdvals", - "tensordot", - "tensorinv", - "tensorsolve", - "trace", - "vecdot", - "vector_norm", -] - -class LinAlgError(ValueError): ... 
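The same applies to the ``numpy.linalg.linalg`` shim removed above: every name
it forwarded, including ``LinAlgError``, remains importable from the public
``numpy.linalg`` namespace. A short sketch:

    import numpy as np
    from numpy.linalg import LinAlgError, inv

    try:
        inv(np.zeros((2, 2)))  # a singular matrix
    except LinAlgError:
        pass  # same public exception class as before the shim removal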
From e55674e36f907c13a0b6311c8a46048059d7cb0c Mon Sep 17 00:00:00 2001 From: ANIK CHAND <161185149+anikchand461@users.noreply.github.com> Date: Fri, 10 Oct 2025 03:31:50 +0530 Subject: [PATCH 0556/1018] __init__.pyi removed --- numpy/fft/__init__.pyi | 37 ------------------------------------- 1 file changed, 37 deletions(-) diff --git a/numpy/fft/__init__.pyi b/numpy/fft/__init__.pyi index 893a697f1398..8b137891791f 100644 --- a/numpy/fft/__init__.pyi +++ b/numpy/fft/__init__.pyi @@ -1,38 +1 @@ -from ._helper import fftfreq, fftshift, ifftshift, rfftfreq -from ._pocketfft import ( - fft, - fft2, - fftn, - hfft, - ifft, - ifft2, - ifftn, - ihfft, - irfft, - irfft2, - irfftn, - rfft, - rfft2, - rfftn, -) -__all__ = [ - "fft", - "ifft", - "rfft", - "irfft", - "hfft", - "ihfft", - "rfftn", - "irfftn", - "rfft2", - "irfft2", - "fft2", - "ifft2", - "fftn", - "ifftn", - "fftshift", - "ifftshift", - "fftfreq", - "rfftfreq", -] From 107200bf5526c5f297ed42b17caa3542ef64967c Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Fri, 10 Oct 2025 01:10:00 +0200 Subject: [PATCH 0557/1018] TYP: wrong argument defaults in ``testing._private`` (#29902) * TYP: fix `testing._private.utils.jiffies` signature on linux * TYP: fix incorrect function arg defaults in ``testing._private.extbuild`` --- numpy/testing/_private/extbuild.pyi | 6 +++--- numpy/testing/_private/utils.pyi | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/testing/_private/extbuild.pyi b/numpy/testing/_private/extbuild.pyi index 609a45e79d16..c1ae507d6a49 100644 --- a/numpy/testing/_private/extbuild.pyi +++ b/numpy/testing/_private/extbuild.pyi @@ -10,7 +10,7 @@ def build_and_import_extension( *, prologue: str = "", build_dir: pathlib.Path | None = None, - include_dirs: Sequence[str] = [], + include_dirs: Sequence[str] | None = None, more_init: str = "", ) -> types.ModuleType: ... @@ -20,6 +20,6 @@ def compile_extension_module( builddir: pathlib.Path, include_dirs: Sequence[str], source_string: str, - libraries: Sequence[str] = [], - library_dirs: Sequence[str] = [], + libraries: Sequence[str] | None = None, + library_dirs: Sequence[str] | None = None, ) -> pathlib.Path: ... diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index 69572aaa4c84..1b298132c8ef 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -169,7 +169,7 @@ else: def memusage() -> NoReturn: ... if sys.platform == "linux": - def jiffies(_proc_pid_stat: StrOrBytesPath | None = None, _load_time: list[float] = []) -> int: ... + def jiffies(_proc_pid_stat: StrOrBytesPath | None = None, _load_time: list[float] | None = None) -> int: ... else: def jiffies(_load_time: list[float] = []) -> int: ... 
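The ``Sequence[str] | None = None`` signatures adopted above follow the usual
guard against shared mutable defaults. A minimal sketch of the idiom the
corrected stubs describe — ``collect_dirs`` is a hypothetical illustration,
not a NumPy API:

    from collections.abc import Sequence

    def collect_dirs(include_dirs: Sequence[str] | None = None) -> list[str]:
        # Hypothetical helper: substitute a fresh list at call time instead
        # of sharing one `[]` default object across all calls.
        return list(include_dirs) if include_dirs is not None else []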
From 9167aba1d2e3e64187cd0212398d048d772257ca Mon Sep 17 00:00:00 2001 From: Inessa Pawson Date: Thu, 9 Oct 2025 20:29:34 -0400 Subject: [PATCH 0558/1018] Fix Plausible link and syntax in conf.py --- doc/source/conf.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 0204818aa094..af431db44351 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -290,10 +290,10 @@ def setup(app): "json_url": "https://numpy.org/doc/_static/versions.json", }, "show_version_warning_banner": True, - "analytics": [ + "analytics": { "plausible_analytics_domain": "numpy.org/doc/stable/", - "plausible_analytics_url": "https://views.scientific-python.org/js/script.file-downloads.hash.outbound-links.js", - ] + "plausible_analytics_url": ("https://views.scientific-python.org/js/script.js"), + }, } html_title = f"{project} v{version} Manual" From 71a15027c4f2ead27480a6193064a5f3a02368ba Mon Sep 17 00:00:00 2001 From: Riku Sakamoto Date: Fri, 10 Oct 2025 14:28:11 +0900 Subject: [PATCH 0559/1018] BUG: Avoid segfault when calling `numpy._core.strings._expandtabs_length.reduce` Fixes the first item of numpy#28829. Use `Py_XINCREF` instead of `Py_INCREF` to safely handle the case where `op_dtypes[0]` is NULL --- numpy/_core/src/umath/string_ufuncs.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index 95f30ccb109e..9b3d86c25301 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -941,7 +941,7 @@ string_expandtabs_length_promoter(PyObject *NPY_UNUSED(ufunc), PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { - Py_INCREF(op_dtypes[0]); + Py_XINCREF(op_dtypes[0]); new_op_dtypes[0] = op_dtypes[0]; new_op_dtypes[1] = NPY_DT_NewRef(&PyArray_Int64DType); new_op_dtypes[2] = PyArray_DTypeFromTypeNum(NPY_DEFAULT_INT); From a3f1ad9a9b5b7aae6be9c3f16c61e08ef9adec60 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Fri, 10 Oct 2025 08:51:10 +0200 Subject: [PATCH 0560/1018] TYP: minor fixes related to ``errstate`` (#29914) --- numpy/__init__.pyi | 28 +------------------- numpy/_core/_ufunc_config.pyi | 50 ++++++++++++++++++++++++++++++----- 2 files changed, 45 insertions(+), 33 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 7d0bd028c826..c18bc91ff4ba 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -292,8 +292,7 @@ from numpy._core._ufunc_config import ( getbufsize, seterrcall, geterrcall, - _ErrKind, - _ErrCall, + errstate, ) from numpy._core.arrayprint import ( @@ -757,8 +756,6 @@ _T_contra = TypeVar("_T_contra", contravariant=True) _RealT_co = TypeVar("_RealT_co", covariant=True) _ImagT_co = TypeVar("_ImagT_co", covariant=True) -_CallableT = TypeVar("_CallableT", bound=Callable[..., object]) - _DTypeT = TypeVar("_DTypeT", bound=dtype) _DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) _FlexDTypeT = TypeVar("_FlexDTypeT", bound=dtype[flexible]) @@ -5735,29 +5732,6 @@ permute_dims = transpose pow = power true_divide = divide -class errstate: - __slots__ = "_all", "_call", "_divide", "_invalid", "_over", "_token", "_under" - - def __init__( - self, - *, - call: _ErrCall = ..., - all: _ErrKind | None = None, - divide: _ErrKind | None = None, - over: _ErrKind | None = None, - under: _ErrKind | None = None, - invalid: _ErrKind | None = None, - ) -> None: ... 
- def __enter__(self) -> None: ... - def __exit__( - self, - exc_type: type[BaseException] | None, - exc_value: BaseException | None, - traceback: TracebackType | None, - /, - ) -> None: ... - def __call__(self, func: _CallableT) -> _CallableT: ... - # TODO: The type of each `__next__` and `iters` return-type depends # on the length and dtype of `args`; we can't describe this behavior yet # as we lack variadics (PEP 646). diff --git a/numpy/_core/_ufunc_config.pyi b/numpy/_core/_ufunc_config.pyi index 1cc3595d5ba0..f1f0d88fe165 100644 --- a/numpy/_core/_ufunc_config.pyi +++ b/numpy/_core/_ufunc_config.pyi @@ -1,12 +1,22 @@ from _typeshed import SupportsWrite from collections.abc import Callable -from typing import Any, Literal, TypeAlias, TypedDict, type_check_only +from types import TracebackType +from typing import Any, Final, Literal, TypeAlias, TypedDict, TypeVar, type_check_only -from numpy import errstate as errstate +__all__ = [ + "seterr", + "geterr", + "setbufsize", + "getbufsize", + "seterrcall", + "geterrcall", + "errstate", +] _ErrKind: TypeAlias = Literal["ignore", "warn", "raise", "call", "print", "log"] -_ErrFunc: TypeAlias = Callable[[str, int], Any] -_ErrCall: TypeAlias = _ErrFunc | SupportsWrite[str] +_ErrCall: TypeAlias = Callable[[str, int], Any] | SupportsWrite[str] + +_CallableT = TypeVar("_CallableT", bound=Callable[..., object]) @type_check_only class _ErrDict(TypedDict): @@ -15,6 +25,36 @@ class _ErrDict(TypedDict): under: _ErrKind invalid: _ErrKind +### + +class _unspecified: ... + +_Unspecified: Final[_unspecified] + +class errstate: + __slots__ = "_all", "_call", "_divide", "_invalid", "_over", "_token", "_under" + + def __init__( + self, + /, + *, + call: _ErrCall | _unspecified = ..., # = _Unspecified + all: _ErrKind | None = None, + divide: _ErrKind | None = None, + over: _ErrKind | None = None, + under: _ErrKind | None = None, + invalid: _ErrKind | None = None, + ) -> None: ... + def __call__(self, /, func: _CallableT) -> _CallableT: ... + def __enter__(self) -> None: ... + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + /, + ) -> None: ... + def seterr( all: _ErrKind | None = None, divide: _ErrKind | None = None, @@ -27,5 +67,3 @@ def setbufsize(size: int) -> int: ... def getbufsize() -> int: ... def seterrcall(func: _ErrCall | None) -> _ErrCall | None: ... def geterrcall() -> _ErrCall | None: ... 
- -# See `numpy/__init__.pyi` for the `errstate` class and `no_nep5_warnings` From d2678ee105eeb22c2da66b1b79d7f1acaa34a93a Mon Sep 17 00:00:00 2001 From: Riku Sakamoto Date: Fri, 10 Oct 2025 16:05:50 +0900 Subject: [PATCH 0561/1018] TST: add tests to check `numpy._core.strings._expandtabs_length` --- numpy/_core/tests/test_strings.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index 756c6e1bb549..939b7fbd465d 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -4,6 +4,7 @@ import pytest import numpy as np +from numpy._core._exceptions import _UFuncNoLoopError from numpy.testing import IS_PYPY, assert_array_equal, assert_raises from numpy.testing._private.utils import requires_memory @@ -821,6 +822,20 @@ def test_expandtabs_raises_overflow(self, dt): np.strings.expandtabs(np.array("\ta\n\tb", dtype=dt), sys.maxsize) np.strings.expandtabs(np.array("\ta\n\tb", dtype=dt), 2**61) + def test_expandtabs_length_not_cause_segfault(self, dt): + # see gh-28829 + with pytest.raises( + _UFuncNoLoopError, + match="did not contain a loop with signature matching types", + ): + np._core.strings._expandtabs_length.reduce(np.zeros(200)) + + with pytest.raises( + _UFuncNoLoopError, + match="did not contain a loop with signature matching types", + ): + np.strings.expandtabs(np.zeros(200)) + FILL_ERROR = "The fill character must be exactly one character long" def test_center_raises_multiple_character_fill(self, dt): From 0a546cc17f95cd307a609d0fab249f9d5831f991 Mon Sep 17 00:00:00 2001 From: ANIK CHAND <161185149+anikchand461@users.noreply.github.com> Date: Fri, 10 Oct 2025 13:23:40 +0530 Subject: [PATCH 0562/1018] Update __init__.pyi --- numpy/linalg/__init__.pyi | 70 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) diff --git a/numpy/linalg/__init__.pyi b/numpy/linalg/__init__.pyi index 8b137891791f..4d6aca760637 100644 --- a/numpy/linalg/__init__.pyi +++ b/numpy/linalg/__init__.pyi @@ -1 +1,71 @@ +from . import _linalg as _linalg, _umath_linalg as _umath_linalg +from ._linalg import ( + cholesky, + cond, + cross, + det, + diagonal, + eig, + eigh, + eigvals, + eigvalsh, + inv, + lstsq, + matmul, + matrix_norm, + matrix_power, + matrix_rank, + matrix_transpose, + multi_dot, + norm, + outer, + pinv, + qr, + slogdet, + solve, + svd, + svdvals, + tensordot, + tensorinv, + tensorsolve, + trace, + vecdot, + vector_norm, +) +__all__ = [ + "LinAlgError", + "cholesky", + "cond", + "cross", + "det", + "diagonal", + "eig", + "eigh", + "eigvals", + "eigvalsh", + "inv", + "lstsq", + "matmul", + "matrix_norm", + "matrix_power", + "matrix_rank", + "matrix_transpose", + "multi_dot", + "norm", + "outer", + "pinv", + "qr", + "slogdet", + "solve", + "svd", + "svdvals", + "tensordot", + "tensorinv", + "tensorsolve", + "trace", + "vecdot", + "vector_norm", +] + +class LinAlgError(ValueError): ... 
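The ``errstate`` annotations refreshed earlier in this series
(``numpy/_core/_ufunc_config.pyi``) spell out both supported usage forms: a
context manager via ``__enter__``/``__exit__`` and a decorator via
``__call__``. A usage sketch (``checked_sqrt`` is an illustrative name):

    import numpy as np

    with np.errstate(divide="ignore"):
        np.log(np.zeros(3))  # -inf entries, divide warning suppressed

    @np.errstate(invalid="raise")
    def checked_sqrt(x):
        return np.sqrt(x)    # checked_sqrt(-1.0) raises FloatingPointError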
From ef1afa0156d261119fedcb9591c1402705abf609 Mon Sep 17 00:00:00 2001 From: ANIK CHAND <161185149+anikchand461@users.noreply.github.com> Date: Fri, 10 Oct 2025 13:27:55 +0530 Subject: [PATCH 0563/1018] Update __init__.pyi From 148d85c15b0ff35f2a1d608fa3f14aaa515f9016 Mon Sep 17 00:00:00 2001 From: ANIK CHAND <161185149+anikchand461@users.noreply.github.com> Date: Fri, 10 Oct 2025 13:30:19 +0530 Subject: [PATCH 0564/1018] Update __init__.pyi --- numpy/fft/__init__.pyi | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/numpy/fft/__init__.pyi b/numpy/fft/__init__.pyi index 8b137891791f..893a697f1398 100644 --- a/numpy/fft/__init__.pyi +++ b/numpy/fft/__init__.pyi @@ -1 +1,38 @@ +from ._helper import fftfreq, fftshift, ifftshift, rfftfreq +from ._pocketfft import ( + fft, + fft2, + fftn, + hfft, + ifft, + ifft2, + ifftn, + ihfft, + irfft, + irfft2, + irfftn, + rfft, + rfft2, + rfftn, +) +__all__ = [ + "fft", + "ifft", + "rfft", + "irfft", + "hfft", + "ihfft", + "rfftn", + "irfftn", + "rfft2", + "irfft2", + "fft2", + "ifft2", + "fftn", + "ifftn", + "fftshift", + "ifftshift", + "fftfreq", + "rfftfreq", +] From 7e290de02f689ce8097f828cd6b3b058aaa2d28b Mon Sep 17 00:00:00 2001 From: ANIK CHAND <161185149+anikchand461@users.noreply.github.com> Date: Fri, 10 Oct 2025 14:12:14 +0530 Subject: [PATCH 0565/1018] Rename 29000.expiring.rst to 29009.expiring.rst --- .../upcoming_changes/{29000.expiring.rst => 29009.expiring.rst} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename doc/release/upcoming_changes/{29000.expiring.rst => 29009.expiring.rst} (82%) diff --git a/doc/release/upcoming_changes/29000.expiring.rst b/doc/release/upcoming_changes/29009.expiring.rst similarity index 82% rename from doc/release/upcoming_changes/29000.expiring.rst rename to doc/release/upcoming_changes/29009.expiring.rst index 2353f38a7083..b1f7e0bdd7a3 100644 --- a/doc/release/upcoming_changes/29000.expiring.rst +++ b/doc/release/upcoming_changes/29009.expiring.rst @@ -7,4 +7,4 @@ The following deprecated functions and classes have been removed: Previously deprecated, use :mod:`numpy.linalg` instead. * :mod:`numpy.fft.helpers` - Previously deprecated, use :mod:`numpy.fft` instead. \ No newline at end of file + Previously deprecated, use :mod:`numpy.fft` instead. From 47db4f1b0a7dba2f1808e0d812a9e7b30747520b Mon Sep 17 00:00:00 2001 From: ANIK CHAND <161185149+anikchand461@users.noreply.github.com> Date: Fri, 10 Oct 2025 16:50:23 +0530 Subject: [PATCH 0566/1018] Update doc/release/upcoming_changes/29009.expiring.rst Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- doc/release/upcoming_changes/29009.expiring.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/release/upcoming_changes/29009.expiring.rst b/doc/release/upcoming_changes/29009.expiring.rst index b1f7e0bdd7a3..efe0013e40a5 100644 --- a/doc/release/upcoming_changes/29009.expiring.rst +++ b/doc/release/upcoming_changes/29009.expiring.rst @@ -6,5 +6,5 @@ The following deprecated functions and classes have been removed: * :mod:`numpy.linalg.linalg` Previously deprecated, use :mod:`numpy.linalg` instead. -* :mod:`numpy.fft.helpers` +* :mod:`numpy.fft.helper` Previously deprecated, use :mod:`numpy.fft` instead. 
From fe33f9aaac986c20cd568a9247243c506c3a27ad Mon Sep 17 00:00:00 2001
From: mattip
Date: Fri, 10 Oct 2025 16:11:31 +0300
Subject: [PATCH 0567/1018] fix file name

---
 .../upcoming_changes/{29009.expiring.rst => 29909.expired.rst}    | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename doc/release/upcoming_changes/{29009.expiring.rst => 29909.expired.rst} (100%)

diff --git a/doc/release/upcoming_changes/29009.expiring.rst b/doc/release/upcoming_changes/29909.expired.rst
similarity index 100%
rename from doc/release/upcoming_changes/29009.expiring.rst
rename to doc/release/upcoming_changes/29909.expired.rst

From 801f86d1f28e6bdd08ef8a4ce5f56d268e0eda9f Mon Sep 17 00:00:00 2001
From: mattip
Date: Fri, 10 Oct 2025 16:37:11 +0300
Subject: [PATCH 0568/1018] DOC: reformat release note [skip actions][skip azp][skip cirrus]

---
 doc/release/upcoming_changes/29909.expired.rst | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/doc/release/upcoming_changes/29909.expired.rst b/doc/release/upcoming_changes/29909.expired.rst
index efe0013e40a5..e3cb557e2c28 100644
--- a/doc/release/upcoming_changes/29909.expired.rst
+++ b/doc/release/upcoming_changes/29909.expired.rst
@@ -1,10 +1,10 @@
-Deprecated features removed in 2.2.0
-====================================
+Remove numpy.linalg.linalg and numpy.fft.helper
+-----------------------------------------------
 
-The following deprecated functions and classes have been removed:
+The following were deprecated in NumPy 2.0 and have been moved to private modules.
 
-* :mod:`numpy.linalg.linalg`
-  Previously deprecated, use :mod:`numpy.linalg` instead.
+* ``numpy.linalg.linalg``
+  Use :mod:`numpy.linalg` instead.
 
-* :mod:`numpy.fft.helper`
-  Previously deprecated, use :mod:`numpy.fft` instead.
+* ``numpy.fft.helper``
+  Use :mod:`numpy.fft` instead.
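The migration implied by the note above is a one-line change for downstream code; a sketch (the removed import paths come from the note, the rest is illustrative):

```
# Removed private paths (per the release note):
#   from numpy.linalg.linalg import LinAlgError
#   from numpy.fft.helper import fftfreq
# Replacement: import from the public namespaces instead.
from numpy.linalg import LinAlgError
from numpy.fft import fftfreq

print(fftfreq(4))  # [ 0.    0.25 -0.5  -0.25]
```
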
From 7f9a77ad70244760cab0dadca4450ca3736e18c6 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Fri, 10 Oct 2025 18:13:51 +0300 Subject: [PATCH 0569/1018] TST: use requirements/test_requirements across CI (#29919) * TST: use requirements/test_requirements across CI * fixes * fixes * fixes * fixes * fixes --- .github/meson_actions/action.yml | 3 +-- .github/workflows/linux-ppc64le.yml | 5 ++--- .github/workflows/linux.yml | 2 +- .github/workflows/linux_blas.yml | 23 ++++++++++------------- .github/workflows/linux_simd.yml | 9 +++------ .github/workflows/macos.yml | 4 +--- 6 files changed, 18 insertions(+), 28 deletions(-) diff --git a/.github/meson_actions/action.yml b/.github/meson_actions/action.yml index 66868cbc3be0..476c0bbd7950 100644 --- a/.github/meson_actions/action.yml +++ b/.github/meson_actions/action.yml @@ -30,8 +30,7 @@ runs: TERM: xterm-256color run: | echo "::group::Installing Test Dependencies" - pip install pytest pytest-xdist pytest-timeout hypothesis typing_extensions - pip install -r requirements/setuptools_requirement.txt + python -m pip install -r requirements/test_requirements.txt echo "::endgroup::" echo "::group::Test NumPy" spin test -- --durations=10 --timeout=600 diff --git a/.github/workflows/linux-ppc64le.yml b/.github/workflows/linux-ppc64le.yml index c561c3be4611..f54b5dc74060 100644 --- a/.github/workflows/linux-ppc64le.yml +++ b/.github/workflows/linux-ppc64le.yml @@ -35,8 +35,7 @@ jobs: sudo apt install -y python3 python3-pip python3-dev ninja-build gfortran \ build-essential libopenblas-dev liblapack-dev pkg-config pip install --upgrade pip - pip install -r requirements/build_requirements.txt - pip install pytest pytest-xdist hypothesis typing_extensions pytest-timeout spin + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt echo "/home/runner/.local/bin" >> $GITHUB_PATH - name: Meson Build @@ -49,4 +48,4 @@ jobs: - name: Run Tests run: | - spin test -- --timeout=60 --durations=10 \ No newline at end of file + spin test -- --timeout=60 --durations=10 diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index b33213449561..42a9601f33e1 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -236,7 +236,7 @@ jobs: # - name: Check docstests # shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' # run: | - # pip install scipy-doctest>=1.8.0 hypothesis==6.104.1 matplotlib scipy pandas + # pip install -r requirements/doc_requirements.txt -r requirements/test_requirements.txt # spin check-docs -v # spin check-tutorials -v diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index 570e90437e1a..98d45ab8c435 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -76,7 +76,7 @@ jobs: - name: Install dependencies run: | - pip install -r requirements/build_requirements.txt + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt # Install OpenBLAS if [[ $USE_NIGHTLY_OPENBLAS == "true" ]]; then python -m pip install -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple scipy-openblas32 @@ -113,7 +113,6 @@ jobs: env: TERM: xterm-256color run: | - pip install pytest pytest-xdist hypothesis typing_extensions pytest-timeout spin test -j auto -- --timeout=600 --durations=10 @@ -135,8 +134,7 @@ jobs: - name: Install dependencies run: | - pip install -r requirements/build_requirements.txt - pip install pytest hypothesis typing_extensions pytest-timeout + pip install -r 
requirements/build_requirements.txt -r requirements/test_requirements.txt - name: Build (LP64) run: spin build -- -Dblas=openblas -Dlapack=openblas -Ddisable-optimization=true -Dallow-noblas=false @@ -171,8 +169,7 @@ jobs: - name: Install dependencies run: | - pip install -r requirements/build_requirements.txt - pip install pytest hypothesis typing_extensions pytest-timeout + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt - name: Build run: spin build -- -Ddisable-optimization=true -Dallow-noblas=false @@ -205,8 +202,7 @@ jobs: - name: Install dependencies run: | - pip install -r requirements/build_requirements.txt - pip install pytest pytest-xdist hypothesis typing_extensions pytest-timeout + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt sudo apt-get update sudo apt-get install libopenblas-dev cmake sudo apt-get remove pkg-config @@ -234,7 +230,7 @@ jobs: - name: Install dependencies run: | - pip install -r requirements/build_requirements.txt + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt sudo apt-get update sudo apt-get install liblapack-dev pkg-config @@ -244,7 +240,6 @@ jobs: - name: Test run: | - pip install pytest pytest-xdist hypothesis typing_extensions pytest-timeout spin test -j auto -- numpy/linalg --timeout=600 --durations=10 @@ -276,6 +271,8 @@ jobs: - name: Test run: | + # do not use test_requirements.txt, it includes coverage which requires + # sqlite3, which is not available on OpenSUSE python pip install --break-system-packages pytest pytest-xdist hypothesis typing_extensions pytest-timeout spin test -j auto -- numpy/linalg --timeout=600 --durations=10 @@ -297,7 +294,7 @@ jobs: - name: Install dependencies run: | pip install -r requirements/build_requirements.txt - pip install pytest pytest-xdist hypothesis typing_extensions pytest-timeout + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt pip install mkl mkl-devel - name: Repair MKL pkg-config files and symlinks @@ -361,7 +358,7 @@ jobs: - name: Install dependencies run: | pip install -r requirements/build_requirements.txt - pip install pytest pytest-xdist hypothesis typing_extensions pytest-timeout + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt sudo apt-get update sudo apt-get install libblis-dev libopenblas-dev pkg-config @@ -398,7 +395,7 @@ jobs: - name: Install dependencies run: | pip install -r requirements/build_requirements.txt - pip install pytest pytest-xdist hypothesis typing_extensions pytest-timeout + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt sudo apt-get update sudo apt-get install libatlas-base-dev pkg-config diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index 5bd1eab7f797..c000c7f05d86 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -132,8 +132,7 @@ jobs: python-version: '3.11' - name: Install dependencies run: | - python -m pip install -r requirements/build_requirements.txt - python -m pip install pytest pytest-xdist hypothesis typing_extensions pytest-timeout + python -m pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt - name: Build run: | spin build -- ${{ matrix.config.args }} @@ -208,8 +207,7 @@ jobs: - name: Install dependencies run: | - python -m pip install -r requirements/build_requirements.txt - python -m pip install pytest 
pytest-xdist hypothesis typing_extensions + python -m pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt - name: Build run: CC=gcc-13 CXX=g++-13 spin build -- -Denable-openmp=true -Dallow-noblas=true -Dcpu-baseline=avx512_skx -Dtest-simd='BASELINE,AVX512_KNL,AVX512_KNM,AVX512_SKX,AVX512_CLX,AVX512_CNL,AVX512_ICL,AVX512_SPR' @@ -259,8 +257,7 @@ jobs: - name: Install dependencies run: | - python -m pip install -r requirements/build_requirements.txt - python -m pip install pytest pytest-xdist hypothesis typing_extensions + python -m pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt - name: Build run: CC=gcc-13 CXX=g++-13 spin build -- -Denable-openmp=true -Dallow-noblas=true -Dcpu-baseline=avx512_spr diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 868dac6dcbbc..d747ab959dd6 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -130,9 +130,7 @@ jobs: - name: Install dependencies run: | - pip install -r requirements/build_requirements.txt - pip install -r requirements/setuptools_requirement.txt - pip install pytest pytest-xdist pytest-timeout hypothesis + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt - name: Build against Accelerate (LP64) run: spin build -- -Ddisable-optimization=true -Dallow-noblas=false From dd0d9d6d131fc48ad0d69a9e55cf52ed898511fc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 10 Oct 2025 20:43:29 +0200 Subject: [PATCH 0570/1018] MAINT: Bump github/codeql-action from 4.30.7 to 4.30.8 (#29924) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index d6719c246c0d..0d52c4126be6 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@e296a935590eb16afc0c0108289f68c87e2a89a5 # v4.30.7 + uses: github/codeql-action/init@f443b600d91635bebf5b0d9ebc620189c0d6fba5 # v4.30.8 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@e296a935590eb16afc0c0108289f68c87e2a89a5 # v4.30.7 + uses: github/codeql-action/autobuild@f443b600d91635bebf5b0d9ebc620189c0d6fba5 # v4.30.8 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@e296a935590eb16afc0c0108289f68c87e2a89a5 # v4.30.7 + uses: github/codeql-action/analyze@f443b600d91635bebf5b0d9ebc620189c0d6fba5 # v4.30.8 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 6fb0391abde5..9a0c80aa3342 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@e296a935590eb16afc0c0108289f68c87e2a89a5 # v2.1.27 + uses: github/codeql-action/upload-sarif@f443b600d91635bebf5b0d9ebc620189c0d6fba5 # v2.1.27 with: sarif_file: results.sarif From 0f9ea0ef45968b1aa659665e0018b78a1c059391 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Germ=C3=A1n=20Godoy=20Gutierrez?= Date: Fri, 10 Oct 2025 11:20:49 +0200 Subject: [PATCH 0571/1018] Fix memory leak in import_array(). Move import_array() and import_umath() calls before PyModule_Create() to prevent memory leaks when imports fail. The import macros contain a return NULL statement that bypasses cleanup code. Fixes in: - numpy/_core/src/umath/_operand_flag_tests.c - numpy/_core/src/umath/_struct_ufunc_tests.c --- numpy/_core/src/umath/_operand_flag_tests.c | 6 +++--- numpy/_core/src/umath/_struct_ufunc_tests.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/umath/_operand_flag_tests.c b/numpy/_core/src/umath/_operand_flag_tests.c index 5cdff6220280..c97668c4b118 100644 --- a/numpy/_core/src/umath/_operand_flag_tests.c +++ b/numpy/_core/src/umath/_operand_flag_tests.c @@ -57,14 +57,14 @@ PyMODINIT_FUNC PyInit__operand_flag_tests(void) PyObject *m = NULL; PyObject *ufunc; + import_array(); + import_umath(); + m = PyModule_Create(&moduledef); if (m == NULL) { goto fail; } - import_array(); - import_umath(); - ufunc = PyUFunc_FromFuncAndData(funcs, data, types, 1, 2, 0, PyUFunc_None, "inplace_add", "inplace_add_docstring", 0); diff --git a/numpy/_core/src/umath/_struct_ufunc_tests.c b/numpy/_core/src/umath/_struct_ufunc_tests.c index 56c4be117e44..e85c67f9d903 100644 --- a/numpy/_core/src/umath/_struct_ufunc_tests.c +++ b/numpy/_core/src/umath/_struct_ufunc_tests.c @@ -123,15 +123,15 @@ PyMODINIT_FUNC PyInit__struct_ufunc_tests(void) PyArray_Descr *dtype; PyArray_Descr *dtypes[3]; + import_array(); + import_umath(); + m = PyModule_Create(&moduledef); if (m == NULL) { return NULL; } - import_array(); - import_umath(); - add_triplet = PyUFunc_FromFuncAndData(NULL, NULL, NULL, 0, 2, 1, PyUFunc_None, "add_triplet", NULL, 0); From 0ce0ef0bae095e44770a20b64177f07f11758715 Mon Sep 17 00:00:00 2001 From: Maaz <76714503+mmaaz-git@users.noreply.github.com> Date: Fri, 10 Oct 2025 17:50:22 -0400 Subject: [PATCH 0572/1018] BUG: fix negative samples generated by Wald distribution (#29609) In numpy.random, when generating Wald samples, if the mean and scale have a large discrepancy, the current implementation suffers from catastrophic cancellation. An explicit example of this has been added to test_generator_mt19937.py. The key line implicated in distributions.c: `X = mean + mu_2l * (Y - sqrt(4 * scale * Y + Y * Y));` which has numerical issues when Y >> scale. 
I have replaced this with the equivalent rationalized form: ``` d = Y + sqrt(Y) * sqrt(Y + 4 * scale); X = mean - (2 * mean * Y) / d; ``` And now the test passes. --- numpy/random/src/distributions/distributions.c | 6 +++--- numpy/random/tests/test_generator_mt19937.py | 5 +++++ 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c index 8102fee72323..79cacb2df4a4 100644 --- a/numpy/random/src/distributions/distributions.c +++ b/numpy/random/src/distributions/distributions.c @@ -848,12 +848,12 @@ double random_noncentral_f(bitgen_t *bitgen_state, double dfnum, double dfden, double random_wald(bitgen_t *bitgen_state, double mean, double scale) { double U, X, Y; - double mu_2l; + double d; - mu_2l = mean / (2 * scale); Y = random_standard_normal(bitgen_state); Y = mean * Y * Y; - X = mean + mu_2l * (Y - sqrt(4 * scale * Y + Y * Y)); + d = 1 + sqrt(1 + 4 * scale / Y); + X = mean * (1 - 2 / d); U = next_double(bitgen_state); if (U <= mean / (mean + X)) { return X; diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index 6594f6008c8e..51065f24868d 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -1874,6 +1874,11 @@ def test_wald(self): [2.07093587449261, 0.73073890064369]]) assert_array_almost_equal(actual, desired, decimal=14) + def test_wald_nonnegative(self): + random = Generator(MT19937(self.seed)) + samples = random.wald(mean=1e9, scale=2.25, size=1000) + assert_(np.all(samples >= 0.0)) + def test_weibull(self): random = Generator(MT19937(self.seed)) actual = random.weibull(a=1.23, size=(3, 2)) From 5c77403a6c0a70254976245f2e488e13fbee75c7 Mon Sep 17 00:00:00 2001 From: Aniket Singh Yadav Date: Sat, 11 Oct 2025 17:40:39 +0530 Subject: [PATCH 0573/1018] =?UTF-8?q?DOC:=20Update=20Meson=20build=20examp?= =?UTF-8?q?les=20in=20usage.rst=20[skip=20actions][skip=20azp=E2=80=A6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- doc/source/f2py/usage.rst | 75 ++++++++++++++++++++------------------- 1 file changed, 38 insertions(+), 37 deletions(-) diff --git a/doc/source/f2py/usage.rst b/doc/source/f2py/usage.rst index d6e042863b55..6a8ffc9b75fe 100644 --- a/doc/source/f2py/usage.rst +++ b/doc/source/f2py/usage.rst @@ -342,53 +342,54 @@ To build a Fortran extension using f2py and Meson, you can use Meson's `custom_target` to invoke f2py and generate the extension module. The following minimal example demonstrates how to do this: +This example shows how to build the previously described `add` extension +(from `add.pyf` and `add.f`) using Meson instead of distutils. + +Project layout: + + f2py_examples/ + meson.build + add.f + add.pyf + __init__.py (can be empty) + +Example `meson.build`: + .. 
code-block:: meson - # List your Fortran source files - fortran_sources = files('your_module.f90') + project('f2py_examples', 'fortran') - # Find the Python installation py = import('python').find_installation() - # Create a custom target to build the extension with f2py - f2py_wrapper = custom_target( - 'your_module_wrapper', - output: 'your_module.so', - input: fortran_sources, + # List your Fortran source files + sources = files('add.pyf', 'add.f') + + # Build the extension by invoking f2py via a custom target + add_mod = custom_target( + 'add_extension', + input: sources, + output: ['add' + py.extension_suffix()], command: [ py.full_path(), '-m', 'numpy.f2py', - '-c', '@INPUT@', '-m', 'your_module' - ] + '-c', 'add.pyf', 'add.f', + '-m', 'add' + ], + build_by_default: true ) - # Install the built extension to the Python site-packages directory - install_data(f2py_wrapper, install_dir: py.site_packages_dir()) + # Install into site-packages under the f2py_examples package + install_subdir('.', install_dir: py.site_packages_dir() / 'f2py_examples', + strip_directory: false, + exclude_files: ['meson.build']) + + # Also install the built extension (place it beside __init__.py) + install_data(add_mod, install_dir: py.site_packages_dir() / 'f2py_examples') + +.. note:: + This uses a simple custom_target. For larger projects you may wish to + stage build outputs in a subdirectory and control install steps more + explicitly. For more details and advanced usage, see the Meson build guide in the user documentation or refer to SciPy's Meson build files for real-world examples: https://github.com/scipy/scipy/tree/main/meson.build - -Building NumPy ufunc Extensions with Meson -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To build a NumPy ufunc extension (C API) using Meson, you can use the -following template: - -.. code-block:: meson - - # List your C source files - c_sources = files('your_ufunc_module.c') - - # Find the Python installation - py = import('python').find_installation() - - # Create an extension module - extension_module = py.extension_module( - 'your_ufunc_module', - c_sources, - dependencies: py.dependency(), - install: true - ) - -For more information on writing NumPy ufunc extensions, see the official -NumPy documentation: https://numpy.org/doc/stable/reference/c-api.ufunc.html From 06102c990de8437d9d5b4934470cb6aa98c58622 Mon Sep 17 00:00:00 2001 From: Aniket Singh Yadav Date: Sun, 12 Oct 2025 10:33:47 +0530 Subject: [PATCH 0574/1018] DOC: Make Meson/f2py example concrete, remove redundancy, and fix formatting [skip actions] [skip azp] [skip cirrus] --- doc/source/f2py/usage.rst | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/doc/source/f2py/usage.rst b/doc/source/f2py/usage.rst index 6a8ffc9b75fe..ae510c20b01a 100644 --- a/doc/source/f2py/usage.rst +++ b/doc/source/f2py/usage.rst @@ -339,18 +339,19 @@ modules, especially starting with Python 3.12 and NumPy 2.x. Meson provides a robust and maintainable way to build Fortran extensions with f2py. To build a Fortran extension using f2py and Meson, you can use Meson's -`custom_target` to invoke f2py and generate the extension module. The +``custom_target`` to invoke f2py and generate the extension module. The following minimal example demonstrates how to do this: -This example shows how to build the previously described `add` extension -(from `add.pyf` and `add.f`) using Meson instead of distutils. 
+This example shows how to build the ``add`` extension from the ``add.f`` and ``add.pyf`` +files described in the :ref:`f2py-examples` (note that you do not always need +a ``.pyf`` file: in many cases ``f2py`` can figure out the annotations by itself). Project layout: f2py_examples/ meson.build add.f - add.pyf + add.pyf (optional) __init__.py (can be empty) Example `meson.build`: @@ -385,11 +386,6 @@ Example `meson.build`: # Also install the built extension (place it beside __init__.py) install_data(add_mod, install_dir: py.site_packages_dir() / 'f2py_examples') -.. note:: - This uses a simple custom_target. For larger projects you may wish to - stage build outputs in a subdirectory and control install steps more - explicitly. - For more details and advanced usage, see the Meson build guide in the user documentation or refer to SciPy's Meson build files for real-world examples: https://github.com/scipy/scipy/tree/main/meson.build From e48f9b27611ec11b2ee6c5942b95783377421bf7 Mon Sep 17 00:00:00 2001 From: Aniket Singh Yadav Date: Sun, 12 Oct 2025 20:03:12 +0530 Subject: [PATCH 0575/1018] DOC: Make Meson/f2py example concrete, remove redundancy, and fix formatting [skip actions] [skip azp] [skip cirrus] --- doc/source/f2py/usage.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/f2py/usage.rst b/doc/source/f2py/usage.rst index ae510c20b01a..efcf2bec5266 100644 --- a/doc/source/f2py/usage.rst +++ b/doc/source/f2py/usage.rst @@ -354,7 +354,7 @@ Project layout: add.pyf (optional) __init__.py (can be empty) -Example `meson.build`: +Example ``meson.build``: .. code-block:: meson @@ -379,12 +379,12 @@ Example `meson.build`: ) # Install into site-packages under the f2py_examples package - install_subdir('.', install_dir: py.site_packages_dir() / 'f2py_examples', + install_subdir('.', install_dir: join_paths(py.site_packages_dir(), 'f2py_examples'), strip_directory: false, exclude_files: ['meson.build']) # Also install the built extension (place it beside __init__.py) - install_data(add_mod, install_dir: py.site_packages_dir() / 'f2py_examples') + install_data(add_mod, install_dir: join_paths(py.site_packages_dir(), 'f2py_examples')) For more details and advanced usage, see the Meson build guide in the user documentation or refer to SciPy's Meson build files for real-world From edd072fa5debe09a9a23921b53599850925edf80 Mon Sep 17 00:00:00 2001 From: Maanas Arora Date: Wed, 8 Oct 2025 17:18:43 -0400 Subject: [PATCH 0576/1018] ENH: Add registration for sort and argsort in new convenience ufunc registration function Includes the following commits: DOC: Document `PyUFunc_AddLoopsFromSpecs` and new sorting / argsorting arraymethods ENH: Update field names in PyUFunc_LoopSlot struct ENH: Update C-API hash (15, NumPy 2.4) ENH: Add 'PyUFunc_LoopSlots' to ufunc_funcs_api with MinVersion "2.4" ENH: Update hash for version 21 (NumPy 2.4.0) ENH: Move PyUFunc_LoopSlot struct definition and update version hash for NumPy 2.4.0 REF: Move PyUFunc_LoopSlot struct definition to dtype_api.h DOC: Clarify ArrayMethod API registration process in array.rst DOC: Update sorting parameters struct in array.rst to use correct member (flags) ENH: Clarify version 21 description in cversions REF: Remove unnecessary error message for non-ufunc attributes in PyUFunc_AddLoopsFromSpecs REF: Refactor ufunc initialization and consolidate sort loops in sfloat_init_ufuncs REF: Simplify ufunc import by replacing PyObject_GetAttrString with npy_import REF: Rename ufunc_name to name in PyUFunc_LoopSlot 
REF: Initialize res variable to -1 in sfloat_init_ufuncs for clarity STYLE: Refactor error message formatting in PyUFunc_AddLoopsFromSpecs for clarity REF: Simplify redundant Py_DECREF call in PyUFunc_AddLoopsFromSpecs DOC: Improve documentation for PyUFunc_AddLoopsFromSpecs and sorting ENH: Optimize ufunc handling in PyUFunc_AddLoopsFromSpecs by using cached imports for sort and argsort DOC: Clarify pointer type Co-authored-by: Sebastian Berg REF: Optimize array definition of loop slots for sfloat Co-authored-by: Sebastian Berg REF: Improve error message for missing ufunc by removing module name BUG: Fix incorrect use of `npy_cache_import_runtime` REF: Reuse dtypes variable in sfloat ufunc init REF: Cache sort and argsort functions in npy_runtime_imports_struct BUG: Add missing header file for and add error handling to `numpy_cache_import_runtime` BUG: Improve error handling for numpy runtime imports in PyUFunc_AddLoopsFromSpecs BUG: Replace npy_cache_import_runtime with direct npy_import calls for sort and argsort DOC: Fix code annotations and references in sorting and argsorting docs BUG: Optimize numpy import calls for sort and argsort in PyUFunc_AddLoopsFromSpecs BUG: Remove unused references to _sort and _argsort in npy_runtime_imports_struct BUG: Replace npy_import calls with npy_cache_import_runtime for sort and argsort in PyUFunc_AddLoopsFromSpecs DOC: Add versionadded directive for PyUFunc_AddLoopsFromSpecs in array.rst BUG: Add missing Py_DECREF for ufunc in PyUFunc_AddLoopsFromSpecs BUG: Replace npy_cache_import calls with npy_import for sort and argsort, simplify decrefs --- doc/source/reference/c-api/array.rst | 58 ++++++ numpy/_core/code_generators/cversions.txt | 6 +- numpy/_core/code_generators/numpy_api.py | 4 +- numpy/_core/include/numpy/dtype_api.h | 7 + numpy/_core/src/umath/_scaled_float_dtype.c | 213 +++++++++----------- numpy/_core/src/umath/dispatching.cpp | 78 +++++++ numpy/_core/src/umath/dispatching.h | 3 + 7 files changed, 248 insertions(+), 121 deletions(-) diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 95f5cc033b4d..605516097274 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -1886,6 +1886,34 @@ with the rest of the ArrayMethod API. the main ufunc registration function. This adds a new implementation/loop to a ufunc. It replaces `PyUFunc_RegisterLoopForType`. +.. c:type:: PyUFunc_LoopSlot + + Structure used to add multiple loops to ufuncs from ArrayMethod specs. + This is used in `PyUFunc_AddLoopsFromSpecs`. + + .. c:struct:: PyUFunc_LoopSlot + + .. c:member:: const char *name + + The name of the ufunc to add the loop to. + + .. c:member:: PyArrayMethod_Spec *spec + + The ArrayMethod spec to use to create the loop. + +.. c:function:: int PyUFunc_AddLoopsFromSpecs( \ + PyUFunc_LoopSlot *slots) + + .. versionadded:: 2.4 + + Add multiple loops to ufuncs from ArrayMethod specs. This also + handles the registration of methods for the ufunc-like functions + ``sort`` and ``argsort``. See :ref:`array-methods-sorting` for details. + + The ``slots`` argument must be a NULL-terminated array of + `PyUFunc_LoopSlot` (see above), which give the name of the + ufunc and spec needed to create the loop. + .. c:function:: int PyUFunc_AddPromoter( \ PyObject *ufunc, PyObject *DType_tuple, PyObject *promoter) @@ -2036,6 +2064,36 @@ code: Py_INCREF(loop_descrs[2]); } +.. 
_array-methods-sorting: + +Sorting and Argsorting +~~~~~~~~~~~~~~~~~~~~~~~ + +Sorting and argsorting methods for dtypes can be registered using the +ArrayMethod API. This is done by adding an ArrayMethod spec with the name +``"sort"`` or ``"argsort"`` respectively. The spec must have ``nin=1`` +and ``nout=1`` for both sort and argsort. Sorting is inplace, hence we +enforce that ``data[0] == data[1]``. Argsorting returns a new array of +indices, so the output must be of ``NPY_INTP`` type. + +The ``context`` passed to the loop contains the ``parameters`` field which +for these operations is a ``PyArrayMethod_SortParameters *`` struct. This +struct contains a ``flags`` field which is a bitwise OR of ``NPY_SORTKIND`` +values indicating the kind of sort to perform (that is, whether it is a +stable and/or descending sort). If the strided loop depends on the flags, +a good way to deal with this is to define :c:macro:`NPY_METH_get_loop`, +and not set any of the other loop slots. + +.. c:struct:: PyArrayMethod_SortParameters + + .. c:member:: NPY_SORTKIND flags + + The flags passed to the sort operation. This is a bitwise OR of + ``NPY_SORTKIND`` values indicating the kind of sort to perform. + +These specs can be registered using :c:func:`PyUFunc_AddLoopsFromSpecs` +along with other ufunc loops. + API for calling array methods ----------------------------- diff --git a/numpy/_core/code_generators/cversions.txt b/numpy/_core/code_generators/cversions.txt index a04dd784c67f..3a480dfd4ab3 100644 --- a/numpy/_core/code_generators/cversions.txt +++ b/numpy/_core/code_generators/cversions.txt @@ -80,5 +80,7 @@ 0x00000013 = 2b8f1f4da822491ff030b2b37dff07e3 # Version 20 (NumPy 2.3.0) 0x00000014 = e56b74d32a934d085e7c3414cb9999b8, -# Version 21 (NumPy 2.4.0) Add 'same_value' casting, header additions -0x00000015 = e56b74d32a934d085e7c3414cb9999b8, +# Version 21 (NumPy 2.4.0) +# Add 'same_value' casting, header additions. 
+# General loop registration for ufuncs, sort, and argsort +0x00000015 = fbd24fc5b2ba4f7cd3606ec6128de7a5 diff --git a/numpy/_core/code_generators/numpy_api.py b/numpy/_core/code_generators/numpy_api.py index b366dc99dfb8..ac108aa20370 100644 --- a/numpy/_core/code_generators/numpy_api.py +++ b/numpy/_core/code_generators/numpy_api.py @@ -412,7 +412,7 @@ def get_annotations(): } ufunc_types_api = { - 'PyUFunc_Type': (0,) + 'PyUFunc_Type': (0,), } ufunc_funcs_api = { @@ -468,6 +468,8 @@ def get_annotations(): 'PyUFunc_AddPromoter': (44, MinVersion("2.0")), 'PyUFunc_AddWrappingLoop': (45, MinVersion("2.0")), 'PyUFunc_GiveFloatingpointErrors': (46, MinVersion("2.0")), + # End 2.0 API + 'PyUFunc_AddLoopsFromSpecs': (47, MinVersion("2.4")), } # List of all the dicts which define the C API diff --git a/numpy/_core/include/numpy/dtype_api.h b/numpy/_core/include/numpy/dtype_api.h index 2222ff342253..5ac964782ec0 100644 --- a/numpy/_core/include/numpy/dtype_api.h +++ b/numpy/_core/include/numpy/dtype_api.h @@ -145,6 +145,13 @@ typedef struct { } PyArrayMethod_Spec; +// This is used for the convenience function `PyUFunc_AddLoopsFromSpecs` +typedef struct { + const char *name; + PyArrayMethod_Spec *spec; +} PyUFunc_LoopSlot; + + /* * ArrayMethod slots * ----------------- diff --git a/numpy/_core/src/umath/_scaled_float_dtype.c b/numpy/_core/src/umath/_scaled_float_dtype.c index 463ccbffae0b..574891360219 100644 --- a/numpy/_core/src/umath/_scaled_float_dtype.c +++ b/numpy/_core/src/umath/_scaled_float_dtype.c @@ -777,92 +777,6 @@ promote_to_sfloat(PyUFuncObject *NPY_UNUSED(ufunc), } -/* - * Add new ufunc loops (this is somewhat clumsy as of writing it, but should - * get less so with the introduction of public API). - */ -static int -sfloat_init_ufuncs(void) { - PyArray_DTypeMeta *dtypes[3] = { - &PyArray_SFloatDType, &PyArray_SFloatDType, &PyArray_SFloatDType}; - PyType_Slot slots[3] = {{0, NULL}}; - PyArrayMethod_Spec spec = { - .nin = 2, - .nout =1, - .dtypes = dtypes, - .slots = slots, - }; - spec.name = "sfloat_multiply"; - spec.casting = NPY_NO_CASTING; - - slots[0].slot = NPY_METH_resolve_descriptors; - slots[0].pfunc = &multiply_sfloats_resolve_descriptors; - slots[1].slot = NPY_METH_strided_loop; - slots[1].pfunc = &multiply_sfloats; - PyBoundArrayMethodObject *bmeth = PyArrayMethod_FromSpec_int(&spec, 0); - if (bmeth == NULL) { - return -1; - } - int res = sfloat_add_loop("multiply", - bmeth->dtypes, (PyObject *)bmeth->method); - Py_DECREF(bmeth); - if (res < 0) { - return -1; - } - - spec.name = "sfloat_add"; - spec.casting = NPY_SAME_KIND_CASTING; - - slots[0].slot = NPY_METH_resolve_descriptors; - slots[0].pfunc = &add_sfloats_resolve_descriptors; - slots[1].slot = NPY_METH_strided_loop; - slots[1].pfunc = &add_sfloats; - bmeth = PyArrayMethod_FromSpec_int(&spec, 0); - if (bmeth == NULL) { - return -1; - } - res = sfloat_add_loop("add", - bmeth->dtypes, (PyObject *)bmeth->method); - Py_DECREF(bmeth); - if (res < 0) { - return -1; - } - - /* N.B.: Wrapping isn't actually correct if scaling can be negative */ - if (sfloat_add_wrapping_loop("hypot", dtypes) < 0) { - return -1; - } - - /* - * Add a promoter for both directions of multiply with double. 
- */ - PyArray_DTypeMeta *double_DType = &PyArray_DoubleDType; - - PyArray_DTypeMeta *promoter_dtypes[3] = { - &PyArray_SFloatDType, double_DType, NULL}; - - PyObject *promoter = PyCapsule_New( - &promote_to_sfloat, "numpy._ufunc_promoter", NULL); - if (promoter == NULL) { - return -1; - } - res = sfloat_add_loop("multiply", promoter_dtypes, promoter); - if (res < 0) { - Py_DECREF(promoter); - return -1; - } - promoter_dtypes[0] = double_DType; - promoter_dtypes[1] = &PyArray_SFloatDType; - res = sfloat_add_loop("multiply", promoter_dtypes, promoter); - Py_DECREF(promoter); - if (res < 0) { - return -1; - } - - return 0; -} - - NPY_NO_EXPORT int sfloat_stable_sort_loop( PyArrayMethod_Context *context, @@ -1044,54 +958,121 @@ sfloat_argsort_resolve_descriptors( } +/* + * Add new ufunc loops (this is somewhat clumsy as of writing it, but should + * get less so with the introduction of public API). + */ static int -sfloat_init_sort(void) -{ - PyArray_DTypeMeta *dtypes[2] = {&PyArray_SFloatDType, &PyArray_SFloatDType}; - PyType_Slot slots[3] = { +sfloat_init_ufuncs(void) { + PyArray_DTypeMeta *all_sfloat_dtypes[3] = { + &PyArray_SFloatDType, &PyArray_SFloatDType, &PyArray_SFloatDType}; + PyType_Slot multiply_slots[3] = { + {NPY_METH_resolve_descriptors, &multiply_sfloats_resolve_descriptors}, + {NPY_METH_strided_loop, &multiply_sfloats}, + {0, NULL} + }; + PyArrayMethod_Spec multiply_spec = { + .nin = 2, + .nout = 1, + .dtypes = all_sfloat_dtypes, + .slots = multiply_slots, + .name = "sfloat_multiply", + .casting = NPY_NO_CASTING, + }; + + PyType_Slot add_slots[3] = { + {NPY_METH_resolve_descriptors, &add_sfloats_resolve_descriptors}, + {NPY_METH_strided_loop, &add_sfloats}, + {0, NULL} + }; + PyArrayMethod_Spec add_spec = { + .nin = 2, + .nout = 1, + .dtypes = all_sfloat_dtypes, + .slots = add_slots, + .name = "sfloat_add", + .casting = NPY_SAME_KIND_CASTING, + }; + + PyArray_DTypeMeta *sort_dtypes[2] = {&PyArray_SFloatDType, &PyArray_SFloatDType}; + PyType_Slot sort_slots[3] = { {NPY_METH_resolve_descriptors, &sfloat_sort_resolve_descriptors}, {NPY_METH_get_loop, &sfloat_sort_get_loop}, {0, NULL} }; - PyArrayMethod_Spec spec = { + PyArrayMethod_Spec sort_spec = { .nin = 1, .nout = 1, - .dtypes = dtypes, - .slots = slots, + .dtypes = sort_dtypes, + .slots = sort_slots, }; - spec.name = "sfloat_sort"; - spec.casting = NPY_NO_CASTING; - spec.flags = NPY_METH_NO_FLOATINGPOINT_ERRORS; + sort_spec.name = "sfloat_sort"; + sort_spec.casting = NPY_NO_CASTING; + sort_spec.flags = NPY_METH_NO_FLOATINGPOINT_ERRORS; + + PyArray_DTypeMeta *argsort_dtypes[2] = {&PyArray_SFloatDType, &PyArray_IntpDType}; + PyType_Slot argsort_slots[3] = { + {NPY_METH_resolve_descriptors, &sfloat_argsort_resolve_descriptors}, + {NPY_METH_get_loop, &sfloat_argsort_get_loop}, + {0, NULL} + }; + PyArrayMethod_Spec argsort_spec = { + .nin = 1, + .nout = 1, + .dtypes = argsort_dtypes, + .slots = argsort_slots, + }; + argsort_spec.name = "sfloat_argsort"; + argsort_spec.casting = NPY_NO_CASTING; + argsort_spec.flags = NPY_METH_NO_FLOATINGPOINT_ERRORS; + + PyUFunc_LoopSlot loops[] = { + {"multiply", &multiply_spec}, + {"add", &add_spec}, + {"sort", &sort_spec}, + {"argsort", &argsort_spec}, + {NULL, NULL} + }; + if (PyUFunc_AddLoopsFromSpecs(loops) < 0) { + return -1; + } - PyBoundArrayMethodObject *sort_meth = PyArrayMethod_FromSpec_int(&spec, 0); - if (sort_meth == NULL) { + /* N.B.: Wrapping isn't actually correct if scaling can be negative */ + if (sfloat_add_wrapping_loop("hypot", all_sfloat_dtypes) < 0) { return -1; } - // 
TODO: once registration method is in place, use it instead of setting hidden slot - NPY_DT_SLOTS(&PyArray_SFloatDType)->sort_meth = sort_meth->method; - Py_INCREF(sort_meth->method); - Py_DECREF(sort_meth); - spec.name = "sfloat_argsort"; - dtypes[1] = &PyArray_IntpDType; + /* + * Add a promoter for both directions of multiply with double. + */ + int res = -1; + PyArray_DTypeMeta *double_DType = &PyArray_DoubleDType; - slots[0].slot = NPY_METH_resolve_descriptors; - slots[0].pfunc = &sfloat_argsort_resolve_descriptors; - slots[1].slot = NPY_METH_get_loop; - slots[1].pfunc = &sfloat_argsort_get_loop; + PyArray_DTypeMeta *promoter_dtypes[3] = { + &PyArray_SFloatDType, double_DType, NULL}; - // TODO: once registration method is in place, use it instead of setting hidden slot - PyBoundArrayMethodObject *argsort_meth = PyArrayMethod_FromSpec_int(&spec, 0); - if (argsort_meth == NULL) { + PyObject *promoter = PyCapsule_New( + &promote_to_sfloat, "numpy._ufunc_promoter", NULL); + if (promoter == NULL) { + return -1; + } + res = sfloat_add_loop("multiply", promoter_dtypes, promoter); + if (res < 0) { + Py_DECREF(promoter); + return -1; + } + promoter_dtypes[0] = double_DType; + promoter_dtypes[1] = &PyArray_SFloatDType; + res = sfloat_add_loop("multiply", promoter_dtypes, promoter); + Py_DECREF(promoter); + if (res < 0) { return -1; } - NPY_DT_SLOTS(&PyArray_SFloatDType)->argsort_meth = argsort_meth->method; - Py_INCREF(argsort_meth->method); - Py_DECREF(argsort_meth); return 0; } + /* * Python entry point, exported via `umathmodule.h` and `multiarraymodule.c`. * TODO: Should be moved when the necessary API is not internal anymore. @@ -1128,10 +1109,6 @@ get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) return NULL; } - if (sfloat_init_sort() < 0) { - return NULL; - } - npy_thread_unsafe_state.get_sfloat_dtype_initialized = NPY_TRUE; return (PyObject *)&PyArray_SFloatDType; } diff --git a/numpy/_core/src/umath/dispatching.cpp b/numpy/_core/src/umath/dispatching.cpp index ba98a9b5c5d1..c40ddd3b9f39 100644 --- a/numpy/_core/src/umath/dispatching.cpp +++ b/numpy/_core/src/umath/dispatching.cpp @@ -47,6 +47,7 @@ #include "numpy/ndarraytypes.h" #include "numpy/npy_3kcompat.h" +#include "npy_import.h" #include "common.h" #include "npy_pycompat.h" @@ -193,6 +194,83 @@ PyUFunc_AddLoopFromSpec_int(PyObject *ufunc, PyArrayMethod_Spec *spec, int priv) } +/*UFUNC_API + * Add multiple loops to ufuncs from ArrayMethod specs. This also + * handles the registration of sort and argsort methods for dtypes + * from ArrayMethod specs. 
+ */ +NPY_NO_EXPORT int +PyUFunc_AddLoopsFromSpecs(PyUFunc_LoopSlot *slots) +{ + static PyObject *sort = NULL; + static PyObject *argsort = NULL; + if (sort == NULL) { + sort = npy_import("numpy", "sort"); + if (sort == NULL) { + return -1; + } + } + if (argsort == NULL) { + argsort = npy_import("numpy", "argsort"); + if (argsort == NULL) { + return -1; + } + } + + PyUFunc_LoopSlot *slot; + for (slot = slots; slot->name != NULL; slot++) { + PyObject *ufunc = npy_import("numpy", slot->name); + if (ufunc == NULL) { + return -1; + } + + if (ufunc == sort) { + Py_DECREF(ufunc); + + PyArray_DTypeMeta *dtype = slot->spec->dtypes[0]; + PyBoundArrayMethodObject *sort_meth = PyArrayMethod_FromSpec_int(slot->spec, 0); + if (sort_meth == NULL) { + PyErr_Format(PyExc_TypeError, "Failed to create sort method for %R", dtype); + return -1; + } + + NPY_DT_SLOTS(dtype)->sort_meth = sort_meth->method; + Py_INCREF(sort_meth->method); + Py_DECREF(sort_meth); + } + else if (ufunc == argsort) { + Py_DECREF(ufunc); + + PyArray_DTypeMeta *dtype = slot->spec->dtypes[0]; + PyBoundArrayMethodObject *argsort_meth = PyArrayMethod_FromSpec_int(slot->spec, 0); + if (argsort_meth == NULL) { + PyErr_Format(PyExc_TypeError, "Failed to create argsort method for %R", dtype); + return -1; + } + + NPY_DT_SLOTS(dtype)->argsort_meth = argsort_meth->method; + Py_INCREF(argsort_meth->method); + Py_DECREF(argsort_meth); + } + else { + if (!PyObject_TypeCheck(ufunc, &PyUFunc_Type)) { + PyErr_Format(PyExc_TypeError, "%s was not a ufunc!", slot->name); + Py_DECREF(ufunc); + return -1; + } + + int ret = PyUFunc_AddLoopFromSpec_int(ufunc, slot->spec, 0); + Py_DECREF(ufunc); + if (ret < 0) { + return -1; + } + } + } + + return 0; +} + + /** * Resolves the implementation to use, this uses typical multiple dispatching * methods of finding the best matching implementation or resolver. diff --git a/numpy/_core/src/umath/dispatching.h b/numpy/_core/src/umath/dispatching.h index 95bcb32bf0ce..7ca8bd7a1598 100644 --- a/numpy/_core/src/umath/dispatching.h +++ b/numpy/_core/src/umath/dispatching.h @@ -16,6 +16,9 @@ PyUFunc_AddLoop(PyUFuncObject *ufunc, PyObject *info, int ignore_duplicate); NPY_NO_EXPORT int PyUFunc_AddLoopFromSpec_int(PyObject *ufunc, PyArrayMethod_Spec *spec, int priv); +NPY_NO_EXPORT int +PyUFunc_AddLoopsFromSpecs(PyUFunc_LoopSlot *slots); + NPY_NO_EXPORT PyArrayMethodObject * promote_and_get_ufuncimpl(PyUFuncObject *ufunc, PyArrayObject *const ops[], From 7bc78c2a3f98a357d8a5859066833f1fe8ec00ce Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sun, 12 Oct 2025 10:56:40 +0200 Subject: [PATCH 0577/1018] Go back to cached import for sort/argsort --- numpy/_core/src/common/npy_import.h | 10 ++++++++++ numpy/_core/src/umath/dispatching.cpp | 22 ++++++++-------------- 2 files changed, 18 insertions(+), 14 deletions(-) diff --git a/numpy/_core/src/common/npy_import.h b/numpy/_core/src/common/npy_import.h index 87944989e95d..0ec70a0f5142 100644 --- a/numpy/_core/src/common/npy_import.h +++ b/numpy/_core/src/common/npy_import.h @@ -6,6 +6,10 @@ #include "numpy/npy_common.h" #include "npy_atomic.h" +#ifdef __cplusplus +extern "C" { +#endif + /* * Cached references to objects obtained via an import. All of these are * can be initialized at any time by npy_cache_import_runtime. 
@@ -45,6 +49,8 @@ typedef struct npy_runtime_imports_struct { PyObject *_var; PyObject *_view_is_safe; PyObject *_void_scalar_to_string; + PyObject *sort; + PyObject *argsort; } npy_runtime_imports_struct; NPY_VISIBILITY_HIDDEN extern npy_runtime_imports_struct npy_runtime_imports; @@ -112,4 +118,8 @@ npy_cache_import_runtime(const char *module, const char *attr, PyObject **obj) { NPY_NO_EXPORT int init_import_mutex(void); +#ifdef __cplusplus +} +#endif + #endif /* NUMPY_CORE_SRC_COMMON_NPY_IMPORT_H_ */ diff --git a/numpy/_core/src/umath/dispatching.cpp b/numpy/_core/src/umath/dispatching.cpp index c40ddd3b9f39..7d8935d4bf40 100644 --- a/numpy/_core/src/umath/dispatching.cpp +++ b/numpy/_core/src/umath/dispatching.cpp @@ -202,19 +202,13 @@ PyUFunc_AddLoopFromSpec_int(PyObject *ufunc, PyArrayMethod_Spec *spec, int priv) NPY_NO_EXPORT int PyUFunc_AddLoopsFromSpecs(PyUFunc_LoopSlot *slots) { - static PyObject *sort = NULL; - static PyObject *argsort = NULL; - if (sort == NULL) { - sort = npy_import("numpy", "sort"); - if (sort == NULL) { - return -1; - } + if (npy_cache_import_runtime( + "numpy", "sort", &npy_runtime_imports.sort) < 0) { + return -1; } - if (argsort == NULL) { - argsort = npy_import("numpy", "argsort"); - if (argsort == NULL) { - return -1; - } + if (npy_cache_import_runtime( + "numpy", "argsort", &npy_runtime_imports.argsort) < 0) { + return -1; } PyUFunc_LoopSlot *slot; @@ -224,7 +218,7 @@ PyUFunc_AddLoopsFromSpecs(PyUFunc_LoopSlot *slots) return -1; } - if (ufunc == sort) { + if (ufunc == npy_runtime_imports.sort) { Py_DECREF(ufunc); PyArray_DTypeMeta *dtype = slot->spec->dtypes[0]; @@ -238,7 +232,7 @@ PyUFunc_AddLoopsFromSpecs(PyUFunc_LoopSlot *slots) Py_INCREF(sort_meth->method); Py_DECREF(sort_meth); } - else if (ufunc == argsort) { + else if (ufunc == npy_runtime_imports.argsort) { Py_DECREF(ufunc); PyArray_DTypeMeta *dtype = slot->spec->dtypes[0]; From b5ee0f0427c006ccbf139626308fd47232bb4000 Mon Sep 17 00:00:00 2001 From: Maanas Arora Date: Sun, 12 Oct 2025 08:20:13 -0400 Subject: [PATCH 0578/1018] DOC: Add release notes for #29900 --- doc/release/upcoming_changes/29900.c_api.rst | 5 +++++ doc/release/upcoming_changes/29900.new_feature.rst | 9 +++++++++ 2 files changed, 14 insertions(+) create mode 100644 doc/release/upcoming_changes/29900.c_api.rst create mode 100644 doc/release/upcoming_changes/29900.new_feature.rst diff --git a/doc/release/upcoming_changes/29900.c_api.rst b/doc/release/upcoming_changes/29900.c_api.rst new file mode 100644 index 000000000000..b29014ac95fc --- /dev/null +++ b/doc/release/upcoming_changes/29900.c_api.rst @@ -0,0 +1,5 @@ +A new `PyUFunc_AddLoopsFromSpecs` convenience function has been added to the C API. +----------------------------------------------------------------------------------- +This function allows adding multiple ufunc loops from their specs in one call using +a NULL-terminated array of `PyUFunc_LoopSlot` structs. It allows registering +sorting and argsorting loops using the new ArrayMethod API. \ No newline at end of file diff --git a/doc/release/upcoming_changes/29900.new_feature.rst b/doc/release/upcoming_changes/29900.new_feature.rst new file mode 100644 index 000000000000..1799b6043e29 --- /dev/null +++ b/doc/release/upcoming_changes/29900.new_feature.rst @@ -0,0 +1,9 @@ +DType sorting and argsorting supports the ArrayMethod API +--------------------------------------------------------- +User-defined dtypes can now implement custom sorting and argsorting using +the ArrayMethod API. 
This mechanism can be used in place of the `PyArray_ArrFuncs`
+slots, which may be deprecated in the future.
+
+The sorting and argsorting methods are registered by passing the ArrayMethod
+specs that implement the operations to the new `PyUFunc_AddLoopsFromSpecs` function.
+See the ArrayMethod API documentation for details.
\ No newline at end of file

From ba50a255e881fcf4b2794e696c29a866d0ec6b72 Mon Sep 17 00:00:00 2001
From: Marten Henric van Kerkwijk
Date: Sun, 12 Oct 2025 10:56:26 -0400
Subject: [PATCH 0579/1018] MAINT: Just pass on errors instead of raising.

---
 numpy/_core/src/umath/dispatching.cpp | 2 --
 1 file changed, 2 deletions(-)

diff --git a/numpy/_core/src/umath/dispatching.cpp b/numpy/_core/src/umath/dispatching.cpp
index 7d8935d4bf40..cb40793c4e7f 100644
--- a/numpy/_core/src/umath/dispatching.cpp
+++ b/numpy/_core/src/umath/dispatching.cpp
@@ -224,7 +224,6 @@ PyUFunc_AddLoopsFromSpecs(PyUFunc_LoopSlot *slots)
             PyArray_DTypeMeta *dtype = slot->spec->dtypes[0];
             PyBoundArrayMethodObject *sort_meth = PyArrayMethod_FromSpec_int(slot->spec, 0);
             if (sort_meth == NULL) {
-                PyErr_Format(PyExc_TypeError, "Failed to create sort method for %R", dtype);
                 return -1;
             }
 
@@ -238,7 +237,6 @@ PyUFunc_AddLoopsFromSpecs(PyUFunc_LoopSlot *slots)
             PyArray_DTypeMeta *dtype = slot->spec->dtypes[0];
             PyBoundArrayMethodObject *argsort_meth = PyArrayMethod_FromSpec_int(slot->spec, 0);
             if (argsort_meth == NULL) {
-                PyErr_Format(PyExc_TypeError, "Failed to create argsort method for %R", dtype);
                 return -1;
             }
 
From a2519b560f14b5f86030fb63b89a97cef591f302 Mon Sep 17 00:00:00 2001
From: Swayam
Date: Mon, 13 Oct 2025 17:09:58 +0530
Subject: [PATCH 0580/1018] DOC: Documentation related finfo refactors and new slot addition in #29836 (#29889)

This is a follow-up PR to the work done in gh-29836.

* The deprecated MachAr runtime discovery mechanism has been removed.
* np.finfo fetches the constants provided by the compiler macros
* new slot to fetch the dtype-related constants
---
 doc/release/upcoming_changes/29836.c_api.rst  |  15 +++
 .../upcoming_changes/29836.improvement.rst    |  26 ++++
 doc/source/reference/c-api/array.rst          | 115 ++++++++++++++++++
 doc/source/reference/c-api/dtype.rst          |   1 +
 numpy/_core/getlimits.py                      |  13 +-
 5 files changed, 168 insertions(+), 2 deletions(-)
 create mode 100644 doc/release/upcoming_changes/29836.c_api.rst
 create mode 100644 doc/release/upcoming_changes/29836.improvement.rst

diff --git a/doc/release/upcoming_changes/29836.c_api.rst b/doc/release/upcoming_changes/29836.c_api.rst
new file mode 100644
index 000000000000..9ac5478c742a
--- /dev/null
+++ b/doc/release/upcoming_changes/29836.c_api.rst
@@ -0,0 +1,15 @@
+New ``NPY_DT_get_constant`` slot for DType constant retrieval
+-------------------------------------------------------------
+A new slot ``NPY_DT_get_constant`` has been added to the DType API, allowing
+dtype implementations to provide constant values such as machine limits and
+special values. The slot function has the signature::
+
+    int get_constant(PyArray_Descr *descr, int constant_id, void *ptr)
+
+It returns 1 on success, 0 if the constant is not available, or -1 on error.
+The function is always called with the GIL held and may write to unaligned memory.
+
+Integer constants (marked with the ``1 << 16`` bit) return ``npy_intp`` values,
+while floating-point constants return values of the dtype's native type.
+
+Implementing this slot allows user DTypes to provide `numpy.finfo` values. 
\ No newline at end of file diff --git a/doc/release/upcoming_changes/29836.improvement.rst b/doc/release/upcoming_changes/29836.improvement.rst new file mode 100644 index 000000000000..0d7df429d125 --- /dev/null +++ b/doc/release/upcoming_changes/29836.improvement.rst @@ -0,0 +1,26 @@ +``numpy.finfo`` Refactor +------------------------ +The ``numpy.finfo`` class has been completely refactored to obtain floating-point +constants directly from C compiler macros rather than deriving them at runtime. +This provides better accuracy, platform compatibility and corrected +several attribute calculations: + +* Constants like ``eps``, ``min``, ``max``, ``smallest_normal``, and + ``smallest_subnormal`` now come directly from standard C macros (``FLT_EPSILON``, + ``DBL_MIN``, etc.), ensuring platform-correct values. + +* The deprecated ``MachAr`` runtime discovery mechanism has been removed. + +* Derived attributes have been corrected to match standard definitions: + ``machep`` and ``negep`` now use ``int(log2(eps))``; ``nexp`` accounts for + all exponent patterns; ``nmant`` excludes the implicit bit; and ``minexp`` + follows the C standard definition. + +* longdouble constants, Specifically ``smallest_normal`` now follows the + C standard definitions as per respecitive platform. + +* Special handling added for PowerPC's IBM double-double format. + +* New test suite added in ``test_finfo.py`` to validate all + ``finfo`` properties against expected machine arithmetic values for + float16, float32, and float64 types. \ No newline at end of file diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 605516097274..00a0a399d601 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -3594,6 +3594,121 @@ member of ``PyArrayDTypeMeta_Spec`` struct. force newly created arrays to have a newly created descriptor instance, no matter what input descriptor is provided by a user. +.. c:macro:: NPY_DT_get_constant + +.. c:type:: int (PyArrayDTypeMeta_GetConstant)( \ + PyArray_Descr *descr, int constant_id, void *out) + + If defined, allows the DType to expose constant values such as machine + limits, special values (infinity, NaN), and floating-point characteristics. + The *descr* is the descriptor instance, *constant_id* is one of the + ``NPY_CONSTANT_*`` macros, and *out* is a pointer to uninitialized memory + where the constant value should be written. The memory pointed to by *out* + may be unaligned and is uninitialized. + Returns 1 on success, 0 if the constant is not available, + or -1 with an error set. + + **Constant IDs**: + + The following constant IDs are defined for retrieving dtype-specific values: + + **Basic constants** (available for all numeric types): + + .. c:macro:: NPY_CONSTANT_zero + + The zero value for the dtype. + + .. c:macro:: NPY_CONSTANT_one + + The one value for the dtype. + + .. c:macro:: NPY_CONSTANT_minimum_finite + + The minimum finite value representable by the dtype. For floating-point types, + this is the most negative finite value (e.g., ``-FLT_MAX``). + + .. c:macro:: NPY_CONSTANT_maximum_finite + + The maximum finite value representable by the dtype. + + **Floating-point special values**: + + .. c:macro:: NPY_CONSTANT_inf + + Positive infinity (only for floating-point types). + + .. c:macro:: NPY_CONSTANT_ninf + + Negative infinity (only for floating-point types). + + .. c:macro:: NPY_CONSTANT_nan + + Not-a-Number (only for floating-point types). 
+ + **Floating-point characteristics** (values of the dtype's native type): + + .. c:macro:: NPY_CONSTANT_finfo_radix + + The radix (base) of the floating-point representation. This is 2 for all + floating-point types. + + .. c:macro:: NPY_CONSTANT_finfo_eps + + Machine epsilon: the difference between 1.0 and the next representable value + greater than 1.0. Corresponds to C macros like ``FLT_EPSILON``, ``DBL_EPSILON``. + + .. note:: + For long double in IBM double-double format (PowerPC), this is defined as + ``0x1p-105L`` (2^-105) based on the ~106 bits of mantissa precision. + + .. c:macro:: NPY_CONSTANT_finfo_epsneg + + The difference between 1.0 and the next representable value less than 1.0. + Typically ``eps / radix`` for binary floating-point types. + + .. c:macro:: NPY_CONSTANT_finfo_smallest_normal + + The smallest positive normalized floating-point number. Corresponds to C + macros like ``FLT_MIN``, ``DBL_MIN``. This is the smallest value with a + leading 1 bit in the mantissa. + + .. c:macro:: NPY_CONSTANT_finfo_smallest_subnormal + + The smallest positive subnormal (denormalized) floating-point number. + Corresponds to C macros like ``FLT_TRUE_MIN``, ``DBL_TRUE_MIN``. This is + the smallest representable positive value, with leading 0 bits in the mantissa. + + **Floating-point characteristics** (integer values, type ``npy_intp``): + + These constants return integer metadata about the floating-point representation. + They are marked with the ``1 << 16`` bit to indicate they return ``npy_intp`` + values rather than the dtype's native type. + + .. c:macro:: NPY_CONSTANT_finfo_nmant + + Number of mantissa bits (excluding the implicit leading bit). For example, + IEEE 754 binary64 (double) has 52 explicit mantissa bits, so this returns 52. + Corresponds to ``MANT_DIG - 1`` from C standard macros. + + .. c:macro:: NPY_CONSTANT_finfo_min_exp + + Minimum exponent value. This is the minimum negative integer such that the + radix raised to the power of one less than that integer is a normalized + floating-point number. Corresponds to ``MIN_EXP - 1`` from C standard macros + (e.g., ``FLT_MIN_EXP - 1``). + + .. c:macro:: NPY_CONSTANT_finfo_max_exp + + Maximum exponent value. This is the maximum positive integer such that the + radix raised to the power of one less than that integer is a representable + finite floating-point number. Corresponds to ``MAX_EXP`` from C standard + macros (e.g., ``FLT_MAX_EXP``). + + .. c:macro:: NPY_CONSTANT_finfo_decimal_digits + + The number of decimal digits of precision. Corresponds to ``DIG`` from C + standard macros (e.g., ``FLT_DIG``, ``DBL_DIG``). + PyArray_ArrFuncs slots ^^^^^^^^^^^^^^^^^^^^^^ diff --git a/doc/source/reference/c-api/dtype.rst b/doc/source/reference/c-api/dtype.rst index 43869d5b4c55..f6b2289ba18a 100644 --- a/doc/source/reference/c-api/dtype.rst +++ b/doc/source/reference/c-api/dtype.rst @@ -502,3 +502,4 @@ format specifier in printf and related commands. .. c:macro:: NPY_UINTP_FMT .. c:macro:: NPY_LONGDOUBLE_FMT + diff --git a/numpy/_core/getlimits.py b/numpy/_core/getlimits.py index b8b6f69cd5b7..3c03d81165fb 100644 --- a/numpy/_core/getlimits.py +++ b/numpy/_core/getlimits.py @@ -89,17 +89,20 @@ class finfo: The largest representable number. maxexp : int The smallest positive power of the base (2) that causes overflow. + Corresponds to the C standard MAX_EXP. min : floating point number of the appropriate type The smallest representable number, typically ``-max``. 
minexp : int The most negative power of the base (2) consistent with there - being no leading 0's in the mantissa. + being no leading 0's in the mantissa. Corresponds to the C + standard MIN_EXP - 1. negep : int The exponent that yields `epsneg`. nexp : int The number of bits in the exponent including its sign and bias. nmant : int - The number of bits in the mantissa. + The number of explicit bits in the mantissa (excluding the implicit + leading bit for normalized numbers). precision : int The approximate number of decimal digits to which this kind of float is precise. @@ -140,6 +143,12 @@ class finfo: fill the gap between 0 and ``smallest_normal``. However, subnormal numbers may have significantly reduced precision [2]_. + For ``longdouble``, the representation varies across platforms. On most + platforms it is IEEE 754 binary128 (quad precision) or binary64-extended + (80-bit extended precision). On PowerPC systems, it may use the IBM + double-double format (a pair of float64 values), which has special + characteristics for precision and range. + This function can also be used for complex data types as well. If used, the output will be the same as the corresponding real float type (e.g. numpy.finfo(numpy.csingle) is the same as numpy.finfo(numpy.single)). From a410b2c9aa2f930e8e852c9b1f1d4a9eed37968e Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 13 Oct 2025 17:09:29 +0200 Subject: [PATCH 0581/1018] CI: Run mypy on Python 3.14 --- .github/workflows/mypy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 8045d6bb3969..04db23a22ae3 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -46,7 +46,7 @@ jobs: fail-fast: false matrix: os_python: - - [macos-latest, '3.13'] + - [macos-latest, '3.14'] - [ubuntu-latest, '3.12'] - [windows-latest, '3.11'] steps: From 2be60cce10aff73552a8c4b51a9b452f4d1d007f Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 13 Oct 2025 17:11:41 +0200 Subject: [PATCH 0582/1018] CI: Additional mypy workflow ignore paths --- .github/workflows/mypy.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 04db23a22ae3..22130793f106 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -18,11 +18,14 @@ on: - main - maintenance/** paths-ignore: - - 'benchmarks/' - '.circlecl/' + - '.devcontainer/' + - 'benchmarks/' + - 'branding/' - 'docs/' - 'meson_cpu/' - 'tools/' + - 'vendored-meson/' workflow_dispatch: defaults: From 1718e1c2a20d09412e816857dae8316f04d4f108 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Oct 2025 17:27:08 +0000 Subject: [PATCH 0583/1018] MAINT: Bump astral-sh/setup-uv from 7.0.0 to 7.1.0 Bumps [astral-sh/setup-uv](https://github.com/astral-sh/setup-uv) from 7.0.0 to 7.1.0. - [Release notes](https://github.com/astral-sh/setup-uv/releases) - [Commits](https://github.com/astral-sh/setup-uv/compare/eb1897b8dc4b5d5bfe39a428a8f2304605e0983c...3259c6206f993105e3a61b142c2d97bf4b9ef83d) --- updated-dependencies: - dependency-name: astral-sh/setup-uv dependency-version: 7.1.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/mypy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 22130793f106..b64cd8becd8c 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -58,7 +58,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@eb1897b8dc4b5d5bfe39a428a8f2304605e0983c # v7.0.0 + - uses: astral-sh/setup-uv@3259c6206f993105e3a61b142c2d97bf4b9ef83d # v7.1.0 with: python-version: ${{ matrix.os_python[1] }} activate-environment: true From a52661748694ef018845aefac689acbe71bdee75 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Oct 2025 17:27:14 +0000 Subject: [PATCH 0584/1018] MAINT: Bump pypa/cibuildwheel from 3.2.0 to 3.2.1 Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 3.2.0 to 3.2.1. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/7c619efba910c04005a835b110b057fc28fd6e93...9c00cb4f6b517705a3794b22395aedc36257242c) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-version: 3.2.1 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 5af18aa14107..e152870efd7d 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -30,7 +30,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@7c619efba910c04005a835b110b057fc28fd6e93 # v3.2.0 + - uses: pypa/cibuildwheel@9c00cb4f6b517705a3794b22395aedc36257242c # v3.2.1 env: CIBW_PLATFORM: pyodide CIBW_BUILD: cp312-* diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 5294c34cbb63..e53e4bbefc57 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -94,7 +94,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@7c619efba910c04005a835b110b057fc28fd6e93 # v3.2.0 + uses: pypa/cibuildwheel@9c00cb4f6b517705a3794b22395aedc36257242c # v3.2.1 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From 9ebaa26789905480e1220705fb3c3e4e880bc860 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Oct 2025 17:27:17 +0000 Subject: [PATCH 0585/1018] MAINT: Bump int128/hide-comment-action from 1.44.0 to 1.46.0 Bumps [int128/hide-comment-action](https://github.com/int128/hide-comment-action) from 1.44.0 to 1.46.0. - [Release notes](https://github.com/int128/hide-comment-action/releases) - [Commits](https://github.com/int128/hide-comment-action/compare/9803637eab610cca14ac6f64c42c0d7ffe9327e0...bddc4e774ea6f0b45b9621c3a689db72b9a3cec5) --- updated-dependencies: - dependency-name: int128/hide-comment-action dependency-version: 1.46.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot]
---
 .github/workflows/mypy_primer_comment.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/mypy_primer_comment.yml b/.github/workflows/mypy_primer_comment.yml
index 266d7a8f2074..95e487cd60b4 100644
--- a/.github/workflows/mypy_primer_comment.yml
+++ b/.github/workflows/mypy_primer_comment.yml
@@ -49,7 +49,7 @@ jobs:
           return parseInt(fs.readFileSync("pr_number.txt", { encoding: "utf8" }))

       - name: Hide old comments
-        uses: int128/hide-comment-action@9803637eab610cca14ac6f64c42c0d7ffe9327e0 # v1.44.0
+        uses: int128/hide-comment-action@bddc4e774ea6f0b45b9621c3a689db72b9a3cec5 # v1.46.0
         with:
           token: ${{ secrets.GITHUB_TOKEN }}
           issue-number: ${{ steps.get-pr-number.outputs.result }}

From 27bcb23d80ede3d6d4f4cc4b9d8a3d5dec3e600c Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 13 Oct 2025 17:27:21 +0000
Subject: [PATCH 0586/1018] MAINT: Bump actions/dependency-review-action from
 4.8.0 to 4.8.1

Bumps [actions/dependency-review-action](https://github.com/actions/dependency-review-action) from 4.8.0 to 4.8.1.
- [Release notes](https://github.com/actions/dependency-review-action/releases)
- [Commits](https://github.com/actions/dependency-review-action/compare/56339e523c0409420f6c2c9a2f4292bbb3c07dd3...40c09b7dc99638e5ddb0bfd91c1673effc064d8a)

---
updated-dependencies:
- dependency-name: actions/dependency-review-action
  dependency-version: 4.8.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
---
 .github/workflows/dependency-review.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml
index a592a969eb94..797918b2ceff 100644
--- a/.github/workflows/dependency-review.yml
+++ b/.github/workflows/dependency-review.yml
@@ -19,6 +19,6 @@ jobs:
         with:
           persist-credentials: false
       - name: 'Dependency Review'
-        uses: actions/dependency-review-action@56339e523c0409420f6c2c9a2f4292bbb3c07dd3 # v4.8.0
+        uses: actions/dependency-review-action@40c09b7dc99638e5ddb0bfd91c1673effc064d8a # v4.8.1
         with:
           allow-ghsas: GHSA-cx63-2mw6-8hw5

From b78f89101f0878dd0af18ccab17e5a22b0d2fb4e Mon Sep 17 00:00:00 2001
From: Marten van Kerkwijk
Date: Mon, 13 Oct 2025 14:42:44 -0400
Subject: [PATCH 0587/1018] ENH: In spec registration, allow looking up ufuncs
 in any module. (#29931)

This allows loops from specs to be added also for ufuncs with more
complicated names, such as numpy:strings.str_len.

---
 doc/source/reference/c-api/array.rst | 5 ++-
 numpy/_core/meson.build | 1 +
 numpy/_core/src/common/npy_import.c | 41 +++++++++++++++++++
 numpy/_core/src/common/npy_import.h | 11 ++++-
 .../src/multiarray/_multiarray_tests.c.src | 13 ++++++
 numpy/_core/src/umath/_scaled_float_dtype.c | 9 ++--
 numpy/_core/src/umath/dispatching.cpp | 2 +-
 numpy/_core/tests/test_multiarray.py | 32 +++++++++++++++
 8 files changed, 107 insertions(+), 7 deletions(-)

diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst
index 00a0a399d601..7a1bace2c63e 100644
--- a/doc/source/reference/c-api/array.rst
+++ b/doc/source/reference/c-api/array.rst
@@ -1895,7 +1895,10 @@ with the rest of the ArrayMethod API.

 .. c:member:: const char *name

-        The name of the ufunc to add the loop to.
+        The name of the ufunc to add the loop to, given in a form like that
+        of entry points, ``(module ':')? (object '.')* name``, with ``numpy``
+        as the default module.
Examples: ``sin``, ``strings.str_len``, + ``numpy.strings:str_len``. .. c:member:: PyArrayMethod_Spec *spec diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 35a792c29909..055d93e942f6 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -759,6 +759,7 @@ py.extension_module('_multiarray_tests', 'src/common/mem_overlap.c', 'src/common/npy_argparse.c', 'src/common/npy_hashtable.cpp', + 'src/common/npy_import.c', src_file.process('src/common/templ_common.h.src') ], c_args: c_args_common, diff --git a/numpy/_core/src/common/npy_import.c b/numpy/_core/src/common/npy_import.c index cff071e9b522..a0308ff3e4c7 100644 --- a/numpy/_core/src/common/npy_import.c +++ b/numpy/_core/src/common/npy_import.c @@ -19,3 +19,44 @@ init_import_mutex(void) { #endif return 0; } + + +/*! \brief Import a Python object from an entry point string. + + * The name should be of the form "(module ':')? (object '.')* attr". + * If no module is present, it is assumed to be "numpy". + * On error, returns NULL. + */ +NPY_NO_EXPORT PyObject* +npy_import_entry_point(const char *entry_point) { + PyObject *result; + const char *item; + + const char *colon = strchr(entry_point, ':'); + if (colon) { // there is a module. + result = PyUnicode_FromStringAndSize(entry_point, colon - entry_point); + if (result != NULL) { + Py_SETREF(result, PyImport_Import(result)); + } + item = colon + 1; + } + else { + result = PyImport_ImportModule("numpy"); + item = entry_point; + } + + const char *dot = item - 1; + while (result != NULL && dot != NULL) { + item = dot + 1; + dot = strchr(item, '.'); + PyObject *string = PyUnicode_FromStringAndSize( + item, dot ? dot - item : strlen(item)); + if (string == NULL) { + Py_DECREF(result); + return NULL; + } + Py_SETREF(result, PyObject_GetAttr(result, string)); + Py_DECREF(string); + } + return result; +} diff --git a/numpy/_core/src/common/npy_import.h b/numpy/_core/src/common/npy_import.h index 0ec70a0f5142..d7fb8d20857d 100644 --- a/numpy/_core/src/common/npy_import.h +++ b/numpy/_core/src/common/npy_import.h @@ -112,12 +112,21 @@ npy_cache_import_runtime(const char *module, const char *attr, PyObject **obj) { #endif Py_DECREF(value); } - return 0; + return 0; } NPY_NO_EXPORT int init_import_mutex(void); +/*! \brief Import a Python object from an entry point string. + + * The name should be of the form "(module ':')? (object '.')* attr". + * If no module is present, it is assumed to be "numpy". + * On error, returns NULL. 
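+ *
+ * For example (illustrative; mirroring the tests added below): "sin"
+ * resolves to numpy.sin, while "numpy.strings:str_len" imports the
+ * module numpy.strings and returns its str_len attribute.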
+ */ +NPY_NO_EXPORT PyObject* +npy_import_entry_point(const char *entry_point); + #ifdef __cplusplus } #endif diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src index 8b0c4b3f85d1..b79908e1d5e4 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -7,6 +7,7 @@ #include "numpy/arrayscalars.h" #include "numpy/npy_math.h" #include "numpy/halffloat.h" +#include "npy_import.h" #include "common.h" #include "npy_argparse.h" #include "mem_overlap.h" @@ -2238,6 +2239,15 @@ run_scalar_intp_from_sequence(PyObject *NPY_UNUSED(self), PyObject *obj) return PyArray_IntTupleFromIntp(1, vals); } +static PyObject * +_npy_import_entry_point(PyObject *NPY_UNUSED(self), PyObject *obj) { + PyObject *res = PyUnicode_AsASCIIString(obj); + if (res != NULL) { + Py_SETREF(res, npy_import_entry_point(PyBytes_AS_STRING(res))); + } + return res; +} + static PyMethodDef Multiarray_TestsMethods[] = { {"argparse_example_function", (PyCFunction)argparse_example_function, @@ -2422,6 +2432,9 @@ static PyMethodDef Multiarray_TestsMethods[] = { {"run_intp_converter", run_intp_converter, METH_VARARGS, NULL}, + {"npy_import_entry_point", + _npy_import_entry_point, + METH_O, NULL}, {NULL, NULL, 0, NULL} /* Sentinel */ }; diff --git a/numpy/_core/src/umath/_scaled_float_dtype.c b/numpy/_core/src/umath/_scaled_float_dtype.c index 574891360219..da842cd8c55d 100644 --- a/numpy/_core/src/umath/_scaled_float_dtype.c +++ b/numpy/_core/src/umath/_scaled_float_dtype.c @@ -947,7 +947,7 @@ sfloat_argsort_resolve_descriptors( { assert(given_descrs[1] == NULL || given_descrs[1]->type_num == NPY_INTP); assert(PyArray_IsNativeByteOrder(given_descrs[0]->byteorder)); - + loop_descrs[0] = given_descrs[0]; Py_INCREF(loop_descrs[0]); loop_descrs[1] = PyArray_DescrFromType(NPY_INTP); @@ -1026,11 +1026,12 @@ sfloat_init_ufuncs(void) { argsort_spec.casting = NPY_NO_CASTING; argsort_spec.flags = NPY_METH_NO_FLOATINGPOINT_ERRORS; + /* here we chose weirdish names to test the lookup mechanism */ PyUFunc_LoopSlot loops[] = { {"multiply", &multiply_spec}, - {"add", &add_spec}, - {"sort", &sort_spec}, - {"argsort", &argsort_spec}, + {"_core._multiarray_umath.add", &add_spec}, + {"numpy:sort", &sort_spec}, + {"numpy._core.fromnumeric:argsort", &argsort_spec}, {NULL, NULL} }; if (PyUFunc_AddLoopsFromSpecs(loops) < 0) { diff --git a/numpy/_core/src/umath/dispatching.cpp b/numpy/_core/src/umath/dispatching.cpp index cb40793c4e7f..3ce6624bbf4a 100644 --- a/numpy/_core/src/umath/dispatching.cpp +++ b/numpy/_core/src/umath/dispatching.cpp @@ -213,7 +213,7 @@ PyUFunc_AddLoopsFromSpecs(PyUFunc_LoopSlot *slots) PyUFunc_LoopSlot *slot; for (slot = slots; slot->name != NULL; slot++) { - PyObject *ufunc = npy_import("numpy", slot->name); + PyObject *ufunc = npy_import_entry_point(slot->name); if (ufunc == NULL) { return -1; } diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index bf339c07f068..a2ad97c79de8 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -3,6 +3,7 @@ import ctypes import functools import gc +import importlib import io import itertools import mmap @@ -4475,6 +4476,37 @@ def test_intp_sequence_converters_errors(self, converter): # These converters currently convert overflows to a ValueError converter(2**64) + @pytest.mark.parametrize( + "entry_point", + [ + module + item + for item in ("sin", "strings.str_len", 
"fft._pocketfft_umath.ifft") + for module in ("", "numpy:") + ] + [ + "numpy.strings:str_len", + "functools:reduce", + "functools:reduce.__doc__" + ] + ) + def test_import_entry_point(self, entry_point): + modname, _, items = entry_point.rpartition(":") + if modname: + module = obj = importlib.import_module(modname) + else: + module = np + exp = functools.reduce(getattr, items.split("."), module) + got = _multiarray_tests.npy_import_entry_point(entry_point) + assert got == exp + + @pytest.mark.parametrize( + "entry_point", + ["sin.", "numpy:", "numpy:sin:__call__", "numpy.sin:__call__."] + ) + def test_import_entry_point_errors(self, entry_point): + # Don't really care about precise error. + with pytest.raises((ImportError, AttributeError)): + _multiarray_tests.npy_import_entry_point(entry_point) + class TestSubscripting: def test_test_zero_rank(self): From 01ba640336521ed28093b62b7d1e830a3a71ad8e Mon Sep 17 00:00:00 2001 From: Alverok <143798318+Alverok@users.noreply.github.com> Date: Tue, 14 Oct 2025 03:02:19 +0530 Subject: [PATCH 0588/1018] DOC: add note on meson buildtype for debug builds [skip azp][skip actions][skip cirrus] (#29883) --- .../dev/development_advanced_debugging.rst | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/doc/source/dev/development_advanced_debugging.rst b/doc/source/dev/development_advanced_debugging.rst index 07c80314da1b..594f88dcba17 100644 --- a/doc/source/dev/development_advanced_debugging.rst +++ b/doc/source/dev/development_advanced_debugging.rst @@ -244,6 +244,25 @@ The ``spin`` `development workflow tool `_. has built-in support for working with both ``gdb`` and ``lldb`` via the ``spin gdb`` and ``spin lldb`` commands. +.. note:: + + Building with ``-Dbuildtype=debug`` has a couple of important effects to + be aware of: + + * **Assertions are enabled**: This build type does not define the ``NDEBUG`` + macro, which means that any C-level assertions in the code will be + active. This is very useful for debugging, as it can help pinpoint + where an unexpected condition occurs. + + * **Compiler flags may need overriding**: Some compiler toolchains, + particularly those from ``conda-forge``, may set optimization flags + like ``-O2`` by default. These can override the ``debug`` build type. + To ensure a true debug build in such environments, you may need to + manually unset or override this flag. + + For more details on both points, see the `meson-python guide on + debug builds `_. + For both debuggers, it's advisable to build NumPy in either the ``debug`` or ``debugoptimized`` meson build profile. To use ``debug`` you can pass the option via ``spin build``: From d470611b6b112da659fa843af8a68d05525dddd1 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 14 Oct 2025 10:35:01 +0200 Subject: [PATCH 0589/1018] MAINT: Simplify string arena growth strategy (#29885) Not sure if there is any downsides to this, but I think this is much simpler and pretty much identical. Plus, I don't quite like that the 1.25 factor could be large, although I admit it seems very unlikely (maybe impossible) to actually trigger a problem there. 
Signed-off-by: Sebastian Berg --- .../multiarray/stringdtype/static_string.c | 28 ++++++------------- 1 file changed, 9 insertions(+), 19 deletions(-) diff --git a/numpy/_core/src/multiarray/stringdtype/static_string.c b/numpy/_core/src/multiarray/stringdtype/static_string.c index f5c2025e183a..c437fab2d336 100644 --- a/numpy/_core/src/multiarray/stringdtype/static_string.c +++ b/numpy/_core/src/multiarray/stringdtype/static_string.c @@ -155,8 +155,6 @@ vstring_buffer(npy_string_arena *arena, _npy_static_string_u *string) return (char *)((size_t)arena->buffer + string->vstring.offset); } -#define ARENA_EXPAND_FACTOR 1.25 - static char * arena_malloc(npy_string_arena *arena, npy_string_realloc_func r, size_t size) { @@ -168,25 +166,17 @@ arena_malloc(npy_string_arena *arena, npy_string_realloc_func r, size_t size) else { string_storage_size = size + sizeof(size_t); } - if ((arena->size - arena->cursor) <= string_storage_size) { - // realloc the buffer so there is enough room - // first guess is to double the size of the buffer - size_t newsize; - if (arena->size == 0) { - newsize = string_storage_size; - } - else if (((ARENA_EXPAND_FACTOR * arena->size) - arena->cursor) > - string_storage_size) { - newsize = (size_t)(ARENA_EXPAND_FACTOR * arena->size); + if ((arena->size - arena->cursor) < string_storage_size) { + size_t minsize = arena->cursor + string_storage_size; + if (minsize < arena->cursor) { + return NULL; // overflow means out of memory } - else { - newsize = arena->size + string_storage_size; - } - // If there enough room for both the payload and its header - if ((arena->cursor + string_storage_size) > newsize) { - // need extra room beyond the expansion factor, leave some padding - newsize = (size_t)(ARENA_EXPAND_FACTOR * (arena->cursor + string_storage_size)); + // Allocate 25% more than needed for this string. + size_t newsize = minsize + minsize / 4; + if (newsize < minsize) { + return NULL; // overflow means out of memory } + // passing a NULL buffer to realloc is the same as malloc char *newbuf = r(arena->buffer, newsize); if (newbuf == NULL) { From 3ef745ffa270bde3a0a7d68073bf802aceeff1be Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 14 Oct 2025 14:41:57 +0200 Subject: [PATCH 0590/1018] STY: rename ``@classmethod`` arg to ``cls`` --- numpy/_core/tests/test_stringdtype.py | 32 +++++++++++++-------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 7a253c981c4d..492894087aa9 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -1672,17 +1672,17 @@ class TestImplementation: """ @classmethod - def setup_class(self): - self.MISSING = 0x80 - self.INITIALIZED = 0x40 - self.OUTSIDE_ARENA = 0x20 - self.LONG = 0x10 - self.dtype = StringDType(na_object=np.nan) - self.sizeofstr = self.dtype.itemsize - sp = self.dtype.itemsize // 2 # pointer size = sizeof(size_t) + def setup_class(cls): + cls.MISSING = 0x80 + cls.INITIALIZED = 0x40 + cls.OUTSIDE_ARENA = 0x20 + cls.LONG = 0x10 + cls.dtype = StringDType(na_object=np.nan) + cls.sizeofstr = cls.dtype.itemsize + sp = cls.dtype.itemsize // 2 # pointer size = sizeof(size_t) # Below, size is not strictly correct, since it really uses # 7 (or 3) bytes, but good enough for the tests here. 
-        self.view_dtype = np.dtype([
+        cls.view_dtype = np.dtype([
             ('offset', f'u{sp}'),
             ('size', f'u{sp // 2}'),
             ('xsiz', f'V{sp // 2 - 1}'),
@@ -1693,13 +1693,13 @@ def setup_class(self):
             ('size', f'u{sp // 2}'),
             ('offset', f'u{sp}'),
         ])
-        self.s_empty = ""
-        self.s_short = "01234"
-        self.s_medium = "abcdefghijklmnopqrstuvwxyz"
-        self.s_long = "-=+" * 100
-        self.a = np.array(
-            [self.s_empty, self.s_short, self.s_medium, self.s_long],
-            self.dtype)
+        cls.s_empty = ""
+        cls.s_short = "01234"
+        cls.s_medium = "abcdefghijklmnopqrstuvwxyz"
+        cls.s_long = "-=+" * 100
+        cls.a = np.array(
+            [cls.s_empty, cls.s_short, cls.s_medium, cls.s_long],
+            cls.dtype)

     def get_view(self, a):
         # Cannot view a StringDType as anything else directly, since

From c0d939e7c4977a3c3bac2eb0b86d297f38b3d3b2 Mon Sep 17 00:00:00 2001
From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com>
Date: Tue, 14 Oct 2025 13:18:32 -0400
Subject: [PATCH 0591/1018] TST: Make temporary file usage thread safe (#29858)

* TST: Make temporary file usage thread safe
* TST: Refactor TestIO to still use fixture
* STY: Remove extra changes?
* TST: Fix pytest-run-parallel import issue
* TST: Refactor most of fixture into method
* TST: Remove unneeded changes to conftest
* STY: Remove whitespace changes
* TST: Rename and move tmp_filename function

---
 numpy/_core/tests/test_multiarray.py | 137 +++++++++------
 numpy/lib/tests/test__datasource.py | 246 ++++++++++++---------------
 2 files changed, 198 insertions(+), 185 deletions(-)

diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py
index a2ad97c79de8..12ea10e5b568 100644
--- a/numpy/_core/tests/test_multiarray.py
+++ b/numpy/_core/tests/test_multiarray.py
@@ -5613,6 +5613,16 @@ def test_invalid_axis(self):
         # gh-7528
         x = np.linspace(0., 1., 42 * 3).reshape(42, 3)
         assert_raises(AxisError, np.lexsort, x, axis=2)
+
+def normalize_filename(tmp_path, param):
+    # Handles the two cases where the filename should
+    # be a string or a path object.
+    path = tmp_path / "file"
+    if param == "string":
+        return str(path)
+    return path
+
+
 class TestIO:
     """Test tofile, fromfile, tobytes, and fromstring"""

@@ -5624,14 +5634,11 @@ def _create_data(self):
         return x

     @pytest.fixture(params=["string", "path_obj"])
-    def tmp_filename(self, tmp_path, request):
-        # This fixture covers two cases:
-        # one where the filename is a string and
-        # another where it is a pathlib object
-        filename = tmp_path / "file"
-        if request.param == "string":
-            filename = str(filename)
-        yield filename
+    def param_filename(self, request):
+        # This fixture returns "string" or "path_obj"
+        # so that every test doesn't need to have the
+        # parametrize marker.
+ return request.param def test_nofile(self): # this should probably be supported as a file @@ -5662,19 +5669,22 @@ def test_fromstring_count0(self): d = np.fromstring("1,2", sep=",", dtype=np.int64, count=0) assert d.shape == (0,) - def test_empty_files_text(self, tmp_filename): + def test_empty_files_text(self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) with open(tmp_filename, 'w') as f: pass y = np.fromfile(tmp_filename) assert_(y.size == 0, "Array not empty") - def test_empty_files_binary(self, tmp_filename): + def test_empty_files_binary(self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) with open(tmp_filename, 'wb') as f: pass y = np.fromfile(tmp_filename, sep=" ") assert_(y.size == 0, "Array not empty") - def test_roundtrip_file(self, tmp_filename): + def test_roundtrip_file(self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) x = self._create_data() with open(tmp_filename, 'wb') as f: x.tofile(f) @@ -5683,13 +5693,15 @@ def test_roundtrip_file(self, tmp_filename): y = np.fromfile(f, dtype=x.dtype) assert_array_equal(y, x.flat) - def test_roundtrip(self, tmp_filename): + def test_roundtrip(self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) x = self._create_data() x.tofile(tmp_filename) y = np.fromfile(tmp_filename, dtype=x.dtype) assert_array_equal(y, x.flat) - def test_roundtrip_dump_pathlib(self, tmp_filename): + def test_roundtrip_dump_pathlib(self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) x = self._create_data() p = pathlib.Path(tmp_filename) x.dump(p) @@ -5722,8 +5734,9 @@ def test_roundtrip_repr(self): y = np.fromstring(s, sep="@") assert_array_equal(x, y) - def test_unseekable_fromfile(self, tmp_filename): + def test_unseekable_fromfile(self, tmp_path, param_filename): # gh-6246 + tmp_filename = normalize_filename(tmp_path, param_filename) x = self._create_data() x.tofile(tmp_filename) @@ -5735,16 +5748,18 @@ def fail(*args, **kwargs): f.tell = fail assert_raises(OSError, np.fromfile, f, dtype=x.dtype) - def test_io_open_unbuffered_fromfile(self, tmp_filename): + def test_io_open_unbuffered_fromfile(self, tmp_path, param_filename): # gh-6632 + tmp_filename = normalize_filename(tmp_path, param_filename) x = self._create_data() x.tofile(tmp_filename) with open(tmp_filename, 'rb', buffering=0) as f: y = np.fromfile(f, dtype=x.dtype) assert_array_equal(y, x.flat) - def test_largish_file(self, tmp_filename): + def test_largish_file(self, tmp_path, param_filename): # check the fallocate path on files > 16MB + tmp_filename = normalize_filename(tmp_path, param_filename) d = np.zeros(4 * 1024 ** 2) d.tofile(tmp_filename) assert_equal(os.path.getsize(tmp_filename), d.nbytes) @@ -5763,19 +5778,21 @@ def test_largish_file(self, tmp_filename): d.tofile(f) assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2) - def test_io_open_buffered_fromfile(self, tmp_filename): + def test_io_open_buffered_fromfile(self, tmp_path, param_filename): # gh-6632 + tmp_filename = normalize_filename(tmp_path, param_filename) x = self._create_data() x.tofile(tmp_filename) with open(tmp_filename, 'rb', buffering=-1) as f: y = np.fromfile(f, dtype=x.dtype) assert_array_equal(y, x.flat) - def test_file_position_after_fromfile(self, tmp_filename): + def test_file_position_after_fromfile(self, tmp_path, param_filename): # gh-4118 sizes = [io.DEFAULT_BUFFER_SIZE // 8, io.DEFAULT_BUFFER_SIZE, 
io.DEFAULT_BUFFER_SIZE * 8] + tmp_filename = normalize_filename(tmp_path, param_filename) for size in sizes: with open(tmp_filename, 'wb') as f: @@ -5791,11 +5808,12 @@ def test_file_position_after_fromfile(self, tmp_filename): pos = f.tell() assert_equal(pos, 10, err_msg=err_msg) - def test_file_position_after_tofile(self, tmp_filename): + def test_file_position_after_tofile(self, tmp_path, param_filename): # gh-4118 sizes = [io.DEFAULT_BUFFER_SIZE // 8, io.DEFAULT_BUFFER_SIZE, io.DEFAULT_BUFFER_SIZE * 8] + tmp_filename = normalize_filename(tmp_path, param_filename) for size in sizes: err_msg = "%d" % (size,) @@ -5816,8 +5834,9 @@ def test_file_position_after_tofile(self, tmp_filename): pos = f.tell() assert_equal(pos, 10, err_msg=err_msg) - def test_load_object_array_fromfile(self, tmp_filename): + def test_load_object_array_fromfile(self, tmp_path, param_filename): # gh-12300 + tmp_filename = normalize_filename(tmp_path, param_filename) with open(tmp_filename, 'w') as f: # Ensure we have a file with consistent contents pass @@ -5829,7 +5848,8 @@ def test_load_object_array_fromfile(self, tmp_filename): assert_raises_regex(ValueError, "Cannot read into object array", np.fromfile, tmp_filename, dtype=object) - def test_fromfile_offset(self, tmp_filename): + def test_fromfile_offset(self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) x = self._create_data() with open(tmp_filename, 'wb') as f: x.tofile(f) @@ -5865,23 +5885,21 @@ def test_fromfile_offset(self, tmp_filename): sep=",", offset=1) @pytest.mark.skipif(IS_PYPY, reason="bug in PyPy's PyNumber_AsSsize_t") - def test_fromfile_bad_dup(self, tmp_filename): + def test_fromfile_bad_dup(self, tmp_path, param_filename, monkeypatch): def dup_str(fd): return 'abc' def dup_bigint(fd): return 2**68 + tmp_filename = normalize_filename(tmp_path, param_filename) x = self._create_data() - old_dup = os.dup - try: - with open(tmp_filename, 'wb') as f: - x.tofile(f) - for dup, exc in ((dup_str, TypeError), (dup_bigint, OSError)): - os.dup = dup - assert_raises(exc, np.fromfile, f) - finally: - os.dup = old_dup + + with open(tmp_filename, 'wb') as f: + x.tofile(f) + for dup, exc in ((dup_str, TypeError), (dup_bigint, OSError)): + monkeypatch.setattr(os, "dup", dup) + assert_raises(exc, np.fromfile, f) def _check_from(self, s, value, filename, **kw): if 'sep' not in kw: @@ -5923,38 +5941,44 @@ def test_decimal_comma_separator(): else: assert False, request.param - def test_nan(self, tmp_filename, decimal_sep_localization): + def test_nan(self, tmp_path, param_filename, decimal_sep_localization): + tmp_filename = normalize_filename(tmp_path, param_filename) self._check_from( b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)", [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], tmp_filename, sep=' ') - def test_inf(self, tmp_filename, decimal_sep_localization): + def test_inf(self, tmp_path, param_filename, decimal_sep_localization): + tmp_filename = normalize_filename(tmp_path, param_filename) self._check_from( b"inf +inf -inf infinity -Infinity iNfInItY -inF", [np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf], tmp_filename, sep=' ') - def test_numbers(self, tmp_filename, decimal_sep_localization): + def test_numbers(self, tmp_path, param_filename, decimal_sep_localization): + tmp_filename = normalize_filename(tmp_path, param_filename) self._check_from( b"1.234 -1.234 .3 .3e55 -123133.1231e+133", [1.234, -1.234, .3, .3e55, -123133.1231e+133], tmp_filename, sep=' ') - def test_binary(self, 
tmp_filename): + def test_binary(self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) self._check_from( b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@', np.array([1, 2, 3, 4]), tmp_filename, dtype=' Date: Tue, 14 Oct 2025 19:52:07 +0200 Subject: [PATCH 0592/1018] TYP: remove non-existent ``version.__all__`` stub --- numpy/version.pyi | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/numpy/version.pyi b/numpy/version.pyi index b3284d7608b0..073885c017c2 100644 --- a/numpy/version.pyi +++ b/numpy/version.pyi @@ -1,18 +1,9 @@ from typing import Final, LiteralString -__all__ = ( - "__version__", - "full_version", - "git_revision", - "release", - "short_version", - "version", -) +version: Final[LiteralString] = ... +__version__: Final[LiteralString] = ... +full_version: Final[LiteralString] = ... -version: Final[LiteralString] -__version__: Final[LiteralString] -full_version: Final[LiteralString] - -git_revision: Final[LiteralString] -release: Final[bool] -short_version: Final[LiteralString] +git_revision: Final[LiteralString] = ... +release: Final[bool] = ... +short_version: Final[LiteralString] = ... From 71c1c627882a4fc650aabb745e0426fd7cdbe61d Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 14 Oct 2025 20:04:50 +0200 Subject: [PATCH 0593/1018] TYP: update ``__all__`` in ``testing`` --- numpy/testing/__init__.pyi | 4 ++++ numpy/testing/_private/utils.pyi | 6 ++++++ 2 files changed, 10 insertions(+) diff --git a/numpy/testing/__init__.pyi b/numpy/testing/__init__.pyi index ba3c9a2b7a44..6dc98930f6fd 100644 --- a/numpy/testing/__init__.pyi +++ b/numpy/testing/__init__.pyi @@ -2,8 +2,10 @@ from unittest import TestCase from . import overrides from ._private.utils import ( + BLAS_SUPPORTS_FPE, HAS_LAPACK64, HAS_REFCOUNT, + IS_64BIT, IS_EDITABLE, IS_INSTALLED, IS_MUSL, @@ -51,8 +53,10 @@ from ._private.utils import ( ) __all__ = [ + "BLAS_SUPPORTS_FPE", "HAS_LAPACK64", "HAS_REFCOUNT", + "IS_64BIT", "IS_EDITABLE", "IS_INSTALLED", "IS_MUSL", diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index 1b298132c8ef..670424f6a599 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -44,9 +44,13 @@ __all__ = [ # noqa: RUF022 "IS_PYPY", "IS_PYSTON", "IS_WASM", + "IS_INSTALLED", + "IS_64BIT", "HAS_LAPACK64", "HAS_REFCOUNT", + "BLAS_SUPPORTS_FPE", "NOGIL_BUILD", + "NUMPY_ROOT", "assert_", "assert_array_almost_equal_nulp", "assert_raises_regex", @@ -130,8 +134,10 @@ IS_MUSL: Final[bool] = ... IS_PYPY: Final[bool] = ... IS_PYSTON: Final[bool] = ... IS_WASM: Final[bool] = ... +IS_64BIT: Final[bool] = ... HAS_REFCOUNT: Final[bool] = ... HAS_LAPACK64: Final[bool] = ... +BLAS_SUPPORTS_FPE: Final[bool] = ... NOGIL_BUILD: Final[bool] = ... class KnownFailureException(Exception): ... From 88316b73e09e07b85fb22fbe80bf9dfc9b9d202c Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 14 Oct 2025 20:08:29 +0200 Subject: [PATCH 0594/1018] TYP: implicit re-exports in ``testing`` --- numpy/testing/__init__.pyi | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/testing/__init__.pyi b/numpy/testing/__init__.pyi index 6dc98930f6fd..684a7c36adec 100644 --- a/numpy/testing/__init__.pyi +++ b/numpy/testing/__init__.pyi @@ -1,6 +1,7 @@ from unittest import TestCase -from . import overrides +from . 
import _private as _private, overrides +from ._private import extbuild as extbuild from ._private.utils import ( BLAS_SUPPORTS_FPE, HAS_LAPACK64, From d28ba9b8e3ffb12e74ef8931ce8f7da480596df8 Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Tue, 14 Oct 2025 14:14:50 -0400 Subject: [PATCH 0595/1018] TST: Convert mixed_types_structured to method (#29942) --- numpy/lib/tests/test_loadtxt.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/numpy/lib/tests/test_loadtxt.py b/numpy/lib/tests/test_loadtxt.py index a2022a0d5175..7a4ed17e7f07 100644 --- a/numpy/lib/tests/test_loadtxt.py +++ b/numpy/lib/tests/test_loadtxt.py @@ -40,10 +40,9 @@ def test_comment_multiple_chars(comment): assert_equal(a, [[1.5, 2.5], [3.0, 4.0], [5.5, 6.0]]) -@pytest.fixture def mixed_types_structured(): """ - Fixture providing heterogeneous input data with a structured dtype, along + Function providing heterogeneous input data with a structured dtype, along with the associated structured array. """ data = StringIO( @@ -74,15 +73,14 @@ def mixed_types_structured(): @pytest.mark.parametrize('skiprows', [0, 1, 2, 3]) -def test_structured_dtype_and_skiprows_no_empty_lines( - skiprows, mixed_types_structured): - data, dtype, expected = mixed_types_structured +def test_structured_dtype_and_skiprows_no_empty_lines(skiprows): + data, dtype, expected = mixed_types_structured() a = np.loadtxt(data, dtype=dtype, delimiter=";", skiprows=skiprows) assert_array_equal(a, expected[skiprows:]) -def test_unpack_structured(mixed_types_structured): - data, dtype, expected = mixed_types_structured +def test_unpack_structured(): + data, dtype, expected = mixed_types_structured() a, b, c, d = np.loadtxt(data, dtype=dtype, delimiter=";", unpack=True) assert_array_equal(a, expected["f0"]) From e25b4829c2eede2fe9e779c7abf33ac74c7f7b00 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 14 Oct 2025 20:14:56 +0200 Subject: [PATCH 0596/1018] TYP: add missing ``__all__`` stubs in submodules of ``random`` --- numpy/random/_mt19937.pyi | 2 ++ numpy/random/_pcg64.pyi | 2 ++ numpy/random/_philox.pyi | 2 ++ numpy/random/_sfc64.pyi | 2 ++ numpy/random/mtrand.pyi | 56 +++++++++++++++++++++++++++++++++++++++ 5 files changed, 64 insertions(+) diff --git a/numpy/random/_mt19937.pyi b/numpy/random/_mt19937.pyi index 99e10677c3a2..c8ea2dccda02 100644 --- a/numpy/random/_mt19937.pyi +++ b/numpy/random/_mt19937.pyi @@ -5,6 +5,8 @@ from numpy._typing import _ArrayLikeInt_co from numpy.random.bit_generator import BitGenerator, SeedSequence from numpy.typing import NDArray +__all__ = ["MT19937"] + @type_check_only class _MT19937Internal(TypedDict): key: NDArray[uint32] diff --git a/numpy/random/_pcg64.pyi b/numpy/random/_pcg64.pyi index 0326781fd43a..6055c5d2921d 100644 --- a/numpy/random/_pcg64.pyi +++ b/numpy/random/_pcg64.pyi @@ -3,6 +3,8 @@ from typing import TypedDict, type_check_only from numpy._typing import _ArrayLikeInt_co from numpy.random.bit_generator import BitGenerator, SeedSequence +__all__ = ["PCG64"] + @type_check_only class _PCG64Internal(TypedDict): state: int diff --git a/numpy/random/_philox.pyi b/numpy/random/_philox.pyi index 353a0adb4861..ab1aee31eb02 100644 --- a/numpy/random/_philox.pyi +++ b/numpy/random/_philox.pyi @@ -5,6 +5,8 @@ from numpy._typing import _ArrayLikeInt_co from numpy.random.bit_generator import BitGenerator, SeedSequence from numpy.typing import NDArray +__all__ = ["Philox"] + @type_check_only class _PhiloxInternal(TypedDict): counter: 
NDArray[uint64] diff --git a/numpy/random/_sfc64.pyi b/numpy/random/_sfc64.pyi index a6f0d8445f25..1e563fdebfde 100644 --- a/numpy/random/_sfc64.pyi +++ b/numpy/random/_sfc64.pyi @@ -4,6 +4,8 @@ from numpy import uint64 from numpy._typing import NDArray, _ArrayLikeInt_co from numpy.random.bit_generator import BitGenerator, SeedSequence +__all__ = ["SFC64"] + @type_check_only class _SFC64Internal(TypedDict): state: NDArray[uint64] diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index 3221048a8af7..c20d35193d45 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -42,6 +42,62 @@ from numpy._typing import ( ) from numpy.random.bit_generator import BitGenerator +__all__ = [ + "RandomState", + "beta", + "binomial", + "bytes", + "chisquare", + "choice", + "dirichlet", + "exponential", + "f", + "gamma", + "geometric", + "get_bit_generator", + "get_state", + "gumbel", + "hypergeometric", + "laplace", + "logistic", + "lognormal", + "logseries", + "multinomial", + "multivariate_normal", + "negative_binomial", + "noncentral_chisquare", + "noncentral_f", + "normal", + "pareto", + "permutation", + "poisson", + "power", + "rand", + "randint", + "randn", + "random", + "random_integers", + "random_sample", + "ranf", + "rayleigh", + "sample", + "seed", + "set_bit_generator", + "set_state", + "shuffle", + "standard_cauchy", + "standard_exponential", + "standard_gamma", + "standard_normal", + "standard_t", + "triangular", + "uniform", + "vonmises", + "wald", + "weibull", + "zipf", +] + class RandomState: _bit_generator: BitGenerator def __init__(self, seed: _ArrayLikeInt_co | BitGenerator | None = ...) -> None: ... From af379dca868870412f7de8fce55e9a95d89f8d96 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 14 Oct 2025 20:35:57 +0200 Subject: [PATCH 0597/1018] TYP: add missing ``_core._asarray.__all__`` stub --- numpy/_core/_asarray.pyi | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/_core/_asarray.pyi b/numpy/_core/_asarray.pyi index 349933323b95..6bef69d8e4ea 100644 --- a/numpy/_core/_asarray.pyi +++ b/numpy/_core/_asarray.pyi @@ -3,6 +3,8 @@ from typing import Any, Literal, TypeAlias, TypeVar, overload from numpy._typing import DTypeLike, NDArray, _SupportsArrayFunc +__all__ = ["require"] + _ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) _Requirements: TypeAlias = Literal[ From 602176a00ccf228f2be325dac201fe57c944fc2e Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 14 Oct 2025 20:36:23 +0200 Subject: [PATCH 0598/1018] TYP: remove non-existent ``_core._type_aliases.__all__`` stub --- numpy/_core/_type_aliases.pyi | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/numpy/_core/_type_aliases.pyi b/numpy/_core/_type_aliases.pyi index 3d57e8135378..e28541cc8987 100644 --- a/numpy/_core/_type_aliases.pyi +++ b/numpy/_core/_type_aliases.pyi @@ -3,16 +3,6 @@ from typing import Final, Literal as L, TypeAlias, TypedDict, type_check_only import numpy as np -__all__ = ( - "_abstract_type_names", - "_aliases", - "_extra_aliases", - "allTypes", - "c_names_dict", - "sctypeDict", - "sctypes", -) - sctypeDict: Final[dict[str, type[np.generic]]] allTypes: Final[dict[str, type[np.generic]]] From bfb0f77f1126b97a6afb89c232d50f503077f569 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 14 Oct 2025 22:10:51 +0200 Subject: [PATCH 0599/1018] TYP: fix inconsistent ``float64.__getformat__`` stub (#29954) --- numpy/__init__.pyi | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 
c18bc91ff4ba..b90acfaf0c88 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -4921,9 +4921,11 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @property def imag(self) -> Self: ... def conjugate(self) -> Self: ... - def __getformat__(self, typestr: L["double", "float"], /) -> str: ... def __getnewargs__(self, /) -> tuple[float]: ... + @classmethod + def __getformat__(cls, typestr: L["double", "float"], /) -> str: ... # undocumented + # float64-specific operator overrides @overload def __add__(self, other: _Float64_co, /) -> float64: ... From 583ba3fb6b7b9f299319dbc3f5a5a4b935a634bd Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 14 Oct 2025 22:11:41 +0200 Subject: [PATCH 0600/1018] TYP: add ``__class_getitem__`` to ``bool`` and ``datetime64`` (#29952) --- numpy/__init__.pyi | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index b90acfaf0c88..fbdccf8a4d76 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3909,6 +3909,8 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __new__(cls, value: object, /) -> np.bool[builtins.bool]: ... + def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... + def __bool__(self, /) -> _BoolItemT_co: ... @overload @@ -5429,6 +5431,8 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __new__(cls, value: bytes | str | dt.date | None, format: _TimeUnitSpec = ..., /) -> Self: ... + def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... + @overload def __add__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ... @overload From f9592bf23bd6fa69ebe7d037bd31cf835d45f2c6 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 14 Oct 2025 22:51:53 +0200 Subject: [PATCH 0601/1018] TYP: stub ``numpy.ma.testutils`` --- numpy/ma/testutils.pyi | 69 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 numpy/ma/testutils.pyi diff --git a/numpy/ma/testutils.pyi b/numpy/ma/testutils.pyi new file mode 100644 index 000000000000..92b843b93a43 --- /dev/null +++ b/numpy/ma/testutils.pyi @@ -0,0 +1,69 @@ +import numpy as np +from numpy._typing import NDArray +from numpy.testing import ( + TestCase, + assert_, + assert_allclose, + assert_array_almost_equal_nulp, + assert_raises, +) +from numpy.testing._private.utils import _ComparisonFunc + +__all__ = [ + "TestCase", + "almost", + "approx", + "assert_", + "assert_allclose", + "assert_almost_equal", + "assert_array_almost_equal", + "assert_array_almost_equal_nulp", + "assert_array_approx_equal", + "assert_array_compare", + "assert_array_equal", + "assert_array_less", + "assert_close", + "assert_equal", + "assert_equal_records", + "assert_mask_equal", + "assert_not_equal", + "assert_raises", + "fail_if_array_equal", +] + +def approx( + a: object, b: object, fill_value: bool = True, rtol: float = 1e-5, atol: float = 1e-8 +) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ... +def almost(a: object, b: object, decimal: int = 6, fill_value: bool = True) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ... + +# +def assert_equal_records(a: NDArray[np.void], b: NDArray[np.void]) -> None: ... +def assert_equal(actual: object, desired: object, err_msg: str = "") -> None: ... +def fail_if_equal(actual: object, desired: object, err_msg: str = "") -> None: ... 
+def assert_almost_equal( + actual: object, desired: object, decimal: int = 7, err_msg: str = "", verbose: bool = True +) -> None: ... + +# +def assert_array_compare( + comparison: _ComparisonFunc, + x: object, + y: object, + err_msg: str = "", + verbose: bool = True, + header: str = "", + fill_value: bool = True, +) -> None: ... +def assert_array_equal(x: object, y: object, err_msg: str = "", verbose: bool = True) -> None: ... +def fail_if_array_equal(x: object, y: object, err_msg: str = "", verbose: bool = True) -> None: ... +def assert_array_approx_equal( + x: object, y: object, decimal: int = 6, err_msg: str = "", verbose: bool = True +) -> None: ... +def assert_array_almost_equal( + x: object, y: object, decimal: int = 6, err_msg: str = "", verbose: bool = True +) -> None: ... +def assert_array_less(x: object, y: object, err_msg: str = "", verbose: bool = True) -> None: ... +def assert_mask_equal(m1: object, m2: object, err_msg: str = "") -> None: ... + +assert_not_equal = fail_if_equal +assert_close = assert_almost_equal From d0eb6493cc23e59c52148832b97562de5060bd1a Mon Sep 17 00:00:00 2001 From: Jake VanderPlas Date: Tue, 14 Oct 2025 13:57:09 -0700 Subject: [PATCH 0602/1018] [doc] fix formatting in np.percentile docstring --- numpy/lib/_function_base_impl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index b8d6926bca88..4adb0c187395 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -4163,7 +4163,7 @@ def percentile(a, the result as dimensions with size one. With this option, the result will broadcast correctly against the original array `a`. - weights : array_like, optional + weights : array_like, optional An array of weights associated with the values in `a`. Each value in `a` contributes to the percentile according to its associated weight. The weights array can either be 1-D (in which case its length must be From 94b2986f55362b53fa9228235174ebac5decb283 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 14 Oct 2025 23:11:56 +0200 Subject: [PATCH 0603/1018] TYP: update the ``finfo`` stubs --- numpy/_core/getlimits.pyi | 44 ++++++++++++++++++++++++--------------- 1 file changed, 27 insertions(+), 17 deletions(-) diff --git a/numpy/_core/getlimits.pyi b/numpy/_core/getlimits.pyi index eb832f55bb7e..a22149ceb5c6 100644 --- a/numpy/_core/getlimits.pyi +++ b/numpy/_core/getlimits.pyi @@ -1,3 +1,4 @@ +from functools import cached_property from types import GenericAlias from typing import Final, Generic, Self, overload from typing_extensions import TypeVar @@ -72,28 +73,25 @@ class iinfo(Generic[_IntegerT_co]): def __class_getitem__(cls, item: object, /) -> GenericAlias: ... 
class finfo(Generic[_FloatingT_co]): - dtype: np.dtype[_FloatingT_co] - eps: _FloatingT_co - epsneg: _FloatingT_co - resolution: _FloatingT_co - smallest_subnormal: _FloatingT_co - max: _FloatingT_co - min: _FloatingT_co + dtype: np.dtype[_FloatingT_co] # readonly + eps: _FloatingT_co # readonly + _radix: _FloatingT_co # readonly + smallest_normal: _FloatingT_co # readonly + smallest_subnormal: _FloatingT_co # readonly + max: _FloatingT_co # readonly + min: _FloatingT_co # readonly + + _fmt: str | None # `__str__` cache + _repr: str | None # `__repr__` cache bits: Final[int] - iexp: Final[int] - machep: Final[int] maxexp: Final[int] minexp: Final[int] - negep: Final[int] - nexp: Final[int] nmant: Final[int] precision: Final[int] - @property - def smallest_normal(self, /) -> _FloatingT_co: ... - @property - def tiny(self, /) -> _FloatingT_co: ... + @classmethod + def __class_getitem__(cls, item: object, /) -> GenericAlias: ... # @overload @@ -110,5 +108,17 @@ class finfo(Generic[_FloatingT_co]): def __new__(cls, dtype: str) -> finfo: ... # - @classmethod - def __class_getitem__(cls, item: object, /) -> GenericAlias: ... + @cached_property + def epsneg(self, /) -> _FloatingT_co: ... + @cached_property + def resolution(self, /) -> _FloatingT_co: ... + @cached_property + def machep(self, /) -> int: ... + @cached_property + def negep(self, /) -> int: ... + @cached_property + def nexp(self, /) -> int: ... + @cached_property + def iexp(self, /) -> int: ... + @cached_property + def tiny(self, /) -> _FloatingT_co: ... From 46d892084f1f2ce4f6312ebbbab2d094cf216b16 Mon Sep 17 00:00:00 2001 From: Jake Vanderplas Date: Tue, 14 Oct 2025 14:15:55 -0700 Subject: [PATCH 0604/1018] TYP: fix return type annotation for ``normalize_axis_tuple`` utility (#29956) --- numpy/lib/_array_utils_impl.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_array_utils_impl.pyi b/numpy/lib/_array_utils_impl.pyi index db9ef852ba57..1b290829caf4 100644 --- a/numpy/lib/_array_utils_impl.pyi +++ b/numpy/lib/_array_utils_impl.pyi @@ -17,7 +17,7 @@ def normalize_axis_tuple( ndim: int, argname: str | None = None, allow_duplicate: bool | None = False, -) -> tuple[int, int]: ... +) -> tuple[int, ...]: ... 
def normalize_axis_index( axis: int = ..., From 68dfbd873d9dbcdec2417da75e8e433d2f9088cf Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 14 Oct 2025 23:54:07 +0200 Subject: [PATCH 0605/1018] MAINT: remove obsolete ``generic.tostring`` method descriptor and docstring --- numpy/_core/_add_newdocs.py | 3 --- numpy/_core/defchararray.py | 1 - numpy/_core/src/multiarray/scalartypes.c.src | 5 +---- 3 files changed, 1 insertion(+), 8 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 264bb8e25e7c..da9ea90eef34 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -6849,9 +6849,6 @@ def refer_to_array_attribute(attr, method=True): add_newdoc('numpy._core.numerictypes', 'generic', refer_to_array_attribute('tolist')) -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('tostring')) - add_newdoc('numpy._core.numerictypes', 'generic', refer_to_array_attribute('trace')) diff --git a/numpy/_core/defchararray.py b/numpy/_core/defchararray.py index 0378e976254d..098f4ab9ce9b 100644 --- a/numpy/_core/defchararray.py +++ b/numpy/_core/defchararray.py @@ -496,7 +496,6 @@ class adds the following functionality: title tofile tolist - tostring translate transpose upper diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index ad49de0c231e..337d8b9b48b2 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -2185,7 +2185,7 @@ gentype_byteswap(PyObject *self, PyObject *args, PyObject *kwds) * #name = take, getfield, put, repeat, tofile, mean, trace, diagonal, clip, * std, var, sum, cumsum, prod, cumprod, compress, sort, argsort, * round, argmax, argmin, max, min, any, all, astype, resize, - * reshape, choose, tostring, tobytes, copy, searchsorted, view, + * reshape, choose, tobytes, copy, searchsorted, view, * flatten, ravel, squeeze# */ static PyObject * @@ -2643,9 +2643,6 @@ static PyMethodDef gentype_methods[] = { {"tofile", (PyCFunction)gentype_tofile, METH_VARARGS | METH_KEYWORDS, NULL}, - {"tostring", - (PyCFunction)gentype_tostring, - METH_VARARGS | METH_KEYWORDS, NULL}, {"byteswap", (PyCFunction)gentype_byteswap, METH_VARARGS | METH_KEYWORDS, NULL}, From 2375e15d390669700725c362a3a0ea6db6fdc336 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 02:02:59 +0200 Subject: [PATCH 0606/1018] MAINT: Remove remnants of methods removed in numpy 2 --- numpy/_core/src/multiarray/getset.c | 40 ------------------- numpy/_core/src/multiarray/scalartypes.c.src | 36 ----------------- numpy/matrixlib/tests/test_defmatrix.py | 4 +- .../tests/data/reveal/ndarray_conversion.pyi | 1 - 4 files changed, 2 insertions(+), 79 deletions(-) diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c index 48da52dd3178..16876639897c 100644 --- a/numpy/_core/src/multiarray/getset.c +++ b/numpy/_core/src/multiarray/getset.c @@ -865,34 +865,6 @@ array_matrix_transpose_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) return PyArray_MatrixTranspose(self); } -static PyObject * -array_ptp(PyArrayObject *self, void *NPY_UNUSED(ignored)) -{ - PyErr_SetString(PyExc_AttributeError, - "`ptp` was removed from the ndarray class in NumPy 2.0. " - "Use np.ptp(arr, ...) instead."); - return NULL; -} - -static PyObject * -array_newbyteorder(PyArrayObject *self, PyObject *args) -{ - PyErr_SetString(PyExc_AttributeError, - "`newbyteorder` was removed from the ndarray class " - "in NumPy 2.0. 
" - "Use `arr.view(arr.dtype.newbyteorder(order))` instead."); - return NULL; -} - -static PyObject * -array_itemset(PyArrayObject *self, PyObject *args) -{ - PyErr_SetString(PyExc_AttributeError, - "`itemset` was removed from the ndarray class in " - "NumPy 2.0. Use `arr[index] = value` instead."); - return NULL; -} - NPY_NO_EXPORT PyGetSetDef array_getsetlist[] = { {"ndim", (getter)array_ndim_get, @@ -958,18 +930,6 @@ NPY_NO_EXPORT PyGetSetDef array_getsetlist[] = { (getter)array_matrix_transpose_get, NULL, NULL, NULL}, - {"ptp", - (getter)array_ptp, - NULL, - NULL, NULL}, - {"newbyteorder", - (getter)array_newbyteorder, - NULL, - NULL, NULL}, - {"itemset", - (getter)array_itemset, - NULL, - NULL, NULL}, {"device", (getter)array_device, NULL, diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index ad49de0c231e..af7895dbb549 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -1940,33 +1940,6 @@ gentype_transpose_get(PyObject *self, void *NPY_UNUSED(ignored)) return self; } -static PyObject * -gentype_newbyteorder(PyObject *NPY_UNUSED(self), void *NPY_UNUSED(ignored)) -{ - PyErr_SetString(PyExc_AttributeError, - "`newbyteorder` was removed from scalar types in NumPy 2.0. " - "Use `sc.view(sc.dtype.newbyteorder(order))` instead."); - return NULL; -} - -static PyObject * -gentype_itemset(PyObject *NPY_UNUSED(self), void *NPY_UNUSED(ignored)) -{ - PyErr_SetString(PyExc_AttributeError, - "`itemset` was removed from scalar types in NumPy 2.0 " - "because scalars are immutable."); - return NULL; -} - -static PyObject * -gentype_ptp(PyObject *NPY_UNUSED(self), void *NPY_UNUSED(ignored)) -{ - PyErr_SetString(PyExc_AttributeError, - "`ptp` was removed from scalar types in NumPy 2.0. 
" - "For a scalar, the range of values always equals 0."); - return NULL; -} - static PyGetSetDef gentype_getsets[] = { {"ndim", @@ -2011,15 +1984,6 @@ static PyGetSetDef gentype_getsets[] = { {"T", (getter)gentype_transpose_get, (setter)0, NULL, NULL}, - {"newbyteorder", - (getter)gentype_newbyteorder, - (setter)0, NULL, NULL}, - {"itemset", - (getter)gentype_itemset, - (setter)0, NULL, NULL}, - {"ptp", - (getter)gentype_ptp, - (setter)0, NULL, NULL}, {"device", (getter)array_device, (setter)0, NULL, NULL}, diff --git a/numpy/matrixlib/tests/test_defmatrix.py b/numpy/matrixlib/tests/test_defmatrix.py index ce23933ab7f7..a0e868f5fe2c 100644 --- a/numpy/matrixlib/tests/test_defmatrix.py +++ b/numpy/matrixlib/tests/test_defmatrix.py @@ -288,10 +288,10 @@ def test_instance_methods(self): 'argmin', 'choose', 'dump', 'dumps', 'fill', 'getfield', 'getA', 'getA1', 'item', 'nonzero', 'put', 'putmask', 'resize', 'searchsorted', 'setflags', 'setfield', 'sort', - 'partition', 'argpartition', 'newbyteorder', 'to_device', + 'partition', 'argpartition', 'to_device', 'take', 'tofile', 'tolist', 'tobytes', 'all', 'any', 'sum', 'argmax', 'argmin', 'min', 'max', 'mean', 'var', 'ptp', - 'prod', 'std', 'ctypes', 'itemset', 'bitwise_count', + 'prod', 'std', 'ctypes', 'bitwise_count', ] for attrib in dir(a): if attrib.startswith('_') or attrib in excluded_methods: diff --git a/numpy/typing/tests/data/reveal/ndarray_conversion.pyi b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi index 5cb9b98029dd..4f7d0c28b747 100644 --- a/numpy/typing/tests/data/reveal/ndarray_conversion.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi @@ -37,7 +37,6 @@ any_sctype: np.ndarray[Any, Any] assert_type(any_dtype.tolist(), Any) assert_type(any_sctype.tolist(), Any) -# itemset does not return a value # tobytes is pretty simple # tofile does not return a value # dump does not return a value From 7f91f841b44db9a9596c7655b4af2353e5e3c1b6 Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Tue, 14 Oct 2025 20:58:44 -0400 Subject: [PATCH 0607/1018] TST: Remove recwarn from test (#29957) --- numpy/lib/tests/test_nanfunctions.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py index 447b84db3edc..ef11ecfdf518 100644 --- a/numpy/lib/tests/test_nanfunctions.py +++ b/numpy/lib/tests/test_nanfunctions.py @@ -723,22 +723,22 @@ def test_ddof(self): res = nf(_ndat, axis=1, ddof=ddof) assert_almost_equal(res, tgt) - def test_ddof_too_big(self, recwarn): + def test_ddof_too_big(self): nanfuncs = [np.nanvar, np.nanstd] stdfuncs = [np.var, np.std] dsize = [len(d) for d in _rdat] for nf, rf in zip(nanfuncs, stdfuncs): for ddof in range(5): - with warnings.catch_warnings(): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') warnings.simplefilter('ignore', ComplexWarning) tgt = [ddof >= d for d in dsize] res = nf(_ndat, axis=1, ddof=ddof) assert_equal(np.isnan(res), tgt) if any(tgt): - assert_(len(recwarn) == 1) - recwarn.pop(RuntimeWarning) + assert_(len(w) == 1) else: - assert_(len(recwarn) == 0) + assert_(len(w) == 0) @pytest.mark.parametrize("axis", [None, 0, 1]) @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) From 12562d611e6f5b56e26b2267b5bf3a58500e72d3 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 04:24:06 +0200 Subject: [PATCH 0608/1018] TYP: add missing ``generic`` methods --- numpy/__init__.pyi | 84 
++++++++++++++++--- numpy/typing/tests/data/fail/ndarray_misc.pyi | 27 ++++-- 2 files changed, 93 insertions(+), 18 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index fbdccf8a4d76..78dddd5cf26a 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -6,7 +6,7 @@ import ctypes as ct import array as _array import datetime as dt from abc import abstractmethod -from types import EllipsisType, ModuleType, TracebackType, MappingProxyType, GenericAlias +from types import EllipsisType, GetSetDescriptorType, ModuleType, TracebackType, MappingProxyType, GenericAlias from decimal import Decimal from fractions import Fraction from uuid import UUID @@ -767,6 +767,7 @@ _RealArrayT = TypeVar("_RealArrayT", bound=NDArray[floating | integer | timedelt _NumericArrayT = TypeVar("_NumericArrayT", bound=NDArray[number | timedelta64 | object_]) _ShapeT = TypeVar("_ShapeT", bound=_Shape) +_Shape1T = TypeVar("_Shape1T", bound=tuple[int, *tuple[int, ...]]) _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) _1DShapeT = TypeVar("_1DShapeT", bound=_1D) _2DShapeT_co = TypeVar("_2DShapeT_co", bound=_2D, default=_2D, covariant=True) @@ -1687,6 +1688,15 @@ class _ArrayOrScalarCommon: def tolist(self) -> Any: ... def to_device(self, device: L["cpu"], /, *, stream: int | Any | None = ...) -> Self: ... + # NOTE: for `generic`, these two methods don't do anything + def fill(self, value: _ScalarLike_co, /) -> None: ... + def put(self, /, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... + + # NOTE: even on `generic` this seems to work + def setflags( + self, /, write: builtins.bool | None = None, align: builtins.bool | None = None, uic: builtins.bool | None = None + ) -> None: ... + @property def __array_interface__(self) -> dict[str, Any]: ... @property @@ -2168,7 +2178,6 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @deprecated("Setting the strides on a NumPy array has been deprecated in NumPy 2.4") def strides(self, value: _ShapeLike) -> None: ... def byteswap(self, inplace: builtins.bool = ...) -> Self: ... - def fill(self, value: Any, /) -> None: ... @property def flat(self) -> flatiter[Self]: ... @@ -2349,13 +2358,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def dot(self, b: ArrayLike, out: _ArrayT) -> _ArrayT: ... - # `nonzero()` is deprecated for 0d arrays/generics + # `nonzero()` raises for 0d arrays/generics def nonzero(self) -> tuple[ndarray[tuple[int], np.dtype[intp]], ...]: ... - # `put` is technically available to `generic`, - # but is pointless as `generic`s are immutable - def put(self, /, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... - @overload def searchsorted( self, # >= 1D array @@ -3574,13 +3579,47 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @abstractmethod def __new__(cls) -> Self: ... - def __hash__(self) -> int: ... + + if sys.version_info >= (3, 12): + def __buffer__(self, flags: int, /) -> memoryview: ... + @overload def __array__(self, dtype: None = None, /) -> ndarray[tuple[()], dtype[Self]]: ... @overload def __array__(self, dtype: _DTypeT, /) -> ndarray[tuple[()], _DTypeT]: ... - if sys.version_info >= (3, 12): - def __buffer__(self, flags: int, /) -> memoryview: ... 
+
+
+    @overload
+    def __array_wrap__(
+        self,
+        array: ndarray[_ShapeT, _DTypeT],
+        context: tuple[ufunc, tuple[object, ...], int] | None,
+        return_scalar: L[False],
+        /,
+    ) -> ndarray[_ShapeT, _DTypeT]: ...
+    @overload
+    def __array_wrap__(
+        self,
+        array: ndarray[tuple[()], dtype[_ScalarT]],
+        context: tuple[ufunc, tuple[object, ...], int] | None = None,
+        return_scalar: L[True] = True,
+        /,
+    ) -> _ScalarT: ...
+    @overload
+    def __array_wrap__(
+        self,
+        array: ndarray[_Shape1T, _DTypeT],
+        context: tuple[ufunc, tuple[object, ...], int] | None = None,
+        return_scalar: L[True] = True,
+        /,
+    ) -> ndarray[_Shape1T, _DTypeT]: ...
+    @overload
+    def __array_wrap__(
+        self,
+        array: ndarray[_ShapeT, dtype[_ScalarT]],
+        context: tuple[ufunc, tuple[object, ...], int] | None = None,
+        return_scalar: L[True] = True,
+        /,
+    ) -> _ScalarT | ndarray[_ShapeT, dtype[_ScalarT]]: ...

     @property
     def base(self) -> None: ...
@@ -3599,10 +3638,33 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]):
     def item(self, /) -> _ItemT_co: ...
     @overload
     def item(self, arg0: L[0, -1] | tuple[L[0, -1]] | tuple[()] = ..., /) -> _ItemT_co: ...
+
+    @override
     def tolist(self, /) -> _ItemT_co: ...
-    def byteswap(self, inplace: L[False] = ...) -> Self: ...

+    # NOTE: these technically exist, but will always raise when called
+    def trace(  # type: ignore[misc]
+        self: Never,
+        /,
+        offset: Never = ...,
+        axis1: Never = ...,
+        axis2: Never = ...,
+        dtype: Never = ...,
+        out: Never = ...,
+    ) -> Never: ...
+    def diagonal(self: Never, /, offset: Never = ..., axis1: Never = ..., axis2: Never = ...) -> Never: ...  # type: ignore[misc]
+    def swapaxes(self: Never, /, axis1: Never, axis2: Never) -> Never: ...  # type: ignore[misc]
+    def sort(self: Never, /, axis: Never = ..., kind: Never = ..., order: Never = ...) -> Never: ...  # type: ignore[misc]
+    def nonzero(self: Never, /) -> Never: ...  # type: ignore[misc]
+    def setfield(self: Never, /, val: Never, dtype: Never, offset: Never = ...) -> None: ...  # type: ignore[misc]
+    def searchsorted(self: Never, /, v: Never, side: Never = ..., sorter: Never = ...) -> Never: ...  # type: ignore[misc]
+
+    # NOTE: this won't raise, but won't do anything either
+    def resize(self, new_shape: L[0, -1] | tuple[L[0, -1]] | tuple[()], /, *, refcheck: builtins.bool = False) -> None: ...
+
+    #
+    def byteswap(self, /, inplace: L[False] = False) -> Self: ...

+    #
     @overload
     def astype(
         self,
diff --git a/numpy/typing/tests/data/fail/ndarray_misc.pyi b/numpy/typing/tests/data/fail/ndarray_misc.pyi
index 93e1bce8fecb..29418930061c 100644
--- a/numpy/typing/tests/data/fail/ndarray_misc.pyi
+++ b/numpy/typing/tests/data/fail/ndarray_misc.pyi
@@ -5,6 +5,7 @@ More extensive tests are performed for the methods' function-based counterpart
 in `../from_numeric.py`.

 """
+from typing import Never

 import numpy as np
 import numpy.typing as npt
@@ -17,14 +18,26 @@ AR_b: npt.NDArray[np.bool]
 ctypes_obj = AR_f8.ctypes

 f8.argpartition(0)  # type: ignore[attr-defined]
-f8.diagonal()  # type: ignore[attr-defined]
-f8.dot(1)  # type: ignore[attr-defined]
-f8.nonzero()  # type: ignore[attr-defined]
 f8.partition(0)  # type: ignore[attr-defined]
-f8.put(0, 2)  # type: ignore[attr-defined]
-f8.setfield(2, np.float64)  # type: ignore[attr-defined]
-f8.sort()  # type: ignore[attr-defined]
-f8.trace()  # type: ignore[attr-defined]
+f8.dot(1)  # type: ignore[attr-defined]
+
+# NOTE: The following functions return `Never`, causing mypy to stop analysis at that
+# point, which we circumvent by wrapping them in a function. 
+ +def f8_diagonal(x: np.float64) -> Never: + return x.diagonal() # type: ignore[misc] + +def f8_nonzero(x: np.float64) -> Never: + return x.nonzero() # type: ignore[misc] + +def f8_setfield(x: np.float64) -> Never: + return x.setfield(2, np.float64) # type: ignore[misc] + +def f8_sort(x: np.float64) -> Never: + return x.sort() # type: ignore[misc] + +def f8_trace(x: np.float64) -> Never: + return x.trace() # type: ignore[misc] AR_M.__complex__() # type: ignore[misc] AR_b.__index__() # type: ignore[misc] From a9a8d0685cc063f95a3d8f615b7806fc2cca2e79 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 04:38:03 +0200 Subject: [PATCH 0609/1018] TYP: mark ``flexible`` as ``@final`` --- numpy/__init__.pyi | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index fbdccf8a4d76..e637589f67df 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -5529,11 +5529,10 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __ge__(self, other: _SupportsGT, /) -> bool_: ... -class flexible(_RealMixin, generic[_FlexibleItemT_co], Generic[_FlexibleItemT_co]): - @abstractmethod - def __new__(cls) -> Self: ... +@final # cannot be subclassed at runtime +class flexible(_RealMixin, generic[_FlexibleItemT_co], Generic[_FlexibleItemT_co]): ... # type: ignore[misc] -class void(flexible[bytes | tuple[Any, ...]]): +class void(flexible[bytes | tuple[Any, ...]]): # type: ignore[misc] @overload def __new__(cls, value: _IntLike_co | bytes, /, dtype: None = None) -> Self: ... @overload @@ -5547,13 +5546,13 @@ class void(flexible[bytes | tuple[Any, ...]]): def setfield(self, val: ArrayLike, dtype: DTypeLike, offset: int = ...) -> None: ... -class character(flexible[_CharacterItemT_co], Generic[_CharacterItemT_co]): +class character(flexible[_CharacterItemT_co], Generic[_CharacterItemT_co]): # type: ignore[misc] @abstractmethod def __new__(cls) -> Self: ... # NOTE: Most `np.bytes_` / `np.str_` methods return their builtin `bytes` / `str` counterpart -class bytes_(character[bytes], bytes): +class bytes_(character[bytes], bytes): # type: ignore[misc] @overload def __new__(cls, o: object = ..., /) -> Self: ... @overload @@ -5562,7 +5561,7 @@ class bytes_(character[bytes], bytes): # def __bytes__(self, /) -> bytes: ... -class str_(character[str], str): +class str_(character[str], str): # type: ignore[misc] @overload def __new__(cls, value: object = ..., /) -> Self: ... @overload From b5de4719fadf799b563fed75f6e3b96c1a6386d2 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 04:42:47 +0200 Subject: [PATCH 0610/1018] TYP: revert narrowing the ``ndarray.fill`` input type --- numpy/__init__.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 78dddd5cf26a..9246a37441e3 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1689,7 +1689,7 @@ class _ArrayOrScalarCommon: def to_device(self, device: L["cpu"], /, *, stream: int | Any | None = ...) -> Self: ... # NOTE: for `generic`, these two methods don't do anything - def fill(self, value: _ScalarLike_co, /) -> None: ... + def fill(self, value: Incomplete, /) -> None: ... def put(self, /, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... 
# NOTE: even on `generic` this seems to work From 838e213c1c12b52ecc1e7e80b5714717a2dd6e6f Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 05:02:03 +0200 Subject: [PATCH 0611/1018] TYP: remove ``mod`` from ``ma.MaskedArray.__pow__`` --- numpy/ma/core.pyi | 68 ++++++++++++++++++++++------------------------- 1 file changed, 32 insertions(+), 36 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index c9c4ab152030..2e069164c3d0 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1102,73 +1102,69 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __rfloordiv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - # Keep in sync with `ndarray.__pow__` + # Keep in sync with `ndarray.__pow__` (minus the `mod` parameter) @overload - def __pow__(self: _MaskedArray[_NumberT], other: int | np.bool, mod: None = None, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + def __pow__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __pow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __pow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + def __pow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __pow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> _MaskedArray[float64]: ... + def __pow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload - def __pow__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], mod: None = None, /) -> _MaskedArray[float64]: ... + def __pow__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... @overload - def __pow__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, mod: None = None, /) -> _MaskedArray[complex128]: ... + def __pow__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... @overload - def __pow__( - self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / - ) -> _MaskedArray[complex128]: ... + def __pow__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... @overload - def __pow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __pow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> _MaskedArray[signedinteger]: ... 
# type: ignore[overload-overlap] + def __pow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __pow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> _MaskedArray[complexfloating]: ... + def __pow__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... @overload - def __pow__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, mod: None = None, /) -> _MaskedArray[number]: ... + def __pow__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... @overload - def __pow__(self: _MaskedArray[object_], other: Any, mod: None = None, /) -> Any: ... + def __pow__(self: _MaskedArray[object_], other: Any, /) -> Any: ... @overload - def __pow__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... + def __pow__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - # Keep in sync with `ndarray.__rpow__` + # Keep in sync with `ndarray.__rpow__` (minus the `mod` parameter) @overload - def __rpow__(self: _MaskedArray[_NumberT], other: int | np.bool, mod: None = None, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + def __rpow__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __rpow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rpow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> _MaskedArray[float64]: ... + def __rpow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload - def __rpow__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], mod: None = None, /) -> _MaskedArray[float64]: ... + def __rpow__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... @overload - def __rpow__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, mod: None = None, /) -> _MaskedArray[complex128]: ... + def __rpow__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... @overload - def __rpow__( - self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / - ) -> _MaskedArray[complex128]: ... 
+ def __rpow__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... @overload - def __rpow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rpow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rpow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __rpow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> _MaskedArray[complexfloating]: ... + def __rpow__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... @overload - def __rpow__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, mod: None = None, /) -> _MaskedArray[number]: ... + def __rpow__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... @overload - def __rpow__(self: _MaskedArray[object_], other: Any, mod: None = None, /) -> Any: ... + def __rpow__(self: _MaskedArray[object_], other: Any, /) -> Any: ... @overload - def __rpow__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... + def __rpow__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... # Keep in sync with `ndarray.__iadd__` @overload From 801ad71122502a97ab4be8c431257cc375a57acb Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 05:02:54 +0200 Subject: [PATCH 0612/1018] TYP: add ``mod`` parameter to ``number.__pow__`` --- numpy/__init__.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index fbdccf8a4d76..8cdd35154fdb 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3857,8 +3857,8 @@ class number(generic[_NumberItemT_co], Generic[_NBit, _NumberItemT_co]): def __rsub__(self, other: _NumberLike_co, /) -> Incomplete: ... def __mul__(self, other: _NumberLike_co, /) -> Incomplete: ... def __rmul__(self, other: _NumberLike_co, /) -> Incomplete: ... - def __pow__(self, other: _NumberLike_co, /) -> Incomplete: ... - def __rpow__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __pow__(self, other: _NumberLike_co, mod: None = None, /) -> Incomplete: ... + def __rpow__(self, other: _NumberLike_co, mod: None = None, /) -> Incomplete: ... def __truediv__(self, other: _NumberLike_co, /) -> Incomplete: ... def __rtruediv__(self, other: _NumberLike_co, /) -> Incomplete: ... 
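These ``__pow__`` signature changes pull in opposite directions for a reason: Python's ``pow()`` protocol reserves a third positional slot for the modulus, which NumPy scalars accept but only with ``None`` (modular exponentiation is unimplemented for them, see gh-8804), while the masked-array operators never expose that slot at runtime. A minimal sketch of the scalar behavior the new ``number.__pow__(other, mod=None)`` stub describes — the exact failure mode for a non-``None`` modulus is an assumption here, not verified output:

```python
import numpy as np

x = np.float64(2.0)

print(x ** 3)           # 8.0 -- ordinary binary power
print(pow(x, 3))        # 8.0 -- builtin two-argument form
print(pow(x, 3, None))  # 8.0 -- the third slot exists, but only None is accepted

# A non-None modulus is not supported for NumPy scalars and is
# expected to be rejected with a TypeError:
try:
    pow(x, 3, 2)
except TypeError as exc:
    print("rejected:", type(exc).__name__)
```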
From 89b4eb740d8f473207d88615d80ba43fb4782acf Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 05:03:56 +0200 Subject: [PATCH 0613/1018] TYP: remove ``mod`` parameter from ``matrix.__pow__`` --- numpy/__init__.pyi | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 8cdd35154fdb..7ce16be02f87 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -6013,9 +6013,9 @@ class matrix(ndarray[_2DShapeT_co, _DTypeT_co]): def __imul__(self, other: ArrayLike, /) -> Self: ... # - def __pow__(self, other: ArrayLike, mod: None = None, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - def __rpow__(self, other: ArrayLike, mod: None = None, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - def __ipow__(self, other: ArrayLike, /) -> Self: ... # type: ignore[misc, override] + def __pow__(self, other: ArrayLike, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __rpow__(self, other: ArrayLike, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __ipow__(self, other: ArrayLike, /) -> Self: ... # type: ignore[override] # keep in sync with `prod` and `mean` @overload # type: ignore[override] From b70eed2893a7e94a96ca7b1f430f60c5893fbfe3 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 05:24:12 +0200 Subject: [PATCH 0614/1018] TYP: improved ``busdaycalendar`` annotations --- numpy/__init__.pyi | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index fbdccf8a4d76..441748e53fd0 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -5764,15 +5764,16 @@ class broadcast: @final class busdaycalendar: - def __new__( - cls, - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - ) -> busdaycalendar: ... + def __init__( + self, + /, + weekmask: str | Sequence[int | bool_ | integer] | _SupportsArray[dtype[bool_ | integer]] = "1111100", + holidays: Sequence[dt.date | datetime64] | _SupportsArray[dtype[datetime64]] | None = None, + ) -> None: ... @property - def weekmask(self) -> NDArray[np.bool]: ... + def weekmask(self) -> ndarray[tuple[int], dtype[bool_]]: ... @property - def holidays(self) -> NDArray[datetime64]: ... + def holidays(self) -> ndarray[tuple[int], dtype[datetime64[dt.date]]]: ... 
@final class nditer: From 8898e580d7764ad1dd0ceebe76bc6c37090d21b6 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 05:34:13 +0200 Subject: [PATCH 0615/1018] TYP: missing ``vectorize`` default argument --- numpy/__init__.pyi | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index fbdccf8a4d76..4ce6e3e0108a 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -407,7 +407,7 @@ from numpy._core.shape_base import ( from ._expired_attrs_2_0 import __expired_attributes__ as __expired_attributes__ from ._globals import _CopyMode as _CopyMode -from ._globals import _NoValue as _NoValue +from ._globals import _NoValue as _NoValue, _NoValueType from numpy.lib import ( scimath as emath, @@ -5902,7 +5902,8 @@ class vectorize: __doc__: str | None def __init__( self, - pyfunc: Callable[..., Any], + /, + pyfunc: Callable[..., Any] | _NoValueType = ..., # = _NoValue otypes: str | Iterable[DTypeLike] | None = None, doc: str | None = None, excluded: Iterable[int | str] | None = None, From a68b24cc792ced91602a9a04a1194fb457c3e340 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 05:44:00 +0200 Subject: [PATCH 0616/1018] TYP: ``corrcoef``: fix parameter name --- numpy/lib/_function_base_impl.pyi | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 9c2e2b1e28b0..d0018f54b161 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -478,7 +478,7 @@ def cov( # NOTE `bias` and `ddof` are deprecated and ignored @overload def corrcoef( - m: _ArrayLikeFloat_co, + x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co | None = None, rowvar: bool = True, bias: _NoValueType = ..., @@ -488,7 +488,7 @@ def corrcoef( ) -> NDArray[floating]: ... @overload def corrcoef( - m: _ArrayLikeComplex_co, + x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co | None = None, rowvar: bool = True, bias: _NoValueType = ..., @@ -498,7 +498,7 @@ def corrcoef( ) -> NDArray[complexfloating]: ... @overload def corrcoef( - m: _ArrayLikeComplex_co, + x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co | None = None, rowvar: bool = True, bias: _NoValueType = ..., @@ -508,7 +508,7 @@ def corrcoef( ) -> NDArray[_ScalarT]: ... @overload def corrcoef( - m: _ArrayLikeComplex_co, + x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co | None = None, rowvar: bool = True, bias: _NoValueType = ..., From e64f193b3b44a060d4137017036b186c5188bb40 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 05:48:12 +0200 Subject: [PATCH 0617/1018] TYP: ``diff``: fix argument default type --- numpy/lib/_function_base_impl.pyi | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index d0018f54b161..6aa1eb24e00e 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -288,21 +288,21 @@ def gradient( ) -> Any: ... @overload -def diff( +def diff( # type: ignore[overload-overlap] a: _T, n: L[0], axis: SupportsIndex = -1, - prepend: ArrayLike = ..., - append: ArrayLike = ..., + prepend: ArrayLike | _NoValueType = ..., # = _NoValue + append: ArrayLike | _NoValueType = ..., # = _NoValue ) -> _T: ... @overload def diff( a: ArrayLike, n: int = 1, axis: SupportsIndex = -1, - prepend: ArrayLike = ..., - append: ArrayLike = ..., -) -> NDArray[Any]: ... 
+ prepend: ArrayLike | _NoValueType = ..., # = _NoValue + append: ArrayLike | _NoValueType = ..., # = _NoValue +) -> NDArray[Incomplete]: ... @overload # float scalar def interp( From 43d81d49a530f6fb04f5faedcae5ac4f21a7d27d Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 05:52:39 +0200 Subject: [PATCH 0618/1018] TYP: ``percentile`` and ``quantile``: missing (deprecated) ``interpolation`` kwarg --- numpy/lib/_function_base_impl.pyi | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 6aa1eb24e00e..26400295482c 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -627,6 +627,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> floating: ... @overload def percentile( @@ -639,6 +640,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> complexfloating: ... @overload def percentile( @@ -651,6 +653,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> timedelta64: ... @overload def percentile( @@ -663,6 +666,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> datetime64: ... @overload def percentile( @@ -675,6 +679,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> Any: ... @overload def percentile( @@ -687,6 +692,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> NDArray[floating]: ... @overload def percentile( @@ -699,6 +705,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> NDArray[complexfloating]: ... @overload def percentile( @@ -711,6 +718,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> NDArray[timedelta64]: ... @overload def percentile( @@ -723,6 +731,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> NDArray[datetime64]: ... @overload def percentile( @@ -735,6 +744,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> NDArray[object_]: ... @overload def percentile( @@ -747,6 +757,7 @@ def percentile( keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> Any: ... @overload def percentile( @@ -759,6 +770,7 @@ def percentile( keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> _ArrayT: ... @overload def percentile( @@ -771,6 +783,7 @@ def percentile( method: _MethodKind = "linear", keepdims: bool = False, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> _ArrayT: ... # NOTE: keep in sync with `percentile` @@ -785,6 +798,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> floating: ... 
@overload def quantile( @@ -797,6 +811,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> complexfloating: ... @overload def quantile( @@ -809,6 +824,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> timedelta64: ... @overload def quantile( @@ -821,6 +837,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> datetime64: ... @overload def quantile( @@ -833,6 +850,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> Any: ... @overload def quantile( @@ -845,6 +863,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> NDArray[floating]: ... @overload def quantile( @@ -857,6 +876,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> NDArray[complexfloating]: ... @overload def quantile( @@ -869,6 +889,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> NDArray[timedelta64]: ... @overload def quantile( @@ -881,6 +902,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> NDArray[datetime64]: ... @overload def quantile( @@ -893,6 +915,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> NDArray[object_]: ... @overload def quantile( @@ -905,6 +928,7 @@ def quantile( keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> Any: ... @overload def quantile( @@ -917,6 +941,7 @@ def quantile( keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> _ArrayT: ... @overload def quantile( @@ -929,6 +954,7 @@ def quantile( method: _MethodKind = "linear", keepdims: bool = False, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> _ArrayT: ... _ScalarT_fm = TypeVar( From 396219f9787aa388cbabff0d5f5623b19c410e67 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 05:54:11 +0200 Subject: [PATCH 0619/1018] TYP: ``piecewise``: fix pos-only parameters --- numpy/lib/_function_base_impl.pyi | 2 -- 1 file changed, 2 deletions(-) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 26400295482c..3f7457bff82b 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -231,7 +231,6 @@ def piecewise( Callable[Concatenate[NDArray[_ScalarT], _Pss], NDArray[_ScalarT | Any]] | _ScalarT | object ], - /, *args: _Pss.args, **kw: _Pss.kwargs, ) -> NDArray[_ScalarT]: ... @@ -243,7 +242,6 @@ def piecewise( Callable[Concatenate[NDArray[Any], _Pss], NDArray[Any]] | object ], - /, *args: _Pss.args, **kw: _Pss.kwargs, ) -> NDArray[Any]: ... 
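For context on the ``piecewise`` fix above: the runtime signature is ``piecewise(x, condlist, funclist, *args, **kw)``, so nothing before ``*args`` is positional-only, and any extra positional arguments are forwarded to every callable in ``funclist`` — exactly what the stray ``/`` in the stub would have mis-modeled. A small usage sketch with illustrative values only:

```python
import numpy as np

x = np.linspace(-2.0, 2.0, 5)  # [-2. -1.  0.  1.  2.]

# The trailing positional argument (3.0) is passed through to both lambdas.
y = np.piecewise(
    x,
    [x < 0, x >= 0],
    [lambda v, scale: -v * scale, lambda v, scale: v * scale],
    3.0,
)
print(y)  # [6. 3. 0. 3. 6.]
```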
From 9269f5ea030dfd514ac3caa9509da2fa3af4669a Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 05:56:21 +0200 Subject: [PATCH 0620/1018] TYP: ``trim_zeros``: missing ``axis`` parameter --- numpy/lib/_function_base_impl.pyi | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 3f7457bff82b..a20032e047c6 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -415,6 +415,7 @@ def sort_complex(a: ArrayLike) -> NDArray[complexfloating]: ... def trim_zeros( filt: _TrimZerosSequence[_T], trim: L["f", "b", "fb", "bf"] = "fb", + axis: _ShapeLike | None = None, ) -> _T: ... @overload From 5d1ca7c07c9a4b80b19bf4419f4fc99194fd01de Mon Sep 17 00:00:00 2001 From: Alverok Date: Mon, 13 Oct 2025 00:54:33 +0530 Subject: [PATCH 0621/1018] DOC: Completed and fixed PR #29578 [skip azp][skip actions][skip cirrus] Co-authored-by: olivier --- doc/source/reference/routines.polynomials.rst | 34 +++++++++++-------- numpy/polynomial/polynomial.py | 4 +++ 2 files changed, 24 insertions(+), 14 deletions(-) diff --git a/doc/source/reference/routines.polynomials.rst b/doc/source/reference/routines.polynomials.rst index 0763a1cf719a..00b4460eae21 100644 --- a/doc/source/reference/routines.polynomials.rst +++ b/doc/source/reference/routines.polynomials.rst @@ -47,23 +47,28 @@ The `~numpy.polynomial.polynomial.Polynomial` class is imported for brevity:: from numpy.polynomial import Polynomial -+------------------------+------------------------------+---------------------------------------+ -| **How to...** | Legacy (`numpy.poly1d`) | `numpy.polynomial` | -+------------------------+------------------------------+---------------------------------------+ -| Create a | ``p = np.poly1d([1, 2, 3])`` | ``p = Polynomial([3, 2, 1])`` | -| polynomial object | | | -| from coefficients [1]_ | | | -+------------------------+------------------------------+---------------------------------------+ -| Create a polynomial | ``r = np.poly([-1, 1])`` | ``p = Polynomial.fromroots([-1, 1])`` | -| object from roots | ``p = np.poly1d(r)`` | | -+------------------------+------------------------------+---------------------------------------+ -| Fit a polynomial of | | | -| degree ``deg`` to data | ``np.polyfit(x, y, deg)`` | ``Polynomial.fit(x, y, deg)`` | -+------------------------+------------------------------+---------------------------------------+ - ++------------------------+----------------------------------------+---------------------------------------+ +| **How to...** | Legacy (`numpy.poly1d`) | `numpy.polynomial` | ++------------------------+----------------------------------------+---------------------------------------+ +| Create a | ``p = np.poly1d([1, 2, 3])`` | ``p = Polynomial([3, 2, 1])`` | +| polynomial object | | | +| from coefficients [1]_ | | | ++------------------------+----------------------------------------+---------------------------------------+ +| Create a polynomial | ``r = np.poly([-1, 1])`` | ``p = Polynomial.fromroots([-1, 1])`` | +| object from roots | ``p = np.poly1d(r)`` | | ++------------------------+----------------------------------------+---------------------------------------+ +| Fit a polynomial of | | | +| degree ``deg`` to data | ``np.polyfit(x, y, deg)`` | ``Polynomial.fit(x, y, deg)`` | ++------------------------+----------------------------------------+---------------------------------------+ +| Evaluate a polynomial | ``p(2.0)`` or | ``p(2.0)`` or ``polyval(2.0, p.coef)``| +| at a 
point [2]_ | ``np.polyval([1, 2, 3], 2.0)`` | (use ``p.convert().coef`` after fit) | ++------------------------+----------------------------------------+---------------------------------------+ .. [1] Note the reversed ordering of the coefficients +.. [2] When evaluating polynomials created with ``fit()``, use ``p(x)`` or + ``polyval(x, p.convert().coef)`` to handle domain/window scaling correctly. + Transition Guide ~~~~~~~~~~~~~~~~ @@ -188,3 +193,4 @@ Documentation for legacy polynomials :maxdepth: 2 routines.polynomials.poly1d + \ No newline at end of file diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index 6ec0dc58a1de..220306693cf9 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -717,6 +717,10 @@ def polyval(x, c, tensor=True): ----- The evaluation uses Horner's method. + When using coefficients from polynomials created with ``Polynomial.fit()``, + use ``p(x)`` or ``polyval(x, p.convert().coef)`` to handle domain/window + scaling correctly, not ``polyval(x, p.coef)``. + Examples -------- >>> import numpy as np From 3958757f49799ef4f0b4d8926178057d4bc60db6 Mon Sep 17 00:00:00 2001 From: Aaron Kollasch Date: Wed, 15 Oct 2025 11:35:13 -0400 Subject: [PATCH 0622/1018] BUG: Fix np.strings.slice if stop=None or start and stop >= len (#29944) Python treats `slice(-1)` differently from `slice(-1, None)`: The first is interpreted as `slice(None, -1, None)`, while the second becomes `slice(-1, None, None)`, according to the logic in `slice_new`. However, `np.strings.slice` treats these identically, as it cannot distinguish unset arguments from arguments set to None. This makes it impossible to get the last characters of each string, for example: ```python >>> a = np.array(['hello', 'world']) >>> np.strings.slice(a, -2, None) # should return last two characters array(['hel', 'wor'], dtype='>> a = np.array(['hello', 'world']) >>> np.strings.slice(a, -2, None) # returns last characters as expected array(['lo', 'ld'], dtype='>> np.strings.slice(a, -2) # original behavior preserved if no stop array(['hel', 'wor'], dtype='>> np.__version__ '2.3.3' >>> a = np.array(['hello', 'world'], dtype="T") >>> np.strings.slice(a, 5, 7) Traceback (most recent call last): File "", line 1, in File "numpy-dev/lib/python3.12/site-packages/numpy/_core/strings.py", line 1823, in slice return _slice(a, start, stop, step) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ MemoryError: Failed to allocate string in slice ``` This causes either a MemoryError or kills the process with code 251. * BUG: Fix np.strings.slice when start and stop >= len Allows commented test_slice conditions to be uncommented. --- numpy/_core/src/umath/stringdtype_ufuncs.cpp | 13 +++++++----- numpy/_core/strings.py | 10 ++++++++-- numpy/_core/tests/test_strings.py | 21 ++++++++++++++++++++ 3 files changed, 37 insertions(+), 7 deletions(-) diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index ca574f605c1a..e00612a369f0 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -2264,10 +2264,13 @@ slice_strided_loop(PyArrayMethod_Context *context, char *const data[], if (step == 1) { // step == 1 is the easy case, we can just use memcpy - npy_intp outsize = ((size_t)stop < num_codepoints - ? codepoint_offsets[stop] - : (unsigned char *)is.buf + is.size) - - codepoint_offsets[start]; + unsigned char *start_bounded = ((size_t)start < num_codepoints + ? 
codepoint_offsets[start] + : (unsigned char *)is.buf + is.size); + unsigned char *stop_bounded = ((size_t)stop < num_codepoints + ? codepoint_offsets[stop] + : (unsigned char *)is.buf + is.size); + npy_intp outsize = stop_bounded - start_bounded; if (load_new_string(ops, &os, outsize, oallocator, "slice") < 0) { goto fail; @@ -2276,7 +2279,7 @@ slice_strided_loop(PyArrayMethod_Context *context, char *const data[], /* explicitly discard const; initializing new buffer */ char *buf = (char *)os.buf; - memcpy(buf, codepoint_offsets[start], outsize); + memcpy(buf, start_bounded, outsize); } else { // handle step != 1 diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index 4d56f1e0c779..e9fa7f58e3ea 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -1727,7 +1727,7 @@ def translate(a, table, deletechars=None): ) @set_module("numpy.strings") -def slice(a, start=None, stop=None, step=None, /): +def slice(a, start=None, stop=np._NoValue, step=None, /): """ Slice the strings in `a` by slices specified by `start`, `stop`, `step`. Like in the regular Python `slice` object, if only `start` is @@ -1760,6 +1760,9 @@ def slice(a, start=None, stop=None, step=None, /): >>> np.strings.slice(a, 2) array(['he', 'wo'], dtype='>> np.strings.slice(a, 2, None) + array(['llo', 'rld'], dtype='>> np.strings.slice(a, 1, 5, 2) array(['el', 'ol'], dtype='>> np.strings.slice(b, -2) array(['hello wor', 'γεια σου κόσ', '你好', '👋'], dtype=StringDType()) + >>> np.strings.slice(b, -2, None) + array(['ld', 'με', '世界', ' 🌍'], dtype=StringDType()) + >>> np.strings.slice(b, [3, -10, 2, -3], [-1, -2, -1, 3]) array(['lo worl', ' σου κόσ', '世', '👋 🌍'], dtype=StringDType()) @@ -1785,7 +1791,7 @@ def slice(a, start=None, stop=None, step=None, /): """ # Just like in the construction of a regular slice object, if only start # is specified then start will become stop, see logic in slice_new. 
- if stop is None: + if stop is np._NoValue: stop = start start = None diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index 939b7fbd465d..e5c3bb87c773 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -975,17 +975,38 @@ def test_rpartition(self, buf, sep, res1, res2, res3, dt): @pytest.mark.parametrize("args", [ (None,), + (None, None), + (None, None, -1), (0,), + (0, None), + (0, None, -1), (1,), + (1, None), + (1, None, -1), (3,), + (3, None), (5,), + (5, None), + (5, 5), + (5, 5, -1), (6,), # test index past the end + (6, None), + (6, None, -1), + (6, 7), # test stop index past the end (-1,), + (-1, None), + (-1, None, -1), (-3,), + (-3, None), ([3, 4],), + ([3, 4], None), ([2, 4],), ([-3, 5],), + ([-3, 5], None), + ([-3, 5], None, -1), ([0, -5],), + ([0, -5], None), + ([0, -5], None, -1), (1, 4), (-3, 5), (None, -1), From 55b6234e98d7f9e4fe50e776dce8141d9620accb Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 19:54:23 +0200 Subject: [PATCH 0623/1018] TYP: update ``ScalarType`` type --- numpy/_core/numerictypes.pyi | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/numpy/_core/numerictypes.pyi b/numpy/_core/numerictypes.pyi index beec14079b48..46bb6a379861 100644 --- a/numpy/_core/numerictypes.pyi +++ b/numpy/_core/numerictypes.pyi @@ -168,28 +168,28 @@ ScalarType: Final[ type[str], type[memoryview[Any]], type[np.bool], - type[csingle], - type[cdouble], - type[clongdouble], - type[half], - type[single], - type[double], - type[longdouble], - type[byte], - type[short], - type[intc], - type[long], - type[longlong], - type[timedelta64], + type[complex64], + type[complex128], + type[complex128 | complex192 | complex256], + type[float16], + type[float32], + type[float64], + type[float64 | float96 | float128], + type[int8], + type[int16], + type[int32], + type[int32 | int64], + type[int64], type[datetime64], + type[timedelta64], type[object_], type[bytes_], type[str_], - type[ubyte], - type[ushort], - type[uintc], - type[ulong], - type[ulonglong], + type[uint8], + type[uint16], + type[uint32], + type[uint32 | uint64], + type[uint64], type[void], ] ] = ... 
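The reordering above matters because ``np.ScalarType`` is indexed positionally in downstream type-tests, and the sized aliases (``complex64``/``complex128`` rather than ``csingle``/``cdouble``) match the names the runtime classes actually report. A quick interactive check of the indices the next patch's type-tests rely on — output assumes a typical 64-bit platform:

```python
import numpy as np

# Builtins come first, followed by the NumPy scalar types.
print(np.ScalarType[0])   # <class 'int'>
print(np.ScalarType[3])   # <class 'bool'>
print(np.ScalarType[8])   # <class 'numpy.complex64'>
print(np.ScalarType[9])   # <class 'numpy.complex128'>
print(np.ScalarType[-1])  # <class 'numpy.void'>
```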
From 95b4e135dea7f2196995310d92e03533a3ff553e Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 20:05:48 +0200 Subject: [PATCH 0624/1018] TYP: fix and expand type-tests for ``ScalarType`` --- numpy/typing/tests/data/reveal/numerictypes.pyi | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/numpy/typing/tests/data/reveal/numerictypes.pyi b/numpy/typing/tests/data/reveal/numerictypes.pyi index aa5cf10410d4..75d108ce5a0f 100644 --- a/numpy/typing/tests/data/reveal/numerictypes.pyi +++ b/numpy/typing/tests/data/reveal/numerictypes.pyi @@ -4,8 +4,9 @@ import numpy as np assert_type(np.ScalarType[0], type[int]) assert_type(np.ScalarType[3], type[bool]) -assert_type(np.ScalarType[8], type[np.csingle]) -assert_type(np.ScalarType[10], type[np.clongdouble]) +assert_type(np.ScalarType[8], type[np.complex64]) +assert_type(np.ScalarType[9], type[np.complex128]) +assert_type(np.ScalarType[-1], type[np.void]) assert_type(np.bool_(object()), np.bool) assert_type(np.typecodes["Character"], Literal["c"]) From 955647d43fcced237a332b058dd835b7e61b7d9d Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 20:16:36 +0200 Subject: [PATCH 0625/1018] TYP: expand ``TypedDict`` kwargs in ``full`` to appease stubtest --- numpy/_core/numeric.pyi | 48 ++++++++++++++++++++++++++++++----------- 1 file changed, 36 insertions(+), 12 deletions(-) diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index f9ce1ce3a4a3..7e205bd43cda 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -739,7 +739,9 @@ def full( fill_value: _ScalarT, dtype: None = None, order: _OrderCF = "C", - **kwargs: Unpack[_KwargsEmpty], + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> _Array[tuple[int], _ScalarT]: ... @overload def full( @@ -747,7 +749,9 @@ def full( fill_value: Any, dtype: _DTypeT | _SupportsDType[_DTypeT], order: _OrderCF = "C", - **kwargs: Unpack[_KwargsEmpty], + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> np.ndarray[tuple[int], _DTypeT]: ... @overload def full( @@ -755,7 +759,9 @@ def full( fill_value: Any, dtype: type[_ScalarT], order: _OrderCF = "C", - **kwargs: Unpack[_KwargsEmpty], + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> _Array[tuple[int], _ScalarT]: ... @overload def full( @@ -763,7 +769,9 @@ def full( fill_value: Any, dtype: DTypeLike | None = None, order: _OrderCF = "C", - **kwargs: Unpack[_KwargsEmpty], + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> _Array[tuple[int], Any]: ... # known shape @overload @@ -772,7 +780,9 @@ def full( fill_value: _ScalarT, dtype: None = None, order: _OrderCF = "C", - **kwargs: Unpack[_KwargsEmpty], + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> _Array[_AnyShapeT, _ScalarT]: ... @overload def full( @@ -780,7 +790,9 @@ def full( fill_value: Any, dtype: _DTypeT | _SupportsDType[_DTypeT], order: _OrderCF = "C", - **kwargs: Unpack[_KwargsEmpty], + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> np.ndarray[_AnyShapeT, _DTypeT]: ... @overload def full( @@ -788,7 +800,9 @@ def full( fill_value: Any, dtype: type[_ScalarT], order: _OrderCF = "C", - **kwargs: Unpack[_KwargsEmpty], + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> _Array[_AnyShapeT, _ScalarT]: ... 
@overload def full( @@ -796,7 +810,9 @@ def full( fill_value: Any, dtype: DTypeLike | None = None, order: _OrderCF = "C", - **kwargs: Unpack[_KwargsEmpty], + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> _Array[_AnyShapeT, Any]: ... # unknown shape @overload @@ -805,7 +821,9 @@ def full( fill_value: _ScalarT, dtype: None = None, order: _OrderCF = "C", - **kwargs: Unpack[_KwargsEmpty], + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[_ScalarT]: ... @overload def full( @@ -813,7 +831,9 @@ def full( fill_value: Any, dtype: _DTypeT | _SupportsDType[_DTypeT], order: _OrderCF = "C", - **kwargs: Unpack[_KwargsEmpty], + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> np.ndarray[Any, _DTypeT]: ... @overload def full( @@ -821,7 +841,9 @@ def full( fill_value: Any, dtype: type[_ScalarT], order: _OrderCF = "C", - **kwargs: Unpack[_KwargsEmpty], + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[_ScalarT]: ... @overload def full( @@ -829,7 +851,9 @@ def full( fill_value: Any, dtype: DTypeLike | None = None, order: _OrderCF = "C", - **kwargs: Unpack[_KwargsEmpty], + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... @overload From e0398755df41852b660b6c8e98e3a9397a45325e Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 20:24:36 +0200 Subject: [PATCH 0626/1018] TYP: remove unused imports in ``_core.numeric`` --- numpy/_core/numeric.pyi | 2 -- 1 file changed, 2 deletions(-) diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index 7e205bd43cda..314bce708306 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -11,7 +11,6 @@ from typing import ( TypeAlias, TypeGuard, TypeVar, - Unpack, overload, ) @@ -127,7 +126,6 @@ from .multiarray import ( WRAP as WRAP, _Array, _ConstructorEmpty, - _KwargsEmpty, arange, array, asanyarray, From e1a484d3cb7b87def91272b87a52c0b2a0b7c680 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 20:37:12 +0200 Subject: [PATCH 0627/1018] TYP: remove deprecated ``interpolation`` kwarg in ``percentile`` and ``quantile`` Co-authored-by: Matti Picus --- numpy/lib/_function_base_impl.pyi | 26 -------------------------- 1 file changed, 26 deletions(-) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index a20032e047c6..47dd0749f07f 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -626,7 +626,6 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> floating: ... @overload def percentile( @@ -639,7 +638,6 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> complexfloating: ... @overload def percentile( @@ -652,7 +650,6 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> timedelta64: ... @overload def percentile( @@ -665,7 +662,6 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> datetime64: ... @overload def percentile( @@ -678,7 +674,6 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> Any: ... 
From e0398755df41852b660b6c8e98e3a9397a45325e Mon Sep 17 00:00:00 2001
From: jorenham
Date: Wed, 15 Oct 2025 20:24:36 +0200
Subject: [PATCH 0626/1018] TYP: remove unused imports in ``_core.numeric``

---
 numpy/_core/numeric.pyi | 2 --
 1 file changed, 2 deletions(-)

diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi
index 7e205bd43cda..314bce708306 100644
--- a/numpy/_core/numeric.pyi
+++ b/numpy/_core/numeric.pyi
@@ -11,7 +11,6 @@ from typing import (
    TypeAlias,
    TypeGuard,
    TypeVar,
-    Unpack,
    overload,
)

@@ -127,7 +126,6 @@ from .multiarray import (
    WRAP as WRAP,
    _Array,
    _ConstructorEmpty,
-    _KwargsEmpty,
    arange,
    array,
    asanyarray,

From e1a484d3cb7b87def91272b87a52c0b2a0b7c680 Mon Sep 17 00:00:00 2001
From: jorenham
Date: Wed, 15 Oct 2025 20:37:12 +0200
Subject: [PATCH 0627/1018] TYP: remove deprecated ``interpolation`` kwarg in
 ``percentile`` and ``quantile``

Co-authored-by: Matti Picus

---
 numpy/lib/_function_base_impl.pyi | 26 --------------------------
 1 file changed, 26 deletions(-)

diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi
index a20032e047c6..47dd0749f07f 100644
--- a/numpy/lib/_function_base_impl.pyi
+++ b/numpy/lib/_function_base_impl.pyi
@@ -626,7 +626,6 @@ def percentile(
    keepdims: L[False] = False,
    *,
    weights: _ArrayLikeFloat_co | None = None,
-    interpolation: None = None,  # deprecated
) -> floating: ...
@overload
def percentile(
@@ -639,7 +638,6 @@ def percentile(
    keepdims: L[False] = False,
    *,
    weights: _ArrayLikeFloat_co | None = None,
-    interpolation: None = None,  # deprecated
) -> complexfloating: ...
@overload
def percentile(
@@ -652,7 +650,6 @@ def percentile(
    keepdims: L[False] = False,
    *,
    weights: _ArrayLikeFloat_co | None = None,
-    interpolation: None = None,  # deprecated
) -> timedelta64: ...
@overload
def percentile(
@@ -665,7 +662,6 @@ def percentile(
    keepdims: L[False] = False,
    *,
    weights: _ArrayLikeFloat_co | None = None,
-    interpolation: None = None,  # deprecated
) -> datetime64: ...
@overload
def percentile(
@@ -678,7 +674,6 @@ def percentile(
    keepdims: L[False] = False,
    *,
    weights: _ArrayLikeFloat_co | None = None,
-    interpolation: None = None,  # deprecated
) -> Any: ...
@overload
def percentile(
@@ -691,7 +686,6 @@ def percentile(
    keepdims: L[False] = False,
    *,
    weights: _ArrayLikeFloat_co | None = None,
-    interpolation: None = None,  # deprecated
) -> NDArray[floating]: ...
@overload
def percentile(
@@ -704,7 +698,6 @@ def percentile(
    keepdims: L[False] = False,
    *,
    weights: _ArrayLikeFloat_co | None = None,
-    interpolation: None = None,  # deprecated
) -> NDArray[complexfloating]: ...
@overload
def percentile(
@@ -717,7 +710,6 @@ def percentile(
    keepdims: L[False] = False,
    *,
    weights: _ArrayLikeFloat_co | None = None,
-    interpolation: None = None,  # deprecated
) -> NDArray[timedelta64]: ...
@overload
def percentile(
@@ -730,7 +722,6 @@ def percentile(
    keepdims: L[False] = False,
    *,
    weights: _ArrayLikeFloat_co | None = None,
-    interpolation: None = None,  # deprecated
) -> NDArray[datetime64]: ...
@overload
def percentile(
@@ -743,7 +734,6 @@ def percentile(
    keepdims: L[False] = False,
    *,
    weights: _ArrayLikeFloat_co | None = None,
-    interpolation: None = None,  # deprecated
) -> NDArray[object_]: ...
@overload
def percentile(
@@ -756,7 +746,6 @@ def percentile(
    keepdims: bool = False,
    *,
    weights: _ArrayLikeFloat_co | None = None,
-    interpolation: None = None,  # deprecated
) -> Any: ...
@overload
def percentile(
@@ -769,7 +758,6 @@ def percentile(
    keepdims: bool = False,
    *,
    weights: _ArrayLikeFloat_co | None = None,
-    interpolation: None = None,  # deprecated
) -> _ArrayT: ...
@overload
def percentile(
@@ -782,7 +770,6 @@ def percentile(
    method: _MethodKind = "linear",
    keepdims: bool = False,
    weights: _ArrayLikeFloat_co | None = None,
-    interpolation: None = None,  # deprecated
) -> _ArrayT: ...

# NOTE: keep in sync with `percentile`
@@ -797,7 +784,6 @@ def quantile(
    keepdims: L[False] = False,
    *,
    weights: _ArrayLikeFloat_co | None = None,
-    interpolation: None = None,  # deprecated
) -> floating: ...
@overload
def quantile(
@@ -810,7 +796,6 @@ def quantile(
    keepdims: L[False] = False,
    *,
    weights: _ArrayLikeFloat_co | None = None,
-    interpolation: None = None,  # deprecated
) -> complexfloating: ...
@overload
def quantile(
@@ -823,7 +808,6 @@ def quantile(
    keepdims: L[False] = False,
    *,
    weights: _ArrayLikeFloat_co | None = None,
-    interpolation: None = None,  # deprecated
) -> timedelta64: ...
@overload
def quantile(
@@ -836,7 +820,6 @@ def quantile(
    keepdims: L[False] = False,
    *,
    weights: _ArrayLikeFloat_co | None = None,
-    interpolation: None = None,  # deprecated
) -> datetime64: ...
@overload
def quantile(
@@ -849,7 +832,6 @@ def quantile(
    keepdims: L[False] = False,
    *,
    weights: _ArrayLikeFloat_co | None = None,
-    interpolation: None = None,  # deprecated
) -> Any: ...
@overload
def quantile(
@@ -862,7 +844,6 @@ def quantile(
    keepdims: L[False] = False,
    *,
    weights: _ArrayLikeFloat_co | None = None,
-    interpolation: None = None,  # deprecated
) -> NDArray[floating]: ...
@overload
def quantile(
@@ -875,7 +856,6 @@ def quantile(
    keepdims: L[False] = False,
    *,
    weights: _ArrayLikeFloat_co | None = None,
-    interpolation: None = None,  # deprecated
) -> NDArray[complexfloating]: ...
@overload
def quantile(
@@ -888,7 +868,6 @@ def quantile(
    keepdims: L[False] = False,
    *,
    weights: _ArrayLikeFloat_co | None = None,
-    interpolation: None = None,  # deprecated
) -> NDArray[timedelta64]: ...
@overload
def quantile(
@@ -901,7 +880,6 @@ def quantile(
    keepdims: L[False] = False,
    *,
    weights: _ArrayLikeFloat_co | None = None,
-    interpolation: None = None,  # deprecated
) -> NDArray[datetime64]: ...
@overload
def quantile(
@@ -914,7 +892,6 @@ def quantile(
    keepdims: L[False] = False,
    *,
    weights: _ArrayLikeFloat_co | None = None,
-    interpolation: None = None,  # deprecated
) -> NDArray[object_]: ...
@overload
def quantile(
@@ -927,7 +904,6 @@ def quantile(
    keepdims: bool = False,
    *,
    weights: _ArrayLikeFloat_co | None = None,
-    interpolation: None = None,  # deprecated
) -> Any: ...
@overload
def quantile(
@@ -940,7 +916,6 @@ def quantile(
    keepdims: bool = False,
    *,
    weights: _ArrayLikeFloat_co | None = None,
-    interpolation: None = None,  # deprecated
) -> _ArrayT: ...
@overload
def quantile(
@@ -953,7 +928,6 @@ def quantile(
    method: _MethodKind = "linear",
    keepdims: bool = False,
    weights: _ArrayLikeFloat_co | None = None,
-    interpolation: None = None,  # deprecated
) -> _ArrayT: ...

_ScalarT_fm = TypeVar(
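With the deprecated keyword gone from the overloads, ``method`` is the only estimator selector the stubs accept, and the return type still follows the input dtype. A small sketch (values illustrative):

>>> import numpy as np
>>> float(np.percentile(np.arange(5.0), 50, method="midpoint"))
2.0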
From aedb6922d33fcb62c715e438234732b9798b352d Mon Sep 17 00:00:00 2001
From: jorenham
Date: Wed, 15 Oct 2025 20:57:36 +0200
Subject: [PATCH 0628/1018] DEP: Remove expired ``interpolation`` kwarg from
 ``percentile`` and ``quantile``

---
 numpy/lib/_function_base_impl.py | 51 ++++----------------------------
 1 file changed, 6 insertions(+), 45 deletions(-)

diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py
index 4adb0c187395..bdc32574e8d1 100644
--- a/numpy/lib/_function_base_impl.py
+++ b/numpy/lib/_function_base_impl.py
@@ -4090,8 +4090,7 @@ def _median(a, axis=None, out=None, overwrite_input=False):

 def _percentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
-                           method=None, keepdims=None, *, weights=None,
-                           interpolation=None):
+                           method=None, keepdims=None, *, weights=None):
     return (a, q, out, weights)

@@ -4104,8 +4103,7 @@ def percentile(a,
                method="linear",
                keepdims=False,
                *,
-               weights=None,
-               interpolation=None):
+               weights=None):
     """
     Compute the q-th percentile of the data along the specified axis.

@@ -4175,11 +4173,6 @@ def percentile(a,

         .. versionadded:: 2.0.0

-    interpolation : str, optional
-        Deprecated name for the method keyword argument.
-
-        .. deprecated:: 1.22.0
-
     Returns
     -------
     percentile : scalar or ndarray
@@ -4275,10 +4268,6 @@ def percentile(a,
            The American Statistician, 50(4), pp. 361-365, 1996

     """
-    if interpolation is not None:
-        method = _check_interpolation_as_method(
-            method, interpolation, "percentile")
-
     a = np.asanyarray(a)
     if a.dtype.kind == "c":
         raise TypeError("a must be an array of real numbers")
@@ -4305,8 +4294,7 @@ def percentile(a,

 def _quantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
-                         method=None, keepdims=None, *, weights=None,
-                         interpolation=None):
+                         method=None, keepdims=None, *, weights=None):
     return (a, q, out, weights)

@@ -4319,8 +4307,7 @@ def quantile(a,
              method="linear",
              keepdims=False,
              *,
-             weights=None,
-             interpolation=None):
+             weights=None):
     """
     Compute the q-th quantile of the data along the specified axis.

@@ -4390,11 +4377,6 @@ def quantile(a,

         .. versionadded:: 2.0.0

-    interpolation : str, optional
-        Deprecated name for the method keyword argument.
-
-        .. deprecated:: 1.22.0
-
     Returns
     -------
     quantile : scalar or ndarray
@@ -4536,10 +4518,6 @@ def quantile(a,
            The American Statistician, 50(4), pp. 361-365, 1996

     """
-    if interpolation is not None:
-        method = _check_interpolation_as_method(
-            method, interpolation, "quantile")
-
     a = np.asanyarray(a)
     if a.dtype.kind == "c":
         raise TypeError("a must be an array of real numbers")
@@ -4599,23 +4577,6 @@ def _quantile_is_valid(q):
     return True

-def _check_interpolation_as_method(method, interpolation, fname):
-    # Deprecated NumPy 1.22, 2021-11-08
-    warnings.warn(
-        f"the `interpolation=` argument to {fname} was renamed to "
-        "`method=`, which has additional options.\n"
-        "Users of the modes 'nearest', 'lower', 'higher', or "
-        "'midpoint' are encouraged to review the method they used. "
-        "(Deprecated NumPy 1.22)",
-        DeprecationWarning, stacklevel=4)
-    if method != "linear":
-        # sanity check, we assume this basically never happens
-        raise TypeError(
-            "You shall not pass both `method` and `interpolation`!\n"
-            "(`interpolation` is Deprecated in favor of `method`)")
-    return interpolation
-
-
 def _compute_virtual_index(n, quantiles, alpha: float, beta: float):
     """
     Compute the floating point indexes of an array for the linear
@@ -4651,7 +4612,7 @@ def _get_gamma(virtual_indexes, previous_indexes, method):
         sample.
     previous_indexes : array_like
         The floor values of virtual_indexes.
-    interpolation : dict
+    method : dict
         The interpolation method chosen, which may have a specific rule
         modifying gamma.

@@ -4809,7 +4770,7 @@ def _quantile(
     These methods are extended to this function using _ureduce
     See nanpercentile for parameter usage
     It computes the quantiles of the array for the given axis.
-    A linear interpolation is performed based on the `interpolation`.
+    A linear interpolation is performed based on the `method`.
     By default, the method is "linear" where alpha == beta == 1 which
     performs the 7th method of Hyndman&Fan.
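At runtime the expired keyword is likewise gone, so the old spelling now fails like any unknown keyword argument instead of emitting a ``DeprecationWarning``. A migration sketch:

>>> import numpy as np
>>> float(np.quantile([1.0, 2.0, 3.0, 4.0], 0.25, method="lower"))  # was: interpolation="lower"
1.0

Passing ``interpolation=`` now raises ``TypeError``.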
From 514fc6c3053f5f173dfb4042adf08ea26facaf35 Mon Sep 17 00:00:00 2001
From: jorenham
Date: Wed, 15 Oct 2025 20:57:50 +0200
Subject: [PATCH 0629/1018] DEP: Remove expired ``interpolation`` kwarg from
 ``nanpercentile`` and ``nanquantile``

---
 numpy/lib/_nanfunctions_impl.py | 26 ++------------------------
 1 file changed, 2 insertions(+), 24 deletions(-)

diff --git a/numpy/lib/_nanfunctions_impl.py b/numpy/lib/_nanfunctions_impl.py
index aec60d484ba4..f030d74c5c11 100644
--- a/numpy/lib/_nanfunctions_impl.py
+++ b/numpy/lib/_nanfunctions_impl.py
@@ -1219,7 +1219,7 @@ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValu

 def _nanpercentile_dispatcher(
         a, q, axis=None, out=None, overwrite_input=None,
-        method=None, keepdims=None, *, weights=None, interpolation=None):
+        method=None, keepdims=None, *, weights=None):
     return (a, q, out, weights)

@@ -1234,7 +1234,6 @@ def nanpercentile(
     keepdims=np._NoValue,
     *,
     weights=None,
-    interpolation=None,
 ):
     """
     Compute the qth percentile of the data along the specified axis,
@@ -1313,11 +1312,6 @@ def nanpercentile(

         .. versionadded:: 2.0.0

-    interpolation : str, optional
-        Deprecated name for the method keyword argument.
-
-        .. deprecated:: 1.22.0
-
     Returns
     -------
     percentile : scalar or ndarray
@@ -1379,10 +1373,6 @@ def nanpercentile(
            The American Statistician, 50(4), pp. 361-365, 1996

     """
-    if interpolation is not None:
-        method = fnb._check_interpolation_as_method(
-            method, interpolation, "nanpercentile")
-
     a = np.asanyarray(a)
     if a.dtype.kind == "c":
         raise TypeError("a must be an array of real numbers")
@@ -1407,8 +1397,7 @@ def nanpercentile(

 def _nanquantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
-                            method=None, keepdims=None, *, weights=None,
-                            interpolation=None):
+                            method=None, keepdims=None, *, weights=None):
     return (a, q, out, weights)

@@ -1423,7 +1412,6 @@ def nanquantile(
     keepdims=np._NoValue,
     *,
     weights=None,
-    interpolation=None,
 ):
     """
     Compute the qth quantile of the data along the specified axis,
@@ -1500,11 +1488,6 @@ def nanquantile(

         .. versionadded:: 2.0.0

-    interpolation : str, optional
-        Deprecated name for the method keyword argument.
-
-        .. deprecated:: 1.22.0
-
     Returns
     -------
     quantile : scalar or ndarray
@@ -1565,11 +1548,6 @@ def nanquantile(
            The American Statistician, 50(4), pp. 361-365, 1996

     """
-
-    if interpolation is not None:
-        method = fnb._check_interpolation_as_method(
-            method, interpolation, "nanquantile")
-
     a = np.asanyarray(a)
     if a.dtype.kind == "c":
         raise TypeError("a must be an array of real numbers")

From c162a27759c72538b5d054db0ad2c3c618100b4e Mon Sep 17 00:00:00 2001
From: jorenham
Date: Wed, 15 Oct 2025 21:00:24 +0200
Subject: [PATCH 0630/1018] DEP: Remove ``TestQuantileInterpolationDeprecation``

---
 numpy/_core/tests/test_deprecations.py | 18 ------------------
 1 file changed, 18 deletions(-)

diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py
index 27e98a563930..f76c68a531a3 100644
--- a/numpy/_core/tests/test_deprecations.py
+++ b/numpy/_core/tests/test_deprecations.py
@@ -215,24 +215,6 @@ def test_not_deprecated(self, name: str) -> None:
         self.assert_not_deprecated(lambda: getattr(self.ctypes, name))

-class TestQuantileInterpolationDeprecation(_DeprecationTestCase):
-    # Deprecated 2021-11-08, NumPy 1.22
-    @pytest.mark.parametrize("func",
-        [np.percentile, np.quantile, np.nanpercentile, np.nanquantile])
-    def test_deprecated(self, func):
-        self.assert_deprecated(
-            lambda: func([0., 1.], 0., interpolation="linear"))
-        self.assert_deprecated(
-            lambda: func([0., 1.], 0., interpolation="nearest"))
-
-    @pytest.mark.parametrize("func",
-        [np.percentile, np.quantile, np.nanpercentile, np.nanquantile])
-    def test_both_passed(self, func):
-        with pytest.warns(DeprecationWarning):
-            with pytest.raises(TypeError):
-                func([0., 1.], 0., interpolation="nearest", method="nearest")
-
-
 class TestPyIntConversion(_DeprecationTestCase):
     message = r".*stop allowing conversion of out-of-bound.*"
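The NaN-aware variants shed the keyword in the same way; only ``method`` remains. A quick sketch:

>>> import numpy as np
>>> x = np.array([1.0, np.nan, 3.0, 4.0])
>>> float(np.nanpercentile(x, 50, method="nearest"))
3.0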
From 37b02ca58b5ce3cb1a78007e4c7eca0dc421163d Mon Sep 17 00:00:00 2001
From: jorenham
Date: Wed, 15 Oct 2025 21:14:17 +0200
Subject: [PATCH 0631/1018] DOC: add release note for #29973

---
 doc/release/upcoming_changes/29973.expired.rst | 12 ++++++++++++
 1 file changed, 12 insertions(+)
 create mode 100644 doc/release/upcoming_changes/29973.expired.rst

diff --git a/doc/release/upcoming_changes/29973.expired.rst b/doc/release/upcoming_changes/29973.expired.rst
new file mode 100644
index 000000000000..0dda176fc360
--- /dev/null
+++ b/doc/release/upcoming_changes/29973.expired.rst
@@ -0,0 +1,12 @@
+Remove ``interpolation`` parameter from quantile and percentile functions
+---------------------------------------------------------------------------
+
+The ``interpolation`` parameter was deprecated in NumPy 1.22.0 and has been
+removed from the following functions:
+
+* ``numpy.percentile``
+* ``numpy.nanpercentile``
+* ``numpy.quantile``
+* ``numpy.nanquantile``
+
+Use the ``method`` parameter instead.

From 58481143ea65a59fec17ff80af714dfafd1d1f04 Mon Sep 17 00:00:00 2001
From: jorenham
Date: Wed, 15 Oct 2025 21:15:14 +0200
Subject: [PATCH 0632/1018] DOC: fix typo in release note for #29909

---
 doc/release/upcoming_changes/29909.expired.rst | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/doc/release/upcoming_changes/29909.expired.rst b/doc/release/upcoming_changes/29909.expired.rst
index e3cb557e2c28..6a2ee4f53c09 100644
--- a/doc/release/upcoming_changes/29909.expired.rst
+++ b/doc/release/upcoming_changes/29909.expired.rst
@@ -1,5 +1,5 @@
-Remove numpy.linalg.linag and numpy.fft.helper
-----------------------------------------------
+Remove numpy.linalg.linalg and numpy.fft.helper
+-----------------------------------------------
 
 The following were deprecated in NumPy 2.0 and have
 been moved to private modules

From c27fb484b7b41f11bfbddc23b782147df8d7b425 Mon Sep 17 00:00:00 2001
From: jorenham
Date: Wed, 15 Oct 2025 21:17:25 +0200
Subject: [PATCH 0633/1018] DOC: fix rst formatting in release note for #29973

---
 doc/release/upcoming_changes/29973.expired.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/release/upcoming_changes/29973.expired.rst b/doc/release/upcoming_changes/29973.expired.rst
index 0dda176fc360..5b51cb7cf428 100644
--- a/doc/release/upcoming_changes/29973.expired.rst
+++ b/doc/release/upcoming_changes/29973.expired.rst
@@ -1,5 +1,5 @@
 Remove ``interpolation`` parameter from quantile and percentile functions
----------------------------------------------------------------------------
+-------------------------------------------------------------------------
 
 The ``interpolation`` parameter was deprecated in NumPy 1.22.0 and has been
 removed from the following functions:

From 7d276dea991b0c8f72502d5856fe9ea31e7e9cb0 Mon Sep 17 00:00:00 2001
From: jorenham
Date: Fri, 10 Oct 2025 02:32:29 +0200
Subject: [PATCH 0634/1018] TYP: move ``matrix`` from ``__init__.pyi`` to
 ``matrixlib/defmatrix.pyi``

---
 numpy/__init__.pyi | 187 +----------------
 numpy/matrixlib/__init__.pyi | 4 +-
 numpy/matrixlib/defmatrix.pyi | 217 ++++++++++++++++++++++++++++++++--
 3 files changed, 212 insertions(+), 196 deletions(-)

diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index 2bf4eb16d1d9..b03533f77b96 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -606,6 +606,7 @@ from numpy.lib._utils_impl import (
 from numpy.matrixlib import (
     asmatrix,
     bmat,
+    matrix,
 )

 __all__ = [  # noqa: RUF022
@@ -5986,192 +5987,6 @@ class poly1d:
         k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = 0,
     ) -> poly1d: ...

-class matrix(ndarray[_2DShapeT_co, _DTypeT_co]):
-    __array_priority__: ClassVar[float] = 10.0  # pyright: ignore[reportIncompatibleMethodOverride]
-
-    def __new__(
-        subtype,  # pyright: ignore[reportSelfClsParameterName]
-        data: ArrayLike,
-        dtype: DTypeLike | None = None,
-        copy: builtins.bool = True,
-    ) -> matrix[_2D, Incomplete]: ...
-    def __array_finalize__(self, obj: object) -> None: ...
-
-    @overload  # type: ignore[override]
-    def __getitem__(
-        self, key: SupportsIndex | _ArrayLikeInt_co | tuple[SupportsIndex | _ArrayLikeInt_co, ...], /
-    ) -> Incomplete: ...
-    @overload
-    def __getitem__(self, key: _ToIndices, /) -> matrix[_2D, _DTypeT_co]: ...
-    @overload
-    def __getitem__(self: matrix[Any, dtype[void]], key: str, /) -> matrix[_2D, dtype]: ...
-    @overload
-    def __getitem__(self: matrix[Any, dtype[void]], key: list[str], /) -> matrix[_2DShapeT_co, _DTypeT_co]: ...  # pyright: ignore[reportIncompatibleMethodOverride]
-
-    #
-    def __mul__(self, other: ArrayLike, /) -> matrix[_2D, Incomplete]: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
-    def __rmul__(self, other: ArrayLike, /) -> matrix[_2D, Incomplete]: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
-    def __imul__(self, other: ArrayLike, /) -> Self: ...
-
-    #
-    def __pow__(self, other: ArrayLike, /) -> matrix[_2D, Incomplete]: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
-    def __rpow__(self, other: ArrayLike, /) -> matrix[_2D, Incomplete]: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
-    def __ipow__(self, other: ArrayLike, /) -> Self: ...  # type: ignore[override]
-
-    # keep in sync with `prod` and `mean`
-    @overload  # type: ignore[override]
-    def sum(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ...
-    @overload
-    def sum(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> matrix[_2D, Incomplete]: ...
-    @overload
-    def sum(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ...
-    @overload
-    def sum(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
-
-    # keep in sync with `sum` and `mean`
-    @overload  # type: ignore[override]
-    def prod(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ...
-    @overload
-    def prod(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> matrix[_2D, Incomplete]: ...
-    @overload
-    def prod(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ...
-    @overload
-    def prod(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
-
-    # keep in sync with `sum` and `prod`
-    @overload  # type: ignore[override]
-    def mean(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ...
-    @overload
-    def mean(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> matrix[_2D, Incomplete]: ...
-    @overload
-    def mean(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ...
-    @overload
-    def mean(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
-
-    # keep in sync with `var`
-    @overload  # type: ignore[override]
-    def std(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> Incomplete: ...
-    @overload
-    def std(
-        self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0
-    ) -> matrix[_2D, Incomplete]: ...
-    @overload
-    def std(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0) -> _ArrayT: ...
-    @overload
-    def std(  # pyright: ignore[reportIncompatibleMethodOverride]
-        self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0
-    ) -> _ArrayT: ...
-
-    # keep in sync with `std`
-    @overload  # type: ignore[override]
-    def var(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> Incomplete: ...
-    @overload
-    def var(
-        self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0
-    ) -> matrix[_2D, Incomplete]: ...
-    @overload
-    def var(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0) -> _ArrayT: ...
-    @overload
-    def var(  # pyright: ignore[reportIncompatibleMethodOverride]
-        self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0
-    ) -> _ArrayT: ...
-
-    # keep in sync with `all`
-    @overload  # type: ignore[override]
-    def any(self, axis: None = None, out: None = None) -> np.bool: ...
-    @overload
-    def any(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, dtype[np.bool]]: ...
-    @overload
-    def any(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ...
-    @overload
-    def any(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
-
-    # keep in sync with `any`
-    @overload  # type: ignore[override]
-    def all(self, axis: None = None, out: None = None) -> np.bool: ...
-    @overload
-    def all(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, dtype[np.bool]]: ...
-    @overload
-    def all(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ...
-    @overload
-    def all(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
-
-    # keep in sync with `min` and `ptp`
-    @overload  # type: ignore[override]
-    def max(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ...
-    @overload
-    def max(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ...
-    @overload
-    def max(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ...
-    @overload
-    def max(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
-
-    # keep in sync with `max` and `ptp`
-    @overload  # type: ignore[override]
-    def min(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ...
-    @overload
-    def min(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ...
-    @overload
-    def min(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ...
-    @overload
-    def min(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
-
-    # keep in sync with `max` and `min`
-    @overload
-    def ptp(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ...
-    @overload
-    def ptp(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ...
-    @overload
-    def ptp(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ...
-    @overload
-    def ptp(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
-
-    # keep in sync with `argmin`
-    @overload  # type: ignore[override]
-    def argmax(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> intp: ...
-    @overload
-    def argmax(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, dtype[intp]]: ...
-    @overload
-    def argmax(self, axis: _ShapeLike | None, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ...
-    @overload
-    def argmax(self, axis: _ShapeLike | None = None, *, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
-
-    # keep in sync with `argmax`
-    @overload  # type: ignore[override]
-    def argmin(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> intp: ...
-    @overload
-    def argmin(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, dtype[intp]]: ...
-    @overload
-    def argmin(self, axis: _ShapeLike | None, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ...
-    @overload
-    def argmin(self, axis: _ShapeLike | None = None, *, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
-
-    #the second overload handles the (rare) case that the matrix is not 2-d
-    @overload
-    def tolist(self: matrix[_2D, dtype[generic[_T]]]) -> list[list[_T]]: ...  # pyright: ignore[reportIncompatibleMethodOverride]
-    @overload
-    def tolist(self) -> Incomplete: ...  # pyright: ignore[reportIncompatibleMethodOverride]
-
-    # these three methods will at least return a `2-d` array of shape (1, n)
-    def squeeze(self, axis: _ShapeLike | None = None) -> matrix[_2D, _DTypeT_co]: ...
-    def ravel(self, /, order: _OrderKACF = "C") -> matrix[_2D, _DTypeT_co]: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
-    def flatten(self, /, order: _OrderKACF = "C") -> matrix[_2D, _DTypeT_co]: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
-
-    # matrix.T is inherited from _ScalarOrArrayCommon
-    def getT(self) -> Self: ...
-    @property
-    def I(self) -> matrix[_2D, Incomplete]: ...  # noqa: E743
-    def getI(self) -> matrix[_2D, Incomplete]: ...
-    @property
-    def A(self) -> ndarray[_2DShapeT_co, _DTypeT_co]: ...
-    def getA(self) -> ndarray[_2DShapeT_co, _DTypeT_co]: ...
-    @property
-    def A1(self) -> ndarray[_AnyShape, _DTypeT_co]: ...
-    def getA1(self) -> ndarray[_AnyShape, _DTypeT_co]: ...
-    @property
-    def H(self) -> matrix[_2D, _DTypeT_co]: ...
-    def getH(self) -> matrix[_2D, _DTypeT_co]: ...
-
 def from_dlpack(
     x: _SupportsDLPack[None],
     /,

diff --git a/numpy/matrixlib/__init__.pyi b/numpy/matrixlib/__init__.pyi
index 56ae8bf4c84b..ad4091d98d06 100644
--- a/numpy/matrixlib/__init__.pyi
+++ b/numpy/matrixlib/__init__.pyi
@@ -1,5 +1,3 @@
-from numpy import matrix
-
-from .defmatrix import asmatrix, bmat
+from .defmatrix import asmatrix, bmat, matrix

 __all__ = ["matrix", "bmat", "asmatrix"]

diff --git a/numpy/matrixlib/defmatrix.pyi b/numpy/matrixlib/defmatrix.pyi
index 462e8b209d2c..b5345a2d0d7c 100644
--- a/numpy/matrixlib/defmatrix.pyi
+++ b/numpy/matrixlib/defmatrix.pyi
@@ -1,17 +1,220 @@
+from _typeshed import Incomplete
 from collections.abc import Mapping, Sequence
-from typing import Any
+from types import EllipsisType
+from typing import Any, ClassVar, Literal as L, Self, SupportsIndex, TypeAlias, overload
+from typing_extensions import TypeVar

-from numpy import matrix
-from numpy._typing import ArrayLike, DTypeLike, NDArray
+import numpy as np
+from numpy._typing import (
+    ArrayLike,
+    DTypeLike,
+    NDArray,
+    _AnyShape,
+    _ArrayLikeInt_co,
+    _NestedSequence,
+    _ShapeLike,
+)

 __all__ = ["asmatrix", "bmat", "matrix"]

+_T = TypeVar("_T")
+_ArrayT = TypeVar("_ArrayT", bound=np.ndarray)
+_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[np.integer | np.bool])
+_ScalarT = TypeVar("_ScalarT", bound=np.generic)
+_ShapeT_co = TypeVar("_ShapeT_co", bound=_2D, default=_2D, covariant=True)
+_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True)
+
+_2D: TypeAlias = tuple[int, int]
+_Matrix: TypeAlias = matrix[_2D, np.dtype[_ScalarT]]
+_ToIndex1: TypeAlias = slice | EllipsisType | NDArray[np.integer | np.bool] | _NestedSequence[int] | None
+_ToIndex2: TypeAlias = tuple[_ToIndex1, _ToIndex1 | SupportsIndex] | tuple[_ToIndex1 | SupportsIndex, _ToIndex1]
+
+class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]):
+    __array_priority__: ClassVar[float] = 10.0  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    def __new__(
+        subtype,  # pyright: ignore[reportSelfClsParameterName]
+        data: ArrayLike,
+        dtype: DTypeLike | None = None,
+        copy: bool = True,
+    ) -> _Matrix[Incomplete]: ...
+
+    #
+    @overload  # type: ignore[override]
+    def __getitem__(
+        self, key: SupportsIndex | _ArrayLikeInt_co | tuple[SupportsIndex | _ArrayLikeInt_co, ...], /
+    ) -> Incomplete: ...
+    @overload
+    def __getitem__(self, key: _ToIndex1 | _ToIndex2, /) -> matrix[_2D, _DTypeT_co]: ...
+    @overload
+    def __getitem__(self: _Matrix[np.void], key: str, /) -> _Matrix[Incomplete]: ...
+    @overload
+    def __getitem__(self: _Matrix[np.void], key: list[str], /) -> matrix[_2D, _DTypeT_co]: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    #
+    def __mul__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
+    def __rmul__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
+    def __imul__(self, other: ArrayLike, /) -> Self: ...
+
+    #
+    def __pow__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
+    def __rpow__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
+    def __ipow__(self, other: ArrayLike, /) -> Self: ...
+
+    # keep in sync with `prod` and `mean`
+    @overload  # type: ignore[override]
+    def sum(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ...
+    @overload
+    def sum(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> _Matrix[Incomplete]: ...
+    @overload
+    def sum(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ...
+    @overload
+    def sum(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # keep in sync with `sum` and `mean`
+    @overload  # type: ignore[override]
+    def prod(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ...
+    @overload
+    def prod(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> _Matrix[Incomplete]: ...
+    @overload
+    def prod(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ...
+    @overload
+    def prod(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # keep in sync with `sum` and `prod`
+    @overload  # type: ignore[override]
+    def mean(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ...
+    @overload
+    def mean(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> _Matrix[Incomplete]: ...
+    @overload
+    def mean(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ...
+    @overload
+    def mean(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # keep in sync with `var`
+    @overload  # type: ignore[override]
+    def std(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> Incomplete: ...
+    @overload
+    def std(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> _Matrix[Incomplete]: ...
+    @overload
+    def std(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0) -> _ArrayT: ...
+    @overload
+    def std(  # pyright: ignore[reportIncompatibleMethodOverride]
+        self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0
+    ) -> _ArrayT: ...
+
+    # keep in sync with `std`
+    @overload  # type: ignore[override]
+    def var(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> Incomplete: ...
+    @overload
+    def var(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> _Matrix[Incomplete]: ...
+    @overload
+    def var(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0) -> _ArrayT: ...
+    @overload
+    def var(  # pyright: ignore[reportIncompatibleMethodOverride]
+        self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0
+    ) -> _ArrayT: ...
+
+    # keep in sync with `all`
+    @overload  # type: ignore[override]
+    def any(self, axis: None = None, out: None = None) -> np.bool: ...
+    @overload
+    def any(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.bool]: ...
+    @overload
+    def any(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ...
+    @overload
+    def any(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # keep in sync with `any`
+    @overload  # type: ignore[override]
+    def all(self, axis: None = None, out: None = None) -> np.bool: ...
+    @overload
+    def all(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.bool]: ...
+    @overload
+    def all(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ...
+    @overload
+    def all(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # keep in sync with `min` and `ptp`
+    @overload  # type: ignore[override]
+    def max(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ...
+    @overload
+    def max(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ...
+    @overload
+    def max(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ...
+    @overload
+    def max(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # keep in sync with `max` and `ptp`
+    @overload  # type: ignore[override]
+    def min(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ...
+    @overload
+    def min(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ...
+    @overload
+    def min(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ...
+    @overload
+    def min(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # keep in sync with `max` and `min`
+    @overload
+    def ptp(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ...
+    @overload
+    def ptp(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ...
+    @overload
+    def ptp(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ...
+    @overload
+    def ptp(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # keep in sync with `argmin`
+    @overload  # type: ignore[override]
+    def argmax(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> np.intp: ...
+    @overload
+    def argmax(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.intp]: ...
+    @overload
+    def argmax(self, axis: _ShapeLike | None, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ...
+    @overload
+    def argmax(self, axis: _ShapeLike | None = None, *, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # keep in sync with `argmax`
+    @overload  # type: ignore[override]
+    def argmin(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> np.intp: ...
+    @overload
+    def argmin(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.intp]: ...
+    @overload
+    def argmin(self, axis: _ShapeLike | None, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ...
+    @overload
+    def argmin(self, axis: _ShapeLike | None = None, *, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # the second overload handles the (rare) case that the matrix is not 2-d
+    @overload
+    def tolist(self: _Matrix[np.generic[_T]]) -> list[list[_T]]: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+    @overload
+    def tolist(self) -> Incomplete: ...  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # these three methods will at least return a `2-d` array of shape (1, n)
+    def squeeze(self, /, axis: _ShapeLike | None = None) -> matrix[_2D, _DTypeT_co]: ...
+    def ravel(self, /, order: L["K", "A", "C", "F"] | None = "C") -> matrix[_2D, _DTypeT_co]: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
+    def flatten(self, /, order: L["K", "A", "C", "F"] | None = "C") -> matrix[_2D, _DTypeT_co]: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
+
+    # matrix.T is inherited from _ScalarOrArrayCommon
+    def getT(self) -> Self: ...
+    @property
+    def I(self) -> _Matrix[Incomplete]: ...  # noqa: E743
+    def getI(self) -> _Matrix[Incomplete]: ...
+    @property
+    def A(self) -> np.ndarray[_2D, _DTypeT_co]: ...
+    def getA(self) -> np.ndarray[_2D, _DTypeT_co]: ...
+    @property
+    def A1(self) -> np.ndarray[_AnyShape, _DTypeT_co]: ...
+    def getA1(self) -> np.ndarray[_AnyShape, _DTypeT_co]: ...
+    @property
+    def H(self) -> matrix[_2D, _DTypeT_co]: ...
+    def getH(self) -> matrix[_2D, _DTypeT_co]: ...
+
 def bmat(
     obj: str | Sequence[ArrayLike] | NDArray[Any],
     ldict: Mapping[str, Any] | None = None,
     gdict: Mapping[str, Any] | None = None,
-) -> matrix[tuple[int, int], Any]: ...
+) -> _Matrix[Incomplete]: ...

-def asmatrix(
-    data: ArrayLike, dtype: DTypeLike | None = None
-) -> matrix[tuple[int, int], Any]: ...
+def asmatrix(data: ArrayLike, dtype: DTypeLike | None = None) -> _Matrix[Incomplete]: ...
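Importing ``matrix`` from either location still resolves to the same class; only the stub that defines it moved. A quick sketch:

>>> import numpy as np
>>> m = np.matrix([[1, 2], [3, 4]])    # also reachable via numpy.matrixlib.defmatrix
>>> (m * m).shape                      # matrix products stay 2-D
(2, 2)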
From b648a7cfbb41d4368dcc2566de1593afd0570041 Mon Sep 17 00:00:00 2001
From: jorenham
Date: Fri, 10 Oct 2025 02:47:38 +0200
Subject: [PATCH 0635/1018] TYP: update ``matrix`` type-tests

---
 numpy/typing/tests/data/reveal/matrix.pyi | 28 +++++++++----------
 .../tests/data/reveal/ndarray_conversion.pyi | 2 +-
 2 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/numpy/typing/tests/data/reveal/matrix.pyi b/numpy/typing/tests/data/reveal/matrix.pyi
index 1a7285d428cc..3a32b3d394f0 100644
--- a/numpy/typing/tests/data/reveal/matrix.pyi
+++ b/numpy/typing/tests/data/reveal/matrix.pyi
@@ -9,11 +9,11 @@ mat: np.matrix[_Shape2D, np.dtype[np.int64]]
 ar_f8: npt.NDArray[np.float64]
 ar_ip: npt.NDArray[np.intp]

-assert_type(mat * 5, np.matrix[_Shape2D, Any])
-assert_type(5 * mat, np.matrix[_Shape2D, Any])
+assert_type(mat * 5, np.matrix)
+assert_type(5 * mat, np.matrix)
 mat *= 5

-assert_type(mat**5, np.matrix[_Shape2D, Any])
+assert_type(mat**5, np.matrix)
 mat **= 5

 assert_type(mat.sum(), Any)
@@ -29,11 +29,11 @@ assert_type(mat.argmax(), np.intp)
 assert_type(mat.argmin(), np.intp)
 assert_type(mat.ptp(), np.int64)

-assert_type(mat.sum(axis=0), np.matrix[_Shape2D, Any])
-assert_type(mat.mean(axis=0), np.matrix[_Shape2D, Any])
-assert_type(mat.std(axis=0), np.matrix[_Shape2D, Any])
-assert_type(mat.var(axis=0), np.matrix[_Shape2D, Any])
-assert_type(mat.prod(axis=0), np.matrix[_Shape2D, Any])
+assert_type(mat.sum(axis=0), np.matrix)
+assert_type(mat.mean(axis=0), np.matrix)
+assert_type(mat.std(axis=0), np.matrix)
+assert_type(mat.var(axis=0), np.matrix)
+assert_type(mat.prod(axis=0), np.matrix)
 assert_type(mat.any(axis=0), np.matrix[_Shape2D, np.dtype[np.bool]])
 assert_type(mat.all(axis=0), np.matrix[_Shape2D, np.dtype[np.bool]])
 assert_type(mat.max(axis=0), np.matrix[_Shape2D, np.dtype[np.int64]])
@@ -56,18 +56,18 @@ assert_type(mat.argmin(out=ar_ip), npt.NDArray[np.intp])
 assert_type(mat.ptp(out=ar_f8), npt.NDArray[np.float64])

 assert_type(mat.T, np.matrix[_Shape2D, np.dtype[np.int64]])
-assert_type(mat.I, np.matrix[_Shape2D, Any])
+assert_type(mat.I, np.matrix)
 assert_type(mat.A, np.ndarray[_Shape2D, np.dtype[np.int64]])
 assert_type(mat.A1, npt.NDArray[np.int64])
 assert_type(mat.H, np.matrix[_Shape2D, np.dtype[np.int64]])
 assert_type(mat.getT(), np.matrix[_Shape2D, np.dtype[np.int64]])
-assert_type(mat.getI(), np.matrix[_Shape2D, Any])
+assert_type(mat.getI(), np.matrix)
 assert_type(mat.getA(), np.ndarray[_Shape2D, np.dtype[np.int64]])
 assert_type(mat.getA1(), npt.NDArray[np.int64])
 assert_type(mat.getH(), np.matrix[_Shape2D, np.dtype[np.int64]])

-assert_type(np.bmat(ar_f8), np.matrix[_Shape2D, Any])
-assert_type(np.bmat([[0, 1, 2]]), np.matrix[_Shape2D, Any])
-assert_type(np.bmat("mat"), np.matrix[_Shape2D, Any])
+assert_type(np.bmat(ar_f8), np.matrix)
+assert_type(np.bmat([[0, 1, 2]]), np.matrix)
+assert_type(np.bmat("mat"), np.matrix)

-assert_type(np.asmatrix(ar_f8, dtype=np.int64), np.matrix[_Shape2D, Any])
+assert_type(np.asmatrix(ar_f8, dtype=np.int64), np.matrix)

diff --git a/numpy/typing/tests/data/reveal/ndarray_conversion.pyi b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi
index 4f7d0c28b747..2af616440c5e 100644
--- a/numpy/typing/tests/data/reveal/ndarray_conversion.pyi
+++ b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi
@@ -71,7 +71,7 @@ assert_type(i0_nd.copy("C"), npt.NDArray[np.int_])
 assert_type(i0_nd.view(), npt.NDArray[np.int_])
 assert_type(i0_nd.view(np.float64), npt.NDArray[np.float64])
 assert_type(i0_nd.view(float), npt.NDArray[Any])
-assert_type(i0_nd.view(np.float64, np.matrix), np.matrix[tuple[int, int], Any])
+assert_type(i0_nd.view(np.float64, np.matrix), np.matrix)

 # getfield
 assert_type(i0_nd.getfield("float"), npt.NDArray[Any])

From 125b8771ad8ca1c823c2eee82f7308549c0011d3 Mon Sep 17 00:00:00 2001
From: jorenham
Date: Wed, 15 Oct 2025 22:03:54 +0200
Subject: [PATCH 0636/1018] TYP: fix ``random.Generator.shuffle`` input type

---
 numpy/random/_generator.pyi | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi
index b81f5d322416..1f7c342394e1 100644
--- a/numpy/random/_generator.pyi
+++ b/numpy/random/_generator.pyi
@@ -1,4 +1,4 @@
-from collections.abc import Callable
+from collections.abc import Callable, MutableSequence
 from typing import Any, Literal, TypeAlias, TypeVar, overload

 import numpy as np
@@ -850,7 +850,12 @@ class Generator:
     def permuted(
         self, x: ArrayLike, *, axis: int | None = None, out: NDArray[Any] | None = None
     ) -> NDArray[Any]: ...
-    def shuffle(self, x: ArrayLike, axis: int = 0) -> None: ...
+
+    # axis must be 0 for MutableSequence
+    @overload
+    def shuffle(self, /, x: np.ndarray, axis: int = 0) -> None: ...
+    @overload
+    def shuffle(self, /, x: MutableSequence[Any], axis: Literal[0] = 0) -> None: ...

 def default_rng(
     seed: _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator | RandomState | None = None
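The split overloads encode that only arrays can be shuffled along an arbitrary axis, while plain mutable sequences shuffle only along axis 0. A sketch:

>>> import numpy as np
>>> rng = np.random.default_rng(0)
>>> a = np.arange(6).reshape(2, 3)
>>> rng.shuffle(a, axis=1)     # ndarray: any axis is fine
>>> xs = [1, 2, 3]
>>> rng.shuffle(xs)            # list: axis must stay 0 (the default)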
From 9b040fc582a371626f54a1c4f00b9d69500e8f7d Mon Sep 17 00:00:00 2001
From: jorenham
Date: Thu, 16 Oct 2025 01:10:59 +0200
Subject: [PATCH 0637/1018] DEP: remove ``in1d``

---
 numpy/__init__.py | 1 -
 numpy/__init__.pyi | 5 +-
 numpy/_core/tests/test_deprecations.py | 3 +-
 numpy/conftest.py | 1 -
 numpy/lib/_arraysetops_impl.py | 114 +------------------------
 numpy/lib/_arraysetops_impl.pyi | 14 +--
 numpy/matlib.pyi | 1 -
 7 files changed, 8 insertions(+), 131 deletions(-)

diff --git a/numpy/__init__.py b/numpy/__init__.py
index 7bfc12119694..4e3119238c0a 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -459,7 +459,6 @@
 from .lib._arraypad_impl import pad
 from .lib._arraysetops_impl import (
     ediff1d,
-    in1d,
     intersect1d,
     isin,
     setdiff1d,

diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index 2bf4eb16d1d9..7ba72a7fc96b 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -417,9 +417,8 @@ from numpy.lib._arraypad_impl import (
     pad,
 )

-from numpy.lib._arraysetops_impl import (  # type: ignore[deprecated]
+from numpy.lib._arraysetops_impl import (
     ediff1d,
-    in1d,
     intersect1d,
     isin,
     setdiff1d,
@@ -700,7 +699,7 @@ __all__ = [  # noqa: RUF022
     "iscomplexobj", "isrealobj", "imag", "iscomplex", "isreal", "nan_to_num",
     "real", "real_if_close", "typename", "mintypecode", "common_type",
     # lib._arraysetops_impl.__all__
-    "ediff1d", "in1d", "intersect1d", "isin", "setdiff1d", "setxor1d", "union1d",
+    "ediff1d", "intersect1d", "isin", "setdiff1d", "setxor1d", "union1d",
     "unique", "unique_all", "unique_counts", "unique_inverse", "unique_values",
     # lib._ufunclike_impl.__all__
     "fix", "isneginf", "isposinf",

diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py
index 27e98a563930..87892e29e93e 100644
--- a/numpy/_core/tests/test_deprecations.py
+++ b/numpy/_core/tests/test_deprecations.py
@@ -309,7 +309,7 @@ def test_deprecated_np_lib_math(self):
 class TestLibImports(_DeprecationTestCase):
     # Deprecated in Numpy 1.26.0, 2023-09
     def test_lib_functions_deprecation_call(self):
-        from numpy import in1d, row_stack, trapz
+        from numpy import row_stack, trapz
         from numpy._core.numerictypes import maximum_sctype
         from numpy.lib._function_base_impl import disp
         from numpy.lib._npyio_impl import recfromcsv, recfromtxt
@@ -328,7 +328,6 @@ def test_lib_functions_deprecation_call(self):
         self.assert_deprecated(get_array_wrap)
         self.assert_deprecated(lambda: maximum_sctype(int))

-        self.assert_deprecated(lambda: in1d([1], [1]))
         self.assert_deprecated(lambda: row_stack([[]]))
         self.assert_deprecated(lambda: trapz([1], [1]))
         self.assert_deprecated(lambda: np.chararray)

diff --git a/numpy/conftest.py b/numpy/conftest.py
index 71ece01cc3b5..52f2a75b9df0 100644
--- a/numpy/conftest.py
+++ b/numpy/conftest.py
@@ -163,7 +163,6 @@ def warnings_errors_and_rng(test=None):
         "This function is deprecated.",  # random_integers
         "Data type alias 'a'",  # numpy.rec.fromfile
         "Arrays of 2-dimensional vectors",  # matlib.cross
-        "`in1d` is deprecated",
         "NumPy warning suppression and assertion utilities are deprecated."
     ]
     msg = "|".join(msgs)

diff --git a/numpy/lib/_arraysetops_impl.py b/numpy/lib/_arraysetops_impl.py
index f85a3d55aae9..5d521b1fba60 100644
--- a/numpy/lib/_arraysetops_impl.py
+++ b/numpy/lib/_arraysetops_impl.py
@@ -15,7 +15,6 @@
 """
 import functools
-import warnings
 from typing import NamedTuple

 import numpy as np
@@ -28,7 +27,7 @@

 __all__ = [
-    "ediff1d", "in1d", "intersect1d", "isin", "setdiff1d", "setxor1d",
+    "ediff1d", "intersect1d", "isin", "setdiff1d", "setxor1d",
     "union1d", "unique", "unique_all", "unique_counts", "unique_inverse",
     "unique_values"
 ]
@@ -804,112 +803,7 @@ def setxor1d(ar1, ar2, assume_unique=False):
     return aux[flag[1:] & flag[:-1]]

-def _in1d_dispatcher(ar1, ar2, assume_unique=None, invert=None, *,
-                     kind=None):
-    return (ar1, ar2)
-
-
-@array_function_dispatch(_in1d_dispatcher)
-def in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None):
-    """
-    Test whether each element of a 1-D array is also present in a second array.
-
-    .. deprecated:: 2.0
-        Use :func:`isin` instead of `in1d` for new code.
-
-    Returns a boolean array the same length as `ar1` that is True
-    where an element of `ar1` is in `ar2` and False otherwise.
-
-    Parameters
-    ----------
-    ar1 : (M,) array_like
-        Input array.
-    ar2 : array_like
-        The values against which to test each value of `ar1`.
-    assume_unique : bool, optional
-        If True, the input arrays are both assumed to be unique, which
-        can speed up the calculation. Default is False.
-    invert : bool, optional
-        If True, the values in the returned array are inverted (that is,
-        False where an element of `ar1` is in `ar2` and True otherwise).
-        Default is False. ``np.in1d(a, b, invert=True)`` is equivalent
-        to (but is faster than) ``np.invert(in1d(a, b))``.
-    kind : {None, 'sort', 'table'}, optional
-        The algorithm to use. This will not affect the final result,
-        but will affect the speed and memory use. The default, None,
-        will select automatically based on memory considerations.
-
-        * If 'sort', will use a mergesort-based approach. This will have
-          a memory usage of roughly 6 times the sum of the sizes of
-          `ar1` and `ar2`, not accounting for size of dtypes.
-        * If 'table', will use a lookup table approach similar
-          to a counting sort. This is only available for boolean and
-          integer arrays. This will have a memory usage of the
-          size of `ar1` plus the max-min value of `ar2`. `assume_unique`
-          has no effect when the 'table' option is used.
-        * If None, will automatically choose 'table' if
-          the required memory allocation is less than or equal to
-          6 times the sum of the sizes of `ar1` and `ar2`,
-          otherwise will use 'sort'. This is done to not use
-          a large amount of memory by default, even though
-          'table' may be faster in most cases. If 'table' is chosen,
-          `assume_unique` will have no effect.
-
-    Returns
-    -------
-    in1d : (M,) ndarray, bool
-        The values `ar1[in1d]` are in `ar2`.
-
-    See Also
-    --------
-    isin : Version of this function that preserves the
-           shape of ar1.
-
-    Notes
-    -----
-    `in1d` can be considered as an element-wise function version of the
-    python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly
-    equivalent to ``np.array([item in b for item in a])``.
-    However, this idea fails if `ar2` is a set, or similar (non-sequence)
-    container: As ``ar2`` is converted to an array, in those cases
-    ``asarray(ar2)`` is an object array rather than the expected array of
-    contained values.
-
-    Using ``kind='table'`` tends to be faster than `kind='sort'` if the
-    following relationship is true:
-    ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``,
-    but may use greater memory. The default value for `kind` will
-    be automatically selected based only on memory usage, so one may
-    manually set ``kind='table'`` if memory constraints can be relaxed.
-
-    Examples
-    --------
-    >>> import numpy as np
-    >>> test = np.array([0, 1, 2, 5, 0])
-    >>> states = [0, 2]
-    >>> mask = np.in1d(test, states)
-    >>> mask
-    array([ True, False,  True, False,  True])
-    >>> test[mask]
-    array([0, 2, 0])
-    >>> mask = np.in1d(test, states, invert=True)
-    >>> mask
-    array([False,  True, False,  True, False])
-    >>> test[mask]
-    array([1, 5])
-    """
-
-    # Deprecated in NumPy 2.0, 2023-08-18
-    warnings.warn(
-        "`in1d` is deprecated. Use `np.isin` instead.",
-        DeprecationWarning,
-        stacklevel=2
-    )
-
-    return _in1d(ar1, ar2, assume_unique, invert, kind=kind)
-
-
-def _in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None):
+def _isin(ar1, ar2, assume_unique=False, invert=False, *, kind=None):
     # Ravel both arrays, behavior for the first array could be different
     ar1 = np.asarray(ar1).ravel()
     ar2 = np.asarray(ar2).ravel()
@@ -1178,7 +1072,7 @@ def isin(element, test_elements, assume_unique=False, invert=False, *,
            [ True, False]])
     """
     element = np.asarray(element)
-    return _in1d(element, test_elements, assume_unique=assume_unique,
+    return _isin(element, test_elements, assume_unique=assume_unique,
                  invert=invert, kind=kind).reshape(element.shape)

@@ -1261,4 +1155,4 @@ def setdiff1d(ar1, ar2, assume_unique=False):
     else:
         ar1 = unique(ar1)
         ar2 = unique(ar2)
-    return ar1[_in1d(ar1, ar2, assume_unique=True, invert=True)]
+    return ar1[_isin(ar1, ar2, assume_unique=True, invert=True)]

diff --git a/numpy/lib/_arraysetops_impl.pyi b/numpy/lib/_arraysetops_impl.pyi
index 6ca3ed8282b6..da687da03dde 100644
--- a/numpy/lib/_arraysetops_impl.pyi
+++ b/numpy/lib/_arraysetops_impl.pyi
@@ -7,7 +7,7 @@ from typing import (
     TypeAlias,
     overload,
 )
-from typing_extensions import TypeVar, deprecated
+from typing_extensions import TypeVar

 import numpy as np
 from numpy._typing import (
@@ -20,7 +20,6 @@
 __all__ = [
     "ediff1d",
-    "in1d",
     "intersect1d",
     "isin",
     "setdiff1d",
@@ -461,14 +460,3 @@ def isin(
     *,
     kind: L["sort", "table"] | None = None,
 ) -> NDArray[np.bool]: ...
-
-#
-@deprecated("Use 'isin' instead")
-def in1d(
-    element: ArrayLike,
-    test_elements: ArrayLike,
-    assume_unique: bool = False,
-    invert: bool = False,
-    *,
-    kind: L["sort", "table"] | None = None,
-) -> NDArray[np.bool]: ...

diff --git a/numpy/matlib.pyi b/numpy/matlib.pyi
index 552e518fe9b9..0a4f649a7d9c 100644
--- a/numpy/matlib.pyi
+++ b/numpy/matlib.pyi
@@ -222,7 +222,6 @@ from numpy import (  # noqa: F401
     i0,
     iinfo,
     imag,
-    in1d,
     index_exp,
     indices,
     inexact,
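``isin`` is the drop-in replacement, and unlike ``in1d`` it preserves the shape of its first argument. A sketch, mirroring the example from the removed docstring:

>>> import numpy as np
>>> test = np.array([[0, 1], [2, 5]])
>>> np.isin(test, [0, 2])
array([[ True, False],
       [ True, False]])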
From 6f554c1b25375c1f94f59f042d1321a3b28c3e38 Mon Sep 17 00:00:00 2001
From: jorenham
Date: Thu, 16 Oct 2025 01:23:39 +0200
Subject: [PATCH 0638/1018] DOC: add release note for #29978

---
 doc/release/upcoming_changes/29978.expired.rst | 4 ++++
 1 file changed, 4 insertions(+)
 create mode 100644 doc/release/upcoming_changes/29978.expired.rst

diff --git a/doc/release/upcoming_changes/29978.expired.rst b/doc/release/upcoming_changes/29978.expired.rst
new file mode 100644
index 000000000000..e0f4de1d8715
--- /dev/null
+++ b/doc/release/upcoming_changes/29978.expired.rst
@@ -0,0 +1,4 @@
+Removed ``numpy.in1d``
+----------------------
+
+``numpy.in1d`` has been deprecated since NumPy 2.0 and is now removed in favor of ``numpy.isin``.

From edc42afc187ae623a8418f8e7afa9c2b05be27f0 Mon Sep 17 00:00:00 2001
From: jorenham
Date: Thu, 16 Oct 2025 01:41:15 +0200
Subject: [PATCH 0639/1018] DOC: remove reference to ``numpy.in1d``

---
 numpy/ma/extras.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py
index 381cf75f00c3..b7405d3809f8 100644
--- a/numpy/ma/extras.py
+++ b/numpy/ma/extras.py
@@ -1446,14 +1446,13 @@ def in1d(ar1, ar2, assume_unique=False, invert=False):
     Test whether each element of an array is also present in a second
     array.

-    The output is always a masked array. See `numpy.in1d` for more details.
+    The output is always a masked array.

     We recommend using :func:`isin` instead of `in1d` for new code.

     See Also
     --------
     isin : Version of this function that preserves the shape of ar1.
-    numpy.in1d : Equivalent function for ndarrays.

     Examples
     --------

From 5f7d84a40164c35f75f590e8d6163a2f9977268c Mon Sep 17 00:00:00 2001
From: jorenham
Date: Thu, 16 Oct 2025 01:47:50 +0200
Subject: [PATCH 0640/1018] DEP: remove ``ndindex.ndincr`` (deprecated since
 1.20)

---
 numpy/lib/_index_tricks_impl.py | 16 ----------------
 numpy/lib/_index_tricks_impl.pyi | 6 +-----
 2 files changed, 1 insertion(+), 21 deletions(-)

diff --git a/numpy/lib/_index_tricks_impl.py b/numpy/lib/_index_tricks_impl.py
index a0d04ad3285f..2dbb098c2bad 100644
--- a/numpy/lib/_index_tricks_impl.py
+++ b/numpy/lib/_index_tricks_impl.py
@@ -695,22 +695,6 @@ def __init__(self, *shape):
     def __iter__(self):
         return self

-    def ndincr(self):
-        """
-        Increment the multi-dimensional index by one.
-
-        This method is for backward compatibility only: do not use.
-
-        .. deprecated:: 1.20.0
-            This method has been advised against since numpy 1.8.0, but only
-            started emitting DeprecationWarning as of this version.
-        """
-        # NumPy 1.20.0, 2020-09-08
-        warnings.warn(
-            "`ndindex.ndincr()` is deprecated, use `next(ndindex)` instead",
-            DeprecationWarning, stacklevel=2)
-        next(self)
-
     def __next__(self):
         """
         Standard iterator method, updates the index and returns the index

diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi
index 3c3074b1c1a4..ca8d223aa5d8 100644
--- a/numpy/lib/_index_tricks_impl.pyi
+++ b/numpy/lib/_index_tricks_impl.pyi
@@ -11,7 +11,7 @@ from typing import (
     final,
     overload,
 )
-from typing_extensions import TypeVar, deprecated
+from typing_extensions import TypeVar

 import numpy as np
 from numpy._core.multiarray import ravel_multi_index, unravel_index
@@ -101,10 +101,6 @@ class ndindex:
     def __iter__(self) -> Self: ...
     def __next__(self) -> _AnyShape: ...

-    #
-    @deprecated("Deprecated since 1.20.0.")
-    def ndincr(self, /) -> None: ...
-
 class nd_grid(Generic[_BoolT_co]):
     __slots__ = ("sparse",)
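With the compatibility shim gone, advancing an ``ndindex`` by hand is spelled with the ordinary iterator protocol:

>>> import numpy as np
>>> it = np.ndindex(2, 2)
>>> next(it)     # was: it.ndincr(), which advanced without returning the index
(0, 0)
>>> next(it)
(0, 1)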
From bca310b139393f293ddccdcd73889a83aa66022a Mon Sep 17 00:00:00 2001
From: jorenham
Date: Thu, 16 Oct 2025 01:49:33 +0200
Subject: [PATCH 0641/1018] STY: appease ruff

---
 numpy/lib/_index_tricks_impl.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/numpy/lib/_index_tricks_impl.py b/numpy/lib/_index_tricks_impl.py
index 2dbb098c2bad..666171f7c2aa 100644
--- a/numpy/lib/_index_tricks_impl.py
+++ b/numpy/lib/_index_tricks_impl.py
@@ -1,7 +1,6 @@
 import functools
 import math
 import sys
-import warnings
 from itertools import product

 import numpy as np

From 048845707c84fd86c3997fbfcff9ead9c3a45c7f Mon Sep 17 00:00:00 2001
From: jorenham
Date: Thu, 16 Oct 2025 01:53:07 +0200
Subject: [PATCH 0642/1018] DOC: add release note for #29980

---
 doc/release/upcoming_changes/29980.expired.rst | 5 +++++
 1 file changed, 5 insertions(+)
 create mode 100644 doc/release/upcoming_changes/29980.expired.rst

diff --git a/doc/release/upcoming_changes/29980.expired.rst b/doc/release/upcoming_changes/29980.expired.rst
new file mode 100644
index 000000000000..563ba8aa6929
--- /dev/null
+++ b/doc/release/upcoming_changes/29980.expired.rst
@@ -0,0 +1,5 @@
+Removed ``numpy.ndindex.ndincr()``
+----------------------------------
+
+The ``ndindex.ndincr()`` method has been deprecated since NumPy 1.20 and is now removed;
+use ``next(ndindex)`` instead.

From bc6ce7ccf3d602b6a8197ef151325e8f88fc255f Mon Sep 17 00:00:00 2001
From: jorenham
Date: Thu, 16 Oct 2025 02:23:03 +0200
Subject: [PATCH 0643/1018] TYP: change ``ndenumerate.__new__`` into
 ``__init__``

---
 numpy/lib/_index_tricks_impl.pyi | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi
index 3c3074b1c1a4..908e54f15449 100644
--- a/numpy/lib/_index_tricks_impl.pyi
+++ b/numpy/lib/_index_tricks_impl.pyi
@@ -61,21 +61,21 @@ _Trans1DT_co = TypeVar("_Trans1DT_co", bound=int, default=L[-1], covariant=True)

 class ndenumerate(Generic[_ScalarT_co]):
     @overload
-    def __new__(cls, arr: _FiniteNestedSequence[_SupportsArray[np.dtype[_ScalarT]]]) -> ndenumerate[_ScalarT]: ...
+    def __init__(self: ndenumerate[_ScalarT], arr: _FiniteNestedSequence[_SupportsArray[np.dtype[_ScalarT]]]) -> None: ...
     @overload
-    def __new__(cls, arr: str | _NestedSequence[str]) -> ndenumerate[np.str_]: ...
+    def __init__(self: ndenumerate[np.str_], arr: str | _NestedSequence[str]) -> None: ...
     @overload
-    def __new__(cls, arr: bytes | _NestedSequence[bytes]) -> ndenumerate[np.bytes_]: ...
+    def __init__(self: ndenumerate[np.bytes_], arr: bytes | _NestedSequence[bytes]) -> None: ...
     @overload
-    def __new__(cls, arr: bool | _NestedSequence[bool]) -> ndenumerate[np.bool]: ...
+    def __init__(self: ndenumerate[np.bool], arr: bool | _NestedSequence[bool]) -> None: ...
     @overload
-    def __new__(cls, arr: int | _NestedSequence[int]) -> ndenumerate[np.intp]: ...
+    def __init__(self: ndenumerate[np.intp], arr: int | _NestedSequence[int]) -> None: ...
     @overload
-    def __new__(cls, arr: float | _NestedSequence[float]) -> ndenumerate[np.float64]: ...
+    def __init__(self: ndenumerate[np.float64], arr: float | _NestedSequence[float]) -> None: ...
     @overload
-    def __new__(cls, arr: complex | _NestedSequence[complex]) -> ndenumerate[np.complex128]: ...
+    def __init__(self: ndenumerate[np.complex128], arr: complex | _NestedSequence[complex]) -> None: ...
     @overload
-    def __new__(cls, arr: object) -> ndenumerate[Any]: ...
+    def __init__(self: ndenumerate[Incomplete], arr: object) -> None: ...

     # The first overload is a (semi-)workaround for a mypy bug (tested with v1.10 and v1.11)
     @overload
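Moving the overloads from ``__new__`` to ``__init__`` keeps the element-type inference intact; the scalar type is still picked up from the argument. A sketch:

>>> import numpy as np
>>> for idx, val in np.ndenumerate([[1.0, 2.0]]):   # inferred as ndenumerate[np.float64]
...     print(idx, val)
(0, 0) 1.0
(0, 1) 2.0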
From 813d3ec3e3d5e8cf195377a5fa1afbe93ec108af Mon Sep 17 00:00:00 2001
From: jorenham
Date: Thu, 16 Oct 2025 02:47:31 +0200
Subject: [PATCH 0644/1018] DOC: remove reference to ``numpy.in1d`` from the
 reference docs

---
 doc/source/reference/routines.set.rst | 1 -
 1 file changed, 1 deletion(-)

diff --git a/doc/source/reference/routines.set.rst b/doc/source/reference/routines.set.rst
index fbb5afdc1b75..47080f96fff8 100644
--- a/doc/source/reference/routines.set.rst
+++ b/doc/source/reference/routines.set.rst
@@ -19,7 +19,6 @@ Boolean operations
 .. autosummary::
    :toctree: generated/

-   in1d
    intersect1d
    isin
    setdiff1d

From edc5f47a5cca02c3def39f0977130070af1bbec7 Mon Sep 17 00:00:00 2001
From: jorenham
Date: Thu, 16 Oct 2025 03:04:05 +0200
Subject: [PATCH 0645/1018] TYP: change ``nditer.__new__`` into ``__init__``
 and tighten its signature

---
 numpy/__init__.pyi | 43 +++++++++++++++++++++--------
 numpy/typing/tests/data/fail/nditer.pyi | 2 +-
 2 files changed, 31 insertions(+), 14 deletions(-)

diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index 2bf4eb16d1d9..5cfc6ca347a9 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -990,7 +990,7 @@ _NDIterFlagsOp: TypeAlias = L[
     "updateifcopy",
     "virtual",
     "writeonly",
-    "writemasked"
+    "writemasked",
 ]

 _MemMapModeKind: TypeAlias = L[
@@ -5776,18 +5776,35 @@ class busdaycalendar:

 @final
 class nditer:
-    def __new__(
-        cls,
-        op: ArrayLike | Sequence[ArrayLike | None],
-        flags: Sequence[_NDIterFlagsKind] | None = ...,
-        op_flags: Sequence[Sequence[_NDIterFlagsOp]] | None = ...,
-        op_dtypes: DTypeLike | Sequence[DTypeLike | None] | None = ...,
-        order: _OrderKACF = ...,
-        casting: _CastingKind = ...,
-        op_axes: Sequence[Sequence[SupportsIndex]] | None = ...,
-        itershape: _ShapeLike | None = ...,
-        buffersize: SupportsIndex = ...,
-    ) -> nditer: ...
+    @overload
+    def __init__(
+        self,
+        /,
+        op: ArrayLike,
+        flags: Sequence[_NDIterFlagsKind] | None = None,
+        op_flags: Sequence[_NDIterFlagsOp] | None = None,
+        op_dtypes: DTypeLike | None = None,
+        order: _OrderKACF = "K",
+        casting: _CastingKind = "safe",
+        op_axes: Sequence[SupportsIndex] | None = None,
+        itershape: _ShapeLike | None = None,
+        buffersize: SupportsIndex = 0,
+    ) -> None: ...
+    @overload
+    def __init__(
+        self,
+        /,
+        op: Sequence[ArrayLike | None],
+        flags: Sequence[_NDIterFlagsKind] | None = None,
+        op_flags: Sequence[Sequence[_NDIterFlagsOp]] | None = None,
+        op_dtypes: Sequence[DTypeLike | None] | None = None,
+        order: _OrderKACF = "K",
+        casting: _CastingKind = "safe",
+        op_axes: Sequence[Sequence[SupportsIndex]] | None = None,
+        itershape: _ShapeLike | None = None,
+        buffersize: SupportsIndex = 0,
+    ) -> None: ...
+
     def __enter__(self) -> nditer: ...
     def __exit__(
         self,

diff --git a/numpy/typing/tests/data/fail/nditer.pyi b/numpy/typing/tests/data/fail/nditer.pyi
index cb64061e45fe..fae728da454e 100644
--- a/numpy/typing/tests/data/fail/nditer.pyi
+++ b/numpy/typing/tests/data/fail/nditer.pyi
@@ -5,4 +5,4 @@ class Test(np.nditer): ...  # type: ignore[misc]
 np.nditer([0, 1], flags=["test"])  # type: ignore[list-item]
 np.nditer([0, 1], op_flags=[["test"]])  # type: ignore[list-item]
 np.nditer([0, 1], itershape=(1.0,))  # type: ignore[arg-type]
-np.nditer([0, 1], buffersize=1.0)  # type: ignore[arg-type]
+np.nditer([0, 1], buffersize=1.0)  # type: ignore[call-overload]
@overload - def field(self, /, attr: int | str, val: None = None) -> Any: ... + def field(self, /, attr: int | str, val: None = None) -> Incomplete: ... # exported in `numpy.rec` class format_parser: @@ -174,7 +182,7 @@ def fromrecords( # exported in `numpy.rec` @overload def fromstring( - datastring: _SupportsBuffer, + datastring: Buffer, dtype: DTypeLike | None, shape: _ShapeLike | None = None, offset: int = 0, @@ -186,7 +194,7 @@ def fromstring( ) -> _RecArray[record]: ... @overload def fromstring( - datastring: _SupportsBuffer, + datastring: Buffer, dtype: None = None, shape: _ShapeLike | None = None, offset: int = 0, From e43f070e1e64ea724f1ac83ebcf9808efd8e23cd Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 04:04:33 +0200 Subject: [PATCH 0647/1018] DEP: remove the ``fix_imports`` parameter from ``save()`` --- numpy/_core/tests/test_deprecations.py | 25 +------------------------ numpy/lib/_npyio_impl.py | 19 +++---------------- numpy/lib/_npyio_impl.pyi | 13 +------------ numpy/typing/tests/data/fail/npyio.pyi | 4 ++-- 4 files changed, 7 insertions(+), 54 deletions(-) diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 27e98a563930..d7450fcc4101 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -11,7 +11,7 @@ import numpy as np import numpy._core._struct_ufunc_tests as struct_ufunc from numpy._core._multiarray_tests import fromstring_null_term_c_api # noqa: F401 -from numpy.testing import assert_raises, temppath +from numpy.testing import assert_raises class _DeprecationTestCase: @@ -392,29 +392,6 @@ def test_parenthesized_repeat_count(self, string): self.assert_deprecated(np.dtype, args=(string,)) -class TestDeprecatedSaveFixImports(_DeprecationTestCase): - # Deprecated in Numpy 2.1, 2024-05 - message = "The 'fix_imports' flag is deprecated and has no effect." - - def test_deprecated(self): - with temppath(suffix='.npy') as path: - sample_args = (path, np.array(np.zeros((1024, 10)))) - self.assert_not_deprecated(np.save, args=sample_args) - self.assert_deprecated(np.save, args=sample_args, - kwargs={'fix_imports': True}) - self.assert_deprecated(np.save, args=sample_args, - kwargs={'fix_imports': False}) - for allow_pickle in [True, False]: - self.assert_not_deprecated(np.save, args=sample_args, - kwargs={'allow_pickle': allow_pickle}) - self.assert_deprecated(np.save, args=sample_args, - kwargs={'allow_pickle': allow_pickle, - 'fix_imports': True}) - self.assert_deprecated(np.save, args=sample_args, - kwargs={'allow_pickle': allow_pickle, - 'fix_imports': False}) - - class TestAddNewdocUFunc(_DeprecationTestCase): # Deprecated in Numpy 2.2, 2024-11 def test_deprecated(self): diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index e9a8509fc685..72e746f19eba 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -497,12 +497,12 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, f"Failed to interpret file {file!r} as a pickle") from e -def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None): +def _save_dispatcher(file, arr, allow_pickle=None): return (arr,) @array_function_dispatch(_save_dispatcher) -def save(file, arr, allow_pickle=True, fix_imports=np._NoValue): +def save(file, arr, allow_pickle=True): """ Save an array to a binary file in NumPy ``.npy`` format. 
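
A quick aside on the signature that survives this patch (an illustrative
round-trip, not part of the diff; only documented NumPy calls are used)::

    >>> import io
    >>> import numpy as np
    >>> buf = io.BytesIO()
    >>> np.save(buf, np.arange(3))  # allow_pickle=True stays the default
    >>> _ = buf.seek(0)
    >>> np.load(buf)
    array([0, 1, 2])
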
@@ -523,12 +523,6 @@ def save(file, arr, allow_pickle=True, fix_imports=np._NoValue): require libraries that are not available, and not all pickled data is compatible between different versions of Python). Default: True - fix_imports : bool, optional - The `fix_imports` flag is deprecated and has no effect. - - .. deprecated:: 2.1 - This flag is ignored since NumPy 1.17 and was only needed to - support loading in Python 2 some files written in Python 3. See Also -------- @@ -565,12 +559,6 @@ def save(file, arr, allow_pickle=True, fix_imports=np._NoValue): >>> print(a, b) # [1 2] [1 3] """ - if fix_imports is not np._NoValue: - # Deprecated 2024-05-16, NumPy 2.1 - warnings.warn( - "The 'fix_imports' flag is deprecated and has no effect. " - "(Deprecated in NumPy 2.1)", - DeprecationWarning, stacklevel=2) if hasattr(file, 'write'): file_ctx = contextlib.nullcontext(file) else: @@ -581,8 +569,7 @@ def save(file, arr, allow_pickle=True, fix_imports=np._NoValue): with file_ctx as fid: arr = np.asanyarray(arr) - format.write_array(fid, arr, allow_pickle=allow_pickle, - pickle_kwargs={'fix_imports': fix_imports}) + format.write_array(fid, arr, allow_pickle=allow_pickle) def _savez_dispatcher(file, *args, allow_pickle=True, **kwds): diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index 8a4dfa27ed9b..a859118329f1 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -21,7 +21,7 @@ from typing import ( overload, type_check_only, ) -from typing_extensions import TypeVar, deprecated, override +from typing_extensions import TypeVar, override import numpy as np from numpy._core.multiarray import packbits, unpackbits @@ -105,19 +105,8 @@ def load( max_header_size: int = 10_000, ) -> Any: ... -@overload def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool = True) -> None: ... -@overload -@deprecated("The 'fix_imports' flag is deprecated in NumPy 2.1.") -def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool, fix_imports: bool) -> None: ... -@overload -@deprecated("The 'fix_imports' flag is deprecated in NumPy 2.1.") -def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool = True, *, fix_imports: bool) -> None: ... - -# def savez(file: _FNameWriteBytes, *args: ArrayLike, allow_pickle: bool = True, **kwds: ArrayLike) -> None: ... - -# def savez_compressed(file: _FNameWriteBytes, *args: ArrayLike, allow_pickle: bool = True, **kwds: ArrayLike) -> None: ... 
# File-like objects only have to implement `__iter__` and, diff --git a/numpy/typing/tests/data/fail/npyio.pyi b/numpy/typing/tests/data/fail/npyio.pyi index d57ebe3a2e3e..e20be3a2a247 100644 --- a/numpy/typing/tests/data/fail/npyio.pyi +++ b/numpy/typing/tests/data/fail/npyio.pyi @@ -12,8 +12,8 @@ AR_i8: npt.NDArray[np.int64] np.load(str_file) # type: ignore[arg-type] -np.save(bytes_path, AR_i8) # type: ignore[call-overload] -np.save(str_path, AR_i8, fix_imports=True) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +np.save(bytes_path, AR_i8) # type: ignore[arg-type] +np.save(str_path, AR_i8, fix_imports=True) # type: ignore[call-arg] np.savez(bytes_path, AR_i8) # type: ignore[arg-type] From 11629d6623c9856d670ff40d712040145bc11c8e Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 04:27:43 +0200 Subject: [PATCH 0648/1018] DOC: add release note for #29984 --- doc/release/upcoming_changes/29984.expired.rst | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 doc/release/upcoming_changes/29984.expired.rst diff --git a/doc/release/upcoming_changes/29984.expired.rst b/doc/release/upcoming_changes/29984.expired.rst new file mode 100644 index 000000000000..bcce0dedd4a7 --- /dev/null +++ b/doc/release/upcoming_changes/29984.expired.rst @@ -0,0 +1,5 @@ +Removed ``fix_imports`` parameter from ``numpy.save`` +----------------------------------------------------- + +The ``fix_imports`` parameter was deprecated in NumPy 2.1.0 and is now removed. +This flag has been ignored since NumPy 1.17 and was only needed to support loading files in Python 2 that were written in Python 3. From 472935a944b81fc595c823c7be8ad00279d00113 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 04:51:33 +0200 Subject: [PATCH 0649/1018] MAINT: Remove ``_core.MachAr`` remnants --- numpy/_core/__init__.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/numpy/_core/__init__.py b/numpy/_core/__init__.py index 7b53022c3bc3..18b250f9972b 100644 --- a/numpy/_core/__init__.py +++ b/numpy/_core/__init__.py @@ -187,18 +187,6 @@ def _DType_reduce(DType): return _DType_reconstruct, (scalar_type,) -def __getattr__(name): - # Deprecated 2022-11-22, NumPy 1.25. - if name == "MachAr": - import warnings - warnings.warn( - "The `np._core.MachAr` is considered private API (NumPy 1.24)", - DeprecationWarning, stacklevel=2, - ) - return _machar.MachAr - raise AttributeError(f"Module {__name__!r} has no attribute {name!r}") - - import copyreg copyreg.pickle(ufunc, _ufunc_reduce) From eff460aeb32de60354713256370ad340bd94448d Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 05:20:36 +0200 Subject: [PATCH 0650/1018] DEP: Remove ``ndarray.ctypes.get_*`` methods (deprecated since 1.21) --- numpy/_core/_internal.py | 40 -------------------- numpy/_core/_internal.pyi | 12 +----- numpy/_core/tests/test_deprecations.py | 13 +------ numpy/typing/tests/data/pass/ndarray_misc.py | 13 ------- 4 files changed, 2 insertions(+), 76 deletions(-) diff --git a/numpy/_core/_internal.py b/numpy/_core/_internal.py index e00e1b2c1f60..490b3407997a 100644 --- a/numpy/_core/_internal.py +++ b/numpy/_core/_internal.py @@ -365,46 +365,6 @@ def _as_parameter_(self): """ return self.data_as(ctypes.c_void_p) - # Numpy 1.21.0, 2021-05-18 - - def get_data(self): - """Deprecated getter for the `_ctypes.data` property. - - .. deprecated:: 1.21 - """ - warnings.warn('"get_data" is deprecated. 
Use "data" instead', - DeprecationWarning, stacklevel=2) - return self.data - - def get_shape(self): - """Deprecated getter for the `_ctypes.shape` property. - - .. deprecated:: 1.21 - """ - warnings.warn('"get_shape" is deprecated. Use "shape" instead', - DeprecationWarning, stacklevel=2) - return self.shape - - def get_strides(self): - """Deprecated getter for the `_ctypes.strides` property. - - .. deprecated:: 1.21 - """ - warnings.warn('"get_strides" is deprecated. Use "strides" instead', - DeprecationWarning, stacklevel=2) - return self.strides - - def get_as_parameter(self): - """Deprecated getter for the `_ctypes._as_parameter_` property. - - .. deprecated:: 1.21 - """ - warnings.warn( - '"get_as_parameter" is deprecated. Use "_as_parameter_" instead', - DeprecationWarning, stacklevel=2, - ) - return self._as_parameter_ - def _newnames(datatype, order): """ diff --git a/numpy/_core/_internal.pyi b/numpy/_core/_internal.pyi index 04c26ca284db..6e37022ffd56 100644 --- a/numpy/_core/_internal.pyi +++ b/numpy/_core/_internal.pyi @@ -2,7 +2,7 @@ import ctypes as ct import re from collections.abc import Callable, Iterable from typing import Any, Final, Generic, Self, overload -from typing_extensions import TypeVar, deprecated +from typing_extensions import TypeVar import numpy as np import numpy.typing as npt @@ -47,16 +47,6 @@ class _ctypes(Generic[_PT_co]): def shape_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ... def strides_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ... - # - @deprecated('"get_data" is deprecated. Use "data" instead') - def get_data(self, /) -> _PT_co: ... - @deprecated('"get_shape" is deprecated. Use "shape" instead') - def get_shape(self, /) -> ct.Array[c_intp]: ... - @deprecated('"get_strides" is deprecated. Use "strides" instead') - def get_strides(self, /) -> ct.Array[c_intp]: ... - @deprecated('"get_as_parameter" is deprecated. Use "_as_parameter_" instead') - def get_as_parameter(self, /) -> ct.c_void_p: ... - class dummy_ctype(Generic[_T_co]): _cls: type[_T_co] diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 27e98a563930..e2bd1dc3b5d3 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -197,20 +197,9 @@ def test_not_deprecated(self): class TestCtypesGetter(_DeprecationTestCase): - # Deprecated 2021-05-18, Numpy 1.21.0 - warning_cls = DeprecationWarning ctypes = np.array([1]).ctypes - @pytest.mark.parametrize( - "name", ["get_data", "get_shape", "get_strides", "get_as_parameter"] - ) - def test_deprecated(self, name: str) -> None: - func = getattr(self.ctypes, name) - self.assert_deprecated(func) - - @pytest.mark.parametrize( - "name", ["data", "shape", "strides", "_as_parameter_"] - ) + @pytest.mark.parametrize("name", ["data", "shape", "strides", "_as_parameter_"]) def test_not_deprecated(self, name: str) -> None: self.assert_not_deprecated(lambda: getattr(self.ctypes, name)) diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py index 4d4e1f872763..a17082aeba15 100644 --- a/numpy/typing/tests/data/pass/ndarray_misc.py +++ b/numpy/typing/tests/data/pass/ndarray_misc.py @@ -11,8 +11,6 @@ import operator from typing import Any, cast -import pytest - import numpy as np import numpy.typing as npt @@ -189,14 +187,3 @@ class IntSubClass(npt.NDArray[np.intp]): ... 
A_void: npt.NDArray[np.void] = np.empty(3, [("yop", float), ("yap", float)]) A_void["yop"] = A_float[:, 0] A_void["yap"] = A_float[:, 1] - -# deprecated - -with pytest.deprecated_call(): - ctypes_obj.get_data() # type: ignore[deprecated] # pyright: ignore[reportDeprecated] -with pytest.deprecated_call(): - ctypes_obj.get_shape() # type: ignore[deprecated] # pyright: ignore[reportDeprecated] -with pytest.deprecated_call(): - ctypes_obj.get_strides() # type: ignore[deprecated] # pyright: ignore[reportDeprecated] -with pytest.deprecated_call(): - ctypes_obj.get_as_parameter() # type: ignore[deprecated] # pyright: ignore[reportDeprecated] From aa1ae516ff084b2d72205b375fd6224d9a1dc665 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 05:35:14 +0200 Subject: [PATCH 0651/1018] DOC: add release note for #29986 --- doc/release/upcoming_changes/29986.expired.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 doc/release/upcoming_changes/29986.expired.rst diff --git a/doc/release/upcoming_changes/29986.expired.rst b/doc/release/upcoming_changes/29986.expired.rst new file mode 100644 index 000000000000..2a6b44380dd4 --- /dev/null +++ b/doc/release/upcoming_changes/29986.expired.rst @@ -0,0 +1,10 @@ +Removal of four undocumented ``ndarray.ctypes`` methods +------------------------------------------------------- +Four undocumented methods of the ``ndarray.ctypes`` object have been removed: + +* ``_ctypes.get_data()`` (use ``_ctypes.data`` instead) +* ``_ctypes.get_shape()`` (use ``_ctypes.shape`` instead) +* ``_ctypes.get_strides()`` (use ``_ctypes.strides`` instead) +* ``_ctypes.get_as_parameter()`` (use ``_ctypes._as_parameter_`` instead) + +These methods have been deprecated since NumPy 1.21. From b72ff69c9c7d200622837f07fd41289fefd50f9a Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 05:51:25 +0200 Subject: [PATCH 0652/1018] MAINT: remove remnants of ``linalg.linalg`` and ``fft.helper`` --- numpy/fft/__init__.py | 4 +--- numpy/fft/helper.py | 1 - numpy/fft/helper.pyi | 1 - numpy/fft/meson.build | 2 -- numpy/linalg/__init__.py | 5 +---- numpy/linalg/linalg.py | 1 - numpy/linalg/linalg.pyi | 1 - numpy/linalg/meson.build | 2 -- numpy/tests/test_public_api.py | 2 -- 9 files changed, 2 insertions(+), 17 deletions(-) delete mode 100644 numpy/fft/helper.py delete mode 100644 numpy/fft/helper.pyi delete mode 100644 numpy/linalg/linalg.py delete mode 100644 numpy/linalg/linalg.pyi diff --git a/numpy/fft/__init__.py b/numpy/fft/__init__.py index 55f7320f653f..2de162c5ec71 100644 --- a/numpy/fft/__init__.py +++ b/numpy/fft/__init__.py @@ -200,9 +200,7 @@ """ -# TODO: `numpy.fft.helper`` was deprecated in NumPy 2.0. It should -# be deleted once downstream libraries move to `numpy.fft`. -from . import _helper, _pocketfft, helper +from . 
import _helper, _pocketfft
 from ._helper import *
 from ._pocketfft import *
diff --git a/numpy/fft/helper.py b/numpy/fft/helper.py
deleted file mode 100644
index 8b137891791f..000000000000
--- a/numpy/fft/helper.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/numpy/fft/helper.pyi b/numpy/fft/helper.pyi
deleted file mode 100644
index 8b137891791f..000000000000
--- a/numpy/fft/helper.pyi
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/numpy/fft/meson.build b/numpy/fft/meson.build
index e18949af5e31..a5b2413ebb90 100644
--- a/numpy/fft/meson.build
+++ b/numpy/fft/meson.build
@@ -23,8 +23,6 @@ py.install_sources(
     '_pocketfft.pyi',
     '_helper.py',
     '_helper.pyi',
-    'helper.py',
-    'helper.pyi',
   ],
   subdir: 'numpy/fft'
 )
diff --git a/numpy/linalg/__init__.py b/numpy/linalg/__init__.py
index fa230ece580c..cc482cfc9579 100644
--- a/numpy/linalg/__init__.py
+++ b/numpy/linalg/__init__.py
@@ -84,10 +84,7 @@
 """
 # To get sub-modules
-from . import (
-    _linalg,
-    linalg,  # deprecated in NumPy 2.0
-)
+from . import _linalg
 from ._linalg import *

 __all__ = _linalg.__all__.copy()  # noqa: PLE0605
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
deleted file mode 100644
index 8b137891791f..000000000000
--- a/numpy/linalg/linalg.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/numpy/linalg/linalg.pyi b/numpy/linalg/linalg.pyi
deleted file mode 100644
index 8b137891791f..000000000000
--- a/numpy/linalg/linalg.pyi
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/numpy/linalg/meson.build b/numpy/linalg/meson.build
index e2f8136208d6..1d3297286317 100644
--- a/numpy/linalg/meson.build
+++ b/numpy/linalg/meson.build
@@ -47,8 +47,6 @@ py.install_sources(
     '_linalg.pyi',
     '_umath_linalg.pyi',
     'lapack_lite.pyi',
-    'linalg.py',
-    'linalg.pyi',
   ],
   subdir: 'numpy/linalg'
 )
diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py
index 6a36358c3a06..f6fa8611e181 100644
--- a/numpy/tests/test_public_api.py
+++ b/numpy/tests/test_public_api.py
@@ -190,10 +190,8 @@ def test_NPY_NO_EXPORT():
     "f2py.rules",
     "f2py.symbolic",
     "f2py.use_rules",
-    "fft.helper",
     "lib.user_array",  # note: not in np.lib, but probably should just be deleted
     "linalg.lapack_lite",
-    "linalg.linalg",
     "ma.core",
     "ma.testutils",
     "matlib",

From 35581560d50887c47994d631b7505f4494e50328 Mon Sep 17 00:00:00 2001
From: Aaron Kollasch
Date: Wed, 15 Oct 2025 23:39:56 -0400
Subject: [PATCH 0653/1018] BUG: Fix np.strings.slice if start > stop

If the slice start > stop, then `slice_strided_loop()` attempts to
allocate a string of negative size, causing a MemoryError.

To replicate:
```
>>> a = np.array(['test-strings'], dtype="T")
>>> np.strings.slice(a, 6, 5)
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "lib/python3.12/site-packages/numpy/_core/strings.py", line 1813, in slice
    return _slice(a, start, stop, step)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
MemoryError: Failed to allocate string in slice
```
---
 numpy/_core/src/umath/stringdtype_ufuncs.cpp |  1 +
 numpy/_core/tests/test_strings.py            | 15 ++++++++++++---
 2 files changed, 13 insertions(+), 3 deletions(-)

diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp
index e00612a369f0..ebc10586bf8b 100644
--- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp
+++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp
@@ -2271,6 +2271,7 @@ slice_strided_loop(PyArrayMethod_Context *context, char *const data[],
                 ? codepoint_offsets[stop]
                 : (unsigned char *)is.buf + is.size);
         npy_intp outsize = stop_bounded - start_bounded;
+        outsize = outsize < 0 ?
0 : outsize; if (load_new_string(ops, &os, outsize, oallocator, "slice") < 0) { goto fail; diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index e5c3bb87c773..24b9af9c160c 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -992,7 +992,8 @@ def test_rpartition(self, buf, sep, res1, res2, res3, dt): (6,), # test index past the end (6, None), (6, None, -1), - (6, 7), # test stop index past the end + (6, 7), # test start and stop index past the end + (4, 3), # test start > stop index (-1,), (-1, None), (-1, None, -1), @@ -1016,8 +1017,16 @@ def test_rpartition(self, buf, sep, res1, res2, res3, dt): (None, None, -1), ([0, 6], [-1, 0], [2, -1]), ]) - def test_slice(self, args, dt): - buf = np.array(["hello", "world"], dtype=dt) + @pytest.mark.parametrize("buf", [ + ["hello", "world"], + ['hello world', 'γεια σου κόσμε', '你好世界', '👋 🌍'], + ]) + def test_slice(self, args, buf, dt): + if dt == "S" and "你好世界" in buf: + pytest.skip("Bytes dtype does not support non-ascii input") + if len(buf) == 4: + args = tuple(s * 2 if isinstance(s, list) else s for s in args) + buf = np.array(buf, dtype=dt) act = np.strings.slice(buf, *args) bcast_args = tuple(np.broadcast_to(arg, buf.shape) for arg in args) res = np.array([s[slice(*arg)] From f338a83f97e22a4a8edbcc5a422f1eccdfb41ab0 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 07:56:06 +0200 Subject: [PATCH 0654/1018] TYP: some minor fixes for the constants in ``_core.multiarray`` --- numpy/_core/multiarray.pyi | 49 ++++++++++++++++++++------------------ 1 file changed, 26 insertions(+), 23 deletions(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index b90453d66d02..dd47e8e872d7 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -365,37 +365,40 @@ NEEDS_INIT: Final = 8 NEEDS_PYAPI: Final = 16 USE_GETITEM: Final = 32 USE_SETITEM: Final = 64 -DATETIMEUNITS: Final[CapsuleType] -_ARRAY_API: Final[CapsuleType] -_flagdict: Final[dict[str, int]] -_monotonicity: Final[Callable[..., object]] -_place: Final[Callable[..., object]] -_reconstruct: Final[Callable[..., object]] -_vec_string: Final[Callable[..., object]] -correlate2: Final[Callable[..., object]] -dragon4_positional: Final[Callable[..., object]] -dragon4_scientific: Final[Callable[..., object]] -interp_complex: Final[Callable[..., object]] -set_datetimeparse_function: Final[Callable[..., object]] +DATETIMEUNITS: Final[CapsuleType] = ... +_ARRAY_API: Final[CapsuleType] = ... + +_flagdict: Final[dict[str, int]] = ... +_monotonicity: Final[Callable[..., object]] = ... +_place: Final[Callable[..., object]] = ... +_reconstruct: Final[Callable[..., object]] = ... +_vec_string: Final[Callable[..., object]] = ... +correlate2: Final[Callable[..., object]] = ... +dragon4_positional: Final[Callable[..., object]] = ... +dragon4_scientific: Final[Callable[..., object]] = ... +interp_complex: Final[Callable[..., object]] = ... +set_datetimeparse_function: Final[Callable[..., object]] = ... + def get_handler_name(a: NDArray[Any] = ..., /) -> str | None: ... def get_handler_version(a: NDArray[Any] = ..., /) -> int | None: ... def format_longfloat(x: np.longdouble, precision: int) -> str: ... def scalar(dtype: _DTypeT, object: bytes | object = ...) -> ndarray[tuple[()], _DTypeT]: ... def set_typeDict(dict_: dict[str, np.dtype], /) -> None: ... -typeinfo: Final[dict[str, np.dtype[np.generic]]] + +typeinfo: Final[dict[str, np.dtype[np.generic]]] = ... 
ALLOW_THREADS: Final[int] # 0 or 1 (system-specific) -BUFSIZE: L[8192] -CLIP: L[0] -WRAP: L[1] -RAISE: L[2] -MAXDIMS: L[32] -MAY_SHARE_BOUNDS: L[0] -MAY_SHARE_EXACT: L[-1] -tracemalloc_domain: L[389047] +BUFSIZE: Final = 8_192 +CLIP: Final = 0 +WRAP: Final = 1 +RAISE: Final = 2 +MAXDIMS: Final = 64 +MAY_SHARE_BOUNDS: Final = 0 +MAY_SHARE_EXACT: Final = -1 +tracemalloc_domain: Final = 389_047 -zeros: Final[_ConstructorEmpty] -empty: Final[_ConstructorEmpty] +zeros: Final[_ConstructorEmpty] = ... +empty: Final[_ConstructorEmpty] = ... @overload def empty_like( From 18bc40b5e6ea9a8e4c31fc1be06401fc4fd46b3e Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Thu, 16 Oct 2025 09:51:03 +0300 Subject: [PATCH 0655/1018] Doc: tweak docstring [skip actions][skip azp][skip cirrus] --- doc/source/reference/arrays.datetime.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index ac402c973fd8..94468c3a7363 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -59,7 +59,8 @@ you can specify a different unit (e.g., 'M', 'D', 'h') to truncate the result to that precision. Units finer than seconds (such as 'ms' or 'ns') are supported but will show fractional parts as zeros, effectively truncating to whole seconds. The string "today" is also supported and returns the current UTC -date with day precision. +date with day precision. It also supports the same precision specifiers +as ``now``. .. admonition:: Example From f13b468202f7bbb0c0c430f05a1876dba49e52fb Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Thu, 16 Oct 2025 10:08:37 +0300 Subject: [PATCH 0656/1018] remove misleading f(x) [skip actions][skip azp][skip cirrus] MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Melissa Weber Mendonça --- numpy/lib/_polynomial_impl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_polynomial_impl.py b/numpy/lib/_polynomial_impl.py index d919373ea1f4..ee5b8593a9db 100644 --- a/numpy/lib/_polynomial_impl.py +++ b/numpy/lib/_polynomial_impl.py @@ -457,7 +457,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): A summary of the differences can be found in the :doc:`transition guide `. - Fit a polynomial ``f(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg` + Fit a polynomial ``p[0] * x**deg + ... + p[deg]`` of degree `deg` to points `(x, y)`. Returns a vector of coefficients `p` that minimises the squared error in the order `deg`, `deg-1`, ... `0`. 
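
The docstring change above is purely cosmetic, but the coefficient ordering it
describes is easy to verify; a small self-contained sketch (not part of the
patch) confirms that the highest power comes first::

    >>> import numpy as np
    >>> x = np.array([0.0, 1.0, 2.0, 3.0])
    >>> y = 3.0 * x**2 + 2.0 * x + 1.0   # exact quadratic data
    >>> p = np.polyfit(x, y, 2)          # approximately [3., 2., 1.]
    >>> bool(np.allclose(np.polyval(p, x), y))
    True
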
From 95cdc307c91b53b33e48cffb66184209bfe7eff5 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 09:45:38 +0200 Subject: [PATCH 0657/1018] DEP: remove the ``newshape`` parameter from ``reshape()`` --- numpy/_core/fromnumeric.py | 25 +++---------------------- numpy/_core/fromnumeric.pyi | 16 ---------------- numpy/_core/tests/test_numeric.py | 9 --------- 3 files changed, 3 insertions(+), 47 deletions(-) diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 41f899c54815..6cbd1385d459 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -201,13 +201,12 @@ def take(a, indices, axis=None, out=None, mode='raise'): return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode) -def _reshape_dispatcher(a, /, shape=None, order=None, *, newshape=None, - copy=None): +def _reshape_dispatcher(a, /, shape=None, order=None, *, copy=None): return (a,) @array_function_dispatch(_reshape_dispatcher) -def reshape(a, /, shape=None, order='C', *, newshape=None, copy=None): +def reshape(a, /, shape=None, order='C', *, copy=None): """ Gives a new shape to an array without changing its data. @@ -233,10 +232,6 @@ def reshape(a, /, shape=None, order='C', *, newshape=None, copy=None): 'A' means to read / write the elements in Fortran-like index order if ``a`` is Fortran *contiguous* in memory, C-like order otherwise. - newshape : int or tuple of ints - .. deprecated:: 2.1 - Replaced by ``shape`` argument. Retained for backward - compatibility. copy : bool, optional If ``True``, then the array data is copied. If ``None``, a copy will only be made if it's required by ``order``. For ``False`` it raises @@ -300,23 +295,9 @@ def reshape(a, /, shape=None, order='C', *, newshape=None, copy=None): [3, 4], [5, 6]]) """ - if newshape is None and shape is None: + if shape is None: raise TypeError( "reshape() missing 1 required positional argument: 'shape'") - if newshape is not None: - if shape is not None: - raise TypeError( - "You cannot specify 'newshape' and 'shape' arguments " - "at the same time.") - # Deprecated in NumPy 2.1, 2024-04-18 - warnings.warn( - "`newshape` keyword argument is deprecated, " - "use `shape=...` or pass shape positionally instead. " - "(deprecated in NumPy 2.1)", - DeprecationWarning, - stacklevel=2, - ) - shape = newshape if copy is not None: return _wrapfunc(a, 'reshape', shape, order=order, copy=copy) return _wrapfunc(a, 'reshape', shape, order=order) diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index dc8a040e30e8..d0e0a2a1f67c 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -14,7 +14,6 @@ from typing import ( overload, type_check_only, ) -from typing_extensions import deprecated import numpy as np from numpy import ( @@ -240,21 +239,6 @@ def reshape( *, copy: bool | None = None, ) -> NDArray[Any]: ... -@overload -@deprecated( - "`newshape` keyword argument is deprecated, " - "use `shape=...` or pass shape positionally instead. " - "(deprecated in NumPy 2.1)", -) -def reshape( - a: ArrayLike, - /, - shape: None = None, - order: _OrderACF = "C", - *, - newshape: _ShapeLike, - copy: bool | None = None, -) -> NDArray[Any]: ... 
@overload def choose( diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 751c66584275..127f08527f1e 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -183,12 +183,6 @@ def test_reshape_shape_arg(self): shape = (3, 4) expected = arr.reshape(shape) - with pytest.raises( - TypeError, - match="You cannot specify 'newshape' and 'shape' " - "arguments at the same time." - ): - np.reshape(arr, shape=shape, newshape=shape) with pytest.raises( TypeError, match=r"reshape\(\) missing 1 required positional " @@ -201,9 +195,6 @@ def test_reshape_shape_arg(self): assert_equal(np.reshape(arr, shape, "C"), expected) assert_equal(np.reshape(arr, shape=shape), expected) assert_equal(np.reshape(arr, shape=shape, order="C"), expected) - with pytest.warns(DeprecationWarning): - actual = np.reshape(arr, newshape=shape) - assert_equal(actual, expected) def test_reshape_copy_arg(self): arr = np.arange(24).reshape(2, 3, 4) From a0cf4d338ed1f41f4deb8530b3b23f45d6ac1972 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 09:50:56 +0200 Subject: [PATCH 0658/1018] DOC: add release note for #29994 --- doc/release/upcoming_changes/29994.expired.rst | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 doc/release/upcoming_changes/29994.expired.rst diff --git a/doc/release/upcoming_changes/29994.expired.rst b/doc/release/upcoming_changes/29994.expired.rst new file mode 100644 index 000000000000..0dcc083178ab --- /dev/null +++ b/doc/release/upcoming_changes/29994.expired.rst @@ -0,0 +1,7 @@ +Remove ``newshape`` parameter from ``numpy.reshape`` +---------------------------------------------------- + +The ``newshape`` parameter was deprecated in NumPy 2.1.0 and has been +removed from ``numpy.reshape``. + +Use the ``shape`` parameter instead. From 16a043ceec52b7e19c6ea77762b6204a6ca81232 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 16 Oct 2025 10:56:47 +0200 Subject: [PATCH 0659/1018] DOC: tweak release note Co-authored-by: Sebastian Berg --- doc/release/upcoming_changes/29994.expired.rst | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/doc/release/upcoming_changes/29994.expired.rst b/doc/release/upcoming_changes/29994.expired.rst index 0dcc083178ab..11331da6e810 100644 --- a/doc/release/upcoming_changes/29994.expired.rst +++ b/doc/release/upcoming_changes/29994.expired.rst @@ -2,6 +2,5 @@ Remove ``newshape`` parameter from ``numpy.reshape`` ---------------------------------------------------- The ``newshape`` parameter was deprecated in NumPy 2.1.0 and has been -removed from ``numpy.reshape``. - -Use the ``shape`` parameter instead. +removed from ``numpy.reshape``. Pass it positionally or use ``shape=`` +on newer NumPy versions. From 49fbb3119064105df967691858897a15ca8c8286 Mon Sep 17 00:00:00 2001 From: ixgbe <1113177880@qq.com> Date: Thu, 16 Oct 2025 17:32:00 +0800 Subject: [PATCH 0660/1018] DOC: update SIMD build options to cover riscv64 (#29992) Signed-off-by: Wang Yang --- doc/source/reference/simd/build-options.rst | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/doc/source/reference/simd/build-options.rst b/doc/source/reference/simd/build-options.rst index 6cf27510103c..229a9ebbae0a 100644 --- a/doc/source/reference/simd/build-options.rst +++ b/doc/source/reference/simd/build-options.rst @@ -239,6 +239,17 @@ On IBM/ZSYSTEM(S390X) * - ``VXE2`` - ``VX`` ``VXE`` +On RISCV64 +~~~~~~~~~~~~~~~~~~~~~ +.. 
list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + * - ``RVV`` + - + .. _opt-special-options: Special Options @@ -288,6 +299,8 @@ Enables the minimum CPU features for each architecture: - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` * - IBM/ZSYSTEM(S390X) - ``NONE`` + * - riscv64 + - ``NONE`` ``MAX`` From 1076e9da17bff2b6bbddd7baa13f2d7dda4057fa Mon Sep 17 00:00:00 2001 From: Sandeep Gupta Date: Thu, 16 Oct 2025 16:31:59 +0530 Subject: [PATCH 0661/1018] ci: run NumPy tests on POWER10 runner for full SIMD coverage (#29934) --- .github/workflows/linux-ppc64le.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linux-ppc64le.yml b/.github/workflows/linux-ppc64le.yml index f54b5dc74060..a593bcd774a2 100644 --- a/.github/workflows/linux-ppc64le.yml +++ b/.github/workflows/linux-ppc64le.yml @@ -20,7 +20,7 @@ jobs: # It requires a native ppc64le GHA runner, which is not available on forks. # For more details, see: https://github.com/numpy/numpy/issues/29125 if: github.repository == 'numpy/numpy' - runs-on: ubuntu-24.04-ppc64le + runs-on: ubuntu-24.04-ppc64le-p10 name: "Native PPC64LE" steps: - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 From a79e1541d80b94be2e2432f9027b1bf5e507be1e Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 16 Oct 2025 11:04:39 -0600 Subject: [PATCH 0662/1018] MAINT: Update main after the NumPy 2.3.4 release. - Add 2.3.4-changelog.rst - Add 2.3.4-notes.rst - Update release.rst - Update .mailmap - Update RELEASE_WALKTHROUGH.rst [skip azp] [skip cirrus] [skip actions] --- .mailmap | 2 + doc/RELEASE_WALKTHROUGH.rst | 95 +++++++++++++++++------------- doc/changelog/2.3.4-changelog.rst | 61 +++++++++++++++++++ doc/source/release.rst | 1 + doc/source/release/2.3.4-notes.rst | 83 ++++++++++++++++++++++++++ 5 files changed, 202 insertions(+), 40 deletions(-) create mode 100644 doc/changelog/2.3.4-changelog.rst create mode 100644 doc/source/release/2.3.4-notes.rst diff --git a/.mailmap b/.mailmap index e3e3bb56ecdf..ddadaf22a62f 100644 --- a/.mailmap +++ b/.mailmap @@ -214,6 +214,7 @@ Chris Kerr Chris Navarro Chris Navarro <24905907+lvllvl@users.noreply.github.com> Chris Vavaliaris +Christian Barbia Christian Clauss Christopher Dahlin Christopher Hanley @@ -655,6 +656,7 @@ Saransh Chopra Saullo Giovani Saurabh Mehta Sayantika Banik +Sayed Awad Schrijvers Luc Sean Cheah Sean Cheah <67928790+thalassemia@users.noreply.github.com> diff --git a/doc/RELEASE_WALKTHROUGH.rst b/doc/RELEASE_WALKTHROUGH.rst index 82f8d2c9e9f0..ac306c682148 100644 --- a/doc/RELEASE_WALKTHROUGH.rst +++ b/doc/RELEASE_WALKTHROUGH.rst @@ -47,6 +47,13 @@ Update 2.4.0 milestones Look at the issues/prs with 2.4.0 milestones and either push them off to a later version, or maybe remove the milestone. You may need to add a milestone. +Check the numpy-release repo +---------------------------- + +The things to check are the ``cibuildwheel`` version in +``.github/workflows/wheels.yml`` and the ``openblas`` versions in +``openblas_requirements.txt``. + Make a release PR ================= @@ -121,6 +128,20 @@ may also be appended, but not for the initial release as it is too long. Check previous release notes to see how this is done. +Test the wheel builds +--------------------- + +After the release PR is merged, go to the ``numpy-release`` repository in your +browser and manually trigger the workflow on the ``maintenance/2.4.x`` branch +using the ``Run workflow`` button in ``actions``. 
Make sure that the upload
+target is ``none`` in the *environment* dropdown. The wheels take about 1 hour
+to build, but sometimes GitHub is very slow. If some wheel builds fail for
+unrelated reasons, you can re-run them as normal in the GitHub Actions UI with
+``re-run failed``. After the wheels are built, review the results, checking that
+the number of artifacts is correct, the wheel names are as expected, etc. If
+everything looks good, proceed with the release.
+
+
 Release walkthrough
 ===================
@@ -131,8 +152,8 @@
 cloned it locally. You can also edit ``.git/config`` and add ``upstream``
 if it isn't already present.

-1. Prepare the release commit
------------------------------
+1. Tag the release commit
+-------------------------

 Checkout the branch for the release, make sure it is up to date, and clean
 the repository::
@@ -161,47 +182,36 @@ If you need to delete the tag due to error::

 2. Build wheels and sdist
 -------------------------

-Create a ``maintenance/2.4.x`` branch in the ``numpy-release`` repository,
-and open a PR changing the ``SOURCE_REF_TO_BUILD`` identifier at the top of
-``.github/workflows/wheels.yml`` to ``v2.4.0``. That will do a full set of
-wheel builds on the PR, if everything looks good merge the PR.
-
-All wheels are currently built in that repository on GitHub Actions, they take
-about 1 hour to build.
-
-If you wish to manually trigger a wheel build, you can do so: in your browser,
-go to `numpy-release/actions/workflows/wheels.yml <https://github.com/numpy/numpy-release/actions/workflows/wheels.yml>`__
-and click on the "Run workflow" button, then choose the tag to build. If some
-wheel builds fail for unrelated reasons, you can re-run them as normal
-in the GitHub Actions UI with "re-run failed".
-
-Once you are ready to publish a release to PyPI, use that same "Run workflow"
-button and choose ``pypi`` in the *environment* dropdown. All wheels and the
-sdist will build and be ready to release to PyPI after manual inspection that
-everything passed. E.g., the number of artifacts is correct, and the wheel
-filenames and sizes look as expected. If desired, you can also download an
-artifact for local unzipping and inspection. You will get an email notification
-as well with a "Review pending deployments" link. Once you're ready, press the
-button to start the uploads to PyPI, which will complete the release.
+Go to the ``numpy-release`` repository in your browser and manually trigger the
+workflow on the ``maintenance/2.4.x`` branch using the ``Run workflow`` button
+in ``actions``. Make sure that the upload target is ``pypi`` in the
+*environment* dropdown. The wheels take about 1 hour to build, but sometimes
+GitHub is very slow. If some wheel builds fail for unrelated reasons, you can
+re-run them as normal in the GitHub Actions UI with ``re-run failed``. After
+the wheels are built, review the results, checking that the number of artifacts
+is correct, the wheel names are as expected, etc. If everything looks good,
+trigger the upload.

 3. Upload files to GitHub Releases
 ----------------------------------

 Go to `<https://github.com/numpy/numpy/releases>`_, there should be a ``v2.4.0``
 tag, click on it and hit the edit button for that tag and update the title to
 "v2.4.0 (<date>)". There are two ways to add files, using an editable text
-window and as binary uploads.
The text window needs markdown, so translate the
+release notes from rst to md::
+
+    $ python tools/write_release.py 2.4.0

-Start by running ``spin notes 2.4.0`` and then edit the ``release/README.md``
-that is translated from the rst version using pandoc. Things that will need
-fixing: PR lines from the changelog, if included, are wrapped and need
-unwrapping, links should be changed to monospaced text. Then copy the contents
-to the clipboard and paste them into the text window. It may take several tries
-to get it look right. Then
+This will create a ``release/README.md`` file that you can edit. Check the
+result to see that it looks correct. Things that may need fixing: wrapped lines
+that need unwrapping and links that should be changed to monospaced text. Then
+copy the contents to the clipboard and paste them into the text window. It may
+take several tries to get it to look right. Then

-- Download the sdist (``numpy-2.4.0.tar.gz``) from PyPI upload it to GitHub as
-  a binary file.
+- Download the sdist (``numpy-2.4.0.tar.gz``) from PyPI and upload it to GitHub
+  as a binary file. You cannot do this using pip.
 - Upload ``release/README.rst`` as a binary file.
 - Upload ``doc/changelog/2.4.0-changelog.rst`` as a binary file.
 - Check the pre-release button if this is a pre-release.
@@ -271,14 +281,19 @@ notes will be a skeleton and have little content::

    $ gvim doc/source/release/2.4.1-notes.rst
    $ git add doc/source/release/2.4.1-notes.rst

-Add new release notes to the documentation release list. Then update the
-``version`` in ``pyproject.toml``::
+Add a link to the new release notes::
+
+    $ gvim doc/source/release.rst
+
+Update the ``version`` in ``pyproject.toml``::

    $ gvim pyproject.toml

-Commit the result::
+Commit the result, edit the commit message, note the files in the commit, and
+add a line ``[skip azp] [skip cirrus] [skip actions]``, then push::

    $ git commit -a -m"MAINT: Prepare 2.4.x for further development"
+    $ git rebase -i HEAD^
    $ git push origin HEAD

 Go to GitHub and make a PR. It should be merged quickly.
@@ -300,7 +315,7 @@ This assumes that you have forked `<https://github.com/numpy/numpy.org>`_::

 - For the ``*.0`` release in a cycle, add a new section at the top with a short
   description of the new features and point the news link to it.
 - Edit the newsHeader and date fields at the top of news.md
-- Also edit the butttonText on line 14 in content/en/config.yaml
+- Also edit the buttonText on line 14 in content/en/config.yaml

 commit and push::
@@ -316,7 +331,7 @@ Go to GitHub and make a PR.

 The release should be announced on the numpy-discussion and
 python-announce-list mailing lists. Look at previous announcements for the
 basic template. The contributor and PR lists are the same as generated for the
-release notes above. If you crosspost, make sure that python-announce-list is
+release notes above. If you cross-post, make sure that python-announce-list is
 BCC so that replies will not be sent to that list.

diff --git a/doc/changelog/2.3.4-changelog.rst b/doc/changelog/2.3.4-changelog.rst
new file mode 100644
index 000000000000..f94b46a07573
--- /dev/null
+++ b/doc/changelog/2.3.4-changelog.rst
@@ -0,0 +1,61 @@
+
+Contributors
+============
+
+A total of 17 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+ +* !DWesl +* Charles Harris +* Christian Barbia + +* Evgeni Burovski +* Joren Hammudoglu +* Maaz + +* Mateusz Sokół +* Matti Picus +* Nathan Goldbaum +* Ralf Gommers +* Riku Sakamoto + +* Sandeep Gupta + +* Sayed Awad +* Sebastian Berg +* Sergey Fedorov + +* Warren Weckesser +* dependabot[bot] + +Pull requests merged +==================== + +A total of 30 pull requests were merged for this release. + +* `#29725 `__: MAINT: Prepare 2.3.x for further development +* `#29781 `__: MAINT: Pin some upstream dependences +* `#29782 `__: BLD: enable x86-simd-sort to build on KNL with -mavx512f +* `#29783 `__: BUG: Include python-including headers first (#29281) +* `#29784 `__: TYP: fix np.number and np.\*integer method declaration +* `#29785 `__: TYP: mypy 1.18.1 +* `#29788 `__: TYP: replace scalar type __init__ with __new__ +* `#29790 `__: BUG: Fix ``dtype`` refcount in ``__array__`` (#29715) +* `#29791 `__: TYP: fix method declarations in floating, timedelta64, and datetime64Backport +* `#29792 `__: MAINT: delete unused variables in unary logical dispatch +* `#29797 `__: BUG: Fix pocketfft umath strides for AIX compatibility (#29768) +* `#29798 `__: BUG: np.setbufsize should raise ValueError for negative input +* `#29799 `__: BUG: Fix assert in nditer buffer setup +* `#29800 `__: BUG: Stable ScalarType ordering +* `#29838 `__: TST: Pin pyparsing to avoid matplotlib errors. +* `#29839 `__: BUG: linalg: emit a MemoryError on a malloc failure (#29811) +* `#29840 `__: BLD: change file extension for libnpymath on win-arm64 from .a... +* `#29864 `__: CI: Fix loongarch64 CI (#29856) +* `#29865 `__: TYP: Various typing fixes +* `#29910 `__: BUG: Fix float16-sort failures on 32-bit x86 MSVC (#29908) +* `#29911 `__: TYP: add missing ``__slots__`` (#29901) +* `#29913 `__: TYP: wrong argument defaults in ``testing._private`` (#29902) +* `#29920 `__: BUG: avoid segmentation fault in string_expandtabs_length_promoter +* `#29921 `__: BUG: Fix INT_MIN % -1 to return 0 for all signed integer types... +* `#29922 `__: TYP: minor fixes related to ``errstate`` (#29914) +* `#29923 `__: TST: use requirements/test_requirements across CI (#29919) +* `#29926 `__: BUG: fix negative samples generated by Wald distribution (#29609) +* `#29940 `__: MAINT: Bump pypa/cibuildwheel from 3.1.4 to 3.2.1 +* `#29949 `__: STY: rename @classmethod arg to cls +* `#29950 `__: MAINT: Simplify string arena growth strategy (#29885) + diff --git a/doc/source/release.rst b/doc/source/release.rst index 72ddab818a77..6f1c1a22f11d 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -6,6 +6,7 @@ Release notes :maxdepth: 2 2.4.0 + 2.3.4 2.3.3 2.3.2 2.3.1 diff --git a/doc/source/release/2.3.4-notes.rst b/doc/source/release/2.3.4-notes.rst new file mode 100644 index 000000000000..6ba7c06b7514 --- /dev/null +++ b/doc/source/release/2.3.4-notes.rst @@ -0,0 +1,83 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.3.4 Release Notes +========================== + +The NumPy 2.3.4 release is a patch release split between a number of maintenance +updates and bug fixes. This release supports Python versions 3.11-3.14. This +release is based on the Python 3.14.0 final. + + +Changes +======= + +The ``npymath`` and ``npyrandom`` libraries now have a ``.lib`` rather than a +``.a`` file extension on win-arm64, for compatibility for building with MSVC and +``setuptools``. 
Please note that using these static libraries is discouraged +and for existing projects using it, it's best to use it with a matching +compiler toolchain, which is ``clang-cl`` on Windows on Arm. + +(`gh-29750 `__) + + +Contributors +============ + +A total of 17 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !DWesl +* Charles Harris +* Christian Barbia + +* Evgeni Burovski +* Joren Hammudoglu +* Maaz + +* Mateusz Sokół +* Matti Picus +* Nathan Goldbaum +* Ralf Gommers +* Riku Sakamoto + +* Sandeep Gupta + +* Sayed Awad +* Sebastian Berg +* Sergey Fedorov + +* Warren Weckesser +* dependabot[bot] + +Pull requests merged +==================== + +A total of 30 pull requests were merged for this release. + +* `#29725 `__: MAINT: Prepare 2.3.x for further development +* `#29781 `__: MAINT: Pin some upstream dependences +* `#29782 `__: BLD: enable x86-simd-sort to build on KNL with -mavx512f +* `#29783 `__: BUG: Include python-including headers first (#29281) +* `#29784 `__: TYP: fix np.number and np.\*integer method declaration +* `#29785 `__: TYP: mypy 1.18.1 +* `#29788 `__: TYP: replace scalar type __init__ with __new__ +* `#29790 `__: BUG: Fix ``dtype`` refcount in ``__array__`` (#29715) +* `#29791 `__: TYP: fix method declarations in floating, timedelta64, and datetime64Backport +* `#29792 `__: MAINT: delete unused variables in unary logical dispatch +* `#29797 `__: BUG: Fix pocketfft umath strides for AIX compatibility (#29768) +* `#29798 `__: BUG: np.setbufsize should raise ValueError for negative input +* `#29799 `__: BUG: Fix assert in nditer buffer setup +* `#29800 `__: BUG: Stable ScalarType ordering +* `#29838 `__: TST: Pin pyparsing to avoid matplotlib errors. +* `#29839 `__: BUG: linalg: emit a MemoryError on a malloc failure (#29811) +* `#29840 `__: BLD: change file extension for libnpymath on win-arm64 from .a... +* `#29864 `__: CI: Fix loongarch64 CI (#29856) +* `#29865 `__: TYP: Various typing fixes +* `#29910 `__: BUG: Fix float16-sort failures on 32-bit x86 MSVC (#29908) +* `#29911 `__: TYP: add missing ``__slots__`` (#29901) +* `#29913 `__: TYP: wrong argument defaults in ``testing._private`` (#29902) +* `#29920 `__: BUG: avoid segmentation fault in string_expandtabs_length_promoter +* `#29921 `__: BUG: Fix INT_MIN % -1 to return 0 for all signed integer types... 
+* `#29922 `__: TYP: minor fixes related to ``errstate`` (#29914) +* `#29923 `__: TST: use requirements/test_requirements across CI (#29919) +* `#29926 `__: BUG: fix negative samples generated by Wald distribution (#29609) +* `#29940 `__: MAINT: Bump pypa/cibuildwheel from 3.1.4 to 3.2.1 +* `#29949 `__: STY: rename @classmethod arg to cls +* `#29950 `__: MAINT: Simplify string arena growth strategy (#29885) + From 72c4b8c9d0fc252806b50d894cd834383c638764 Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Thu, 16 Oct 2025 14:14:34 -0400 Subject: [PATCH 0663/1018] TST: Mark thread-unsafe tests (#29816) --- numpy/_core/tests/test_arrayprint.py | 4 ++++ numpy/_core/tests/test_cpu_features.py | 1 + numpy/_core/tests/test_custom_dtypes.py | 6 +++++ numpy/_core/tests/test_deprecations.py | 3 +++ numpy/_core/tests/test_dtype.py | 8 +++++++ numpy/_core/tests/test_finfo.py | 3 +++ numpy/_core/tests/test_mem_policy.py | 1 + numpy/_core/tests/test_memmap.py | 1 + numpy/_core/tests/test_multiarray.py | 6 +++++ numpy/_core/tests/test_multithreading.py | 3 +++ numpy/_core/tests/test_nditer.py | 2 ++ numpy/_core/tests/test_print.py | 2 ++ numpy/_core/tests/test_regression.py | 1 + numpy/_core/tests/test_scalarmath.py | 6 +++++ numpy/_core/tests/test_shape_base.py | 1 + numpy/_core/tests/test_strings.py | 2 ++ numpy/_core/tests/test_ufunc.py | 2 ++ numpy/_core/tests/test_umath.py | 1 + numpy/conftest.py | 23 ++++++++++++++++++++ numpy/lib/tests/test__datasource.py | 6 +++++ numpy/lib/tests/test__iotools.py | 3 +++ numpy/lib/tests/test_format.py | 1 + numpy/lib/tests/test_function_base.py | 7 ++++++ numpy/lib/tests/test_io.py | 3 +++ numpy/lib/tests/test_nanfunctions.py | 1 + numpy/lib/tests/test_recfunctions.py | 3 +++ numpy/ma/tests/test_core.py | 3 +++ numpy/matrixlib/tests/test_matrix_linalg.py | 5 +++++ numpy/polynomial/tests/test_printing.py | 2 ++ numpy/random/tests/test_direct.py | 3 +++ numpy/random/tests/test_extending.py | 4 ++++ numpy/random/tests/test_generator_mt19937.py | 1 + numpy/random/tests/test_randomstate.py | 5 +++++ numpy/testing/tests/test_utils.py | 15 +++++++++++++ numpy/tests/test_ctypeslib.py | 6 +++++ numpy/tests/test_numpy_config.py | 1 + numpy/tests/test_reloading.py | 1 + numpy/typing/tests/test_isfile.py | 6 +++++ 38 files changed, 152 insertions(+) diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py index a054f408e954..04969cb74a4e 100644 --- a/numpy/_core/tests/test_arrayprint.py +++ b/numpy/_core/tests/test_arrayprint.py @@ -533,6 +533,9 @@ def test_nested_array_repr(self): ) @given(hynp.from_dtype(np.dtype("U"))) + @pytest.mark.thread_unsafe( + reason="gives unreliable results w/ hypothesis (HypothesisWorks/hypothesis#4562)" + ) def test_any_text(self, text): # This test checks that, given any value that can be represented in an # array of dtype("U") (i.e. unicode string), ... 
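
The hypothesis-driven tests being marked in this file share one pattern; a
condensed sketch (strategy taken from the diff, assertion simplified, test
name invented for illustration)::

    import numpy as np
    import hypothesis
    import hypothesis.extra.numpy as hynp

    @hypothesis.given(text=hynp.from_dtype(np.dtype("U")))
    def test_zero_d_str_roundtrip(text):
        # str() of a 0-d unicode array is the contained string itself.
        assert str(np.array(text)) == text
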
@@ -546,6 +549,7 @@ def test_any_text(self, text): assert_equal(result, expected_repr) @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + @pytest.mark.thread_unsafe(reason="garbage collector is global state") def test_refcount(self): # make sure we do not hold references to the array due to a recursive # closure (gh-10620) diff --git a/numpy/_core/tests/test_cpu_features.py b/numpy/_core/tests/test_cpu_features.py index ebf5b49fc357..818c1fb204a4 100644 --- a/numpy/_core/tests/test_cpu_features.py +++ b/numpy/_core/tests/test_cpu_features.py @@ -124,6 +124,7 @@ def load_flags_auxv(self): " therefore this test class cannot be properly executed." ), ) +@pytest.mark.thread_unsafe(reason="setup & tmp_path_factory threads-unsafe, modifies environment variables") class TestEnvPrivation: cwd = pathlib.Path(__file__).parent.resolve() env = os.environ.copy() diff --git a/numpy/_core/tests/test_custom_dtypes.py b/numpy/_core/tests/test_custom_dtypes.py index f03e35dca9f2..2acb4adf4c7c 100644 --- a/numpy/_core/tests/test_custom_dtypes.py +++ b/numpy/_core/tests/test_custom_dtypes.py @@ -327,6 +327,9 @@ def test_creation_class(self): assert np.zeros(3, dtype=SF).dtype == SF(1.) assert np.zeros_like(arr1, dtype=SF).dtype == SF(1.) + @pytest.mark.thread_unsafe( + reason="_ScaledFloatTestDType setup is thread-unsafe (gh-29850)" + ) def test_np_save_load(self): # this monkeypatch is needed because pickle # uses the repr of a type to reconstruct it @@ -370,6 +373,9 @@ def test_flatiter_index(self, index): np.testing.assert_array_equal( arr.view(np.float64), arr2.view(np.float64)) +@pytest.mark.thread_unsafe( + reason="_ScaledFloatTestDType setup is thread-unsafe (gh-29850)" +) def test_type_pickle(): # can't actually unpickle, but we can pickle (if in namespace) import pickle diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index ca4463ece1f7..cb73d455ca8f 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -458,6 +458,9 @@ def assign_to_index(): self.assert_deprecated(assign_to_index) +@pytest.mark.thread_unsafe( + reason="warning control utilities are deprecated due to being thread-unsafe" +) class TestWarningUtilityDeprecations(_DeprecationTestCase): # Deprecation in NumPy 2.4, 2025-08 message = r"NumPy warning suppression and assertion utilities are deprecated." 
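
The ``thread_unsafe`` marker used throughout this patch is honored by the
parallel test runner in NumPy's free-threading CI (``pytest-run-parallel``, an
inference from the CI setup rather than something stated in the diff); the
basic usage pattern, with a hypothetical test body::

    import pytest
    import numpy as np

    @pytest.mark.thread_unsafe(reason="mutates global print options")
    def test_printoptions_example():
        # set_printoptions touches interpreter-wide state, so this must
        # not run concurrently with tests that read the same state.
        saved = np.get_printoptions()
        np.set_printoptions(precision=3)
        try:
            assert np.get_printoptions()["precision"] == 3
        finally:
            np.set_printoptions(**saved)
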
diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py
index 175680451f3e..e6c80d8d4212 100644
--- a/numpy/_core/tests/test_dtype.py
+++ b/numpy/_core/tests/test_dtype.py
@@ -1316,6 +1316,9 @@ def test_object_flag_not_inherited(self):
 
     @pytest.mark.slow
     @hypothesis.given(dtype=hynp.nested_dtypes())
+    @pytest.mark.thread_unsafe(
+        reason="hynp.nested_dtypes is thread-unsafe (HypothesisWorks/hypothesis#4562)"
+    )
     def test_make_canonical_hypothesis(self, dtype):
         canonical = np.result_type(dtype)
         self.check_canonical(dtype, canonical)
@@ -1324,6 +1327,9 @@ def test_make_canonical_hypothesis(self, dtype):
         assert np.can_cast(two_arg_result, canonical, casting="no")
 
     @pytest.mark.slow
+    @pytest.mark.thread_unsafe(
+        reason="gives unreliable results w/ hypothesis (HypothesisWorks/hypothesis#4562)"
+    )
     @hypothesis.given(
         dtype=hypothesis.extra.numpy.array_dtypes(
             subtype_strategy=hypothesis.extra.numpy.array_dtypes(),
@@ -1916,6 +1922,7 @@ def test_pairs(self, pair):
 
 class TestUserDType:
     @pytest.mark.leaks_references(reason="dynamically creates custom dtype.")
+    @pytest.mark.thread_unsafe(reason="crashes when GIL disabled, dtype setup is thread-unsafe")
     def test_custom_structured_dtype(self):
         class mytype:
             pass
@@ -1936,6 +1943,7 @@ class mytype:
         del a
         assert sys.getrefcount(o) == startcount
 
+    @pytest.mark.thread_unsafe(reason="crashes when GIL disabled, dtype setup is thread-unsafe")
     def test_custom_structured_dtype_errors(self):
         class mytype:
             pass
diff --git a/numpy/_core/tests/test_finfo.py b/numpy/_core/tests/test_finfo.py
index 572490c1eb08..5703b8d6a765 100644
--- a/numpy/_core/tests/test_finfo.py
+++ b/numpy/_core/tests/test_finfo.py
@@ -69,6 +69,9 @@ def float64_ma():
     'machep', 'negep', 'minexp', 'maxexp', 'nmant', 'iexp',
     'eps', 'epsneg', 'precision', 'resolution'
 ])
+@pytest.mark.thread_unsafe(
+    reason="complex fixture setup is thread-unsafe (pytest-dev/pytest#13768)"
+)
 def test_finfo_properties(dtype, ma_fixture, prop, request):
     """Test that finfo properties match expected machine arithmetic values."""
     ma = request.getfixturevalue(ma_fixture)
diff --git a/numpy/_core/tests/test_mem_policy.py b/numpy/_core/tests/test_mem_policy.py
index 70befb8bd324..313d3efe779a 100644
--- a/numpy/_core/tests/test_mem_policy.py
+++ b/numpy/_core/tests/test_mem_policy.py
@@ -415,6 +415,7 @@ def test_new_policy(get_module):
                     reason=("bad interaction between getenv and "
                             "os.environ inside pytest"))
 @pytest.mark.parametrize("policy", ["0", "1", None])
+@pytest.mark.thread_unsafe(reason="modifies environment variables")
 def test_switch_owner(get_module, policy):
     a = get_module.get_array()
     assert np._core.multiarray.get_handler_name(a) is None
diff --git a/numpy/_core/tests/test_memmap.py b/numpy/_core/tests/test_memmap.py
index 49931e1680e8..8e2aa0a507b1 100644
--- a/numpy/_core/tests/test_memmap.py
+++ b/numpy/_core/tests/test_memmap.py
@@ -30,6 +30,7 @@
 )
 
 
+@pytest.mark.thread_unsafe(reason="setup & memmap is thread-unsafe (gh-29126)")
 class TestMemmap:
     def setup_method(self):
         self.tmpfp = NamedTemporaryFile(prefix='mmap')
diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py
index 12ea10e5b568..c2d54dd15caf 100644
--- a/numpy/_core/tests/test_multiarray.py
+++ b/numpy/_core/tests/test_multiarray.py
@@ -7201,6 +7201,7 @@ def assert_dot_close(A, X, desired):
     @pytest.mark.slow
     @pytest.mark.parametrize("dtype", [np.float64, np.complex128])
     @requires_memory(free_bytes=18e9)  # complex case needs 18GiB+
+    @pytest.mark.thread_unsafe(reason="crashes with low memory")
     def test_huge_vectordot(self, dtype):
         # Large vector multiplications are chunked with 32bit BLAS
         # Test that the chunking does the right thing, see also gh-22262
@@ -8565,6 +8566,7 @@ def test_padding(self):
             x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
             self._check_roundtrip(x)
 
+    @pytest.mark.thread_unsafe(reason="test result depends on the reference count of a global object")
     def test_reference_leak(self):
         if HAS_REFCOUNT:
             count_1 = sys.getrefcount(np._core._internal)
@@ -8686,6 +8688,7 @@ class foo(ctypes.Structure):
         assert_equal(arr['a'], 3)
 
     @pytest.mark.parametrize("obj", [np.ones(3), np.ones(1, dtype="i,i")[()]])
+    @pytest.mark.thread_unsafe(reason="_multiarray_tests uses memoryview, which is thread-unsafe")
     def test_error_if_stored_buffer_info_is_corrupted(self, obj):
         """
         If a user extends a NumPy array before 1.20 and then runs it
@@ -9082,6 +9085,7 @@ def __array_interface__(self):
         # This fails due to going into the buffer protocol path
         (f, {'data': None, 'shape': ()}, TypeError),
     ])
+    @pytest.mark.thread_unsafe(reason="test result depends on the reference count of a global object")
     def test_scalar_interface(self, val, iface, expected):
         # Test scalar coercion within the array interface
         self.f.iface = {'typestr': 'f8'}
@@ -9770,6 +9774,7 @@ def test_ctypes_is_available(self):
         assert_equal(ctypes, test_arr.ctypes._ctypes)
         assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
 
+    @pytest.mark.thread_unsafe(reason="modifies global module state")
     def test_ctypes_is_not_available(self):
         from numpy._core import _internal
         _internal.ctypes = None
@@ -10677,6 +10682,7 @@ def test_argsort_largearrays(dtype):
     assert_arg_sorted(arr, np.argsort(arr, kind='quick'))
 
 @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+@pytest.mark.thread_unsafe(reason="test result depends on the reference count of a global object")
 def test_gh_22683():
     b = 777.68760986
     a = np.array([b] * 10000, dtype=object)
diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py
index 6563abde8ff4..149a216fa1cd 100644
--- a/numpy/_core/tests/test_multithreading.py
+++ b/numpy/_core/tests/test_multithreading.py
@@ -12,6 +12,9 @@
 if IS_WASM:
     pytest.skip(allow_module_level=True, reason="no threading support in wasm")
 
+pytestmark = pytest.mark.thread_unsafe(
+    reason="tests in this module are already explicitly multi-threaded"
+)
 
 def test_parallel_randomstate_creation():
     # if the coercion cache is enabled and not thread-safe, creating
diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py
index 6396a19fe97f..acaaa0548fd5 100644
--- a/numpy/_core/tests/test_nditer.py
+++ b/numpy/_core/tests/test_nditer.py
@@ -3403,6 +3403,7 @@ def test_arbitrary_number_of_ops_nested():
 
 @pytest.mark.slow
 @requires_memory(9 * np.iinfo(np.intc).max)
+@pytest.mark.thread_unsafe(reason="crashes with low memory")
 def test_arbitrary_number_of_ops_error():
     # A different error may happen for more than integer operands, but that
     # is too large to test nicely.
@@ -3415,6 +3416,7 @@ def test_arbitrary_number_of_ops_error():
     np.nested_iters(args, [[0], []])
 
 
+@pytest.mark.thread_unsafe(reason="capfd is thread-unsafe")
 def test_debug_print(capfd):
     """
     Matches the expected output of a debug print with the actual output.
diff --git a/numpy/_core/tests/test_print.py b/numpy/_core/tests/test_print.py index d99b2794d7ca..95a177b57a7d 100644 --- a/numpy/_core/tests/test_print.py +++ b/numpy/_core/tests/test_print.py @@ -117,6 +117,7 @@ def _test_redirected_print(x, tp, ref=None): err_msg=f'print failed for type{tp}') +@pytest.mark.thread_unsafe(reason="sys.stdout not thread-safe") @pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) def test_float_type_print(tp): """Check formatting when using print """ @@ -133,6 +134,7 @@ def test_float_type_print(tp): _test_redirected_print(1e16, tp, ref) +@pytest.mark.thread_unsafe(reason="sys.stdout not thread-safe") @pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble]) def test_complex_type_print(tp): """Check formatting when using print """ diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index 8ad9a26fcc9a..dc457b2d5fc1 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -2491,6 +2491,7 @@ def test__array_interface__descr(self): @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python') @requires_memory(free_bytes=9e9) + @pytest.mark.thread_unsafe(reason="crashes with low memory") def test_dot_big_stride(self): # gh-17111 # blas stride = stride//itemsize > int32 max diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index 8c24b4cfdc88..bbde90c08bbb 100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -871,6 +871,9 @@ def recursionlimit(n): @given(sampled_from(objecty_things), sampled_from(binary_operators_for_scalar_ints), sampled_from(types + [rational])) +@pytest.mark.thread_unsafe( + reason="gives unreliable results w/ hypothesis (HypothesisWorks/hypothesis#4562)" +) def test_operator_object_left(o, op, type_): try: with recursionlimit(200): @@ -882,6 +885,9 @@ def test_operator_object_left(o, op, type_): @given(sampled_from(objecty_things), sampled_from(binary_operators_for_scalar_ints), sampled_from(types + [rational])) +@pytest.mark.thread_unsafe( + reason="gives unreliable results w/ hypothesis (HypothesisWorks/hypothesis#4562)" +) def test_operator_object_right(o, op, type_): try: with recursionlimit(200): diff --git a/numpy/_core/tests/test_shape_base.py b/numpy/_core/tests/test_shape_base.py index 2d606a2d33fd..5be3d05bbf11 100644 --- a/numpy/_core/tests/test_shape_base.py +++ b/numpy/_core/tests/test_shape_base.py @@ -299,6 +299,7 @@ def test_exceptions(self): reason="only problematic on 64bit platforms" ) @requires_memory(2 * np.iinfo(np.intc).max) + @pytest.mark.thread_unsafe(reason="crashes with low memory") def test_huge_list_error(self): a = np.array([1]) max_int = np.iinfo(np.intc).max diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index e5c3bb87c773..57d94ce12747 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -136,6 +136,7 @@ def test_string_size_dtype_large_repr(str_dt): @pytest.mark.slow @requires_memory(2 * np.iinfo(np.intc).max) @pytest.mark.parametrize("str_dt", "US") +@pytest.mark.thread_unsafe(reason="crashes with low memory") def test_large_string_coercion_error(str_dt): very_large = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize try: @@ -163,6 +164,7 @@ def __str__(self): @pytest.mark.slow @requires_memory(2 * np.iinfo(np.intc).max) @pytest.mark.parametrize("str_dt", "US") +@pytest.mark.thread_unsafe(reason="crashes with low memory") 
def test_large_string_addition_error(str_dt):
     very_large = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize
 
diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py
index d5f85d4b5c3b..2b01d37989cc 100644
--- a/numpy/_core/tests/test_ufunc.py
+++ b/numpy/_core/tests/test_ufunc.py
@@ -1796,6 +1796,7 @@ def test_identityless_reduction(self, arrs, pos):
     @requires_memory(6 * 1024**3)
     @pytest.mark.skipif(sys.maxsize < 2**32,
                         reason="test array too large for 32bit platform")
+    @pytest.mark.thread_unsafe(reason="crashes with low memory")
     def test_identityless_reduction_huge_array(self):
         # Regression test for gh-20921 (copying identity incorrectly failed)
         arr = np.zeros((2, 2**31), 'uint8')
@@ -3247,6 +3248,7 @@ def test_resolve_dtypes_reduction_errors(self):
 
     @pytest.mark.skipif(not hasattr(ct, "pythonapi"),
                         reason="`ctypes.pythonapi` required for capsule unpacking.")
+    @pytest.mark.thread_unsafe(reason="modifies global object in the ctypes API")
     def test_loop_access(self):
         # This is a basic test for the full strided loop access
         data_t = ct.c_char_p * 2
diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py
index 572302541e47..cd0ec7d04f69 100644
--- a/numpy/_core/tests/test_umath.py
+++ b/numpy/_core/tests/test_umath.py
@@ -4046,6 +4046,7 @@ def test_array_ufunc_direct_call(self):
         res = a.__array_ufunc__(np.add, "__call__", a, a)
         assert_array_equal(res, a + a)
 
+    @pytest.mark.thread_unsafe(reason="modifies global module")
     def test_ufunc_docstring(self):
         original_doc = np.add.__doc__
         new_doc = "new docs"
diff --git a/numpy/conftest.py b/numpy/conftest.py
index 52f2a75b9df0..c158a2251914 100644
--- a/numpy/conftest.py
+++ b/numpy/conftest.py
@@ -6,6 +6,7 @@
 import tempfile
 import warnings
 from contextlib import contextmanager
+from pathlib import Path
 
 import hypothesis
 import pytest
@@ -20,6 +21,11 @@
 except ModuleNotFoundError:
     HAVE_SCPDT = False
 
+try:
+    import pytest_run_parallel  # noqa: F401
+    PARALLEL_RUN_AVAILABLE = True
+except ModuleNotFoundError:
+    PARALLEL_RUN_AVAILABLE = False
 
 _old_fpu_mode = None
 _collect_results = {}
@@ -62,6 +68,17 @@ def pytest_configure(config):
                             "slow: Tests that are very slow.")
     config.addinivalue_line("markers",
                             "slow_pypy: Tests that are very slow on pypy.")
+    if not PARALLEL_RUN_AVAILABLE:
+        config.addinivalue_line("markers",
+            "parallel_threads(n): run the given test function in parallel "
+            "using `n` threads.",
+        )
+        config.addinivalue_line("markers",
+            "iterations(n): run the given test function `n` times in each thread",
+        )
+        config.addinivalue_line("markers",
+            "thread_unsafe: mark the test function as single-threaded",
+        )
 
 
 def pytest_addoption(parser):
@@ -224,3 +241,9 @@ def warnings_errors_and_rng(test=None):
     'numpy/random/_examples',
     'numpy/f2py/_backends/_distutils.py',
 ]
+
+def pytest_collection_modifyitems(config, items):
+    for item in items:
+        if Path(item.fspath).parent == Path(__file__).parent / 'f2py' / 'tests':
+            item.add_marker(pytest.mark.thread_unsafe(
+                reason="f2py tests are thread-unsafe"))
diff --git a/numpy/lib/tests/test__datasource.py b/numpy/lib/tests/test__datasource.py
index 2dd19410bbf0..4c866511a5de 100644
--- a/numpy/lib/tests/test__datasource.py
+++ b/numpy/lib/tests/test__datasource.py
@@ -88,6 +88,7 @@ def invalid_httpfile():
     return http_fakefile
 
 
+@pytest.mark.thread_unsafe(reason="mkdtemp thread-unsafe")
 class TestDataSourceOpen:
     def test_ValidHTTP(self, tmp_path):
         ds = datasource.DataSource(tmp_path)
@@ -156,6 +157,7 @@ def test_ValidBz2File(self, tmp_path):
assert_equal(magic_line, result) +@pytest.mark.thread_unsafe(reason="mkdtemp thread-unsafe") class TestDataSourceExists: def test_ValidHTTP(self, tmp_path): ds = datasource.DataSource(tmp_path) @@ -182,6 +184,7 @@ def test_InvalidFile(self, tmp_path): assert_equal(ds.exists(tmpfile), False) +@pytest.mark.thread_unsafe(reason="mkdtemp thread-unsafe") class TestDataSourceAbspath: def test_ValidHTTP(self, tmp_path): ds = datasource.DataSource(tmp_path) @@ -244,6 +247,7 @@ def test_windows_os_sep(self, tmp_path): os.sep = orig_os_sep +@pytest.mark.thread_unsafe(reason="mkdtemp thread-unsafe") class TestRepositoryAbspath: def test_ValidHTTP(self, tmp_path): repos = datasource.Repository(valid_baseurl(), tmp_path) @@ -271,6 +275,7 @@ def test_windows_os_sep(self, tmp_path): os.sep = orig_os_sep +@pytest.mark.thread_unsafe(reason="mkdtemp thread-unsafe") class TestRepositoryExists: def test_ValidFile(self, tmp_path): # Create local temp file @@ -300,6 +305,7 @@ def test_CachedHTTPFile(self, tmp_path): assert_(repos.exists(tmpfile)) +@pytest.mark.thread_unsafe(reason="mkdtemp thread-unsafe") class TestOpenFunc: def test_DataSourceOpen(self, tmp_path): local_file = valid_textfile(tmp_path) diff --git a/numpy/lib/tests/test__iotools.py b/numpy/lib/tests/test__iotools.py index 548a3db2dc07..2555c4b86f6c 100644 --- a/numpy/lib/tests/test__iotools.py +++ b/numpy/lib/tests/test__iotools.py @@ -1,6 +1,8 @@ import time from datetime import date +import pytest + import numpy as np from numpy.lib._iotools import ( LineSplitter, @@ -200,6 +202,7 @@ def test_missing(self): except ValueError: pass + @pytest.mark.thread_unsafe(reason="monkeypatches StringConverter") def test_upgrademapper(self): "Tests updatemapper" dateparser = _bytes_to_date diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index a6de6238d269..c70e3d5ebd43 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -957,6 +957,7 @@ def test_large_file_support(tmpdir): @pytest.mark.skipif(not IS_64BIT, reason="test requires 64-bit system") @pytest.mark.slow @requires_memory(free_bytes=2 * 2**30) +@pytest.mark.thread_unsafe(reason="crashes with low memory") def test_large_archive(tmpdir): # Regression test for product of saving arrays with dimensions of array # having a product that doesn't fit in int32. See gh-7598 for details. diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 6ca7892c6cbe..e441d9a1dd4b 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2056,6 +2056,9 @@ def unbound(*args): ('bound', A.iters), ('unbound', 0), ]) + @pytest.mark.thread_unsafe( + reason="test result depends on the reference count of a global object" + ) def test_frompyfunc_leaks(self, name, incr): # exposed in gh-11867 as np.vectorized, but the problem stems from # frompyfunc. 
@@ -3961,6 +3964,10 @@ def test_quantile_monotonic(self, method): quantile = np.quantile([0., 1., 2., 3.], p0, method=method) assert_equal(np.sort(quantile), quantile) + @pytest.mark.thread_unsafe( + reason="gives unreliable results w/ hypothesis " + "(HypothesisWorks/hypothesis#4562)" + ) @hypothesis.given( arr=arrays(dtype=np.float64, shape=st.integers(min_value=3, max_value=1000), diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index b4b0db36ae2a..5b9c9ab42a5d 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -235,6 +235,7 @@ def test_load_non_npy(self): @pytest.mark.skipif(IS_PYPY, reason="Hangs on PyPy") @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") @pytest.mark.slow + @pytest.mark.thread_unsafe(reason="crashes with low memory") def test_big_arrays(self): L = (1 << 31) + 100000 a = np.empty(L, dtype=np.uint8) @@ -630,6 +631,7 @@ def test_unicode_and_bytes_fmt(self, iotype): @pytest.mark.skipif(sys.platform == 'win32', reason="files>4GB may not work") @pytest.mark.slow @requires_memory(free_bytes=7e9) + @pytest.mark.thread_unsafe(reason="crashes with low memory") def test_large_zip(self): def check_large_zip(memoryerror_raised): memoryerror_raised.value = False @@ -2806,6 +2808,7 @@ def test_npzfile_dict(): @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +@pytest.mark.thread_unsafe(reason="garbage collector is global state") def test_load_refcount(): # Check that objects returned by np.load are directly freed based on # their refcount, rather than needing the gc to collect them. diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py index ef11ecfdf518..6ef86bf84ee0 100644 --- a/numpy/lib/tests/test_nanfunctions.py +++ b/numpy/lib/tests/test_nanfunctions.py @@ -1422,6 +1422,7 @@ def test__replace_nan(): assert np.isnan(arr_nan[-1]) +@pytest.mark.thread_unsafe(reason="memmap is thread-unsafe (gh-29126)") def test_memmap_takes_fast_route(tmpdir): # We want memory mapped arrays to take the fast route through nanmax, # which avoids creating a mask by using fmax.reduce (see gh-28721). 
So we diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py index 3991f92b16a3..b9cc266a9363 100644 --- a/numpy/lib/tests/test_recfunctions.py +++ b/numpy/lib/tests/test_recfunctions.py @@ -1,3 +1,5 @@ +import pytest + import numpy as np import numpy.ma as ma from numpy.lib.recfunctions import ( @@ -234,6 +236,7 @@ def test_repack_fields(self): dt = np.dtype((np.record, dt)) assert_(repack_fields(dt).type is np.record) + @pytest.mark.thread_unsafe(reason="memmap is thread-unsafe (gh-29126)") def test_structured_to_unstructured(self, tmp_path): a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) out = structured_to_unstructured(a) diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index fbebd9fccc37..2da03158d317 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -1046,6 +1046,7 @@ def test_mvoid_iter(self): # w/ mask assert_equal(list(a[1]), [masked, 4]) + @pytest.mark.thread_unsafe(reason="masked_print_option.set_display global state") def test_mvoid_print(self): # Test printing a mvoid mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)]) @@ -1063,6 +1064,7 @@ def test_mvoid_print(self): mx = array([(1,), (2,)], dtype=[('a', 'O')]) assert_equal(str(mx[0]), "(1,)") + @pytest.mark.thread_unsafe(reason="masked_print_option global state") def test_mvoid_multidim_print(self): # regression test for gh-6019 @@ -4331,6 +4333,7 @@ def test_axis_methods_nomask(self): assert_equal(a.max(-1), [3, 6]) assert_equal(a.max(1), [3, 6]) + @pytest.mark.thread_unsafe(reason="crashes with low memory") @requires_memory(free_bytes=2 * 10000 * 1000 * 2) def test_mean_overflow(self): # Test overflow in masked arrays diff --git a/numpy/matrixlib/tests/test_matrix_linalg.py b/numpy/matrixlib/tests/test_matrix_linalg.py index 6ce03b36abde..99acf32adc49 100644 --- a/numpy/matrixlib/tests/test_matrix_linalg.py +++ b/numpy/matrixlib/tests/test_matrix_linalg.py @@ -1,4 +1,6 @@ """ Test functions for linalg module using the matrix class.""" +import pytest + import numpy as np from numpy.linalg.tests.test_linalg import ( CondCases, @@ -81,6 +83,9 @@ class TestDetMatrix(DetCases, MatrixTestCase): pass +@pytest.mark.thread_unsafe( + reason="residuals not calculated properly for square tests (gh-29851)" +) class TestLstsqMatrix(LstsqCases, MatrixTestCase): pass diff --git a/numpy/polynomial/tests/test_printing.py b/numpy/polynomial/tests/test_printing.py index d3735e3b85f6..f7d0131c94a9 100644 --- a/numpy/polynomial/tests/test_printing.py +++ b/numpy/polynomial/tests/test_printing.py @@ -246,6 +246,7 @@ def test_linewidth_printoption(self, lw, tgt): assert_(len(line) < lw) +@pytest.mark.thread_unsafe(reason="set_default_printstyle() is global state") def test_set_default_printoptions(): p = poly.Polynomial([1, 2, 3]) c = poly.Chebyshev([1, 2, 3]) @@ -259,6 +260,7 @@ def test_set_default_printoptions(): poly.set_default_printstyle('invalid_input') +@pytest.mark.thread_unsafe(reason="set_default_printstyle() is global state") def test_complex_coefficients(): """Test both numpy and built-in complex.""" coefs = [0 + 1j, 1 + 1j, -2 + 2j, 3 + 0j] diff --git a/numpy/random/tests/test_direct.py b/numpy/random/tests/test_direct.py index 6f069e48879f..9916f8ad3440 100644 --- a/numpy/random/tests/test_direct.py +++ b/numpy/random/tests/test_direct.py @@ -572,6 +572,9 @@ def test_passthrough(self): assert rg2 is rg assert rg2.bit_generator is bg + @pytest.mark.thread_unsafe( + reason="np.random.set_bit_generator affects global 
state" + ) def test_coercion_RandomState_Generator(self): # use default_rng to coerce RandomState to Generator rs = RandomState(1234) diff --git a/numpy/random/tests/test_extending.py b/numpy/random/tests/test_extending.py index 7a079d6362e8..a1e64ecbe343 100644 --- a/numpy/random/tests/test_extending.py +++ b/numpy/random/tests/test_extending.py @@ -57,6 +57,10 @@ @pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', reason='Meson unable to find MSVC linker on win-arm64') @pytest.mark.slow +@pytest.mark.thread_unsafe( + reason="building cython code in a subprocess doesn't make sense to do in many " + "threads and sometimes crashes" +) def test_cython(tmp_path): import glob # build the examples in a temporary directory diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index 51065f24868d..7d13c49149b3 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -1266,6 +1266,7 @@ def test_dirichlet_small_alpha(self): assert_array_almost_equal(actual, expected, decimal=15) @pytest.mark.slow + @pytest.mark.thread_unsafe(reason="crashes with low memory") def test_dirichlet_moderately_small_alpha(self): # Use alpha.max() < 0.1 to trigger stick breaking code path alpha = np.array([0.02, 0.04, 0.03]) diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py index fd371218616c..63ffb5a86389 100644 --- a/numpy/random/tests/test_randomstate.py +++ b/numpy/random/tests/test_randomstate.py @@ -2034,6 +2034,7 @@ def test_randomstate_ctor_old_style_pickle(): assert_equal(state_a['gauss'], state_b['gauss']) +@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state") def test_hot_swap(restore_singleton_bitgen): # GH 21808 def_bg = np.random.default_rng(0) @@ -2045,6 +2046,7 @@ def test_hot_swap(restore_singleton_bitgen): assert bg is second_bg +@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state") def test_seed_alt_bit_gen(restore_singleton_bitgen): # GH 21808 bg = PCG64(0) @@ -2059,6 +2061,7 @@ def test_seed_alt_bit_gen(restore_singleton_bitgen): assert state["state"]["inc"] != new_state["state"]["inc"] +@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state") def test_state_error_alt_bit_gen(restore_singleton_bitgen): # GH 21808 state = np.random.get_state() @@ -2068,6 +2071,7 @@ def test_state_error_alt_bit_gen(restore_singleton_bitgen): np.random.set_state(state) +@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state") def test_swap_worked(restore_singleton_bitgen): # GH 21808 np.random.seed(98765) @@ -2086,6 +2090,7 @@ def test_swap_worked(restore_singleton_bitgen): assert new_state["state"]["inc"] == new_state["state"]["inc"] +@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state") def test_swapped_singleton_against_direct(restore_singleton_bitgen): np.random.set_bit_generator(PCG64(98765)) singleton_vals = np.random.randint(0, 2 ** 30, 10) diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index 815f7ac2930a..6d43343ef98a 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -1146,6 +1146,7 @@ def test_strict(self): @pytest.mark.filterwarnings( "ignore:.*NumPy warning suppression and assertion utilities are deprecated" ".*:DeprecationWarning") +@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings") class TestWarns: 
def test_warn(self): @@ -1760,6 +1761,7 @@ def _get_fresh_mod(): return my_mod +@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings") def test_clear_and_catch_warnings(): # Initial state of module, no warnings my_mod = _get_fresh_mod() @@ -1795,6 +1797,7 @@ def test_clear_and_catch_warnings(): @pytest.mark.filterwarnings( "ignore:.*NumPy warning suppression and assertion utilities are deprecated" ".*:DeprecationWarning") +@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings") def test_suppress_warnings_module(): # Initial state of module, no warnings my_mod = _get_fresh_mod() @@ -1844,6 +1847,7 @@ def warn(arr): @pytest.mark.filterwarnings( "ignore:.*NumPy warning suppression and assertion utilities are deprecated" ".*:DeprecationWarning") +@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings") def test_suppress_warnings_type(): # Initial state of module, no warnings my_mod = _get_fresh_mod() @@ -1875,6 +1879,9 @@ def test_suppress_warnings_type(): @pytest.mark.filterwarnings( "ignore:.*NumPy warning suppression and assertion utilities are deprecated" ".*:DeprecationWarning") +@pytest.mark.thread_unsafe( + reason="uses deprecated thread-unsafe warnings control utilities" +) def test_suppress_warnings_decorate_no_record(): sup = suppress_warnings() sup.filter(UserWarning) @@ -1893,6 +1900,9 @@ def warn(category): @pytest.mark.filterwarnings( "ignore:.*NumPy warning suppression and assertion utilities are deprecated" ".*:DeprecationWarning") +@pytest.mark.thread_unsafe( + reason="uses deprecated thread-unsafe warnings control utilities" +) def test_suppress_warnings_record(): sup = suppress_warnings() log1 = sup.record() @@ -1937,6 +1947,9 @@ def test_suppress_warnings_record(): @pytest.mark.filterwarnings( "ignore:.*NumPy warning suppression and assertion utilities are deprecated" ".*:DeprecationWarning") +@pytest.mark.thread_unsafe( + reason="uses deprecated thread-unsafe warnings control utilities" +) def test_suppress_warnings_forwarding(): def warn_other_module(): # Apply along axis is implemented in python; stacklevel=2 means @@ -2026,6 +2039,7 @@ class my_cacw(clear_and_catch_warnings): class_modules = (sys.modules[__name__],) +@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings") def test_clear_and_catch_warnings_inherit(): # Test can subclass and add default modules my_mod = _get_fresh_mod() @@ -2036,6 +2050,7 @@ def test_clear_and_catch_warnings_inherit(): @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +@pytest.mark.thread_unsafe(reason="garbage collector is global state") class TestAssertNoGcCycles: """ Test assert_no_gc_cycles """ diff --git a/numpy/tests/test_ctypeslib.py b/numpy/tests/test_ctypeslib.py index 68d31416040b..b88910ce457e 100644 --- a/numpy/tests/test_ctypeslib.py +++ b/numpy/tests/test_ctypeslib.py @@ -124,6 +124,7 @@ def test_flags(self): assert_(p.from_param(x)) assert_raises(TypeError, p.from_param, np.array([[1, 2], [3, 4]])) + @pytest.mark.thread_unsafe(reason="checks that global ndpointer cache is updating") def test_cache(self): assert_(ndpointer(dtype=np.float64) is ndpointer(dtype=np.float64)) @@ -178,6 +179,7 @@ def test_return(self, dt): arr.__array_interface__['data'] ) + @pytest.mark.thread_unsafe(reason="mutates global test vars") def test_vague_return_value(self): """ Test that vague ndpointer return values do not promote to arrays """ arr = np.zeros((2, 3)) @@ -252,6 +254,7 @@ def check(x): 
check(as_array(pointer(c_array[0]), shape=(2,)))
         check(as_array(pointer(c_array[0][0]), shape=(2, 3)))
 
+    @pytest.mark.thread_unsafe(reason="garbage collector is global state")
     def test_reference_cycles(self):
         # related to gh-6511
         import ctypes
@@ -302,6 +305,7 @@ def test_scalar(self):
         ct = np.ctypeslib.as_ctypes_type(dt)
         assert_equal(ct, ctypes.c_uint16)
 
+    @pytest.mark.thread_unsafe(reason="some sort of data race? (gh-29943)")
     def test_subarray(self):
         dt = np.dtype((np.int32, (2, 3)))
         ct = np.ctypeslib.as_ctypes_type(dt)
@@ -321,6 +325,7 @@ def test_structure(self):
             ('b', ctypes.c_uint32),
         ])
 
+    @pytest.mark.thread_unsafe(reason="some sort of data race? (gh-29943)")
     def test_structure_aligned(self):
         dt = np.dtype([
             ('a', np.uint16),
@@ -351,6 +356,7 @@ def test_union(self):
             ('b', ctypes.c_uint32),
         ])
 
+    @pytest.mark.thread_unsafe(reason="some sort of data race? (gh-29943)")
     def test_padded_union(self):
         dt = np.dtype({
             'names': ['a', 'b'],
diff --git a/numpy/tests/test_numpy_config.py b/numpy/tests/test_numpy_config.py
index f01a279574a5..9219379b2552 100644
--- a/numpy/tests/test_numpy_config.py
+++ b/numpy/tests/test_numpy_config.py
@@ -21,6 +21,7 @@ class TestNumPyConfigs:
     ]
 
     @patch("numpy.__config__._check_pyyaml")
+    @pytest.mark.thread_unsafe(reason="unittest.mock.patch updates global state")
     def test_pyyaml_not_found(self, mock_yaml_importer):
         mock_yaml_importer.side_effect = ModuleNotFoundError()
         with pytest.warns(UserWarning):
diff --git a/numpy/tests/test_reloading.py b/numpy/tests/test_reloading.py
index b70a715237a5..aa87ae104318 100644
--- a/numpy/tests/test_reloading.py
+++ b/numpy/tests/test_reloading.py
@@ -10,6 +10,7 @@
 from numpy.testing import IS_WASM, assert_, assert_equal, assert_raises
 
 
+@pytest.mark.thread_unsafe(reason="reloads global module")
 def test_numpy_reloading():
     # gh-7844. Also check that relevant globals retain their identity.
     import numpy as np
diff --git a/numpy/typing/tests/test_isfile.py b/numpy/typing/tests/test_isfile.py
index f72122f208c9..0e3157a1e54d 100644
--- a/numpy/typing/tests/test_isfile.py
+++ b/numpy/typing/tests/test_isfile.py
@@ -2,6 +2,8 @@
 import sys
 from pathlib import Path
 
+import pytest
+
 import numpy as np
 from numpy.testing import assert_
 
@@ -25,6 +27,10 @@
     FILES += [ROOT / "distutils" / "__init__.pyi"]
 
 
+@pytest.mark.thread_unsafe(
+    reason="os.path has a thread-safety bug (python/cpython#140054). "
+    "Expected to only be a problem in 3.14.0"
+)
 class TestIsFile:
     def test_isfile(self):
         """Test if all ``.pyi`` files are properly installed."""
From e4d64414394cbb2ae62e0f61c3230ba88f537090 Mon Sep 17 00:00:00 2001
From: abhi210 <27881020+Abhi210@users.noreply.github.com>
Date: Fri, 17 Oct 2025 00:04:29 +0530
Subject: [PATCH 0664/1018] MAINT: remove deprecated `trapz` function, `disp`
 function and `bias`/`ddof` args in `corrcoef`

This commit removes long-deprecated code paths in
`numpy/lib/_function_base_impl`:

- Removed `numpy.trapz`, deprecated in NumPy 2.0 (2023-08-18)
- Removed the `disp` function
- Removed the `bias` and `ddof` arguments from `numpy.corrcoef`

Removed the associated tests from `numpy/lib/tests/test_function_base.py`
and `numpy/_core/tests/test_deprecations.py`; the type stubs were also
updated.

All tests and Ruff checks pass locally.
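For downstream code that still calls the removed APIs, the migration is
mechanical. A minimal sketch (the sample data below is illustrative, not
taken from the test suite):

    import numpy as np

    # f(x) = x**2 sampled at x = 0, 1, 2, 3
    x = np.array([0.0, 1.0, 2.0, 3.0])
    y = x**2

    # np.trapezoid is the drop-in replacement for the removed np.trapz;
    # it keeps the same (y, x=None, dx=1.0, axis=-1) parameters.
    area = np.trapezoid(y, x)   # composite trapezoidal rule -> 9.5

    # corrcoef keeps its meaningful parameters; the removed bias/ddof
    # arguments were no-ops, so simply drop them at the call site.
    r = np.corrcoef(np.array([[0.0, 1.0, 2.0], [2.0, 1.0, 0.0]]))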
--- numpy/__init__.py | 1 - numpy/__init__.pyi | 3 +- numpy/_core/tests/test_deprecations.py | 5 +- numpy/_expired_attrs_2_0.py | 1 - numpy/_expired_attrs_2_0.pyi | 1 - numpy/lib/_function_base_impl.py | 88 +------------------------- numpy/lib/_function_base_impl.pyi | 7 +- numpy/lib/tests/test_function_base.py | 26 -------- numpy/matlib.pyi | 1 - 9 files changed, 4 insertions(+), 129 deletions(-) diff --git a/numpy/__init__.py b/numpy/__init__.py index 4e3119238c0a..a0178b211258 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -505,7 +505,6 @@ sinc, sort_complex, trapezoid, - trapz, trim_zeros, unwrap, vectorize, diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 47e3236be751..a272062a6cd3 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -460,7 +460,6 @@ from numpy.lib._function_base_impl import ( # type: ignore[deprecated] blackman, kaiser, trapezoid, - trapz, i0, meshgrid, delete, @@ -686,7 +685,7 @@ __all__ = [ # noqa: RUF022 "gradient", "angle", "unwrap", "sort_complex", "flip", "rot90", "extract", "place", "vectorize", "asarray_chkfinite", "average", "bincount", "digitize", "cov", "corrcoef", "median", "sinc", "hamming", "hanning", "bartlett", "blackman", - "kaiser", "trapezoid", "trapz", "i0", "meshgrid", "delete", "insert", "append", + "kaiser", "trapezoid", "i0", "meshgrid", "delete", "insert", "append", "interp", "quantile", # lib._twodim_base_impl.__all__ "diag", "diagflat", "eye", "fliplr", "flipud", "tri", "triu", "tril", "vander", diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index ca4463ece1f7..2927935cb90f 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -280,9 +280,8 @@ def test_deprecated_np_lib_math(self): class TestLibImports(_DeprecationTestCase): # Deprecated in Numpy 1.26.0, 2023-09 def test_lib_functions_deprecation_call(self): - from numpy import row_stack, trapz + from numpy import row_stack from numpy._core.numerictypes import maximum_sctype - from numpy.lib._function_base_impl import disp from numpy.lib._npyio_impl import recfromcsv, recfromtxt from numpy.lib._shape_base_impl import get_array_wrap from numpy.lib._utils_impl import safe_eval @@ -295,12 +294,10 @@ def test_lib_functions_deprecation_call(self): self.assert_deprecated(lambda: recfromcsv(data_gen())) self.assert_deprecated(lambda: recfromtxt(data_gen(), **kwargs)) - self.assert_deprecated(lambda: disp("test")) self.assert_deprecated(get_array_wrap) self.assert_deprecated(lambda: maximum_sctype(int)) self.assert_deprecated(lambda: row_stack([[]])) - self.assert_deprecated(lambda: trapz([1], [1])) self.assert_deprecated(lambda: np.chararray) diff --git a/numpy/_expired_attrs_2_0.py b/numpy/_expired_attrs_2_0.py index 1397134e3f8c..2eebf95bc558 100644 --- a/numpy/_expired_attrs_2_0.py +++ b/numpy/_expired_attrs_2_0.py @@ -61,7 +61,6 @@ "or use `typing.deprecated`.", "deprecate_with_doc": "Emit `DeprecationWarning` with `warnings.warn` " "directly, or use `typing.deprecated`.", - "disp": "Use your own printing function instead.", "find_common_type": "Use `numpy.promote_types` or `numpy.result_type` instead. 
" "To achieve semantics for the `scalar_types` argument, use " diff --git a/numpy/_expired_attrs_2_0.pyi b/numpy/_expired_attrs_2_0.pyi index 14524689c1c5..de6c2d10f9b0 100644 --- a/numpy/_expired_attrs_2_0.pyi +++ b/numpy/_expired_attrs_2_0.pyi @@ -47,7 +47,6 @@ class _ExpiredAttributesType(TypedDict): recfromtxt: str deprecate: str deprecate_with_doc: str - disp: str find_common_type: str round_: str get_array_wrap: str diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index bdc32574e8d1..32774212befa 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -2,7 +2,6 @@ import collections.abc import functools import re -import sys import warnings import numpy as np @@ -68,7 +67,7 @@ 'rot90', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average', 'bincount', 'digitize', 'cov', 'corrcoef', 'median', 'sinc', 'hamming', 'hanning', 'bartlett', - 'blackman', 'kaiser', 'trapezoid', 'trapz', 'i0', + 'blackman', 'kaiser', 'trapezoid', 'i0', 'meshgrid', 'delete', 'insert', 'append', 'interp', 'quantile' ] @@ -2119,62 +2118,6 @@ def place(arr, mask, vals): return _place(arr, mask, vals) -def disp(mesg, device=None, linefeed=True): - """ - Display a message on a device. - - .. deprecated:: 2.0 - Use your own printing function instead. - - Parameters - ---------- - mesg : str - Message to display. - device : object - Device to write message. If None, defaults to ``sys.stdout`` which is - very similar to ``print``. `device` needs to have ``write()`` and - ``flush()`` methods. - linefeed : bool, optional - Option whether to print a line feed or not. Defaults to True. - - Raises - ------ - AttributeError - If `device` does not have a ``write()`` or ``flush()`` method. - - Examples - -------- - >>> import numpy as np - - Besides ``sys.stdout``, a file-like object can also be used as it has - both required methods: - - >>> from io import StringIO - >>> buf = StringIO() - >>> np.disp('"Display" in a file', device=buf) - >>> buf.getvalue() - '"Display" in a file\\n' - - """ - - # Deprecated in NumPy 2.0, 2023-07-11 - warnings.warn( - "`disp` is deprecated, " - "use your own printing function instead. " - "(deprecated in NumPy 2.0)", - DeprecationWarning, - stacklevel=2 - ) - - if device is None: - device = sys.stdout - if linefeed: - device.write(f'{mesg}\n') - else: - device.write(f'{mesg}') - device.flush() - - # See https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html _DIMENSION_NAME = r'\w+' _CORE_DIMENSION_LIST = f'(?:{_DIMENSION_NAME}(?:,{_DIMENSION_NAME})*)?' @@ -2959,14 +2902,7 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue, *, variable, with observations in the columns. Otherwise, the relationship is transposed: each column represents a variable, while the rows contain observations. - bias : _NoValue, optional - Has no effect, do not use. - - .. deprecated:: 1.10.0 - ddof : _NoValue, optional - Has no effect, do not use. - .. deprecated:: 1.10.0 dtype : data-type, optional Data-type of the result. By default, the return data-type will have at least `numpy.float64` precision. @@ -3061,10 +2997,6 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue, *, 1. 
]]) """ - if bias is not np._NoValue or ddof is not np._NoValue: - # 2015-03-15, 1.10 - warnings.warn('bias and ddof have no effect and are deprecated', - DeprecationWarning, stacklevel=2) c = cov(x, y, rowvar, dtype=dtype) try: d = diag(c) @@ -5080,24 +5012,6 @@ def trapezoid(y, x=None, dx=1.0, axis=-1): return ret -@set_module('numpy') -def trapz(y, x=None, dx=1.0, axis=-1): - """ - `trapz` is deprecated in NumPy 2.0. - - Please use `trapezoid` instead, or one of the numerical integration - functions in `scipy.integrate`. - """ - # Deprecated in NumPy 2.0, 2023-08-18 - warnings.warn( - "`trapz` is deprecated. Use `trapezoid` instead, or one of the " - "numerical integration functions in `scipy.integrate`.", - DeprecationWarning, - stacklevel=2 - ) - return trapezoid(y, x=x, dx=dx, axis=axis) - - def _meshgrid_dispatcher(*xi, copy=None, sparse=None, indexing=None): return xi diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 47dd0749f07f..e99be06469f3 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -14,7 +14,7 @@ from typing import ( overload, type_check_only, ) -from typing_extensions import TypeIs, deprecated +from typing_extensions import TypeIs import numpy as np from numpy import ( @@ -87,7 +87,6 @@ __all__ = [ "blackman", "kaiser", "trapezoid", - "trapz", "i0", "meshgrid", "delete", @@ -990,10 +989,6 @@ def trapezoid( floating | complexfloating | timedelta64 | NDArray[floating | complexfloating | timedelta64 | object_] ): ... - -@deprecated("Use 'trapezoid' instead") -def trapz(y: ArrayLike, x: ArrayLike | None = None, dx: float = 1.0, axis: int = -1) -> generic | NDArray[generic]: ... - @overload def meshgrid( *, diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 6ca7892c6cbe..7af2f9dc15bd 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -565,10 +565,6 @@ def test_return_dtype(self): m = np.isnan(d) assert_equal(select([m], [d]), [0, 0, 0, np.nan, 0, 0]) - def test_deprecated_empty(self): - assert_raises(ValueError, select, [], [], 3j) - assert_raises(ValueError, select, [], []) - def test_non_bool_deprecation(self): choices = self.choices conditions = self.conditions[:] @@ -2476,28 +2472,6 @@ def test_simple(self): assert_almost_equal(tgt2, self.res2) assert_(np.all(np.abs(tgt2) <= 1.0)) - def test_ddof(self): - # ddof raises DeprecationWarning - with warnings.catch_warnings(): - warnings.simplefilter("always") - pytest.warns(DeprecationWarning, corrcoef, self.A, ddof=-1) - warnings.simplefilter('ignore', DeprecationWarning) - # ddof has no or negligible effect on the function - assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1) - assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2) - assert_almost_equal(corrcoef(self.A, ddof=3), self.res1) - assert_almost_equal(corrcoef(self.A, self.B, ddof=3), self.res2) - - def test_bias(self): - # bias raises DeprecationWarning - with warnings.catch_warnings(): - warnings.simplefilter("always") - pytest.warns(DeprecationWarning, corrcoef, self.A, self.B, 1, 0) - pytest.warns(DeprecationWarning, corrcoef, self.A, bias=0) - warnings.simplefilter('ignore', DeprecationWarning) - # bias has no or negligible effect on the function - assert_almost_equal(corrcoef(self.A, bias=1), self.res1) - def test_complex(self): x = np.array([[1, 2, 3], [1j, 2j, 3j]]) res = corrcoef(x) diff --git a/numpy/matlib.pyi b/numpy/matlib.pyi index 0a4f649a7d9c..d653a5a6cc98 100644 
--- a/numpy/matlib.pyi
+++ b/numpy/matlib.pyi
@@ -450,7 +450,6 @@ from numpy import (  # noqa: F401
     trace,
     transpose,
     trapezoid,
-    trapz,
     tri,
     tril,
     tril_indices,
From 1b89f6bfcb3dcddb13ad02b2075f1b8c22422599 Mon Sep 17 00:00:00 2001
From: jorenham <jhammudoglu@gmail.com>
Date: Thu, 16 Oct 2025 23:29:49 +0200
Subject: [PATCH 0665/1018] TYP: fix ``char.startswith`` signature

---
 numpy/_core/defchararray.pyi | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi
index 815d1397b39c..53ff96f450f5 100644
--- a/numpy/_core/defchararray.pyi
+++ b/numpy/_core/defchararray.pyi
@@ -1036,7 +1036,7 @@ def startswith(
 @overload
 def startswith(
     a: T_co,
-    suffix: T_co,
+    prefix: T_co,
     start: i_co = 0,
     end: i_co | None = None,
 ) -> NDArray[np.bool]: ...

From fe051faefefbb519e31fef7943a63b1abef5e836 Mon Sep 17 00:00:00 2001
From: Charles Harris <charlesr.harris@gmail.com>
Date: Thu, 16 Oct 2025 16:11:55 -0600
Subject: [PATCH 0666/1018] MAINT: Update write_release.py (#29998)

Remove hash generation and only translate the release notes from rst to
markdown.

[skip azp]
[skip cirrus]
[skip actions]
---
 tools/write_release.py | 68 ++++++------------------------------------
 1 file changed, 9 insertions(+), 59 deletions(-)

diff --git a/tools/write_release.py b/tools/write_release.py
index 7662eb7b1288..cdb5235f0bd0 100644
--- a/tools/write_release.py
+++ b/tools/write_release.py
@@ -18,8 +18,6 @@
 import argparse
 import os
 import subprocess
-import textwrap
-from hashlib import md5, sha256
 from pathlib import Path
 
 # Name of the notes directory
@@ -29,38 +27,12 @@
 # Output base name, `.rst` or `.md` will be appended
 OUTPUT_FILE = "README"
 
-def compute_hash(wheel_dir, hash_func):
-    """
-    Compute hashes of files in wheel_dir.
-
-    Parameters
-    ----------
-    wheel_dir: str
-        Path to wheel directory from repo root.
-    hash_func: function
-        Hash function, i.e., md5, sha256, etc.
-
-    Returns
-    -------
-    list_of_strings: list
-        List of of strings. Each string is the hash
-        followed by the file basename.
-
-    """
-    released = os.listdir(wheel_dir)
-    checksums = []
-    for fn in sorted(released):
-        fn_path = Path(f"{wheel_dir}/{fn}")
-        m = hash_func(fn_path.read_bytes())
-        checksums.append(f"{m.hexdigest()}  {fn}")
-    return checksums
-
-
 def write_release(version):
     """
-    Copy the <version>-notes.rst file to the OUTPUT_DIR, append
-    the md5 and sha256 hashes of the wheels and sdist, and produce
-    README.rst and README.md files.
+    Copy the <version>-notes.rst file to the OUTPUT_DIR and use
+    pandoc to translate it to markdown. That results in both
+    README.rst and README.md files that can be used on
+    GitHub for the release.
Parameters ---------- @@ -73,35 +45,13 @@ def write_release(version): """ notes = Path(NOTES_DIR) / f"{version}-notes.rst" - wheel_dir = Path(OUTPUT_DIR) / "installers" - target_md = Path(OUTPUT_DIR) / f"{OUTPUT_FILE}.md" - target_rst = Path(OUTPUT_DIR) / f"{OUTPUT_FILE}.rst" - - os.system(f"cp {notes} {target_rst}") - - with open(str(target_rst), 'a') as f: - f.writelines(textwrap.dedent( - """ - Checksums - ========= - - MD5 - --- - :: - - """)) - f.writelines([f' {c}\n' for c in compute_hash(wheel_dir, md5)]) - - f.writelines(textwrap.dedent( - """ - SHA256 - ------ - :: - - """)) - f.writelines([f' {c}\n' for c in compute_hash(wheel_dir, sha256)]) + outdir = Path(OUTPUT_DIR) + outdir.mkdir(exist_ok=True) + target_md = outdir / f"{OUTPUT_FILE}.md" + target_rst = outdir / f"{OUTPUT_FILE}.rst" # translate README.rst to md for posting on GitHub + os.system(f"cp {notes} {target_rst}") subprocess.run( ["pandoc", "-s", "-o", str(target_md), str(target_rst), "--wrap=preserve"], check=True, From 97e21930e923fa40d6936dc4b2dc82607690fd2b Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 17 Oct 2025 06:38:51 +0200 Subject: [PATCH 0667/1018] TYP: fix ``ndarray.sort(stable=True)`` --- numpy/__init__.pyi | 4 ++-- numpy/typing/tests/data/pass/ndarray_misc.py | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 47e3236be751..866e3ba9b92a 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1721,7 +1721,7 @@ class _ArrayOrScalarCommon: kind: _SortKind | None = ..., order: str | Sequence[str] | None = ..., *, - stable: bool | None = ..., + stable: builtins.bool | None = ..., ) -> NDArray[intp]: ... @overload # axis=None (default), out=None (default), keepdims=False (default) @@ -2382,7 +2382,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): kind: _SortKind | None = ..., order: str | Sequence[str] | None = ..., *, - stable: bool | None = ..., + stable: builtins.bool | None = ..., ) -> None: ... # Keep in sync with `MaskedArray.trace` diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py index a17082aeba15..40c84d8641bd 100644 --- a/numpy/typing/tests/data/pass/ndarray_misc.py +++ b/numpy/typing/tests/data/pass/ndarray_misc.py @@ -53,7 +53,12 @@ class IntSubClass(npt.NDArray[np.intp]): ... A.argmin(out=B_int0) i4.argsort() +i4.argsort(stable=True) A.argsort() +A.argsort(stable=True) + +A.sort() +A.sort(stable=True) i4.choose([()]) _choices = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype=np.int32) From c8e6395f0b9de0f5355705ec5d8a9327d8e282d6 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 17 Oct 2025 07:19:35 +0200 Subject: [PATCH 0668/1018] TYP: inconsistent ``strings.slice`` default argument for ``stop`` --- numpy/_core/strings.pyi | 29 +++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/numpy/_core/strings.pyi b/numpy/_core/strings.pyi index 2f630ce8556a..270760412670 100644 --- a/numpy/_core/strings.pyi +++ b/numpy/_core/strings.pyi @@ -1,6 +1,7 @@ from typing import TypeAlias, overload import numpy as np +from numpy._globals import _NoValueType from numpy._typing import ( NDArray, _AnyShape, @@ -502,14 +503,34 @@ def translate( # @overload -def slice(a: U_co, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, /) -> NDArray[np.str_]: ... 
# type: ignore[overload-overlap] +def slice( + a: U_co, + start: i_co | None = None, + stop: i_co | _NoValueType | None = ..., # = np._NoValue + step: i_co | None = None, + /, +) -> NDArray[np.str_]: ... @overload -def slice(a: S_co, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, /) -> NDArray[np.bytes_]: ... +def slice( + a: S_co, + start: i_co | None = None, + stop: i_co | _NoValueType | None = ..., # = np._NoValue + step: i_co | None = None, + /, +) -> NDArray[np.bytes_]: ... @overload def slice( - a: _StringDTypeSupportsArray, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, / + a: _StringDTypeSupportsArray, + start: i_co | None = None, + stop: i_co | _NoValueType | None = ..., # = np._NoValue + step: i_co | None = None, + /, ) -> _StringDTypeArray: ... @overload def slice( - a: T_co, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, / + a: T_co, + start: i_co | None = None, + stop: i_co | _NoValueType | None = ..., # = np._NoValue + step: i_co | None = None, + /, ) -> _StringDTypeOrUnicodeArray: ... From 37ecd83af0be90e865ac2c73643b3e79d8ea6cde Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 17 Oct 2025 07:37:28 +0200 Subject: [PATCH 0669/1018] TYP: remove implicit re-export in ``_core._exceptions`` Because it's not there at runtime either. --- numpy/_core/_exceptions.pyi | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/_core/_exceptions.pyi b/numpy/_core/_exceptions.pyi index 02637a17b6a8..b340fde3e463 100644 --- a/numpy/_core/_exceptions.pyi +++ b/numpy/_core/_exceptions.pyi @@ -3,7 +3,6 @@ from typing import Any, Final, TypeVar, overload import numpy as np from numpy import _CastingKind -from numpy._utils import set_module as set_module ### From 2b5ad5b330aed167399cd9403f783fe089bc04a1 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 17 Oct 2025 08:11:05 +0200 Subject: [PATCH 0670/1018] TYP: inconsistent ``lib._iotools.NameValidator.deletechars`` annotation --- numpy/lib/_iotools.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_iotools.pyi b/numpy/lib/_iotools.pyi index 82275940e137..4c716ff68f87 100644 --- a/numpy/lib/_iotools.pyi +++ b/numpy/lib/_iotools.pyi @@ -49,8 +49,8 @@ class LineSplitter: def autostrip(self, /, method: Callable[[_T], Iterable[str]]) -> Callable[[_T], list[str]]: ... class NameValidator: - defaultexcludelist: ClassVar[Sequence[str]] - defaultdeletechars: ClassVar[Sequence[str]] + defaultexcludelist: ClassVar[Sequence[str]] = ... + defaultdeletechars: ClassVar[frozenset[str]] = ... excludelist: list[str] deletechars: set[str] case_converter: Callable[[str], str] From b381cf60f36415ab18435b88dd521d1b17fcf219 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 17 Oct 2025 08:26:28 +0200 Subject: [PATCH 0671/1018] TYP: inconsistent ``lib._iotools._decode_line`` function signature --- numpy/lib/_iotools.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_iotools.pyi b/numpy/lib/_iotools.pyi index 4c716ff68f87..3bf41fc0cdde 100644 --- a/numpy/lib/_iotools.pyi +++ b/numpy/lib/_iotools.pyi @@ -99,7 +99,7 @@ class StringConverter: @classmethod def upgrade_mapper(cls, func: Callable[[str], Any], default: object | None = None) -> None: ... -def _decode_line(line: str | bytes, encoding: str) -> str: ... +def _decode_line(line: str | bytes, encoding: str | None = None) -> str: ... def _is_string_like(obj: object) -> bool: ... def _is_bytes_like(obj: object) -> bool: ... 
def has_nested_fields(ndtype: np.dtype[np.void]) -> bool: ... From 9fc90137fdbab27f89124d0217e6432a1b619002 Mon Sep 17 00:00:00 2001 From: abhi210 <27881020+Abhi210@users.noreply.github.com> Date: Fri, 17 Oct 2025 12:06:59 +0530 Subject: [PATCH 0672/1018] DOC: add release note for removal of deprecated trapz, disp, and corrcoef args --- doc/release/upcoming_changes/29997.expired.rst | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 doc/release/upcoming_changes/29997.expired.rst diff --git a/doc/release/upcoming_changes/29997.expired.rst b/doc/release/upcoming_changes/29997.expired.rst new file mode 100644 index 000000000000..c109012836cc --- /dev/null +++ b/doc/release/upcoming_changes/29997.expired.rst @@ -0,0 +1,12 @@ +Removal of deprecated `trapz` function, `disp` method, and `bias`/`ddof` args in `corrcoef` +------------------------------------------------------------------------------------------- + +The following long-deprecated APIs have been removed: + +* ``numpy.trapz`` — deprecated since NumPy 2.0 (2023-08-18). Use ``numpy.trapezoid`` or + ``scipy.integrate`` functions instead. +* ``disp`` method — deprecated in earlier releases and no longer functional. Use your own printing function instead. +* ``bias`` and ``ddof`` arguments in ``numpy.corrcoef`` — these had no effect since NumPy 1.10. + +Associated tests and type stubs have been updated or removed accordingly. + From 6eb523a7dc07b13990c3f12e6a1c412f2d9f9a2f Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 17 Oct 2025 08:58:15 +0200 Subject: [PATCH 0673/1018] TYP: inconsistent ``lib.npyio.NpzFile.get`` method signature --- numpy/lib/_npyio_impl.pyi | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index 8a4dfa27ed9b..657cb14a49e0 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -43,6 +43,7 @@ __all__ = [ "unpackbits", ] +_T = TypeVar("_T") _T_co = TypeVar("_T_co", covariant=True) _ScalarT = TypeVar("_ScalarT", bound=np.generic) _ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, default=Any, covariant=True) @@ -64,8 +65,8 @@ class BagObj(Generic[_T_co]): class NpzFile(Mapping[str, NDArray[_ScalarT_co]]): _MAX_REPR_ARRAY_COUNT: ClassVar[int] = 5 - zip: zipfile.ZipFile - fid: IO[str] | None + zip: zipfile.ZipFile | None = None + fid: IO[str] | None = None files: list[str] allow_pickle: bool pickle_kwargs: Mapping[str, Any] | None @@ -91,6 +92,15 @@ class NpzFile(Mapping[str, NDArray[_ScalarT_co]]): def __iter__(self) -> Iterator[str]: ... @override def __getitem__(self, key: str, /) -> NDArray[_ScalarT_co]: ... + + # + @override + @overload + def get(self, key: str, default: None = None, /) -> NDArray[_ScalarT_co] | None: ... + @overload + def get(self, key: str, default: NDArray[_ScalarT_co] | _T, /) -> NDArray[_ScalarT_co] | _T: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # def close(self) -> None: ... # NOTE: Returns a `NpzFile` if file is a zip file; From 499530ff675c05fb994ea2ce2571df3cc0a3f6dd Mon Sep 17 00:00:00 2001 From: abhi210 <27881020+Abhi210@users.noreply.github.com> Date: Fri, 17 Oct 2025 12:28:21 +0530 Subject: [PATCH 0674/1018] MAINT: remove redundant deprecated `bias` and `ddof` handling in `corrcoef` Removes leftover deprecation logic for the `bias` and `ddof` arguments in `numpy.corrcoef` from `extra.py`. These arguments were already deprecated and removed from `_function_base_impl`. No functional changes beyond removing redundant code. 
All tests and Ruff checks pass locally. --- numpy/ma/extras.py | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index b7405d3809f8..9fb3dd378bbf 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -1751,19 +1751,6 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, variable, with observations in the columns. Otherwise, the relationship is transposed: each column represents a variable, while the rows contain observations. - bias : _NoValue, optional - Has no effect, do not use. - - .. deprecated:: 1.10.0 - allow_masked : bool, optional - If True, masked values are propagated pair-wise: if a value is masked - in `x`, the corresponding value is masked in `y`. - If False, raises an exception. Because `bias` is deprecated, this - argument needs to be treated as keyword only to avoid a warning. - ddof : _NoValue, optional - Has no effect, do not use. - - .. deprecated:: 1.10.0 See Also -------- @@ -1791,10 +1778,6 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, dtype=float64) """ - msg = 'bias and ddof have no effect and are deprecated' - if bias is not np._NoValue or ddof is not np._NoValue: - # 2015-03-15, 1.10 - warnings.warn(msg, DeprecationWarning, stacklevel=2) # Estimate the covariance matrix. corr = cov(x, y, rowvar, allow_masked=allow_masked) # The non-masked version returns a masked value for a scalar. From a6ab3ab5de35981e573c1ab5edced62779e34556 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 17 Oct 2025 09:13:57 +0200 Subject: [PATCH 0675/1018] TYP: mark ``lib.mixins.NDArrayOperatorsMixin.__array_ufunc__`` as ``@type_check_only`` --- numpy/lib/mixins.pyi | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/lib/mixins.pyi b/numpy/lib/mixins.pyi index 1572f6bee289..e508a5cfd4bb 100644 --- a/numpy/lib/mixins.pyi +++ b/numpy/lib/mixins.pyi @@ -1,5 +1,5 @@ from abc import ABC, abstractmethod -from typing import Any, Literal as L +from typing import Any, Literal as L, type_check_only from numpy import ufunc @@ -15,11 +15,13 @@ __all__ = ["NDArrayOperatorsMixin"] class NDArrayOperatorsMixin(ABC): __slots__ = () + @type_check_only @abstractmethod def __array_ufunc__( self, ufunc: ufunc, method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "at"], + /, *inputs: Any, **kwargs: Any, ) -> Any: ... From 28352b0ef213d88791d4916658ce80884b8a90cf Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 17 Oct 2025 09:17:30 +0200 Subject: [PATCH 0676/1018] TYP: add missing overload with default to `lib.recfunctions.unstructured_to_structured` --- numpy/lib/recfunctions.pyi | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/numpy/lib/recfunctions.pyi b/numpy/lib/recfunctions.pyi index a0b49ba1df00..33713cf16331 100644 --- a/numpy/lib/recfunctions.pyi +++ b/numpy/lib/recfunctions.pyi @@ -268,6 +268,16 @@ def unstructured_to_structured( copy: bool = False, casting: str = "unsafe", ) -> npt.NDArray[np.void]: ... +@overload +def unstructured_to_structured( + arr: npt.NDArray[Any], + dtype: None = None, + *, + names: _OneOrMany[str], + align: bool = False, + copy: bool = False, + casting: str = "unsafe", +) -> npt.NDArray[np.void]: ... 
# def apply_along_fields( From aabd7adffe41f91e320d883a058cacf56edb6804 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 17 Oct 2025 09:31:46 +0200 Subject: [PATCH 0677/1018] TYP: update ``lib.npyio.NpzFile.zip`` test --- numpy/typing/tests/data/reveal/npyio.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/typing/tests/data/reveal/npyio.pyi b/numpy/typing/tests/data/reveal/npyio.pyi index 40da72c8544e..e3eaa45a5fa1 100644 --- a/numpy/typing/tests/data/reveal/npyio.pyi +++ b/numpy/typing/tests/data/reveal/npyio.pyi @@ -28,7 +28,7 @@ class BytesReader: bytes_writer: BytesWriter bytes_reader: BytesReader -assert_type(npz_file.zip, zipfile.ZipFile) +assert_type(npz_file.zip, zipfile.ZipFile | None) assert_type(npz_file.fid, IO[str] | None) assert_type(npz_file.files, list[str]) assert_type(npz_file.allow_pickle, bool) From 7065a9d14065265aa1f0fd690fdf930a01b9694d Mon Sep 17 00:00:00 2001 From: abhi210 <27881020+Abhi210@users.noreply.github.com> Date: Fri, 17 Oct 2025 13:37:04 +0530 Subject: [PATCH 0678/1018] This commit removes remaining deprecated `bias` and `ddof` arguments from `numpy.ma.extras` and updates the related tests. In addition: - Removed redundant declarations of `bias` and `ddof` in `extras.py` (deprecated since NumPy 2.0) - Removed associated tests from `test_extras.py` and `test_regression.py` - Amended `numpy/lib/_function_base_impl.py` to fully remove these arguments from function signatures and ensure consistency with documentation - Documented the above changes in the release notes All tests and Ruff checks pass locally. --- .../upcoming_changes/29997.expired.rst | 5 +- numpy/lib/_function_base_impl.py | 4 +- numpy/ma/extras.py | 9 ++- numpy/ma/tests/test_extras.py | 71 ------------------- numpy/ma/tests/test_regression.py | 17 +---- 5 files changed, 12 insertions(+), 94 deletions(-) diff --git a/doc/release/upcoming_changes/29997.expired.rst b/doc/release/upcoming_changes/29997.expired.rst index c109012836cc..92c052015b7e 100644 --- a/doc/release/upcoming_changes/29997.expired.rst +++ b/doc/release/upcoming_changes/29997.expired.rst @@ -1,12 +1,11 @@ -Removal of deprecated `trapz` function, `disp` method, and `bias`/`ddof` args in `corrcoef` +Removal of deprecated `trapz` function, `disp` function, and `bias`/`ddof` args in `corrcoef` ------------------------------------------------------------------------------------------- The following long-deprecated APIs have been removed: * ``numpy.trapz`` — deprecated since NumPy 2.0 (2023-08-18). Use ``numpy.trapezoid`` or ``scipy.integrate`` functions instead. -* ``disp`` method — deprecated in earlier releases and no longer functional. Use your own printing function instead. +* ``disp`` function — deprecated from 2.0 release and no longer functional. Use your own printing function instead. * ``bias`` and ``ddof`` arguments in ``numpy.corrcoef`` — these had no effect since NumPy 1.10. -Associated tests and type stubs have been updated or removed accordingly. 
diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 32774212befa..bccbdcc87d85 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -2869,13 +2869,13 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, return c.squeeze() -def _corrcoef_dispatcher(x, y=None, rowvar=None, bias=None, ddof=None, *, +def _corrcoef_dispatcher(x, y=None, rowvar=None, *, dtype=None): return (x, y) @array_function_dispatch(_corrcoef_dispatcher) -def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue, *, +def corrcoef(x, y=None, rowvar=True, *, dtype=None): """ Return Pearson product-moment correlation coefficients. diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 9fb3dd378bbf..7bd0f480f4f3 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -1729,8 +1729,8 @@ def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): return result -def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, - ddof=np._NoValue): +def corrcoef(x, y=None, rowvar=True, allow_masked=True, + ): """ Return Pearson product-moment correlation coefficients. @@ -1751,6 +1751,11 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, variable, with observations in the columns. Otherwise, the relationship is transposed: each column represents a variable, while the rows contain observations. + allow_masked : bool, optional + If True, masked values are propagated pair-wise: if a value is masked + in `x`, the corresponding value is masked in `y`. + If False, an exception is raised when `x` or `y` contains any + masked values. See Also -------- diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index 230c9d211f19..07f0fbdd7f97 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -6,7 +6,6 @@ """ import itertools -import warnings import pytest @@ -1406,53 +1405,12 @@ def _create_data(self): data2 = array(np.random.rand(12)) return data, data2 - def test_ddof(self): - # ddof raises DeprecationWarning - x, y = self._create_data() - expected = np.corrcoef(x) - expected2 = np.corrcoef(x, y) - with pytest.warns(DeprecationWarning): - corrcoef(x, ddof=-1) - - with warnings.catch_warnings(): - warnings.filterwarnings( - 'ignore', "bias and ddof have no effect", DeprecationWarning) - - # ddof has no or negligible effect on the function - assert_almost_equal(np.corrcoef(x, ddof=0), corrcoef(x, ddof=0)) - assert_almost_equal(corrcoef(x, ddof=-1), expected) - assert_almost_equal(corrcoef(x, y, ddof=-1), expected2) - assert_almost_equal(corrcoef(x, ddof=3), expected) - assert_almost_equal(corrcoef(x, y, ddof=3), expected2) - - def test_bias(self): - x, y = self._create_data() - expected = np.corrcoef(x) - # bias raises DeprecationWarning - with pytest.warns(DeprecationWarning): - corrcoef(x, y, True, False) - with pytest.warns(DeprecationWarning): - corrcoef(x, y, True, True) - with pytest.warns(DeprecationWarning): - corrcoef(x, y, bias=False) - - with warnings.catch_warnings(): - warnings.filterwarnings( - 'ignore', "bias and ddof have no effect", DeprecationWarning) - # bias has no or negligible effect on the function - assert_almost_equal(corrcoef(x, bias=1), expected) - def test_1d_without_missing(self): # Test cov on 1D variable w/o missing values x = self._create_data()[0] assert_almost_equal(np.corrcoef(x), corrcoef(x)) assert_almost_equal(np.corrcoef(x, rowvar=False),
corrcoef(x, rowvar=False)) - with warnings.catch_warnings(): - warnings.filterwarnings( - 'ignore', "bias and ddof have no effect", DeprecationWarning) - assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), - corrcoef(x, rowvar=False, bias=True)) def test_2d_without_missing(self): # Test corrcoef on 1 2D variable w/o missing values @@ -1460,11 +1418,6 @@ def test_2d_without_missing(self): assert_almost_equal(np.corrcoef(x), corrcoef(x)) assert_almost_equal(np.corrcoef(x, rowvar=False), corrcoef(x, rowvar=False)) - with warnings.catch_warnings(): - warnings.filterwarnings( - 'ignore', "bias and ddof have no effect", DeprecationWarning) - assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), - corrcoef(x, rowvar=False, bias=True)) def test_1d_with_missing(self): # Test corrcoef 1 1D variable w/missing values @@ -1472,14 +1425,8 @@ def test_1d_with_missing(self): x[-1] = masked x -= x.mean() nx = x.compressed() - assert_almost_equal(np.corrcoef(nx), corrcoef(x)) assert_almost_equal(np.corrcoef(nx, rowvar=False), corrcoef(x, rowvar=False)) - with warnings.catch_warnings(): - warnings.filterwarnings( - 'ignore', "bias and ddof have no effect", DeprecationWarning) - assert_almost_equal(np.corrcoef(nx, rowvar=False, bias=True), - corrcoef(x, rowvar=False, bias=True)) try: corrcoef(x, allow_masked=False) except ValueError: @@ -1489,14 +1436,6 @@ def test_1d_with_missing(self): assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1])) assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False), corrcoef(x, x[::-1], rowvar=False)) - with warnings.catch_warnings(): - warnings.filterwarnings( - 'ignore', "bias and ddof have no effect", DeprecationWarning) - # ddof and bias have no or negligible effect on the function - assert_almost_equal(np.corrcoef(nx, nx[::-1]), - corrcoef(x, x[::-1], bias=1)) - assert_almost_equal(np.corrcoef(nx, nx[::-1]), - corrcoef(x, x[::-1], ddof=2)) def test_2d_with_missing(self): # Test corrcoef on 2D variable w/ missing value @@ -1507,16 +1446,6 @@ def test_2d_with_missing(self): test = corrcoef(x) control = np.corrcoef(x) assert_almost_equal(test[:-1, :-1], control[:-1, :-1]) - with warnings.catch_warnings(): - warnings.filterwarnings( - 'ignore', "bias and ddof have no effect", DeprecationWarning) - # ddof and bias have no or negligible effect on the function - assert_almost_equal(corrcoef(x, ddof=-2)[:-1, :-1], - control[:-1, :-1]) - assert_almost_equal(corrcoef(x, ddof=3)[:-1, :-1], - control[:-1, :-1]) - assert_almost_equal(corrcoef(x, bias=1)[:-1, :-1], - control[:-1, :-1]) class TestPolynomial: diff --git a/numpy/ma/tests/test_regression.py b/numpy/ma/tests/test_regression.py index 2a08234cba61..4e40a3f8ee75 100644 --- a/numpy/ma/tests/test_regression.py +++ b/numpy/ma/tests/test_regression.py @@ -1,7 +1,5 @@ -import warnings - import numpy as np -from numpy.testing import assert_, assert_allclose, assert_array_equal +from numpy.testing import assert_, assert_array_equal class TestRegression: @@ -59,19 +57,6 @@ def test_var_sets_maskedarray_scalar(self): a.var(out=mout) assert_(mout._data == 0) - def test_ddof_corrcoef(self): - # See gh-3336 - x = np.ma.masked_equal([1, 2, 3, 4, 5], 4) - y = np.array([2, 2.5, 3.1, 3, 5]) - # this test can be removed after deprecation. 
- with warnings.catch_warnings(): - warnings.filterwarnings( - 'ignore', "bias and ddof have no effect", DeprecationWarning) - r0 = np.ma.corrcoef(x, y, ddof=0) - r1 = np.ma.corrcoef(x, y, ddof=1) - # ddof should not have an effect (it gets cancelled out) - assert_allclose(r0.data, r1.data) - def test_mask_not_backmangled(self): # See gh-10314. Test case taken from gh-3140. a = np.ma.MaskedArray([1., 2.], mask=[False, False]) From f7d1b16e0f1dad9e61d94d904bfa3c2ed1d99430 Mon Sep 17 00:00:00 2001 From: Abhishek Tiwari Date: Fri, 17 Oct 2025 14:08:12 +0530 Subject: [PATCH 0679/1018] Update doc/release/upcoming_changes/29997.expired.rst Co-authored-by: Joren Hammudoglu --- doc/release/upcoming_changes/29997.expired.rst | 2 -- 1 file changed, 2 deletions(-) diff --git a/doc/release/upcoming_changes/29997.expired.rst b/doc/release/upcoming_changes/29997.expired.rst index 92c052015b7e..ab6f76a2a3d9 100644 --- a/doc/release/upcoming_changes/29997.expired.rst +++ b/doc/release/upcoming_changes/29997.expired.rst @@ -7,5 +7,3 @@ The following long-deprecated APIs have been removed: ``scipy.integrate`` functions instead. * ``disp`` function — deprecated from 2.0 release and no longer functional. Use your own printing function instead. * ``bias`` and ``ddof`` arguments in ``numpy.corrcoef`` — these had no effect since NumPy 1.10. - - From fa44337de3ee26b538936ad11889d2f9dbddbf59 Mon Sep 17 00:00:00 2001 From: abhi210 <27881020+Abhi210@users.noreply.github.com> Date: Fri, 17 Oct 2025 12:28:21 +0530 Subject: [PATCH 0680/1018] MAINT: remove redundant deprecated `bias` and `ddof` handling in `corrcoef` Removes leftover deprecation logic for the `bias` and `ddof` arguments in `numpy.corrcoef` from `extras.py`. These arguments were already deprecated and removed from `_function_base_impl`. No functional changes beyond removing redundant code. All tests and Ruff checks pass locally. --- numpy/lib/_function_base_impl.py | 5 ----- numpy/ma/extras.py | 7 ------- 2 files changed, 12 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index bccbdcc87d85..54e6645f6182 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -2926,11 +2926,6 @@ def corrcoef(x, y=None, rowvar=True, *, interval [-1, 1] in an attempt to improve on that situation but is not much help in the complex case. - This function accepts but discards arguments `bias` and `ddof`. This is - for backwards compatibility with previous versions of this function. These - arguments had no effect on the return values of the function and can be - safely ignored in this and previous versions of numpy. - Examples -------- >>> import numpy as np diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 7bd0f480f4f3..6d5569e833dd 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -1762,13 +1762,6 @@ def corrcoef(x, y=None, rowvar=True, allow_masked=True, numpy.corrcoef : Equivalent function in top-level NumPy module. cov : Estimate the covariance matrix. - Notes - ----- - This function accepts but discards arguments `bias` and `ddof`. This is - for backwards compatibility with previous versions of this function. These - arguments had no effect on the return values of the function and can be - safely ignored in this and previous versions of numpy.
- Examples -------- >>> import numpy as np From e07ae893a4be21de4c10d09f8096b4d0121a5419 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Fri, 17 Oct 2025 13:11:59 +0300 Subject: [PATCH 0681/1018] doc: correct underline formatting --- doc/release/upcoming_changes/29997.expired.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/release/upcoming_changes/29997.expired.rst b/doc/release/upcoming_changes/29997.expired.rst index ab6f76a2a3d9..4d20a1c10078 100644 --- a/doc/release/upcoming_changes/29997.expired.rst +++ b/doc/release/upcoming_changes/29997.expired.rst @@ -1,5 +1,5 @@ Removal of deprecated `trapz` function, `disp` function, and `bias`/`ddof` args in `corrcoef` -------------------------------------------------------------------------------------------- +-------------------------------------------------------------------------------------------- The following long-deprecated APIs have been removed: From f1e088d36992394291aa844edef905ef1547884b Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Fri, 17 Oct 2025 14:43:41 +0300 Subject: [PATCH 0682/1018] Doc: simplify heading [skip actions][skip azp][skip cirrus] --- doc/release/upcoming_changes/29997.expired.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/release/upcoming_changes/29997.expired.rst b/doc/release/upcoming_changes/29997.expired.rst index 4d20a1c10078..6bdfa792e4e6 100644 --- a/doc/release/upcoming_changes/29997.expired.rst +++ b/doc/release/upcoming_changes/29997.expired.rst @@ -1,5 +1,5 @@ -Removal of deprecated `trapz` function, `disp` function, and `bias`/`ddof` args in `corrcoef` --------------------------------------------------------------------------------------------- +Removal of deprecated functions and arguments +--------------------------------------------- The following long-deprecated APIs have been removed: From c5fb27e517c33af6ee6cd9804e4097d0cb819491 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 17 Oct 2025 15:59:34 +0200 Subject: [PATCH 0683/1018] MAINT: avoid namespace pollution in ``_core._type_aliases`` --- numpy/_core/_type_aliases.py | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/numpy/_core/_type_aliases.py b/numpy/_core/_type_aliases.py index de6c30953e91..51c8e6ca2677 100644 --- a/numpy/_core/_type_aliases.py +++ b/numpy/_core/_type_aliases.py @@ -36,6 +36,7 @@ for _abstract_type_name in _abstract_type_names: allTypes[_abstract_type_name] = getattr(ma, _abstract_type_name) + del _abstract_type_name for k, v in typeinfo.items(): if k.startswith("NPY_") and v not in c_names_dict: @@ -44,6 +45,8 @@ concrete_type = v.type allTypes[k] = concrete_type sctypeDict[k] = concrete_type + del concrete_type + del k, v _aliases = { "double": "float64", @@ -60,6 +63,7 @@ for k, v in _aliases.items(): sctypeDict[k] = allTypes[v] allTypes[k] = allTypes[v] + del k, v # extra aliases are added only to `sctypeDict` # to support dtype name access, such as`np.dtype("float")` @@ -76,18 +80,21 @@ for k, v in _extra_aliases.items(): sctypeDict[k] = allTypes[v] + del k, v # include extended precision sized aliases for is_complex, full_name in [(False, "longdouble"), (True, "clongdouble")]: - longdouble_type: type = allTypes[full_name] + longdouble_type = allTypes[full_name] - bits: int = dtype(longdouble_type).itemsize * 8 - base_name: str = "complex" if is_complex else "float" - extended_prec_name: str = f"{base_name}{bits}" + bits = dtype(longdouble_type).itemsize * 8 + base_name = "complex" if is_complex else "float" + 
extended_prec_name = f"{base_name}{bits}" if extended_prec_name not in allTypes: sctypeDict[extended_prec_name] = longdouble_type allTypes[extended_prec_name] = longdouble_type + del is_complex, full_name, longdouble_type, bits, base_name, extended_prec_name + #################### # Building `sctypes` @@ -110,10 +117,15 @@ ]: if issubclass(concrete_type, abstract_type): sctypes[type_group].add(concrete_type) + del type_group, abstract_type break + del type_info, concrete_type + # sort sctype groups by bitsize for sctype_key in sctypes.keys(): sctype_list = list(sctypes[sctype_key]) sctype_list.sort(key=lambda x: dtype(x).itemsize) sctypes[sctype_key] = sctype_list + + del sctype_key, sctype_list From 8334c49cb32a885493ef7d5ad291641c9d532303 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 17 Oct 2025 17:49:05 +0200 Subject: [PATCH 0684/1018] MAINT: remove confusing parameter default for ``shape`` in ``reshape`` --- numpy/_core/fromnumeric.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 6cbd1385d459..90493ef77626 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -201,12 +201,12 @@ def take(a, indices, axis=None, out=None, mode='raise'): return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode) -def _reshape_dispatcher(a, /, shape=None, order=None, *, copy=None): +def _reshape_dispatcher(a, /, shape, order=None, *, copy=None): return (a,) @array_function_dispatch(_reshape_dispatcher) -def reshape(a, /, shape=None, order='C', *, copy=None): +def reshape(a, /, shape, order='C', *, copy=None): """ Gives a new shape to an array without changing its data. @@ -295,9 +295,6 @@ def reshape(a, /, shape=None, order='C', *, copy=None): [3, 4], [5, 6]]) """ - if shape is None: - raise TypeError( - "reshape() missing 1 required positional argument: 'shape'") if copy is not None: return _wrapfunc(a, 'reshape', shape, order=order, copy=copy) return _wrapfunc(a, 'reshape', shape, order=order) From 6090115be423163f1ab33d929ea4447433d01609 Mon Sep 17 00:00:00 2001 From: ixgbe <1113177880@qq.com> Date: Sat, 18 Oct 2025 00:00:20 +0800 Subject: [PATCH 0685/1018] TST: Add unit test for RISC-V CPU features (#29927) * TST: Add unit test for RISC-V CPU features Signed-off-by: Wang Yang * fix ruff check * fix comment * fix ruff check --------- Signed-off-by: Wang Yang --- numpy/_core/tests/test_cpu_features.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/numpy/_core/tests/test_cpu_features.py b/numpy/_core/tests/test_cpu_features.py index 818c1fb204a4..431fcb40b324 100644 --- a/numpy/_core/tests/test_cpu_features.py +++ b/numpy/_core/tests/test_cpu_features.py @@ -433,3 +433,18 @@ class Test_LOONGARCH_Features(AbstractTest): def load_flags(self): self.load_flags_cpuinfo("Features") + + +is_riscv = re.match(r"^(riscv)", machine, re.IGNORECASE) +@pytest.mark.skipif(not is_linux or not is_riscv, reason="Only for Linux and RISC-V") +class Test_RISCV_Features(AbstractTest): + features = ["RVV"] + + def load_flags(self): + self.load_flags_auxv() + if not self.features_flags: + # Let the test fail and dump if we cannot read HWCAP. 
+ return + hwcap = int(next(iter(self.features_flags)), 16) + if hwcap & (1 << 21): # HWCAP_RISCV_V + self.features_flags.add("RVV") From 6348e38250f42362646510e0b854d4fc173d19b1 Mon Sep 17 00:00:00 2001 From: Kelvin Li Date: Fri, 17 Oct 2025 17:19:56 -0400 Subject: [PATCH 0686/1018] TST: disable overflow exception test of numpy.power on AIX (#29740) --- numpy/_core/tests/test_numeric.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 127f08527f1e..fb5a37c33126 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -1024,8 +1024,11 @@ def test_floating_exceptions(self, typecode): lambda a, b: a + b, ft_max, ft_max * ft_eps) self.assert_raises_fpe(overflow, lambda a, b: a - b, -ft_max, ft_max * ft_eps) - self.assert_raises_fpe(overflow, - np.power, ftype(2), ftype(2**fi.nexp)) + # On AIX, pow() with double does not raise the overflow exception, + # it returns inf. Long double is the same as double. + if sys.platform != 'aix' or typecode not in 'dDgG': + self.assert_raises_fpe(overflow, + np.power, ftype(2), ftype(2**fi.nexp)) self.assert_raises_fpe(divbyzero, lambda a, b: a / b, ftype(1), ftype(0)) self.assert_raises_fpe( From 4f83e3fb930b9324f2e3954dd2d97d76d9a0b37c Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sat, 18 Oct 2025 01:38:52 +0200 Subject: [PATCH 0687/1018] TYP: fix stubtest errors in ``numpy.ma`` (#30018) * TYP: add missing parameter default in ``ma.MaskedArray`` for ``data`` * TYP: stub ``ma.MaskedArray.data`` * TYP: stub ``get_fill_value()`` and ``set_fill_value(_)`` of ``ma.MaskedArray`` * TYP: stub the ``dataiter`` and ``maskiter`` attributes of ``ma.MaskedIterator`` * TYP: annotate the missing return type of ``ma.MaskedIterator.__next__`` * TYP: fully stub ``ma.MaskedConstant`` * TYP: stub `ma._frommethod` and fix stubtest errors in `count`, `argmin`, `argmax` * TYP: remove stub for nonexistent ``ma.core.mask_rowcols`` function * TYP: fix inconsistent ``ma.corrcoef`` signature * Revert "TYP: stub `ma._frommethod` and fix stubtest errors in `count`, `argmin`, `argmax`" This reverts commit 7d42690638014fdf1e78f0ffaf51e08754db37f1. * TYP: fix inconsistent signature of `ma.count`, `ma.argmin`, and `ma.argmax` --- numpy/ma/core.pyi | 121 ++++++++++++++++++++++++++------------ numpy/ma/extras.pyi | 2 +- 2 files changed, 73 insertions(+), 50 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 2e069164c3d0..13aa3336fbd4 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3,9 +3,10 @@ import datetime as dt from _typeshed import Incomplete -from collections.abc import Iterator, Sequence +from collections.abc import Sequence from typing import ( Any, + Final, Generic, Literal, Never, @@ -18,7 +19,7 @@ from typing import ( TypeAlias, overload, ) -from typing_extensions import TypeIs, TypeVar +from typing_extensions import TypeIs, TypeVar, override import numpy as np from numpy import ( @@ -310,6 +311,10 @@ _ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsInd _ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None _ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None +_Ignored: TypeAlias = object + +### + MaskType = bool_ nomask: bool_[Literal[False]] @@ -461,18 +466,23 @@ masked_print_option: _MaskedPrintOption def flatten_structured_array(a): ...
+# TODO: Support non-boolean mask dtypes, such as `np.void`. This will require adding an +# additional generic type parameter to (at least) `MaskedArray` and `MaskedIterator` to +# hold the dtype of the mask. + class MaskedIterator(Generic[_ShapeT_co, _DTypeT_co]): - ma: MaskedArray[_ShapeT_co, _DTypeT_co] - dataiter: Any - maskiter: Any + ma: MaskedArray[_ShapeT_co, _DTypeT_co] # readonly + dataiter: np.flatiter[ndarray[_ShapeT_co, _DTypeT_co]] # readonly + maskiter: Final[np.flatiter[NDArray[np.bool]]] + def __init__(self, ma: MaskedArray[_ShapeT_co, _DTypeT_co]) -> None: ... - def __iter__(self) -> Iterator[Any]: ... + def __iter__(self) -> Self: ... # Similar to `MaskedArray.__getitem__` but without the `void` case. @overload def __getitem__(self, indx: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... @overload - def __getitem__(self, indx: SupportsIndex | tuple[SupportsIndex, ...], /) -> Any: ... + def __getitem__(self, indx: SupportsIndex | tuple[SupportsIndex, ...], /) -> Incomplete: ... @overload def __getitem__(self, indx: _ToIndices, /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... @@ -522,7 +532,8 @@ class MaskedIterator(Generic[_ShapeT_co, _DTypeT_co]): @overload # catch-all def __setitem__(self, index: _ToIndices, value: ArrayLike, /) -> None: ... - def __next__(self) -> Any: ... + # TODO: Returns `mvoid[(), _DTypeT_co]` for masks with `np.void` dtype. + def __next__(self: MaskedIterator[Any, np.dtype[_ScalarT]]) -> _ScalarT: ... class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): __array_priority__: Any @@ -575,7 +586,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __new__( cls, - data: object, + data: object = None, mask: _ArrayLikeBool_co = nomask, dtype: DTypeLike | None = None, copy: bool = False, @@ -653,6 +664,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def shape(self) -> _ShapeT_co: ... @shape.setter def shape(self: MaskedArray[_ShapeT, Any], shape: _ShapeT, /) -> None: ... + def __setmask__(self, mask: _ArrayLikeBool_co, copy: bool = False) -> None: ... @property def mask(self) -> np.ndarray[_ShapeT_co, dtype[MaskType]] | MaskType: ... @@ -670,19 +682,28 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @property def sharedmask(self) -> bool: ... def shrink_mask(self) -> Self: ... + + @property + def baseclass(self) -> type[ndarray]: ... + @property - def baseclass(self) -> type[NDArray[Any]]: ... - data: Any + def _data(self) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @property + def data(self) -> ndarray[_ShapeT_co, _DTypeT_co]: ... # type: ignore[override] + + @property # type: ignore[override] def flat(self) -> MaskedIterator[_ShapeT_co, _DTypeT_co]: ... @flat.setter def flat(self, value: ArrayLike, /) -> None: ... + @property def fill_value(self: _MaskedArray[_ScalarT]) -> _ScalarT: ... @fill_value.setter - def fill_value(self, value: _ScalarLike_co | None = None) -> None: ... - get_fill_value: Any - set_fill_value: Any + def fill_value(self, value: _ScalarLike_co | None = None, /) -> None: ... + + def get_fill_value(self: _MaskedArray[_ScalarT]) -> _ScalarT: ... + def set_fill_value(self, /, value: _ScalarLike_co | None = None) -> None: ... + def filled(self, /, fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... def compressed(self) -> ndarray[tuple[int], _DTypeT_co]: ... @@ -2047,28 +2068,31 @@ isarray = isMaskedArray isMA = isMaskedArray # 0D float64 array -class MaskedConstant(MaskedArray[_AnyShape, dtype[float64]]): - def __new__(cls): ... 
- __class__: Any - def __array_finalize__(self, obj): ... - def __array_wrap__(self, obj, context=..., return_scalar=...): ... - def __format__(self, format_spec): ... - def __reduce__(self): ... - def __iop__(self, other): ... - __iadd__: Any - __isub__: Any - __imul__: Any - __ifloordiv__: Any - __itruediv__: Any - __ipow__: Any - def copy(self, *args, **kwargs): ... - def __copy__(self): ... - def __deepcopy__(self, memo): ... - def __setattr__(self, attr, value): ... - -masked: MaskedConstant -masked_singleton: MaskedConstant -masked_array = MaskedArray +class MaskedConstant(MaskedArray[tuple[()], dtype[float64]]): + def __new__(cls) -> Self: ... + + # these overrides are no-ops + @override + def __iadd__(self, other: _Ignored, /) -> Self: ... + @override + def __isub__(self, other: _Ignored, /) -> Self: ... + @override + def __imul__(self, other: _Ignored, /) -> Self: ... + @override + def __ifloordiv__(self, other: _Ignored, /) -> Self: ... + @override + def __itruediv__(self, other: _Ignored, /) -> Self: ... + @override + def __ipow__(self, other: _Ignored, /) -> Self: ... + @override + def __deepcopy__(self, /, memo: _Ignored) -> Self: ... + @override + def copy(self, /, *args: _Ignored, **kwargs: _Ignored) -> Self: ... + +masked: Final[MaskedConstant] = ... +masked_singleton: Final[MaskedConstant] = ... + +masked_array: TypeAlias = MaskedArray def array( data, @@ -2230,17 +2254,17 @@ trace: _frommethod var: _frommethod @overload -def count(self: ArrayLike, axis: None = None, keepdims: Literal[False] | _NoValueType = ...) -> int: ... +def count(a: ArrayLike, axis: None = None, keepdims: Literal[False] | _NoValueType = ...) -> int: ... @overload -def count(self: ArrayLike, axis: _ShapeLike, keepdims: bool | _NoValueType = ...) -> NDArray[int_]: ... +def count(a: ArrayLike, axis: _ShapeLike, keepdims: bool | _NoValueType = ...) -> NDArray[int_]: ... @overload -def count(self: ArrayLike, axis: _ShapeLike | None = ..., *, keepdims: Literal[True]) -> NDArray[int_]: ... +def count(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: Literal[True]) -> NDArray[int_]: ... @overload -def count(self: ArrayLike, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... +def count(a: ArrayLike, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... @overload def argmin( - self: ArrayLike, + a: ArrayLike, axis: None = None, fill_value: _ScalarLike_co | None = None, out: None = None, @@ -2249,7 +2273,7 @@ def argmin( ) -> intp: ... @overload def argmin( - self: ArrayLike, + a: ArrayLike, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, out: None = None, @@ -2258,7 +2282,7 @@ def argmin( ) -> Any: ... @overload def argmin( - self: ArrayLike, + a: ArrayLike, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, *, @@ -2267,7 +2291,7 @@ def argmin( ) -> _ArrayT: ... @overload def argmin( - self: ArrayLike, + a: ArrayLike, axis: SupportsIndex | None, fill_value: _ScalarLike_co | None, out: _ArrayT, @@ -2278,7 +2302,7 @@ def argmin( # @overload def argmax( - self: ArrayLike, + a: ArrayLike, axis: None = None, fill_value: _ScalarLike_co | None = None, out: None = None, @@ -2287,7 +2311,7 @@ def argmax( ) -> intp: ... @overload def argmax( - self: ArrayLike, + a: ArrayLike, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, out: None = None, @@ -2296,7 +2320,7 @@ def argmax( ) -> Any: ... 
@overload def argmax( - self: ArrayLike, + a: ArrayLike, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, *, @@ -2305,7 +2329,7 @@ def argmax( ) -> _ArrayT: ... @overload def argmax( - self: ArrayLike, + a: ArrayLike, axis: SupportsIndex | None, fill_value: _ScalarLike_co | None, out: _ArrayT, @@ -2450,4 +2474,3 @@ zeros_like: _convert2ma def append(a, b, axis=None): ... def dot(a, b, strict=False, out=None): ... -def mask_rowcols(a, axis=...): ... diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 034b309af080..afd03300ff5f 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -107,7 +107,7 @@ def isin(element, test_elements, assume_unique=False, invert=False): ... def union1d(ar1, ar2): ... def setdiff1d(ar1, ar2, assume_unique=False): ... def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): ... -def corrcoef(x, y=None, rowvar=True, bias=..., allow_masked=True, ddof=...): ... +def corrcoef(x, y=None, rowvar=True, allow_masked=True): ... class MAxisConcatenator(AxisConcatenator): __slots__ = () From d6445151ae8d39ae91495f17aef01852e1f3d1f6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 Oct 2025 17:39:49 -0600 Subject: [PATCH 0688/1018] MAINT: Bump github/codeql-action from 4.30.8 to 4.30.9 (#30015) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 4.30.8 to 4.30.9. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/f443b600d91635bebf5b0d9ebc620189c0d6fba5...16140ae1a102900babc80a33c44059580f687047) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.30.9 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 0d52c4126be6..4c033a26e15c 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@f443b600d91635bebf5b0d9ebc620189c0d6fba5 # v4.30.8 + uses: github/codeql-action/init@16140ae1a102900babc80a33c44059580f687047 # v4.30.9 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@f443b600d91635bebf5b0d9ebc620189c0d6fba5 # v4.30.8 + uses: github/codeql-action/autobuild@16140ae1a102900babc80a33c44059580f687047 # v4.30.9 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@f443b600d91635bebf5b0d9ebc620189c0d6fba5 # v4.30.8 + uses: github/codeql-action/analyze@16140ae1a102900babc80a33c44059580f687047 # v4.30.9 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 9a0c80aa3342..9587fc4e026d 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@f443b600d91635bebf5b0d9ebc620189c0d6fba5 # v2.1.27 + uses: github/codeql-action/upload-sarif@16140ae1a102900babc80a33c44059580f687047 # v2.1.27 with: sarif_file: results.sarif From f72f0f6bb4ed532bfe98b4c8bee4bf37cd8e5e15 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sat, 18 Oct 2025 01:41:54 +0200 Subject: [PATCH 0689/1018] TYP: stub ``MesonTemplate.objects_substitution()`` in ``f2py._backends._meson`` (#30004) --- numpy/f2py/_backends/_meson.pyi | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/f2py/_backends/_meson.pyi b/numpy/f2py/_backends/_meson.pyi index bcb2b0304401..5c85c61586fc 100644 --- a/numpy/f2py/_backends/_meson.pyi +++ b/numpy/f2py/_backends/_meson.pyi @@ -40,6 +40,7 @@ class MesonTemplate: # def initialize_template(self) -> None: ... def sources_substitution(self) -> None: ... + def objects_substitution(self) -> None: ... def deps_substitution(self) -> None: ... def libraries_substitution(self) -> None: ... def include_substitution(self) -> None: ... From 58db55a1b8b3db98d0b3092d23430be2222d23f9 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sat, 18 Oct 2025 01:42:55 +0200 Subject: [PATCH 0690/1018] TYP: update ``corrcoef`` signature (#30009) * TYP: update ``corrcoef`` signature * TYP: update ``ma.corrcoef`` signature --- numpy/lib/_function_base_impl.pyi | 8 -------- 1 file changed, 8 deletions(-) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index e99be06469f3..651ece6b3447 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -479,8 +479,6 @@ def corrcoef( x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co | None = None, rowvar: bool = True, - bias: _NoValueType = ..., - ddof: _NoValueType = ..., *, dtype: None = None, ) -> NDArray[floating]: ... @@ -489,8 +487,6 @@ def corrcoef( x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co | None = None, rowvar: bool = True, - bias: _NoValueType = ..., - ddof: _NoValueType = ..., *, dtype: None = None, ) -> NDArray[complexfloating]: ... @@ -499,8 +495,6 @@ def corrcoef( x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co | None = None, rowvar: bool = True, - bias: _NoValueType = ..., - ddof: _NoValueType = ..., *, dtype: _DTypeLike[_ScalarT], ) -> NDArray[_ScalarT]: ... @@ -509,8 +503,6 @@ def corrcoef( x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co | None = None, rowvar: bool = True, - bias: _NoValueType = ..., - ddof: _NoValueType = ..., *, dtype: DTypeLike | None = None, ) -> NDArray[Any]: ... 
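[A minimal sanity-check sketch of the call surface the stub change above describes: after the removals in this series, ``np.corrcoef`` accepts only ``x``, ``y``, ``rowvar``, and the keyword-only ``dtype``. The exact ``TypeError`` wording comes from the dispatcher and may differ:

    >>> import numpy as np
    >>> x = np.array([[0.0, 1.0, 2.0], [2.0, 1.0, 0.0]])
    >>> np.corrcoef(x, rowvar=True).shape
    (2, 2)
    >>> np.corrcoef(x, bias=True)  # removed argument; now rejected
    Traceback (most recent call last):
        ...
    TypeError: ...]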
From c8ff9a75d60c7f61dae8b26e381cc1be290484c9 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sat, 18 Oct 2025 01:44:05 +0200 Subject: [PATCH 0691/1018] TYP: ``linalg.svdvals``: fix inconsistent signature and add dtype overloads (#30011) * TYP: ``linalg.svdvals``: fix inconsistent signature and add dtype overloads * TYP: ``linalg.svdvals``: add some type-tests --- numpy/linalg/_linalg.pyi | 16 +++++++++++----- numpy/typing/tests/data/fail/linalg.pyi | 4 ++++ numpy/typing/tests/data/reveal/linalg.pyi | 13 +++++++++++++ 3 files changed, 28 insertions(+), 5 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 4b8ac3da0ee2..ca4c6e0c77da 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -16,14 +16,12 @@ from numpy import ( complex128, complexfloating, float64, - # other floating, int32, object_, signedinteger, timedelta64, unsignedinteger, - # re-exports vecdot, ) from numpy._core.fromnumeric import matrix_transpose @@ -38,9 +36,11 @@ from numpy._typing import ( _ArrayLikeComplex_co, _ArrayLikeFloat_co, _ArrayLikeInt_co, + _ArrayLikeNumber_co, _ArrayLikeObject_co, _ArrayLikeTD64_co, _ArrayLikeUInt_co, + _NestedSequence, ) from numpy.linalg import LinAlgError @@ -301,9 +301,15 @@ def svd( hermitian: bool = False, ) -> NDArray[floating]: ... -def svdvals( - x: _ArrayLikeInt_co | _ArrayLikeFloat_co | _ArrayLikeComplex_co -) -> NDArray[floating]: ... +# the ignored `overload-overlap` mypy error below is a false-positive +@overload +def svdvals( # type: ignore[overload-overlap] + x: _ArrayLike[np.float64 | np.complex128 | np.integer | np.bool] | _NestedSequence[complex], / +) -> NDArray[np.float64]: ... +@overload +def svdvals(x: _ArrayLike[np.float32 | np.complex64], /) -> NDArray[np.float32]: ... +@overload +def svdvals(x: _ArrayLikeNumber_co, /) -> NDArray[floating]: ... 
# TODO: Returns a scalar for 2D arrays and # a `(x.ndim - 2)`` dimensionl array otherwise diff --git a/numpy/typing/tests/data/fail/linalg.pyi b/numpy/typing/tests/data/fail/linalg.pyi index c4695ee671cd..78aceb235f8d 100644 --- a/numpy/typing/tests/data/fail/linalg.pyi +++ b/numpy/typing/tests/data/fail/linalg.pyi @@ -32,6 +32,10 @@ np.linalg.eigh(AR_O, UPLO="bob") # type: ignore[call-overload] np.linalg.svd(AR_O) # type: ignore[arg-type] +np.linalg.svdvals(AR_O) # type: ignore[arg-type] +np.linalg.svdvals(AR_M) # type: ignore[arg-type] +np.linalg.svdvals(x=AR_f8) # type: ignore[call-overload] + np.linalg.cond(AR_O) # type: ignore[arg-type] np.linalg.cond(AR_f8, p="bob") # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index ef7819f448ca..cd8057651ad1 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -10,8 +10,11 @@ from numpy.linalg._linalg import ( SVDResult, ) +float_list_2d: list[list[float]] AR_i8: npt.NDArray[np.int64] +AR_f4: npt.NDArray[np.float32] AR_f8: npt.NDArray[np.float64] +AR_c8: npt.NDArray[np.complex64] AR_c16: npt.NDArray[np.complex128] AR_O: npt.NDArray[np.object_] AR_m: npt.NDArray[np.timedelta64] @@ -79,6 +82,16 @@ assert_type(np.linalg.svd(AR_f8, compute_uv=False), npt.NDArray[np.floating]) assert_type(np.linalg.svd(AR_c16, compute_uv=False), npt.NDArray[np.floating]) assert_type(np.linalg.svd(AR_c16, True, False), npt.NDArray[np.floating]) +assert_type(np.linalg.svdvals(AR_b), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(AR_f4), npt.NDArray[np.float32]) +assert_type(np.linalg.svdvals(AR_c8), npt.NDArray[np.float32]) +assert_type(np.linalg.svdvals(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(AR_c16), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals([[1, 2], [3, 4]]), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals([[1.0, 2.0], [3.0, 4.0]]), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals([[1j, 2j], [3j, 4j]]), npt.NDArray[np.float64]) + assert_type(np.linalg.cond(AR_i8), Any) assert_type(np.linalg.cond(AR_f8), Any) assert_type(np.linalg.cond(AR_c16), Any) From 262158f3e3aae882541c29547684e4d4059dd836 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sat, 18 Oct 2025 01:45:44 +0200 Subject: [PATCH 0692/1018] TYP: stub ``linalg.lapack_lite.LapackError`` (#30014) --- numpy/linalg/lapack_lite.pyi | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/linalg/lapack_lite.pyi b/numpy/linalg/lapack_lite.pyi index 835293a26762..3ec3919bfa3b 100644 --- a/numpy/linalg/lapack_lite.pyi +++ b/numpy/linalg/lapack_lite.pyi @@ -57,6 +57,8 @@ class _ZUNGQR(TypedDict): _ilp64: Final[bool] = ... +class LapackError(Exception): ... + def dgelsd( m: int, n: int, From 0c514d94ae66ada8e275852f063bdd917cdf9c49 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 17 Oct 2025 18:02:38 -0600 Subject: [PATCH 0693/1018] MAINT, TST: Increase tolerance in fft test. `test_identity_long_short_reversed` fails fairly often, so increase the test tolerance by a bit. Note that `random` is used throughout this test module without a seed, but should be OK as the tolerance is set by the type spacing at 1, which should be out of range for the generated random values. 
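For reference, a minimal illustration of the spacing at 1 for the dtypes
involved (values assume IEEE binary32/binary64; not part of the change):

    >>> import numpy as np
    >>> np.spacing(np.float32(1.0))
    np.float32(1.1920929e-07)
    >>> np.spacing(np.float64(1.0))
    np.float64(2.220446049250313e-16)

so ``atol = 6 * np.spacing(...)`` scales with the precision of the dtype
under test.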
--- numpy/fft/tests/test_pocketfft.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/fft/tests/test_pocketfft.py b/numpy/fft/tests/test_pocketfft.py index 021181845b3b..6f26ab6c6d65 100644 --- a/numpy/fft/tests/test_pocketfft.py +++ b/numpy/fft/tests/test_pocketfft.py @@ -55,7 +55,7 @@ def test_identity_long_short(self, dtype): def test_identity_long_short_reversed(self, dtype): # Also test explicitly given number of points in reversed order. maxlen = 16 - atol = 5 * np.spacing(np.array(1., dtype=dtype)) + atol = 6 * np.spacing(np.array(1., dtype=dtype)) x = random(maxlen).astype(dtype) + 1j * random(maxlen).astype(dtype) xx = np.concatenate([x, np.zeros_like(x)]) for i in range(1, maxlen * 2): From 75fc468c52354e57efa8d9db1ec24f8b955f6c60 Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Fri, 17 Oct 2025 18:55:43 -0700 Subject: [PATCH 0694/1018] DOC: Correct typos in numpy API documentation (#30020) --- doc/source/user/basics.subclassing.rst | 2 +- numpy/_core/src/multiarray/array_method.c | 2 +- numpy/_core/src/multiarray/dtypemeta.c | 2 +- numpy/_core/tests/test_api.py | 2 +- numpy/_core/tests/test_errstate.py | 2 +- numpy/f2py/symbolic.py | 2 +- numpy/lib/_format_impl.py | 2 +- numpy/lib/tests/test_io.py | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/doc/source/user/basics.subclassing.rst b/doc/source/user/basics.subclassing.rst index a937521a7abb..2a369aaae17c 100644 --- a/doc/source/user/basics.subclassing.rst +++ b/doc/source/user/basics.subclassing.rst @@ -567,7 +567,7 @@ which inputs and outputs it converted. Hence, e.g., Note that another approach would be to use ``getattr(ufunc, methods)(*inputs, **kwargs)`` instead of the ``super`` call. For this example, the result would be identical, but there is a difference if another operand -also defines ``__array_ufunc__``. E.g., lets assume that we evaluate +also defines ``__array_ufunc__``. E.g., let's assume that we evaluate ``np.add(a, b)``, where ``b`` is an instance of another class ``B`` that has an override. If you use ``super`` as in the example, ``ndarray.__array_ufunc__`` will notice that ``b`` has an override, which diff --git a/numpy/_core/src/multiarray/array_method.c b/numpy/_core/src/multiarray/array_method.c index a0a6c3fda7d6..c7280435d3c3 100644 --- a/numpy/_core/src/multiarray/array_method.c +++ b/numpy/_core/src/multiarray/array_method.c @@ -4,7 +4,7 @@ * pointers to do fast operations on the given input functions. * It thus adds an abstraction layer around individual ufunc loops. * - * Unlike methods, a ArrayMethod can have multiple inputs and outputs. + * Unlike methods, an ArrayMethod can have multiple inputs and outputs. * This has some serious implication for garbage collection, and as far * as I (@seberg) understands, it is not possible to always guarantee correct * cyclic garbage collection of dynamically created DTypes with methods. diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 692c7cfc2e0a..1c208aa54a84 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -96,7 +96,7 @@ use_new_as_default(PyArray_DTypeMeta *self) return NULL; } /* - * Lets not trust that the DType is implemented correctly + * Let's not trust that the DType is implemented correctly * TODO: Should probably do an exact type-check (at least unless this is * an abstract DType). 
*/ diff --git a/numpy/_core/tests/test_api.py b/numpy/_core/tests/test_api.py index 83e0a0179e0a..216a2c75afb8 100644 --- a/numpy/_core/tests/test_api.py +++ b/numpy/_core/tests/test_api.py @@ -88,7 +88,7 @@ def test_array_array(): o = type("o", (object,), {"__array_struct__": a.__array_struct__}) # wasn't what I expected... is np.array(o) supposed to equal a ? - # instead we get a array([...], dtype=">V18") + # instead we get an array([...], dtype=">V18") assert_equal(bytes(np.array(o).data), bytes(a.data)) # test __array__ diff --git a/numpy/_core/tests/test_errstate.py b/numpy/_core/tests/test_errstate.py index b72fb65a3239..f0735a045a4d 100644 --- a/numpy/_core/tests/test_errstate.py +++ b/numpy/_core/tests/test_errstate.py @@ -87,7 +87,7 @@ def test_errstate_enter_once(self): @pytest.mark.skipif(IS_WASM, reason="wasm doesn't support asyncio") def test_asyncio_safe(self): - # asyncio may not always work, lets assume its fine if missing + # asyncio may not always work, let's assume its fine if missing # Pyodide/wasm doesn't support it. If this test makes problems, # it should just be skipped liberally (or run differently). asyncio = pytest.importorskip("asyncio") diff --git a/numpy/f2py/symbolic.py b/numpy/f2py/symbolic.py index 11645172fe30..820ae2ec9b9f 100644 --- a/numpy/f2py/symbolic.py +++ b/numpy/f2py/symbolic.py @@ -1478,7 +1478,7 @@ def restore(r): if isinstance(items, Expr): return items if paren in ['ROUNDDIV', 'SQUARE']: - # Expression is a array constructor + # Expression is an array constructor if isinstance(items, Expr): items = (items,) return as_array(items) diff --git a/numpy/lib/_format_impl.py b/numpy/lib/_format_impl.py index bfda7fec73b6..2bb557709c8b 100644 --- a/numpy/lib/_format_impl.py +++ b/numpy/lib/_format_impl.py @@ -645,7 +645,7 @@ def _read_array_header(fp, version, max_header_size=_MAX_HEADER_SIZE): "may be necessary.") # The header is a pretty-printed string representation of a literal - # Python dictionary with trailing newlines padded to a ARRAY_ALIGN byte + # Python dictionary with trailing newlines padded to an ARRAY_ALIGN byte # boundary. The keys are strings. # "shape" : tuple of int # "fortran_order" : bool diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 5b9c9ab42a5d..5ba634b9f612 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -2574,7 +2574,7 @@ def test_squeeze_scalar(self): @pytest.mark.parametrize("ndim", [0, 1, 2]) def test_ndmin_keyword(self, ndim: int): - # lets have the same behaviour of ndmin as loadtxt + # let's have the same behaviour of ndmin as loadtxt # as they should be the same for non-missing values txt = "42" From 086267d8a29af4d4e520296e043b9115d62467be Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 18 Oct 2025 03:58:54 +0200 Subject: [PATCH 0695/1018] DEP: Remove ``delimitor`` kwarg from ``ma.mrecords.fromtextfile`` --- numpy/ma/mrecords.py | 13 +------------ numpy/ma/mrecords.pyi | 2 -- numpy/ma/tests/test_deprecations.py | 21 --------------------- 3 files changed, 1 insertion(+), 35 deletions(-) diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py index 835f3ce5b772..bb4a2707fec1 100644 --- a/numpy/ma/mrecords.py +++ b/numpy/ma/mrecords.py @@ -657,8 +657,7 @@ def openfile(fname): def fromtextfile(fname, delimiter=None, commentchar='#', missingchar='', - varnames=None, vartypes=None, - *, delimitor=np._NoValue): # backwards compatibility + varnames=None, vartypes=None): """ Creates a mrecarray from data stored in the file `filename`. 
@@ -682,16 +681,6 @@ def fromtextfile(fname, delimiter=None, commentchar='#', missingchar='', Ultra simple: the varnames are in the header, one line""" - if delimitor is not np._NoValue: - if delimiter is not None: - raise TypeError("fromtextfile() got multiple values for argument " - "'delimiter'") - # NumPy 1.22.0, 2021-09-23 - warnings.warn("The 'delimitor' keyword argument of " - "numpy.ma.mrecords.fromtextfile() is deprecated " - "since NumPy 1.22.0, use 'delimiter' instead.", - DeprecationWarning, stacklevel=2) - delimiter = delimitor # Try to open the file. ftext = openfile(fname) diff --git a/numpy/ma/mrecords.pyi b/numpy/ma/mrecords.pyi index c1f3592b0dd6..d5cdf097bb84 100644 --- a/numpy/ma/mrecords.pyi +++ b/numpy/ma/mrecords.pyi @@ -89,8 +89,6 @@ def fromtextfile( missingchar="", varnames=None, vartypes=None, - # NOTE: deprecated: NumPy 1.22.0, 2021-09-23 - # delimitor=..., ): ... def addfield(mrecord, newfield, newfieldname=None): ... diff --git a/numpy/ma/tests/test_deprecations.py b/numpy/ma/tests/test_deprecations.py index a2c98d0c229d..07120b198bea 100644 --- a/numpy/ma/tests/test_deprecations.py +++ b/numpy/ma/tests/test_deprecations.py @@ -1,9 +1,6 @@ """Test deprecation and future warnings. """ -import io -import textwrap - import pytest import numpy as np @@ -66,21 +63,3 @@ def test_axis_default(self): result = ma_max(data1d) assert_equal(result, ma_max(data1d, axis=None)) assert_equal(result, ma_max(data1d, axis=0)) - - -class TestFromtextfile: - def test_fromtextfile_delimitor(self): - # NumPy 1.22.0, 2021-09-23 - - textfile = io.StringIO(textwrap.dedent( - """ - A,B,C,D - 'string 1';1;1.0;'mixed column' - 'string 2';2;2.0; - 'string 3';3;3.0;123 - 'string 4';4;4.0;3.14 - """ - )) - - with pytest.warns(DeprecationWarning): - result = np.ma.mrecords.fromtextfile(textfile, delimitor=';') From 16e001a96b97431e67967df825154a7aed77de29 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 18 Oct 2025 04:02:31 +0200 Subject: [PATCH 0696/1018] DOC: release note for #30021 --- doc/release/upcoming_changes/30021.expired.rst | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 doc/release/upcoming_changes/30021.expired.rst diff --git a/doc/release/upcoming_changes/30021.expired.rst b/doc/release/upcoming_changes/30021.expired.rst new file mode 100644 index 000000000000..31ca300ce35f --- /dev/null +++ b/doc/release/upcoming_changes/30021.expired.rst @@ -0,0 +1,5 @@ +Remove ``delimitor`` parameter from ``numpy.ma.mrecords.fromtextfile()`` +------------------------------------------------------------------------ + +The ``delimitor`` parameter was deprecated in NumPy 1.22.0 and has been +removed from ``numpy.ma.mrecords.fromtextfile()``. Use ``delimiter`` instead. 
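[A minimal usage sketch under the new spelling; the file-like input mirrors the deleted test, and the header/rows here are illustrative only:

    >>> import io
    >>> import numpy as np
    >>> text = io.StringIO(
    ...     "A;B;C\n"
    ...     "'one';1;1.0\n"
    ...     "'two';2;2.0\n")
    >>> rec = np.ma.mrecords.fromtextfile(text, delimiter=';')  # formerly delimitor=';'
    >>> rec.dtype.names
    ('A', 'B', 'C')]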
From 938a9975a557a2829feb86ea117726e198ee10a7 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sat, 18 Oct 2025 19:26:24 +0200 Subject: [PATCH 0697/1018] TYP: Improved ``ndarray`` augmented assignment operators (#29862) * TYP: Improved ``ndarray`` augmented assignment operators * TYP: Remove redundant ``MaskedArray`` augmented assignment operator method overrides --- numpy/__init__.pyi | 178 ++++++++++++++++++---------------- numpy/ma/core.pyi | 122 +---------------------- numpy/matrixlib/defmatrix.pyi | 2 - 3 files changed, 98 insertions(+), 204 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 29d86584e1b7..9fa6aafb56bb 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -761,7 +761,21 @@ _FlexDTypeT = TypeVar("_FlexDTypeT", bound=dtype[flexible]) _ArrayT = TypeVar("_ArrayT", bound=ndarray) _ArrayT_co = TypeVar("_ArrayT_co", bound=ndarray, default=ndarray, covariant=True) -_IntegralArrayT = TypeVar("_IntegralArrayT", bound=NDArray[integer | np.bool | object_]) +_BoolArrayT = TypeVar("_BoolArrayT", bound=NDArray[np.bool]) +_IntegerArrayT = TypeVar("_IntegerArrayT", bound=NDArray[integer]) +_IntegralArrayT = TypeVar("_IntegralArrayT", bound=NDArray[np.bool | integer | object_]) +_FloatingArrayT = TypeVar("_FloatingArrayT", bound=NDArray[floating]) +_FloatingTimedeltaArrayT = TypeVar("_FloatingTimedeltaArrayT", bound=NDArray[floating | timedelta64]) +_ComplexFloatingArrayT = TypeVar("_ComplexFloatingArrayT", bound=NDArray[complexfloating]) +_InexactArrayT = TypeVar("_InexactArrayT", bound=NDArray[inexact]) +_InexactTimedeltaArrayT = TypeVar("_InexactTimedeltaArrayT", bound=NDArray[inexact | timedelta64]) +_NumberArrayT = TypeVar("_NumberArrayT", bound=NDArray[number]) +_NumberCharacterArrayT = TypeVar("_NumberCharacterArrayT", bound=ndarray[Any, dtype[number | character] | dtypes.StringDType]) +_TimedeltaArrayT = TypeVar("_TimedeltaArrayT", bound=NDArray[timedelta64]) +_TimeArrayT = TypeVar("_TimeArrayT", bound=NDArray[datetime64 | timedelta64]) +_ObjectArrayT = TypeVar("_ObjectArrayT", bound=NDArray[object_]) +_BytesArrayT = TypeVar("_BytesArrayT", bound=NDArray[bytes_]) +_StringArrayT = TypeVar("_StringArrayT", bound=ndarray[Any, dtype[str_] | dtypes.StringDType]) _RealArrayT = TypeVar("_RealArrayT", bound=NDArray[floating | integer | timedelta64 | np.bool | object_]) _NumericArrayT = TypeVar("_NumericArrayT", bound=NDArray[number | timedelta64 | object_]) @@ -3417,141 +3431,139 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # object and its value is >= 0 # NOTE: Due to a mypy bug, overloading on e.g. `self: NDArray[SCT_floating]` won't # work, as this will lead to `false negatives` when using these inplace ops. - # Keep in sync with `MaskedArray.__iadd__` - @overload - def __iadd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + + # += + @overload # type: ignore[misc] + def __iadd__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... @overload - def __iadd__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iadd__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... @overload - def __iadd__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iadd__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... 
@overload - def __iadd__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iadd__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... @overload - def __iadd__(self: NDArray[timedelta64 | datetime64], other: _ArrayLikeTD64_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iadd__(self: _TimeArrayT, other: _ArrayLikeTD64_co, /) -> _TimeArrayT: ... @overload - def __iadd__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iadd__(self: _BytesArrayT, other: _ArrayLikeBytes_co, /) -> _BytesArrayT: ... @overload - def __iadd__( - self: ndarray[Any, dtype[str_] | dtypes.StringDType], - other: _ArrayLikeStr_co | _ArrayLikeString_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iadd__(self: _StringArrayT, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> _StringArrayT: ... @overload - def __iadd__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iadd__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... - # Keep in sync with `MaskedArray.__isub__` - @overload - def __isub__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + # -= + @overload # type: ignore[misc] + def __isub__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... @overload - def __isub__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __isub__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... @overload - def __isub__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __isub__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... @overload - def __isub__(self: NDArray[timedelta64 | datetime64], other: _ArrayLikeTD64_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __isub__(self: _TimeArrayT, other: _ArrayLikeTD64_co, /) -> _TimeArrayT: ... @overload - def __isub__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __isub__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... - # Keep in sync with `MaskedArray.__imul__` - @overload - def __imul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + # *= + @overload # type: ignore[misc] + def __imul__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... @overload - def __imul__( - self: ndarray[Any, dtype[integer | character] | dtypes.StringDType], other: _ArrayLikeInt_co, / - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imul__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... @overload - def __imul__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imul__(self: _InexactTimedeltaArrayT, other: _ArrayLikeFloat_co, /) -> _InexactTimedeltaArrayT: ... @overload - def __imul__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imul__(self: _NumberCharacterArrayT, other: _ArrayLikeInt_co, /) -> _NumberCharacterArrayT: ... @overload - def __imul__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imul__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... 
- # Keep in sync with `MaskedArray.__ipow__` + # @= + @overload # type: ignore[misc] + def __imatmul__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... @overload - def __ipow__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imatmul__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... @overload - def __ipow__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imatmul__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... @overload - def __ipow__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imatmul__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... @overload - def __ipow__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imatmul__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... - # Keep in sync with `MaskedArray.__itruediv__` + # **= + @overload # type: ignore[misc] + def __ipow__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... @overload - def __itruediv__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ipow__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... @overload - def __itruediv__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ipow__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... @overload - def __itruediv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ipow__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... - # keep in sync with `__imod__` and `MaskedArray.__ifloordiv__` + # /= + @overload # type: ignore[misc] + def __itruediv__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... @overload - def __ifloordiv__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __itruediv__(self: _InexactTimedeltaArrayT, other: _ArrayLikeFloat_co, /) -> _InexactTimedeltaArrayT: ... @overload - def __ifloordiv__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __itruediv__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + + # //= + # keep in sync with `__imod__` + @overload # type: ignore[misc] + def __ifloordiv__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + @overload + def __ifloordiv__(self: _FloatingTimedeltaArrayT, other: _ArrayLikeFloat_co, /) -> _FloatingTimedeltaArrayT: ... @overload - def __ifloordiv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ifloordiv__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + # %= # keep in sync with `__ifloordiv__` + @overload # type: ignore[misc] + def __imod__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... @overload - def __imod__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imod__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imod__(self: _FloatingArrayT, other: _ArrayLikeFloat_co, /) -> _FloatingArrayT: ... 
@overload - def __imod__( - self: NDArray[timedelta64], - other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imod__(self: _TimedeltaArrayT, other: _ArrayLike[timedelta64], /) -> _TimedeltaArrayT: ... @overload - def __imod__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imod__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + # <<= # keep in sync with `__irshift__` + @overload # type: ignore[misc] + def __ilshift__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... @overload - def __ilshift__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ilshift__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ilshift__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + # >>= # keep in sync with `__ilshift__` + @overload # type: ignore[misc] + def __irshift__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... @overload - def __irshift__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __irshift__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __irshift__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + # &= # keep in sync with `__ixor__` and `__ior__` + @overload # type: ignore[misc] + def __iand__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... @overload - def __iand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iand__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iand__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... @overload - def __iand__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iand__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + # ^= # keep in sync with `__iand__` and `__ior__` + @overload # type: ignore[misc] + def __ixor__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... @overload - def __ixor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ixor__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ixor__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... @overload - def __ixor__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ixor__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + # |= # keep in sync with `__iand__` and `__ixor__` + @overload # type: ignore[misc] + def __ior__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... @overload - def __ior__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ior__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ior__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - - # - @overload - def __imatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imatmul__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
- @overload - def __imatmul__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imatmul__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ior__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... @overload - def __imatmul__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ior__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... # def __dlpack__( diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 13aa3336fbd4..aeb32bed01f9 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1,5 +1,4 @@ # pyright: reportIncompatibleMethodOverride=false -# ruff: noqa: ANN001, ANN002, ANN003, ANN201, ANN202 ANN204, ANN401 import datetime as dt from _typeshed import Incomplete @@ -290,6 +289,9 @@ _ScalarT = TypeVar("_ScalarT", bound=generic) _ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) _NumberT = TypeVar("_NumberT", bound=number) _RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) + +_Ignored: TypeAlias = object + # A subset of `MaskedArray` that can be parametrized w.r.t. `np.generic` _MaskedArray: TypeAlias = MaskedArray[_AnyShape, dtype[_ScalarT]] @@ -1187,124 +1189,6 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __rpow__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - # Keep in sync with `ndarray.__iadd__` - @overload - def __iadd__( - self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__( - self: _MaskedArray[floating], other: _ArrayLikeFloat_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__( - self: _MaskedArray[complexfloating], other: _ArrayLikeComplex_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__( - self: _MaskedArray[timedelta64 | datetime64], other: _ArrayLikeTD64_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__(self: _MaskedArray[bytes_], other: _ArrayLikeBytes_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__( - self: MaskedArray[Any, dtype[str_] | dtypes.StringDType], - other: _ArrayLikeStr_co | _ArrayLikeString_co, - /, - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__( - self: _MaskedArray[object_], other: Any, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - - # Keep in sync with `ndarray.__isub__` - @overload - def __isub__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __isub__( - self: _MaskedArray[floating], other: _ArrayLikeFloat_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __isub__( - self: _MaskedArray[complexfloating], other: _ArrayLikeComplex_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __isub__( - self: _MaskedArray[timedelta64 | datetime64], other: _ArrayLikeTD64_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __isub__( - self: _MaskedArray[object_], other: Any, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - - # Keep in sync with `ndarray.__imul__` - @overload - def __imul__( - self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... 
- @overload - def __imul__( - self: MaskedArray[Any, dtype[integer] | dtype[character] | dtypes.StringDType], other: _ArrayLikeInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imul__( - self: _MaskedArray[floating | timedelta64], other: _ArrayLikeFloat_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imul__( - self: _MaskedArray[complexfloating], other: _ArrayLikeComplex_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imul__( - self: _MaskedArray[object_], other: Any, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - - # Keep in sync with `ndarray.__ifloordiv__` - @overload - def __ifloordiv__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ifloordiv__( - self: _MaskedArray[floating | timedelta64], other: _ArrayLikeFloat_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ifloordiv__( - self: _MaskedArray[object_], other: Any, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - - # Keep in sync with `ndarray.__itruediv__` - @overload - def __itruediv__( - self: _MaskedArray[floating | timedelta64], other: _ArrayLikeFloat_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __itruediv__( - self: _MaskedArray[complexfloating], - other: _ArrayLikeComplex_co, - /, - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __itruediv__( - self: _MaskedArray[object_], other: Any, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - - # Keep in sync with `ndarray.__ipow__` - @overload - def __ipow__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ipow__( - self: _MaskedArray[floating], other: _ArrayLikeFloat_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ipow__( - self: _MaskedArray[complexfloating], other: _ArrayLikeComplex_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ipow__( - self: _MaskedArray[object_], other: Any, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - # @property # type: ignore[misc] def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... diff --git a/numpy/matrixlib/defmatrix.pyi b/numpy/matrixlib/defmatrix.pyi index b5345a2d0d7c..40c747d1ae3d 100644 --- a/numpy/matrixlib/defmatrix.pyi +++ b/numpy/matrixlib/defmatrix.pyi @@ -54,12 +54,10 @@ class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): # def __mul__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] def __rmul__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - def __imul__(self, other: ArrayLike, /) -> Self: ... # def __pow__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] def __rpow__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - def __ipow__(self, other: ArrayLike, /) -> Self: ... 
# keep in sync with `prod` and `mean` @overload # type: ignore[override] From 0066c73f573daafa01cbb975fde7f21d2b045ccb Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sat, 18 Oct 2025 19:27:16 +0200 Subject: [PATCH 0698/1018] TYP: ``linalg.tensordot``: fix inconsistent signature and simplify overloads (#30013) * TYP: ``linalg.tensordot``: fix inconsistent signature and simplify overloads * TYP: ``linalg.tensordot``: add some type-tests * TYP: update type-tests for ``_core.numeric.tensordot`` --- numpy/_core/numeric.pyi | 34 +++++----------- numpy/linalg/_linalg.pyi | 45 +++++++++++++++++++++- numpy/typing/tests/data/reveal/linalg.pyi | 7 ++++ numpy/typing/tests/data/reveal/numeric.pyi | 17 ++++---- 4 files changed, 68 insertions(+), 35 deletions(-) diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index 314bce708306..48576c77fd56 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -628,6 +628,7 @@ __all__ = [ _T = TypeVar("_T") _ScalarT = TypeVar("_ScalarT", bound=generic) +_NumericScalarT = TypeVar("_NumericScalarT", bound=number | timedelta64 | object_) _DTypeT = TypeVar("_DTypeT", bound=dtype) _ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) _ShapeT = TypeVar("_ShapeT", bound=_Shape) @@ -1071,54 +1072,37 @@ def outer( out: _ArrayT, ) -> _ArrayT: ... +# keep in sync with numpy.linalg._linalg.tensordot (ignoring `/, *`) @overload def tensordot( - a: _ArrayLike[Never], - b: _ArrayLike[Never], + a: _ArrayLike[_NumericScalarT], + b: _ArrayLike[_NumericScalarT], axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[Any]: ... +) -> NDArray[_NumericScalarT]: ... @overload def tensordot( a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[np.bool]: ... -@overload -def tensordot( - a: _ArrayLikeUInt_co, - b: _ArrayLikeUInt_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[unsignedinteger]: ... +) -> NDArray[bool_]: ... @overload def tensordot( a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[signedinteger]: ... +) -> NDArray[int_ | Any]: ... @overload def tensordot( a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[floating]: ... +) -> NDArray[float64 | Any]: ... @overload def tensordot( a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[complexfloating]: ... -@overload -def tensordot( - a: _ArrayLikeTD64_co, - b: _ArrayLikeTD64_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[timedelta64]: ... -@overload -def tensordot( - a: _ArrayLikeObject_co, - b: _ArrayLikeObject_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[object_]: ... +) -> NDArray[complex128 | Any]: ... 
@overload def roll( diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index ca4c6e0c77da..cc22e0f9dc39 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -25,7 +25,6 @@ from numpy import ( vecdot, ) from numpy._core.fromnumeric import matrix_transpose -from numpy._core.numeric import tensordot from numpy._globals import _NoValueType from numpy._typing import ( ArrayLike, @@ -41,6 +40,7 @@ from numpy._typing import ( _ArrayLikeTD64_co, _ArrayLikeUInt_co, _NestedSequence, + _ShapeLike, ) from numpy.linalg import LinAlgError @@ -80,6 +80,7 @@ __all__ = [ ] _NumberT = TypeVar("_NumberT", bound=np.number) +_NumericScalarT = TypeVar("_NumericScalarT", bound=np.number | np.timedelta64 | np.object_) _ModeKind: TypeAlias = L["reduced", "complete", "r", "raw"] @@ -438,6 +439,48 @@ def vector_norm( keepdims: bool = False, ) -> Any: ... +# keep in sync with numpy._core.numeric.tensordot (ignoring `/, *`) +@overload +def tensordot( + a: _ArrayLike[_NumericScalarT], + b: _ArrayLike[_NumericScalarT], + /, + *, + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, +) -> NDArray[_NumericScalarT]: ... +@overload +def tensordot( + a: _ArrayLikeBool_co, + b: _ArrayLikeBool_co, + /, + *, + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, +) -> NDArray[np.bool_]: ... +@overload +def tensordot( + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, + /, + *, + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, +) -> NDArray[np.int_ | Any]: ... +@overload +def tensordot( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + /, + *, + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, +) -> NDArray[np.float64 | Any]: ... +@overload +def tensordot( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, + /, + *, + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, +) -> NDArray[np.complex128 | Any]: ... 
+ # TODO: Returns a scalar or array def multi_dot( arrays: Iterable[_ArrayLikeComplex_co | _ArrayLikeObject_co | _ArrayLikeTD64_co], diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index cd8057651ad1..60056516def0 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -132,6 +132,13 @@ assert_type(np.linalg.vector_norm(AR_f8), np.floating) assert_type(np.linalg.vector_norm(AR_c16), np.floating) assert_type(np.linalg.vector_norm(AR_S), np.floating) +assert_type(np.linalg.tensordot(AR_b, AR_b), npt.NDArray[np.bool]) +assert_type(np.linalg.tensordot(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.linalg.tensordot(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.tensordot(AR_c16, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.linalg.tensordot(AR_m, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.linalg.tensordot(AR_O, AR_O), npt.NDArray[np.object_]) + assert_type(np.linalg.multi_dot([AR_i8, AR_i8]), Any) assert_type(np.linalg.multi_dot([AR_i8, AR_f8]), Any) assert_type(np.linalg.multi_dot([AR_f8, AR_c16]), Any) diff --git a/numpy/typing/tests/data/reveal/numeric.pyi b/numpy/typing/tests/data/reveal/numeric.pyi index 7c1ea8958e3b..247294cf34c6 100644 --- a/numpy/typing/tests/data/reveal/numeric.pyi +++ b/numpy/typing/tests/data/reveal/numeric.pyi @@ -72,16 +72,15 @@ assert_type(np.outer(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.outer(AR_i8, AR_m), npt.NDArray[np.timedelta64]) assert_type(np.outer(AR_O, AR_O), npt.NDArray[np.object_]) -assert_type(np.tensordot(B, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.tensordot(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.tensordot(AR_i8, AR_i8, axes=0), npt.NDArray[np.signedinteger]) -assert_type(np.tensordot(AR_i8, AR_i8, axes=(0, 1)), npt.NDArray[np.signedinteger]) +assert_type(np.tensordot(B, AR_i8), npt.NDArray[np.int_ | Any]) assert_type(np.tensordot(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.tensordot(AR_b, AR_u8), npt.NDArray[np.unsignedinteger]) -assert_type(np.tensordot(AR_i8, AR_b), npt.NDArray[np.signedinteger]) -assert_type(np.tensordot(AR_i8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.tensordot(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.tensordot(AR_i8, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.tensordot(AR_u8, AR_u8), npt.NDArray[np.uint64]) +assert_type(np.tensordot(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.tensordot(AR_i8, AR_i8, axes=0), npt.NDArray[np.int64]) +assert_type(np.tensordot(AR_i8, AR_i8, axes=(0, 1)), npt.NDArray[np.int64]) +assert_type(np.tensordot(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.tensordot(AR_c16, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.tensordot(AR_m, AR_m), npt.NDArray[np.timedelta64]) assert_type(np.tensordot(AR_O, AR_O), npt.NDArray[np.object_]) assert_type(np.isscalar(i8), bool) From d04dcce7d4f89c30217a6b3bcd26ecab46e99a34 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Oct 2025 16:48:39 -0600 Subject: [PATCH 0699/1018] MAINT: Bump astral-sh/setup-uv from 7.1.0 to 7.1.1 (#30030) Bumps [astral-sh/setup-uv](https://github.com/astral-sh/setup-uv) from 7.1.0 to 7.1.1. 
- [Release notes](https://github.com/astral-sh/setup-uv/releases) - [Commits](https://github.com/astral-sh/setup-uv/compare/3259c6206f993105e3a61b142c2d97bf4b9ef83d...2ddd2b9cb38ad8efd50337e8ab201519a34c9f24) --- updated-dependencies: - dependency-name: astral-sh/setup-uv dependency-version: 7.1.1 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/mypy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index b64cd8becd8c..753b75330aea 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -58,7 +58,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@3259c6206f993105e3a61b142c2d97bf4b9ef83d # v7.1.0 + - uses: astral-sh/setup-uv@2ddd2b9cb38ad8efd50337e8ab201519a34c9f24 # v7.1.1 with: python-version: ${{ matrix.os_python[1] }} activate-environment: true From 473039a159b200c1a83f4cc8633e2bed075894d8 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 21 Oct 2025 00:56:17 +0200 Subject: [PATCH 0700/1018] TYP: `testing.check_support_sve`: fix inconsistent parameter default value (#30032) --- numpy/testing/_private/utils.pyi | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index 670424f6a599..016bbecf4604 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -97,7 +97,6 @@ _Tss = ParamSpec("_Tss") _ET = TypeVar("_ET", bound=BaseException, default=BaseException) _FT = TypeVar("_FT", bound=Callable[..., Any]) _W_co = TypeVar("_W_co", bound=_WarnLog | None, default=_WarnLog | None, covariant=True) -_T_or_bool = TypeVar("_T_or_bool", default=bool) _StrLike: TypeAlias = str | bytes _RegexLike: TypeAlias = _StrLike | Pattern[Any] @@ -460,7 +459,7 @@ def temppath( ) -> _GeneratorContextManager[AnyStr]: ... # -def check_support_sve(__cache: list[_T_or_bool] = []) -> _T_or_bool: ... # noqa: PYI063 +def check_support_sve(__cache: list[bool] = ..., /) -> bool: ... 
# stubdefaulter: ignore[missing-default] # def decorate_methods( From 874b611f8526f4fbf2e3718db8d7e581d4a1b78f Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 21 Oct 2025 02:08:18 +0200 Subject: [PATCH 0701/1018] TYP: fix stubtest errors in ``numpy.polynomial.*`` (#30031) * TYP: `polynomial.polyutils._fit`: remove nonexistent parameter stub * TYP: `polynomial.polyutils`: fix `_vander_nd[_flat]`, `_valnd`, `_gridnd`, and `_pow` * TYP: `polynomial._polybase.ABCPolyBase`: fix `staticmethod`s that should be `classmethod`s * TYP: `polynomial._polybase.ABCPolyBase`: fix abstract properties and `__call__` return * TYP: `polynomial._polybase.ABCPolyBase`: stub missing `cutdeg` parameter * TYP: `polynomial._polybase.ABCPolyBase`: reconcile abstract properties with subclasses * TYP: `polynomial`: fix stubtest errors in module level functions * TYP: `polynomial`: remove nonexistent `*valfromroots` function stubs * TYP: `polynomial.polynomial`: inline `polyvalfromroots` function signature * TYP: `polynomial`: support for non-float64 dtypes in `domain` and `window` attrs * TYP: `polynomial`: restore covariance * TYP: `polynomial`: update type-tests --- numpy/polynomial/_polybase.pyi | 109 ++++----- numpy/polynomial/_polytypes.pyi | 215 ++---------------- numpy/polynomial/chebyshev.pyi | 79 ++++--- numpy/polynomial/hermite.pyi | 73 +++--- numpy/polynomial/hermite_e.pyi | 73 +++--- numpy/polynomial/laguerre.pyi | 67 +++--- numpy/polynomial/legendre.pyi | 67 +++--- numpy/polynomial/polynomial.pyi | 99 +++++--- numpy/polynomial/polyutils.pyi | 182 ++++++++++++--- .../tests/data/reveal/polynomial_polybase.pyi | 30 ++- .../tests/data/reveal/polynomial_series.pyi | 14 +- 11 files changed, 490 insertions(+), 518 deletions(-) diff --git a/numpy/polynomial/_polybase.pyi b/numpy/polynomial/_polybase.pyi index d922a08ffa9d..fcece8376170 100644 --- a/numpy/polynomial/_polybase.pyi +++ b/numpy/polynomial/_polybase.pyi @@ -1,13 +1,11 @@ import abc import decimal -import numbers -from collections.abc import Iterator, Mapping, Sequence +from collections.abc import Iterator, Sequence from typing import ( Any, ClassVar, Generic, Literal, - LiteralString, Self, SupportsIndex, TypeAlias, @@ -39,34 +37,36 @@ from ._polytypes import ( __all__ = ["ABCPolyBase"] -_NameCo = TypeVar( - "_NameCo", - bound=LiteralString | None, - covariant=True, - default=LiteralString | None -) -_Other = TypeVar("_Other", bound=ABCPolyBase) - +_NameT_co = TypeVar("_NameT_co", bound=str | None, default=str | None, covariant=True) +_PolyT = TypeVar("_PolyT", bound=ABCPolyBase) _AnyOther: TypeAlias = ABCPolyBase | _CoefLike_co | _SeriesLikeCoef_co -_Hundred: TypeAlias = Literal[100] -class ABCPolyBase(Generic[_NameCo], abc.ABC): - __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] - __array_ufunc__: ClassVar[None] +class ABCPolyBase(Generic[_NameT_co], abc.ABC): + __hash__: ClassVar[None] = None # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] + __array_ufunc__: ClassVar[None] = None + maxpower: ClassVar[Literal[100]] = 100 - maxpower: ClassVar[_Hundred] - _superscript_mapping: ClassVar[Mapping[int, str]] - _subscript_mapping: ClassVar[Mapping[int, str]] - _use_unicode: ClassVar[bool] + _superscript_mapping: ClassVar[dict[int, str]] = ... + _subscript_mapping: ClassVar[dict[int, str]] = ... + _use_unicode: ClassVar[bool] = ... 
- basis_name: _NameCo - coef: _CoefSeries - domain: _Array2[np.inexact | np.object_] - window: _Array2[np.inexact | np.object_] + _symbol: str + @property + def symbol(self, /) -> str: ... + + @property + @abc.abstractmethod + def domain(self) -> _Array2[np.float64 | Any]: ... - _symbol: LiteralString @property - def symbol(self, /) -> LiteralString: ... + @abc.abstractmethod + def window(self) -> _Array2[np.float64 | Any]: ... + + @property + @abc.abstractmethod + def basis_name(self) -> _NameT_co: ... + + coef: _CoefSeries def __init__( self, @@ -78,39 +78,17 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC): ) -> None: ... @overload - def __call__(self, /, arg: _Other) -> _Other: ... - # TODO: Once `_ShapeT@ndarray` is covariant and bounded (see #26081), - # additionally include 0-d arrays as input types with scalar return type. + def __call__(self, /, arg: _PolyT) -> _PolyT: ... @overload - def __call__( - self, - /, - arg: _FloatLike_co | decimal.Decimal | numbers.Real | np.object_, - ) -> np.float64 | np.complex128: ... + def __call__(self, /, arg: _FloatLike_co | decimal.Decimal) -> np.float64 | Any: ... @overload - def __call__( - self, - /, - arg: _NumberLike_co | numbers.Complex, - ) -> np.complex128: ... + def __call__(self, /, arg: _NumberLike_co) -> np.complex128 | Any: ... @overload - def __call__(self, /, arg: _ArrayLikeFloat_co) -> ( - npt.NDArray[np.float64] - | npt.NDArray[np.complex128] - | npt.NDArray[np.object_] - ): ... + def __call__(self, /, arg: _ArrayLikeFloat_co) -> npt.NDArray[np.float64 | Any]: ... @overload - def __call__( - self, - /, - arg: _ArrayLikeComplex_co, - ) -> npt.NDArray[np.complex128] | npt.NDArray[np.object_]: ... + def __call__(self, /, arg: _ArrayLikeComplex_co) -> npt.NDArray[np.complex128 | Any]: ... @overload - def __call__( - self, - /, - arg: _ArrayLikeCoefObject_co, - ) -> npt.NDArray[np.object_]: ... + def __call__(self, /, arg: _ArrayLikeCoefObject_co) -> npt.NDArray[np.object_]: ... def __format__(self, fmt_str: str, /) -> str: ... def __eq__(self, x: object, /) -> bool: ... @@ -133,21 +111,18 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC): def __rmod__(self, x: _AnyOther, /) -> Self: ... def __rdivmod__(self, x: _AnyOther, /) -> _Tuple2[Self]: ... def __len__(self, /) -> int: ... - def __iter__(self, /) -> Iterator[np.inexact | object]: ... + def __iter__(self, /) -> Iterator[np.float64 | Any]: ... def __getstate__(self, /) -> dict[str, Any]: ... def __setstate__(self, dict: dict[str, Any], /) -> None: ... def has_samecoef(self, /, other: ABCPolyBase) -> bool: ... def has_samedomain(self, /, other: ABCPolyBase) -> bool: ... def has_samewindow(self, /, other: ABCPolyBase) -> bool: ... - @overload - def has_sametype(self, /, other: ABCPolyBase) -> TypeIs[Self]: ... - @overload - def has_sametype(self, /, other: object) -> Literal[False]: ... + def has_sametype(self, /, other: object) -> TypeIs[Self]: ... def copy(self, /) -> Self: ... def degree(self, /) -> int: ... - def cutdeg(self, /) -> Self: ... + def cutdeg(self, /, deg: int) -> Self: ... def trim(self, /, tol: _FloatLike_co = 0) -> Self: ... def truncate(self, /, size: _AnyInt) -> Self: ... @@ -156,18 +131,18 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC): self, /, domain: _SeriesLikeCoef_co | None, - kind: type[_Other], + kind: type[_PolyT], window: _SeriesLikeCoef_co | None = None, - ) -> _Other: ... + ) -> _PolyT: ... 
@overload def convert( self, /, domain: _SeriesLikeCoef_co | None = None, *, - kind: type[_Other], + kind: type[_PolyT], window: _SeriesLikeCoef_co | None = None, - ) -> _Other: ... + ) -> _PolyT: ... @overload def convert( self, @@ -278,7 +253,7 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC): @classmethod def _str_term_unicode(cls, /, i: str, arg_str: str) -> str: ... - @staticmethod - def _str_term_ascii(i: str, arg_str: str) -> str: ... - @staticmethod - def _repr_latex_term(i: str, arg_str: str, needs_parens: bool) -> str: ... + @classmethod + def _str_term_ascii(cls, /, i: str, arg_str: str) -> str: ... + @classmethod + def _repr_latex_term(cls, /, i: str, arg_str: str, needs_parens: bool) -> str: ... diff --git a/numpy/polynomial/_polytypes.pyi b/numpy/polynomial/_polytypes.pyi index 7b003c5742c6..91cb27e3a923 100644 --- a/numpy/polynomial/_polytypes.pyi +++ b/numpy/polynomial/_polytypes.pyi @@ -4,7 +4,6 @@ from collections.abc import Callable, Sequence from typing import ( Any, Literal, - LiteralString, NoReturn, Protocol, Self, @@ -23,7 +22,6 @@ from numpy._typing import ( # array-likes _ArrayLikeFloat_co, _ArrayLikeNumber_co, - _ArrayLikeObject_co, _ComplexLike_co, _FloatLike_co, # scalar-likes @@ -113,22 +111,10 @@ _ArrayLikeCoef_co: TypeAlias = ( | _ArrayLikeCoefObject_co ) -_Name_co = TypeVar( - "_Name_co", - bound=LiteralString, - covariant=True, - default=LiteralString -) - -@type_check_only -class _Named(Protocol[_Name_co]): - @property - def __name__(self, /) -> _Name_co: ... - -_Line: TypeAlias = np.ndarray[tuple[Literal[1, 2]], np.dtype[_ScalarT]] +_Line: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] @type_check_only -class _FuncLine(_Named[_Name_co], Protocol[_Name_co]): +class _FuncLine(Protocol): @overload def __call__(self, /, off: _ScalarT, scl: _ScalarT) -> _Line[_ScalarT]: ... @overload @@ -151,7 +137,7 @@ class _FuncLine(_Named[_Name_co], Protocol[_Name_co]): ) -> _Line[np.object_]: ... @type_check_only -class _FuncFromRoots(_Named[_Name_co], Protocol[_Name_co]): +class _FuncFromRoots(Protocol): @overload def __call__(self, /, roots: _SeriesLikeFloat_co) -> _FloatSeries: ... @overload @@ -160,7 +146,7 @@ class _FuncFromRoots(_Named[_Name_co], Protocol[_Name_co]): def __call__(self, /, roots: _SeriesLikeCoef_co) -> _ObjectSeries: ... @type_check_only -class _FuncBinOp(_Named[_Name_co], Protocol[_Name_co]): +class _FuncBinOp(Protocol): @overload def __call__( self, @@ -191,7 +177,7 @@ class _FuncBinOp(_Named[_Name_co], Protocol[_Name_co]): ) -> _ObjectSeries: ... @type_check_only -class _FuncUnOp(_Named[_Name_co], Protocol[_Name_co]): +class _FuncUnOp(Protocol): @overload def __call__(self, /, c: _SeriesLikeFloat_co) -> _FloatSeries: ... @overload @@ -200,7 +186,7 @@ class _FuncUnOp(_Named[_Name_co], Protocol[_Name_co]): def __call__(self, /, c: _SeriesLikeCoef_co) -> _ObjectSeries: ... @type_check_only -class _FuncPoly2Ortho(_Named[_Name_co], Protocol[_Name_co]): +class _FuncPoly2Ortho(Protocol): @overload def __call__(self, /, pol: _SeriesLikeFloat_co) -> _FloatSeries: ... @overload @@ -209,7 +195,7 @@ class _FuncPoly2Ortho(_Named[_Name_co], Protocol[_Name_co]): def __call__(self, /, pol: _SeriesLikeCoef_co) -> _ObjectSeries: ... @type_check_only -class _FuncPow(_Named[_Name_co], Protocol[_Name_co]): +class _FuncPow(Protocol): @overload def __call__( self, @@ -236,7 +222,7 @@ class _FuncPow(_Named[_Name_co], Protocol[_Name_co]): ) -> _ObjectSeries: ... 
@type_check_only -class _FuncDer(_Named[_Name_co], Protocol[_Name_co]): +class _FuncDer(Protocol): @overload def __call__( self, @@ -266,7 +252,7 @@ class _FuncDer(_Named[_Name_co], Protocol[_Name_co]): ) -> _ObjectArray: ... @type_check_only -class _FuncInteg(_Named[_Name_co], Protocol[_Name_co]): +class _FuncInteg(Protocol): @overload def __call__( self, @@ -302,58 +288,7 @@ class _FuncInteg(_Named[_Name_co], Protocol[_Name_co]): ) -> _ObjectArray: ... @type_check_only -class _FuncValFromRoots(_Named[_Name_co], Protocol[_Name_co]): - @overload - def __call__( - self, - /, - x: _FloatLike_co, - r: _FloatLike_co, - tensor: bool = ..., - ) -> np.floating: ... - @overload - def __call__( - self, - /, - x: _NumberLike_co, - r: _NumberLike_co, - tensor: bool = ..., - ) -> np.complexfloating: ... - @overload - def __call__( - self, - /, - x: _FloatLike_co | _ArrayLikeFloat_co, - r: _ArrayLikeFloat_co, - tensor: bool = ..., - ) -> _FloatArray: ... - @overload - def __call__( - self, - /, - x: _NumberLike_co | _ArrayLikeComplex_co, - r: _ArrayLikeComplex_co, - tensor: bool = ..., - ) -> _ComplexArray: ... - @overload - def __call__( - self, - /, - x: _CoefLike_co | _ArrayLikeCoef_co, - r: _ArrayLikeCoef_co, - tensor: bool = ..., - ) -> _ObjectArray: ... - @overload - def __call__( - self, - /, - x: _CoefLike_co, - r: _CoefLike_co, - tensor: bool = ..., - ) -> _SupportsCoefOps[Any]: ... - -@type_check_only -class _FuncVal(_Named[_Name_co], Protocol[_Name_co]): +class _FuncVal(Protocol): @overload def __call__( self, @@ -404,7 +339,7 @@ class _FuncVal(_Named[_Name_co], Protocol[_Name_co]): ) -> _SupportsCoefOps[Any]: ... @type_check_only -class _FuncVal2D(_Named[_Name_co], Protocol[_Name_co]): +class _FuncVal2D(Protocol): @overload def __call__( self, @@ -455,7 +390,7 @@ class _FuncVal2D(_Named[_Name_co], Protocol[_Name_co]): ) -> _SupportsCoefOps[Any]: ... @type_check_only -class _FuncVal3D(_Named[_Name_co], Protocol[_Name_co]): +class _FuncVal3D(Protocol): @overload def __call__( self, @@ -517,58 +452,7 @@ _AnyValF: TypeAlias = Callable[ ] @type_check_only -class _FuncValND(_Named[_Name_co], Protocol[_Name_co]): - @overload - def __call__( - self, - val_f: _AnyValF, - c: _SeriesLikeFloat_co, - /, - *args: _FloatLike_co, - ) -> np.floating: ... - @overload - def __call__( - self, - val_f: _AnyValF, - c: _SeriesLikeComplex_co, - /, - *args: _NumberLike_co, - ) -> np.complexfloating: ... - @overload - def __call__( - self, - val_f: _AnyValF, - c: _ArrayLikeFloat_co, - /, - *args: _ArrayLikeFloat_co, - ) -> _FloatArray: ... - @overload - def __call__( - self, - val_f: _AnyValF, - c: _ArrayLikeComplex_co, - /, - *args: _ArrayLikeComplex_co, - ) -> _ComplexArray: ... - @overload - def __call__( - self, - val_f: _AnyValF, - c: _SeriesLikeObject_co, - /, - *args: _CoefObjectLike_co, - ) -> _SupportsCoefOps[Any]: ... - @overload - def __call__( - self, - val_f: _AnyValF, - c: _ArrayLikeCoef_co, - /, - *args: _ArrayLikeCoef_co, - ) -> _ObjectArray: ... - -@type_check_only -class _FuncVander(_Named[_Name_co], Protocol[_Name_co]): +class _FuncVander(Protocol): @overload def __call__( self, @@ -601,7 +485,7 @@ class _FuncVander(_Named[_Name_co], Protocol[_Name_co]): _AnyDegrees: TypeAlias = Sequence[SupportsIndex] @type_check_only -class _FuncVander2D(_Named[_Name_co], Protocol[_Name_co]): +class _FuncVander2D(Protocol): @overload def __call__( self, @@ -636,7 +520,7 @@ class _FuncVander2D(_Named[_Name_co], Protocol[_Name_co]): ) -> _CoefArray: ... 
@type_check_only -class _FuncVander3D(_Named[_Name_co], Protocol[_Name_co]): +class _FuncVander3D(Protocol): @overload def __call__( self, @@ -674,53 +558,10 @@ class _FuncVander3D(_Named[_Name_co], Protocol[_Name_co]): deg: _AnyDegrees, ) -> _CoefArray: ... -# keep in sync with the broadest overload of `._FuncVander` -_AnyFuncVander: TypeAlias = Callable[ - [npt.ArrayLike, SupportsIndex], - _CoefArray, -] - -@type_check_only -class _FuncVanderND(_Named[_Name_co], Protocol[_Name_co]): - @overload - def __call__( - self, - /, - vander_fs: Sequence[_AnyFuncVander], - points: Sequence[_ArrayLikeFloat_co], - degrees: Sequence[SupportsIndex], - ) -> _FloatArray: ... - @overload - def __call__( - self, - /, - vander_fs: Sequence[_AnyFuncVander], - points: Sequence[_ArrayLikeComplex_co], - degrees: Sequence[SupportsIndex], - ) -> _ComplexArray: ... - @overload - def __call__( - self, - /, - vander_fs: Sequence[_AnyFuncVander], - points: Sequence[ - _ArrayLikeObject_co | _ArrayLikeComplex_co, - ], - degrees: Sequence[SupportsIndex], - ) -> _ObjectArray: ... - @overload - def __call__( - self, - /, - vander_fs: Sequence[_AnyFuncVander], - points: Sequence[npt.ArrayLike], - degrees: Sequence[SupportsIndex], - ) -> _CoefArray: ... - _FullFitResult: TypeAlias = Sequence[np.inexact | np.int32] @type_check_only -class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): +class _FuncFit(Protocol): @overload def __call__( self, @@ -827,7 +668,7 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): ) -> tuple[_ObjectArray, _FullFitResult]: ... @type_check_only -class _FuncRoots(_Named[_Name_co], Protocol[_Name_co]): +class _FuncRoots(Protocol): @overload def __call__( self, @@ -846,7 +687,7 @@ class _FuncRoots(_Named[_Name_co], Protocol[_Name_co]): _Companion: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] @type_check_only -class _FuncCompanion(_Named[_Name_co], Protocol[_Name_co]): +class _FuncCompanion(Protocol): @overload def __call__( self, @@ -863,7 +704,7 @@ class _FuncCompanion(_Named[_Name_co], Protocol[_Name_co]): def __call__(self, /, c: _SeriesLikeCoef_co) -> _Companion[np.object_]: ... @type_check_only -class _FuncGauss(_Named[_Name_co], Protocol[_Name_co]): +class _FuncGauss(Protocol): def __call__( self, /, @@ -871,22 +712,14 @@ class _FuncGauss(_Named[_Name_co], Protocol[_Name_co]): ) -> _Tuple2[_Series[np.float64]]: ... @type_check_only -class _FuncWeight(_Named[_Name_co], Protocol[_Name_co]): +class _FuncWeight(Protocol): @overload - def __call__( - self, - /, - c: _ArrayLikeFloat_co, - ) -> npt.NDArray[np.float64]: ... + def __call__(self, /, x: _ArrayLikeFloat_co) -> npt.NDArray[np.float64]: ... @overload - def __call__( - self, - /, - c: _ArrayLikeComplex_co, - ) -> npt.NDArray[np.complex128]: ... + def __call__(self, /, x: _ArrayLikeComplex_co) -> npt.NDArray[np.complex128]: ... @overload - def __call__(self, /, c: _ArrayLikeCoef_co) -> _ObjectArray: ... + def __call__(self, /, x: _ArrayLikeCoef_co) -> _ObjectArray: ... @type_check_only -class _FuncPts(_Named[_Name_co], Protocol[_Name_co]): +class _FuncPts(Protocol): def __call__(self, /, npts: _AnyInt) -> _Series[np.float64]: ... 
diff --git a/numpy/polynomial/chebyshev.pyi b/numpy/polynomial/chebyshev.pyi index 71f8021bccc3..35043a001f10 100644 --- a/numpy/polynomial/chebyshev.pyi +++ b/numpy/polynomial/chebyshev.pyi @@ -1,5 +1,14 @@ from collections.abc import Callable, Iterable -from typing import Any, Concatenate, Final, Literal as L, Self, TypeVar, overload +from typing import ( + Any, + ClassVar, + Concatenate, + Final, + Literal as L, + Self, + TypeVar, + overload, +) import numpy as np import numpy.typing as npt @@ -26,7 +35,6 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, - _FuncValFromRoots, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -87,40 +95,39 @@ def _zseries_div( def _zseries_der(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... def _zseries_int(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... -poly2cheb: _FuncPoly2Ortho[L["poly2cheb"]] -cheb2poly: _FuncUnOp[L["cheb2poly"]] +poly2cheb: Final[_FuncPoly2Ortho] = ... +cheb2poly: Final[_FuncUnOp] = ... -chebdomain: Final[_Array2[np.float64]] -chebzero: Final[_Array1[np.int_]] -chebone: Final[_Array1[np.int_]] -chebx: Final[_Array2[np.int_]] +chebdomain: Final[_Array2[np.float64]] = ... +chebzero: Final[_Array1[np.int_]] = ... +chebone: Final[_Array1[np.int_]] = ... +chebx: Final[_Array2[np.int_]] = ... -chebline: _FuncLine[L["chebline"]] -chebfromroots: _FuncFromRoots[L["chebfromroots"]] -chebadd: _FuncBinOp[L["chebadd"]] -chebsub: _FuncBinOp[L["chebsub"]] -chebmulx: _FuncUnOp[L["chebmulx"]] -chebmul: _FuncBinOp[L["chebmul"]] -chebdiv: _FuncBinOp[L["chebdiv"]] -chebpow: _FuncPow[L["chebpow"]] -chebder: _FuncDer[L["chebder"]] -chebint: _FuncInteg[L["chebint"]] -chebval: _FuncVal[L["chebval"]] -chebval2d: _FuncVal2D[L["chebval2d"]] -chebval3d: _FuncVal3D[L["chebval3d"]] -chebvalfromroots: _FuncValFromRoots[L["chebvalfromroots"]] -chebgrid2d: _FuncVal2D[L["chebgrid2d"]] -chebgrid3d: _FuncVal3D[L["chebgrid3d"]] -chebvander: _FuncVander[L["chebvander"]] -chebvander2d: _FuncVander2D[L["chebvander2d"]] -chebvander3d: _FuncVander3D[L["chebvander3d"]] -chebfit: _FuncFit[L["chebfit"]] -chebcompanion: _FuncCompanion[L["chebcompanion"]] -chebroots: _FuncRoots[L["chebroots"]] -chebgauss: _FuncGauss[L["chebgauss"]] -chebweight: _FuncWeight[L["chebweight"]] -chebpts1: _FuncPts[L["chebpts1"]] -chebpts2: _FuncPts[L["chebpts2"]] +chebline: Final[_FuncLine] = ... +chebfromroots: Final[_FuncFromRoots] = ... +chebadd: Final[_FuncBinOp] = ... +chebsub: Final[_FuncBinOp] = ... +chebmulx: Final[_FuncUnOp] = ... +chebmul: Final[_FuncBinOp] = ... +chebdiv: Final[_FuncBinOp] = ... +chebpow: Final[_FuncPow] = ... +chebder: Final[_FuncDer] = ... +chebint: Final[_FuncInteg] = ... +chebval: Final[_FuncVal] = ... +chebval2d: Final[_FuncVal2D] = ... +chebval3d: Final[_FuncVal3D] = ... +chebgrid2d: Final[_FuncVal2D] = ... +chebgrid3d: Final[_FuncVal3D] = ... +chebvander: Final[_FuncVander] = ... +chebvander2d: Final[_FuncVander2D] = ... +chebvander3d: Final[_FuncVander3D] = ... +chebfit: Final[_FuncFit] = ... +chebcompanion: Final[_FuncCompanion] = ... +chebroots: Final[_FuncRoots] = ... +chebgauss: Final[_FuncGauss] = ... +chebweight: Final[_FuncWeight] = ... +chebpts1: Final[_FuncPts] = ... +chebpts2: Final[_FuncPts] = ... # keep in sync with `Chebyshev.interpolate` _RT = TypeVar("_RT", bound=np.number | np.bool | np.object_) @@ -144,6 +151,10 @@ def chebinterpolate( ) -> npt.NDArray[_RT]: ... 
class Chebyshev(ABCPolyBase[L["T"]]): + basis_name: ClassVar[L["T"]] = "T" # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + @overload @classmethod def interpolate( diff --git a/numpy/polynomial/hermite.pyi b/numpy/polynomial/hermite.pyi index 07db43d0c000..9d8a63a6b95d 100644 --- a/numpy/polynomial/hermite.pyi +++ b/numpy/polynomial/hermite.pyi @@ -1,4 +1,4 @@ -from typing import Any, Final, Literal as L, TypeVar +from typing import Any, ClassVar, Final, Literal as L, TypeVar import numpy as np @@ -21,7 +21,6 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, - _FuncValFromRoots, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -63,44 +62,46 @@ __all__ = [ "hermweight", ] -poly2herm: _FuncPoly2Ortho[L["poly2herm"]] -herm2poly: _FuncUnOp[L["herm2poly"]] +poly2herm: Final[_FuncPoly2Ortho] = ... +herm2poly: Final[_FuncUnOp] = ... -hermdomain: Final[_Array2[np.float64]] -hermzero: Final[_Array1[np.int_]] -hermone: Final[_Array1[np.int_]] -hermx: Final[_Array2[np.int_]] +hermdomain: Final[_Array2[np.float64]] = ... +hermzero: Final[_Array1[np.int_]] = ... +hermone: Final[_Array1[np.int_]] = ... +hermx: Final[_Array2[np.int_]] = ... -hermline: _FuncLine[L["hermline"]] -hermfromroots: _FuncFromRoots[L["hermfromroots"]] -hermadd: _FuncBinOp[L["hermadd"]] -hermsub: _FuncBinOp[L["hermsub"]] -hermmulx: _FuncUnOp[L["hermmulx"]] -hermmul: _FuncBinOp[L["hermmul"]] -hermdiv: _FuncBinOp[L["hermdiv"]] -hermpow: _FuncPow[L["hermpow"]] -hermder: _FuncDer[L["hermder"]] -hermint: _FuncInteg[L["hermint"]] -hermval: _FuncVal[L["hermval"]] -hermval2d: _FuncVal2D[L["hermval2d"]] -hermval3d: _FuncVal3D[L["hermval3d"]] -hermvalfromroots: _FuncValFromRoots[L["hermvalfromroots"]] -hermgrid2d: _FuncVal2D[L["hermgrid2d"]] -hermgrid3d: _FuncVal3D[L["hermgrid3d"]] -hermvander: _FuncVander[L["hermvander"]] -hermvander2d: _FuncVander2D[L["hermvander2d"]] -hermvander3d: _FuncVander3D[L["hermvander3d"]] -hermfit: _FuncFit[L["hermfit"]] -hermcompanion: _FuncCompanion[L["hermcompanion"]] -hermroots: _FuncRoots[L["hermroots"]] +hermline: Final[_FuncLine] = ... +hermfromroots: Final[_FuncFromRoots] = ... +hermadd: Final[_FuncBinOp] = ... +hermsub: Final[_FuncBinOp] = ... +hermmulx: Final[_FuncUnOp] = ... +hermmul: Final[_FuncBinOp] = ... +hermdiv: Final[_FuncBinOp] = ... +hermpow: Final[_FuncPow] = ... +hermder: Final[_FuncDer] = ... +hermint: Final[_FuncInteg] = ... +hermval: Final[_FuncVal] = ... +hermval2d: Final[_FuncVal2D] = ... +hermval3d: Final[_FuncVal3D] = ... +hermgrid2d: Final[_FuncVal2D] = ... +hermgrid3d: Final[_FuncVal3D] = ... +hermvander: Final[_FuncVander] = ... +hermvander2d: Final[_FuncVander2D] = ... +hermvander3d: Final[_FuncVander3D] = ... +hermfit: Final[_FuncFit] = ... +hermcompanion: Final[_FuncCompanion] = ... +hermroots: Final[_FuncRoots] = ... -_ND = TypeVar("_ND", bound=Any) +_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) def _normed_hermite_n( - x: np.ndarray[_ND, np.dtype[np.float64]], + x: np.ndarray[_ShapeT, np.dtype[np.float64]], n: int | np.intp, -) -> np.ndarray[_ND, np.dtype[np.float64]]: ... +) -> np.ndarray[_ShapeT, np.dtype[np.float64]]: ... -hermgauss: _FuncGauss[L["hermgauss"]] -hermweight: _FuncWeight[L["hermweight"]] +hermgauss: Final[_FuncGauss] = ... +hermweight: Final[_FuncWeight] = ... -class Hermite(ABCPolyBase[L["H"]]): ... 
+class Hermite(ABCPolyBase[L["H"]]): + basis_name: ClassVar[L["H"]] = "H" # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/hermite_e.pyi b/numpy/polynomial/hermite_e.pyi index 94ad7248f268..b93975f246da 100644 --- a/numpy/polynomial/hermite_e.pyi +++ b/numpy/polynomial/hermite_e.pyi @@ -1,4 +1,4 @@ -from typing import Any, Final, Literal as L, TypeVar +from typing import Any, ClassVar, Final, Literal as L, TypeVar import numpy as np @@ -21,7 +21,6 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, - _FuncValFromRoots, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -63,44 +62,46 @@ __all__ = [ "hermeweight", ] -poly2herme: _FuncPoly2Ortho[L["poly2herme"]] -herme2poly: _FuncUnOp[L["herme2poly"]] +poly2herme: Final[_FuncPoly2Ortho] = ... +herme2poly: Final[_FuncUnOp] = ... -hermedomain: Final[_Array2[np.float64]] -hermezero: Final[_Array1[np.int_]] -hermeone: Final[_Array1[np.int_]] -hermex: Final[_Array2[np.int_]] +hermedomain: Final[_Array2[np.float64]] = ... +hermezero: Final[_Array1[np.int_]] = ... +hermeone: Final[_Array1[np.int_]] = ... +hermex: Final[_Array2[np.int_]] = ... -hermeline: _FuncLine[L["hermeline"]] -hermefromroots: _FuncFromRoots[L["hermefromroots"]] -hermeadd: _FuncBinOp[L["hermeadd"]] -hermesub: _FuncBinOp[L["hermesub"]] -hermemulx: _FuncUnOp[L["hermemulx"]] -hermemul: _FuncBinOp[L["hermemul"]] -hermediv: _FuncBinOp[L["hermediv"]] -hermepow: _FuncPow[L["hermepow"]] -hermeder: _FuncDer[L["hermeder"]] -hermeint: _FuncInteg[L["hermeint"]] -hermeval: _FuncVal[L["hermeval"]] -hermeval2d: _FuncVal2D[L["hermeval2d"]] -hermeval3d: _FuncVal3D[L["hermeval3d"]] -hermevalfromroots: _FuncValFromRoots[L["hermevalfromroots"]] -hermegrid2d: _FuncVal2D[L["hermegrid2d"]] -hermegrid3d: _FuncVal3D[L["hermegrid3d"]] -hermevander: _FuncVander[L["hermevander"]] -hermevander2d: _FuncVander2D[L["hermevander2d"]] -hermevander3d: _FuncVander3D[L["hermevander3d"]] -hermefit: _FuncFit[L["hermefit"]] -hermecompanion: _FuncCompanion[L["hermecompanion"]] -hermeroots: _FuncRoots[L["hermeroots"]] +hermeline: Final[_FuncLine] = ... +hermefromroots: Final[_FuncFromRoots] = ... +hermeadd: Final[_FuncBinOp] = ... +hermesub: Final[_FuncBinOp] = ... +hermemulx: Final[_FuncUnOp] = ... +hermemul: Final[_FuncBinOp] = ... +hermediv: Final[_FuncBinOp] = ... +hermepow: Final[_FuncPow] = ... +hermeder: Final[_FuncDer] = ... +hermeint: Final[_FuncInteg] = ... +hermeval: Final[_FuncVal] = ... +hermeval2d: Final[_FuncVal2D] = ... +hermeval3d: Final[_FuncVal3D] = ... +hermegrid2d: Final[_FuncVal2D] = ... +hermegrid3d: Final[_FuncVal3D] = ... +hermevander: Final[_FuncVander] = ... +hermevander2d: Final[_FuncVander2D] = ... +hermevander3d: Final[_FuncVander3D] = ... +hermefit: Final[_FuncFit] = ... +hermecompanion: Final[_FuncCompanion] = ... +hermeroots: Final[_FuncRoots] = ... -_ND = TypeVar("_ND", bound=Any) +_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) def _normed_hermite_e_n( - x: np.ndarray[_ND, np.dtype[np.float64]], + x: np.ndarray[_ShapeT, np.dtype[np.float64]], n: int | np.intp, -) -> np.ndarray[_ND, np.dtype[np.float64]]: ... +) -> np.ndarray[_ShapeT, np.dtype[np.float64]]: ... -hermegauss: _FuncGauss[L["hermegauss"]] -hermeweight: _FuncWeight[L["hermeweight"]] +hermegauss: Final[_FuncGauss] = ... +hermeweight: Final[_FuncWeight] = ... -class HermiteE(ABCPolyBase[L["He"]]): ... 
+class HermiteE(ABCPolyBase[L["He"]]): + basis_name: ClassVar[L["He"]] = "He" # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/laguerre.pyi b/numpy/polynomial/laguerre.pyi index a2b84f72bab7..8b70b899ed59 100644 --- a/numpy/polynomial/laguerre.pyi +++ b/numpy/polynomial/laguerre.pyi @@ -1,4 +1,4 @@ -from typing import Final, Literal as L +from typing import Any, ClassVar, Final, Literal as L import numpy as np @@ -21,7 +21,6 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, - _FuncValFromRoots, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -63,37 +62,39 @@ __all__ = [ "lagweight", ] -poly2lag: _FuncPoly2Ortho[L["poly2lag"]] -lag2poly: _FuncUnOp[L["lag2poly"]] +poly2lag: Final[_FuncPoly2Ortho] = ... +lag2poly: Final[_FuncUnOp] = ... -lagdomain: Final[_Array2[np.float64]] -lagzero: Final[_Array1[np.int_]] -lagone: Final[_Array1[np.int_]] -lagx: Final[_Array2[np.int_]] +lagdomain: Final[_Array2[np.float64]] = ... +lagzero: Final[_Array1[np.int_]] = ... +lagone: Final[_Array1[np.int_]] = ... +lagx: Final[_Array2[np.int_]] = ... -lagline: _FuncLine[L["lagline"]] -lagfromroots: _FuncFromRoots[L["lagfromroots"]] -lagadd: _FuncBinOp[L["lagadd"]] -lagsub: _FuncBinOp[L["lagsub"]] -lagmulx: _FuncUnOp[L["lagmulx"]] -lagmul: _FuncBinOp[L["lagmul"]] -lagdiv: _FuncBinOp[L["lagdiv"]] -lagpow: _FuncPow[L["lagpow"]] -lagder: _FuncDer[L["lagder"]] -lagint: _FuncInteg[L["lagint"]] -lagval: _FuncVal[L["lagval"]] -lagval2d: _FuncVal2D[L["lagval2d"]] -lagval3d: _FuncVal3D[L["lagval3d"]] -lagvalfromroots: _FuncValFromRoots[L["lagvalfromroots"]] -laggrid2d: _FuncVal2D[L["laggrid2d"]] -laggrid3d: _FuncVal3D[L["laggrid3d"]] -lagvander: _FuncVander[L["lagvander"]] -lagvander2d: _FuncVander2D[L["lagvander2d"]] -lagvander3d: _FuncVander3D[L["lagvander3d"]] -lagfit: _FuncFit[L["lagfit"]] -lagcompanion: _FuncCompanion[L["lagcompanion"]] -lagroots: _FuncRoots[L["lagroots"]] -laggauss: _FuncGauss[L["laggauss"]] -lagweight: _FuncWeight[L["lagweight"]] +lagline: Final[_FuncLine] = ... +lagfromroots: Final[_FuncFromRoots] = ... +lagadd: Final[_FuncBinOp] = ... +lagsub: Final[_FuncBinOp] = ... +lagmulx: Final[_FuncUnOp] = ... +lagmul: Final[_FuncBinOp] = ... +lagdiv: Final[_FuncBinOp] = ... +lagpow: Final[_FuncPow] = ... +lagder: Final[_FuncDer] = ... +lagint: Final[_FuncInteg] = ... +lagval: Final[_FuncVal] = ... +lagval2d: Final[_FuncVal2D] = ... +lagval3d: Final[_FuncVal3D] = ... +laggrid2d: Final[_FuncVal2D] = ... +laggrid3d: Final[_FuncVal3D] = ... +lagvander: Final[_FuncVander] = ... +lagvander2d: Final[_FuncVander2D] = ... +lagvander3d: Final[_FuncVander3D] = ... +lagfit: Final[_FuncFit] = ... +lagcompanion: Final[_FuncCompanion] = ... +lagroots: Final[_FuncRoots] = ... +laggauss: Final[_FuncGauss] = ... +lagweight: Final[_FuncWeight] = ... -class Laguerre(ABCPolyBase[L["L"]]): ... +class Laguerre(ABCPolyBase[L["L"]]): + basis_name: ClassVar[L["L"]] = "L" # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... 
# pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/legendre.pyi b/numpy/polynomial/legendre.pyi index d81f3e6f54a4..53f8f7c210fa 100644 --- a/numpy/polynomial/legendre.pyi +++ b/numpy/polynomial/legendre.pyi @@ -1,4 +1,4 @@ -from typing import Final, Literal as L +from typing import Any, ClassVar, Final, Literal as L import numpy as np @@ -21,7 +21,6 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, - _FuncValFromRoots, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -63,37 +62,39 @@ __all__ = [ "legweight", ] -poly2leg: _FuncPoly2Ortho[L["poly2leg"]] -leg2poly: _FuncUnOp[L["leg2poly"]] +poly2leg: Final[_FuncPoly2Ortho] = ... +leg2poly: Final[_FuncUnOp] = ... -legdomain: Final[_Array2[np.float64]] -legzero: Final[_Array1[np.int_]] -legone: Final[_Array1[np.int_]] -legx: Final[_Array2[np.int_]] +legdomain: Final[_Array2[np.float64]] = ... +legzero: Final[_Array1[np.int_]] = ... +legone: Final[_Array1[np.int_]] = ... +legx: Final[_Array2[np.int_]] = ... -legline: _FuncLine[L["legline"]] -legfromroots: _FuncFromRoots[L["legfromroots"]] -legadd: _FuncBinOp[L["legadd"]] -legsub: _FuncBinOp[L["legsub"]] -legmulx: _FuncUnOp[L["legmulx"]] -legmul: _FuncBinOp[L["legmul"]] -legdiv: _FuncBinOp[L["legdiv"]] -legpow: _FuncPow[L["legpow"]] -legder: _FuncDer[L["legder"]] -legint: _FuncInteg[L["legint"]] -legval: _FuncVal[L["legval"]] -legval2d: _FuncVal2D[L["legval2d"]] -legval3d: _FuncVal3D[L["legval3d"]] -legvalfromroots: _FuncValFromRoots[L["legvalfromroots"]] -leggrid2d: _FuncVal2D[L["leggrid2d"]] -leggrid3d: _FuncVal3D[L["leggrid3d"]] -legvander: _FuncVander[L["legvander"]] -legvander2d: _FuncVander2D[L["legvander2d"]] -legvander3d: _FuncVander3D[L["legvander3d"]] -legfit: _FuncFit[L["legfit"]] -legcompanion: _FuncCompanion[L["legcompanion"]] -legroots: _FuncRoots[L["legroots"]] -leggauss: _FuncGauss[L["leggauss"]] -legweight: _FuncWeight[L["legweight"]] +legline: Final[_FuncLine] = ... +legfromroots: Final[_FuncFromRoots] = ... +legadd: Final[_FuncBinOp] = ... +legsub: Final[_FuncBinOp] = ... +legmulx: Final[_FuncUnOp] = ... +legmul: Final[_FuncBinOp] = ... +legdiv: Final[_FuncBinOp] = ... +legpow: Final[_FuncPow] = ... +legder: Final[_FuncDer] = ... +legint: Final[_FuncInteg] = ... +legval: Final[_FuncVal] = ... +legval2d: Final[_FuncVal2D] = ... +legval3d: Final[_FuncVal3D] = ... +leggrid2d: Final[_FuncVal2D] = ... +leggrid3d: Final[_FuncVal3D] = ... +legvander: Final[_FuncVander] = ... +legvander2d: Final[_FuncVander2D] = ... +legvander3d: Final[_FuncVander3D] = ... +legfit: Final[_FuncFit] = ... +legcompanion: Final[_FuncCompanion] = ... +legroots: Final[_FuncRoots] = ... +leggauss: Final[_FuncGauss] = ... +legweight: Final[_FuncWeight] = ... -class Legendre(ABCPolyBase[L["P"]]): ... +class Legendre(ABCPolyBase[L["P"]]): + basis_name: ClassVar[L["P"]] = "P" # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... 
# pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/polynomial.pyi b/numpy/polynomial/polynomial.pyi index 2942adf2afa8..ddd10fa4282e 100644 --- a/numpy/polynomial/polynomial.pyi +++ b/numpy/polynomial/polynomial.pyi @@ -1,11 +1,19 @@ -from typing import Final, Literal as L +from typing import Any, ClassVar, Final, overload import numpy as np +import numpy.typing as npt +from numpy._typing import ( + _ArrayLikeFloat_co, + _ArrayLikeNumber_co, + _FloatLike_co, + _NumberLike_co, +) from ._polybase import ABCPolyBase from ._polytypes import ( _Array1, _Array2, + _ArrayLikeCoef_co, _FuncBinOp, _FuncCompanion, _FuncDer, @@ -19,7 +27,6 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, - _FuncValFromRoots, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -57,32 +64,66 @@ __all__ = [ "polycompanion", ] -polydomain: Final[_Array2[np.float64]] -polyzero: Final[_Array1[np.int_]] -polyone: Final[_Array1[np.int_]] -polyx: Final[_Array2[np.int_]] +polydomain: Final[_Array2[np.float64]] = ... +polyzero: Final[_Array1[np.int_]] = ... +polyone: Final[_Array1[np.int_]] = ... +polyx: Final[_Array2[np.int_]] = ... + +polyline: Final[_FuncLine] = ... +polyfromroots: Final[_FuncFromRoots] = ... +polyadd: Final[_FuncBinOp] = ... +polysub: Final[_FuncBinOp] = ... +polymulx: Final[_FuncUnOp] = ... +polymul: Final[_FuncBinOp] = ... +polydiv: Final[_FuncBinOp] = ... +polypow: Final[_FuncPow] = ... +polyder: Final[_FuncDer] = ... +polyint: Final[_FuncInteg] = ... +polyval: Final[_FuncVal] = ... +polyval2d: Final[_FuncVal2D] = ... +polyval3d: Final[_FuncVal3D] = ... + +@overload +def polyvalfromroots( + x: _FloatLike_co, + r: _FloatLike_co, + tensor: bool = True, +) -> np.float64 | Any: ... +@overload +def polyvalfromroots( + x: _NumberLike_co, + r: _NumberLike_co, + tensor: bool = True, +) -> np.complex128 | Any: ... +@overload +def polyvalfromroots( + x: _ArrayLikeFloat_co, + r: _ArrayLikeFloat_co, + tensor: bool = True, +) -> npt.NDArray[np.float64 | Any]: ... +@overload +def polyvalfromroots( + x: _ArrayLikeNumber_co, + r: _ArrayLikeNumber_co, + tensor: bool = True, +) -> npt.NDArray[np.complex128 | Any]: ... +@overload +def polyvalfromroots( + x: _ArrayLikeCoef_co, + r: _ArrayLikeCoef_co, + tensor: bool = True, +) -> npt.NDArray[np.object_ | Any]: ... -polyline: _FuncLine[L["Polyline"]] -polyfromroots: _FuncFromRoots[L["polyfromroots"]] -polyadd: _FuncBinOp[L["polyadd"]] -polysub: _FuncBinOp[L["polysub"]] -polymulx: _FuncUnOp[L["polymulx"]] -polymul: _FuncBinOp[L["polymul"]] -polydiv: _FuncBinOp[L["polydiv"]] -polypow: _FuncPow[L["polypow"]] -polyder: _FuncDer[L["polyder"]] -polyint: _FuncInteg[L["polyint"]] -polyval: _FuncVal[L["polyval"]] -polyval2d: _FuncVal2D[L["polyval2d"]] -polyval3d: _FuncVal3D[L["polyval3d"]] -polyvalfromroots: _FuncValFromRoots[L["polyvalfromroots"]] -polygrid2d: _FuncVal2D[L["polygrid2d"]] -polygrid3d: _FuncVal3D[L["polygrid3d"]] -polyvander: _FuncVander[L["polyvander"]] -polyvander2d: _FuncVander2D[L["polyvander2d"]] -polyvander3d: _FuncVander3D[L["polyvander3d"]] -polyfit: _FuncFit[L["polyfit"]] -polycompanion: _FuncCompanion[L["polycompanion"]] -polyroots: _FuncRoots[L["polyroots"]] +polygrid2d: Final[_FuncVal2D] = ... +polygrid3d: Final[_FuncVal3D] = ... +polyvander: Final[_FuncVander] = ... +polyvander2d: Final[_FuncVander2D] = ... +polyvander3d: Final[_FuncVander3D] = ... +polyfit: Final[_FuncFit] = ... +polycompanion: Final[_FuncCompanion] = ... +polyroots: Final[_FuncRoots] = ... -class Polynomial(ABCPolyBase[None]): ... 
+class Polynomial(ABCPolyBase[None]): + basis_name: ClassVar[None] = None # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/polyutils.pyi b/numpy/polynomial/polyutils.pyi index 8938c3cc8259..fd818ce0c90d 100644 --- a/numpy/polynomial/polyutils.pyi +++ b/numpy/polynomial/polyutils.pyi @@ -1,35 +1,38 @@ from collections.abc import Callable, Iterable, Sequence -from typing import Final, Literal, SupportsIndex, TypeAlias, TypeVar, overload +from typing import Any, Final, Literal, SupportsIndex, TypeAlias, TypeVar, overload import numpy as np import numpy.typing as npt from numpy._typing import ( _ArrayLikeComplex_co, _ArrayLikeFloat_co, + _ArrayLikeObject_co, _FloatLike_co, _NumberLike_co, ) from ._polytypes import ( _AnyInt, + _AnyValF, _Array2, _ArrayLikeCoef_co, _CoefArray, _CoefLike_co, + _CoefObjectLike_co, _CoefSeries, _ComplexArray, _ComplexSeries, _FloatArray, _FloatSeries, _FuncBinOp, - _FuncValND, - _FuncVanderND, _ObjectArray, _ObjectSeries, _SeriesLikeCoef_co, _SeriesLikeComplex_co, _SeriesLikeFloat_co, _SeriesLikeInt_co, + _SeriesLikeObject_co, + _SupportsCoefOps, _Tuple2, ) @@ -43,18 +46,9 @@ __all__: Final[Sequence[str]] = [ "trimseq", ] -_AnyLineF: TypeAlias = Callable[ - [_CoefLike_co, _CoefLike_co], - _CoefArray, -] -_AnyMulF: TypeAlias = Callable[ - [npt.ArrayLike, npt.ArrayLike], - _CoefArray, -] -_AnyVanderF: TypeAlias = Callable[ - [npt.ArrayLike, SupportsIndex], - _CoefArray, -] +_AnyLineF: TypeAlias = Callable[[_CoefLike_co, _CoefLike_co], _CoefArray] +_AnyMulF: TypeAlias = Callable[[npt.ArrayLike, npt.ArrayLike], _CoefArray] +_AnyVanderF: TypeAlias = Callable[[npt.ArrayLike, SupportsIndex], _CoefArray] @overload def as_series( @@ -262,8 +256,57 @@ def _nth_slice( ndim: SupportsIndex, ) -> tuple[slice | None, ...]: ... -_vander_nd: _FuncVanderND[Literal["_vander_nd"]] -_vander_nd_flat: _FuncVanderND[Literal["_vander_nd_flat"]] +# keep in sync with `vander_nd_flat` +@overload +def _vander_nd( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeFloat_co], + degrees: Sequence[SupportsIndex], +) -> _FloatArray: ... +@overload +def _vander_nd( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeComplex_co], + degrees: Sequence[SupportsIndex], +) -> _ComplexArray: ... +@overload +def _vander_nd( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeObject_co | _ArrayLikeComplex_co], + degrees: Sequence[SupportsIndex], +) -> _ObjectArray: ... +@overload +def _vander_nd( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[npt.ArrayLike], + degrees: Sequence[SupportsIndex], +) -> _CoefArray: ... + +# keep in sync with `vander_nd` +@overload +def _vander_nd_flat( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeFloat_co], + degrees: Sequence[SupportsIndex], +) -> _FloatArray: ... +@overload +def _vander_nd_flat( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeComplex_co], + degrees: Sequence[SupportsIndex], +) -> _ComplexArray: ... +@overload +def _vander_nd_flat( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeObject_co | _ArrayLikeComplex_co], + degrees: Sequence[SupportsIndex], +) -> _ObjectArray: ... +@overload +def _vander_nd_flat( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[npt.ArrayLike], + degrees: Sequence[SupportsIndex], +) -> _CoefArray: ... 
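A rough, self-contained analogue of the overload pattern used for ``_vander_nd`` and ``_vander_nd_flat`` above (the ``vander1d`` helper below is hypothetical, not part of the patch): the static return type narrows with the element type of the inputs, and a final catch-all absorbs arbitrary coefficient-like inputs.

from typing import overload

import numpy as np
import numpy.typing as npt

@overload
def vander1d(x: npt.NDArray[np.floating], deg: int) -> npt.NDArray[np.floating]: ...
@overload
def vander1d(x: npt.NDArray[np.complexfloating], deg: int) -> npt.NDArray[np.complexfloating]: ...
@overload
def vander1d(x: npt.ArrayLike, deg: int) -> npt.NDArray[np.object_]: ...  # catch-all, like `_CoefArray`
def vander1d(x, deg):
    # pseudo-Vandermonde columns x**0, x**1, ..., x**deg, like the helpers above
    x = np.asanyarray(x)
    return np.stack([x**k for k in range(deg + 1)], axis=-1)

At a call site this mirrors the stubs above: a float64 array resolves to the first overload, a complex128 array to the second, and anything else falls through to the object-coefficient case.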
# keep in sync with `._polytypes._FuncFromRoots` @overload @@ -282,7 +325,7 @@ def _fromroots( def _fromroots( line_f: _AnyLineF, mul_f: _AnyMulF, - roots: _SeriesLikeCoef_co, + roots: _SeriesLikeObject_co, ) -> _ObjectSeries: ... @overload def _fromroots( @@ -291,8 +334,81 @@ def _fromroots( roots: _SeriesLikeCoef_co, ) -> _CoefSeries: ... -_valnd: _FuncValND[Literal["_valnd"]] -_gridnd: _FuncValND[Literal["_gridnd"]] +# keep in sync with `_gridnd` +@overload +def _valnd( + val_f: _AnyValF, + c: _SeriesLikeFloat_co, + *args: _FloatLike_co, +) -> np.floating: ... +@overload +def _valnd( + val_f: _AnyValF, + c: _SeriesLikeComplex_co, + *args: _NumberLike_co, +) -> np.complexfloating: ... +@overload +def _valnd( + val_f: _AnyValF, + c: _ArrayLikeFloat_co, + *args: _ArrayLikeFloat_co, +) -> _FloatArray: ... +@overload +def _valnd( + val_f: _AnyValF, + c: _ArrayLikeComplex_co, + *args: _ArrayLikeComplex_co, +) -> _ComplexArray: ... +@overload +def _valnd( + val_f: _AnyValF, + c: _SeriesLikeObject_co, + *args: _CoefObjectLike_co, +) -> _SupportsCoefOps[Any]: ... +@overload +def _valnd( + val_f: _AnyValF, + c: _ArrayLikeCoef_co, + *args: _ArrayLikeCoef_co, +) -> _ObjectArray: ... + +# keep in sync with `_valnd` +@overload +def _gridnd( + val_f: _AnyValF, + c: _SeriesLikeFloat_co, + *args: _FloatLike_co, +) -> np.floating: ... +@overload +def _gridnd( + val_f: _AnyValF, + c: _SeriesLikeComplex_co, + *args: _NumberLike_co, +) -> np.complexfloating: ... +@overload +def _gridnd( + val_f: _AnyValF, + c: _ArrayLikeFloat_co, + *args: _ArrayLikeFloat_co, +) -> _FloatArray: ... +@overload +def _gridnd( + val_f: _AnyValF, + c: _ArrayLikeComplex_co, + *args: _ArrayLikeComplex_co, +) -> _ComplexArray: ... +@overload +def _gridnd( + val_f: _AnyValF, + c: _SeriesLikeObject_co, + *args: _CoefObjectLike_co, +) -> _SupportsCoefOps[Any]: ... +@overload +def _gridnd( + val_f: _AnyValF, + c: _ArrayLikeCoef_co, + *args: _ArrayLikeCoef_co, +) -> _ObjectArray: ... # keep in sync with `_polytypes._FuncBinOp` @overload @@ -310,8 +426,8 @@ def _div( @overload def _div( mul_f: _AnyMulF, - c1: _SeriesLikeCoef_co, - c2: _SeriesLikeCoef_co, + c1: _SeriesLikeObject_co, + c2: _SeriesLikeObject_co, ) -> _Tuple2[_ObjectSeries]: ... @overload def _div( @@ -320,37 +436,37 @@ def _div( c2: _SeriesLikeCoef_co, ) -> _Tuple2[_CoefSeries]: ... -_add: Final[_FuncBinOp] -_sub: Final[_FuncBinOp] +_add: Final[_FuncBinOp] = ... +_sub: Final[_FuncBinOp] = ... # keep in sync with `_polytypes._FuncPow` @overload -def _pow( # type: ignore[overload-overlap] +def _pow( mul_f: _AnyMulF, c: _SeriesLikeFloat_co, pow: _AnyInt, - maxpower: _AnyInt | None = ..., + maxpower: _AnyInt | None, ) -> _FloatSeries: ... @overload def _pow( mul_f: _AnyMulF, c: _SeriesLikeComplex_co, pow: _AnyInt, - maxpower: _AnyInt | None = ..., + maxpower: _AnyInt | None, ) -> _ComplexSeries: ... @overload def _pow( mul_f: _AnyMulF, - c: _SeriesLikeCoef_co, + c: _SeriesLikeObject_co, pow: _AnyInt, - maxpower: _AnyInt | None = ..., + maxpower: _AnyInt | None, ) -> _ObjectSeries: ... @overload def _pow( mul_f: _AnyMulF, c: _SeriesLikeCoef_co, pow: _AnyInt, - maxpower: _AnyInt | None = ..., + maxpower: _AnyInt | None, ) -> _CoefSeries: ... 
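The ``maxpower`` argument that these ``_pow`` overloads annotate is easiest to see through the public ``polypow``. A small usage sketch, assuming the documented default of ``maxpower=16``:

import numpy as np
from numpy.polynomial import polynomial as P

c = [1.0, 1.0]            # coefficients of 1 + x, lowest degree first
print(P.polypow(c, 3))    # [1. 3. 3. 1.], the coefficients of (1 + x)**3

try:
    P.polypow(c, 20, maxpower=16)   # powers above maxpower are rejected
except ValueError as exc:
    print(exc)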
# keep in sync with `_polytypes._FuncFit` @@ -360,7 +476,6 @@ def _fit( # type: ignore[overload-overlap] x: _SeriesLikeFloat_co, y: _ArrayLikeFloat_co, deg: _SeriesLikeInt_co, - domain: _SeriesLikeFloat_co | None = ..., rcond: _FloatLike_co | None = None, full: Literal[False] = False, w: _SeriesLikeFloat_co | None = None, @@ -371,7 +486,6 @@ def _fit( x: _SeriesLikeComplex_co, y: _ArrayLikeComplex_co, deg: _SeriesLikeInt_co, - domain: _SeriesLikeComplex_co | None = ..., rcond: _FloatLike_co | None = None, full: Literal[False] = False, w: _SeriesLikeComplex_co | None = None, @@ -382,7 +496,6 @@ def _fit( x: _SeriesLikeCoef_co, y: _ArrayLikeCoef_co, deg: _SeriesLikeInt_co, - domain: _SeriesLikeCoef_co | None = ..., rcond: _FloatLike_co | None = None, full: Literal[False] = False, w: _SeriesLikeCoef_co | None = None, @@ -393,10 +506,8 @@ def _fit( x: _SeriesLikeCoef_co, y: _SeriesLikeCoef_co, deg: _SeriesLikeInt_co, - domain: _SeriesLikeCoef_co | None, rcond: _FloatLike_co | None, full: Literal[True], - /, w: _SeriesLikeCoef_co | None = None, ) -> tuple[_CoefSeries, Sequence[np.inexact | np.int32]]: ... @overload @@ -405,7 +516,6 @@ def _fit( x: _SeriesLikeCoef_co, y: _SeriesLikeCoef_co, deg: _SeriesLikeInt_co, - domain: _SeriesLikeCoef_co | None = ..., rcond: _FloatLike_co | None = None, *, full: Literal[True], diff --git a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi index 2870d50310a2..4c4899ad6308 100644 --- a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi @@ -1,7 +1,6 @@ from collections.abc import Sequence from decimal import Decimal -from fractions import Fraction -from typing import Any, Literal as L, LiteralString, TypeAlias, TypeVar, assert_type +from typing import Any, Literal as L, TypeAlias, TypeVar, assert_type import numpy as np import numpy.polynomial as npp @@ -17,7 +16,7 @@ _Ar_f_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating]] _Ar_c_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating]] _Ar_O_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] -_Ar_x_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.inexact | np.object_]] +_Ar_x_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.float64 | Any]] _Ar_f_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.floating]] _Ar_c_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.complexfloating]] _Ar_O_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.object_]] @@ -103,7 +102,7 @@ assert_type(type(PS_herme).cast(PS_leg), npp.HermiteE) assert_type(PS_all.coef, _Ar_x_n) assert_type(PS_all.domain, _Ar_x_2) assert_type(PS_all.window, _Ar_x_2) -assert_type(PS_all.symbol, LiteralString) +assert_type(PS_all.symbol, str) # instance methods @@ -113,7 +112,7 @@ assert_type(PS_all.has_samewindow(PS_all), bool) assert_type(PS_all.has_sametype(PS_all), bool) assert_type(PS_poly.has_sametype(PS_poly), bool) assert_type(PS_poly.has_sametype(PS_leg), bool) -assert_type(PS_poly.has_sametype(NotADirectoryError), L[False]) +assert_type(PS_poly.has_sametype(NotADirectoryError), bool) assert_type(PS_poly.copy(), npp.Polynomial) assert_type(PS_cheb.copy(), npp.Chebyshev) @@ -122,7 +121,7 @@ assert_type(PS_herme.copy(), npp.HermiteE) assert_type(PS_lag.copy(), npp.Laguerre) assert_type(PS_leg.copy(), npp.Legendre) -assert_type(PS_leg.cutdeg(), npp.Legendre) +assert_type(PS_leg.cutdeg(3), npp.Legendre) assert_type(PS_leg.trim(), npp.Legendre) assert_type(PS_leg.trim(tol=SC_f_co), 
npp.Legendre) assert_type(PS_leg.truncate(SC_i_co), npp.Legendre) @@ -172,17 +171,16 @@ assert_type(repr(PS_all), str) assert_type(format(PS_all), str) assert_type(len(PS_all), int) -assert_type(next(iter(PS_all)), np.inexact | object) - -assert_type(PS_all(SC_f_co), np.float64 | np.complex128) -assert_type(PS_all(SC_c_co), np.complex128) -assert_type(PS_all(Decimal()), np.float64 | np.complex128) -assert_type(PS_all(Fraction()), np.float64 | np.complex128) -assert_type(PS_poly(SQ_f), npt.NDArray[np.float64] | npt.NDArray[np.complex128] | npt.NDArray[np.object_]) -assert_type(PS_poly(SQ_c), npt.NDArray[np.complex128] | npt.NDArray[np.object_]) +assert_type(next(iter(PS_all)), np.float64 | Any) + +assert_type(PS_all(SC_f_co), np.float64 | Any) +assert_type(PS_all(SC_c_co), np.complex128 | Any) +assert_type(PS_all(Decimal()), np.float64 | Any) +assert_type(PS_poly(SQ_f), npt.NDArray[np.float64 | Any]) +assert_type(PS_poly(SQ_c), npt.NDArray[np.complex128 | Any]) assert_type(PS_poly(SQ_O), npt.NDArray[np.object_]) -assert_type(PS_poly(AR_f), npt.NDArray[np.float64] | npt.NDArray[np.complex128] | npt.NDArray[np.object_]) -assert_type(PS_poly(AR_c), npt.NDArray[np.complex128] | npt.NDArray[np.object_]) +assert_type(PS_poly(AR_f), npt.NDArray[np.float64 | Any]) +assert_type(PS_poly(AR_c), npt.NDArray[np.complex128 | Any]) assert_type(PS_poly(AR_O), npt.NDArray[np.object_]) assert_type(PS_all(PS_poly), npp.Polynomial) diff --git a/numpy/typing/tests/data/reveal/polynomial_series.pyi b/numpy/typing/tests/data/reveal/polynomial_series.pyi index 0f4a9e09f2e7..3188ad9a1239 100644 --- a/numpy/typing/tests/data/reveal/polynomial_series.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_series.pyi @@ -1,5 +1,5 @@ from collections.abc import Sequence -from typing import TypeAlias, assert_type +from typing import Any, TypeAlias, assert_type import numpy as np import numpy.polynomial as npp @@ -79,12 +79,12 @@ assert_type(npp.polynomial.polyval3d(AR_f8, AR_f8, AR_f8, AR_i8), npt.NDArray[np assert_type(npp.polynomial.polyval3d(AR_i8, AR_i8, AR_i8, AR_c16), npt.NDArray[np.complexfloating]) assert_type(npp.polynomial.polyval3d(AR_O, AR_O, AR_O, AR_O), npt.NDArray[np.object_]) -assert_type(npp.polynomial.polyvalfromroots(AR_b, AR_b), npt.NDArray[np.floating]) -assert_type(npp.polynomial.polyvalfromroots(AR_u4, AR_b), npt.NDArray[np.floating]) -assert_type(npp.polynomial.polyvalfromroots(AR_i8, AR_i8), npt.NDArray[np.floating]) -assert_type(npp.polynomial.polyvalfromroots(AR_f8, AR_i8), npt.NDArray[np.floating]) -assert_type(npp.polynomial.polyvalfromroots(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) -assert_type(npp.polynomial.polyvalfromroots(AR_O, AR_O), npt.NDArray[np.object_]) +assert_type(npp.polynomial.polyvalfromroots(AR_b, AR_b), npt.NDArray[np.float64 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_u4, AR_b), npt.NDArray[np.float64 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_i8, AR_i8), npt.NDArray[np.float64 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_f8, AR_i8), npt.NDArray[np.float64 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_i8, AR_c16), npt.NDArray[np.complex128 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_O, AR_O), npt.NDArray[np.object_ | Any]) assert_type(npp.polynomial.polyvander(AR_f8, 3), npt.NDArray[np.floating]) assert_type(npp.polynomial.polyvander(AR_c16, 3), npt.NDArray[np.complexfloating]) From 75e1f93049fd0e418e2bfc31aed61e65a10924ea Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 21 Oct 2025 02:11:51 +0200 
Subject: [PATCH 0702/1018] TYP: fix stubtest error in ``numpy.typing`` (#30033) --- numpy/typing/__init__.pyi | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 numpy/typing/__init__.pyi diff --git a/numpy/typing/__init__.pyi b/numpy/typing/__init__.pyi new file mode 100644 index 000000000000..7a4c7b41079c --- /dev/null +++ b/numpy/typing/__init__.pyi @@ -0,0 +1,3 @@ +from numpy._typing import ArrayLike, DTypeLike, NBitBase, NDArray + +__all__ = ["ArrayLike", "DTypeLike", "NBitBase", "NDArray"] From 9147472ebe97223604d2a969499a1b2f1f11152e Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 21 Oct 2025 09:40:29 +0200 Subject: [PATCH 0703/1018] DEV: add a `spin stubtest` command --- .spin/cmds.py | 31 ++++++ pyproject.toml | 2 + tools/stubtest/allowlist.txt | 205 +++++++++++++++++++++++++++++++++++ tools/stubtest/mypy.ini | 6 + 4 files changed, 244 insertions(+) create mode 100644 tools/stubtest/allowlist.txt create mode 100644 tools/stubtest/mypy.ini diff --git a/.spin/cmds.py b/.spin/cmds.py index 9490aca297d9..25ae4551781b 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -521,6 +521,37 @@ def mypy(ctx): ctx.forward(test) +@click.command() +@click.option( + '--concise', + is_flag=True, + default=False, + help="Concise output format", +) +def stubtest(*, concise: bool) -> None: + """🧐 Run stubtest on NumPy's .pyi stubs + + Requires mypy to be installed + """ + ctx = click.get_current_context() + ctx.invoke(build) + + stubtest_dir = curdir.parent / 'tools' / 'stubtest' + mypy_config = stubtest_dir / 'mypy.ini' + allowlist = stubtest_dir / 'allowlist.txt' + + cmd = [ + 'stubtest', + '--ignore-disjoint-bases', + f'--mypy-config-file={mypy_config}', + f'--allowlist={allowlist}', + ] + if concise: + cmd.append('--concise') + cmd.append('numpy') + spin.util.run(cmd) + + @click.command(context_settings={ 'ignore_unknown_options': True }) diff --git a/pyproject.toml b/pyproject.toml index 5ffbd9ea0247..0dba95b04e84 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -235,6 +235,7 @@ meson = 'vendored-meson/meson/meson.py' [tool.meson-python.args] install = ['--tags=runtime,python-runtime,tests,devel'] + [tool.spin] package = 'numpy' @@ -246,6 +247,7 @@ cli = 'vendored-meson/meson/meson.py' ".spin/cmds.py:build", ".spin/cmds.py:test", ".spin/cmds.py:mypy", + ".spin/cmds.py:stubtest", ".spin/cmds.py:config_openblas", ".spin/cmds.py:lint", ] diff --git a/tools/stubtest/allowlist.txt b/tools/stubtest/allowlist.txt new file mode 100644 index 000000000000..c5d7fe2418d3 --- /dev/null +++ b/tools/stubtest/allowlist.txt @@ -0,0 +1,205 @@ +# intentional type-check-only deviations from runtime +numpy\._typing.* +numpy\.polynomial\._polytypes +numpy\.typing\.mypy_plugin\..* + +# internal testing code +numpy\.conftest.* +numpy\.random\._generator\.__test__ +numpy(\.\w+)?\.tests.* + +# system-dependent extended precision types +numpy(\..+)?\.float(96|128) +numpy(\..+)?\.complex(192|256) + +# system-dependent SIMD constants +numpy\._core\._simd\.\w+ + +# these are always either float96/complex192 or float128/complex256 +numpy\.__all__ +numpy\._?core\.__all__ +numpy\._?core\.numeric\.__all__ +numpy\._?core\.numerictypes\.__all__ +numpy\.matlib\.__all__ + +# requires numpy/_core/code_generators to be on the PYTHONPATH when running stubtest +numpy\._core\.cversions + +# raises SystemExit on import +numpy\.f2py\.__main__ + +# inexpressible: the `dtype.type` class-attribute is `None` unless instantiated +numpy(\..+)?\.dtype\.type + +# mypy false positive "... is not a Union" errors (py314 only?) 
+numpy\.typing\.ArrayLike +numpy\.typing\.DTypeLike + +# distutils +numpy\.distutils.* +numpy\.f2py\._backends\._distutils + +# import errors +numpy\._build_utils.* +numpy\._core\.code_generators.* +numpy\._core\.include\.numpy\.libdivide +numpy\._core\.src.* +numpy\._pyinstaller.* +numpy\.fft\.pocketfft +numpy\.random\._examples\..* +numpy\.random\.include +numpy\.random\.src.* + +# known minor deviations from runtime +numpy\.(\w+\.)*integer\.__index__ +numpy\.(\w+\.)*integer\.bit_count +numpy\.(\w+\.)*floating\.as_integer_ratio +numpy\.(\w+\.)*floating\.is_integer +numpy\.(\w+\.)*complexfloating\.__complex__ + +# intentionally missing deprecated module stubs +numpy\.core\._dtype +numpy\.core\._dtype_ctypes +numpy\.core\._internal +numpy\.core\._multiarray_umath.* +numpy\.core\.arrayprint.* +numpy\.core\.defchararray.* +numpy\.core\.einsumfunc.* +numpy\.core\.fromnumeric.* +numpy\.core\.function_base.* +numpy\.core\.getlimits.* +numpy\.core\.multiarray.* +numpy\.core\.numeric.* +numpy\.core\.overrides +numpy\.core\.records.* +numpy\.core\.shape_base.* +numpy\.core\.umath.* + +# ufuncs +numpy\.(\w+\.)*abs +numpy\.(\w+\.)*absolute +numpy\.(\w+\.)*acos +numpy\.(\w+\.)*acosh +numpy\.(\w+\.)*add +numpy\.(\w+\.)*arccos +numpy\.(\w+\.)*arccosh +numpy\.(\w+\.)*arcsin +numpy\.(\w+\.)*arcsinh +numpy\.(\w+\.)*arctan +numpy\.(\w+\.)*arctan2 +numpy\.(\w+\.)*arctanh +numpy\.(\w+\.)*asin +numpy\.(\w+\.)*asinh +numpy\.(\w+\.)*atan +numpy\.(\w+\.)*atan2 +numpy\.(\w+\.)*atanh +numpy\.(\w+\.)*bitwise_and +numpy\.(\w+\.)*bitwise_count +numpy\.(\w+\.)*bitwise_invert +numpy\.(\w+\.)*bitwise_left_shift +numpy\.(\w+\.)*bitwise_not +numpy\.(\w+\.)*bitwise_or +numpy\.(\w+\.)*bitwise_right_shift +numpy\.(\w+\.)*bitwise_xor +numpy\.(\w+\.)*cbrt +numpy\.(\w+\.)*ceil +numpy\.(\w+\.)*conj +numpy\.(\w+\.)*conjugate +numpy\.(\w+\.)*copysign +numpy\.(\w+\.)*cos +numpy\.(\w+\.)*cosh +numpy\.(\w+\.)*deg2rad +numpy\.(\w+\.)*degrees +numpy\.(\w+\.)*divide +numpy\.(\w+\.)*divmod +numpy\.(\w+\.)*equal +numpy\.(\w+\.)*exp +numpy\.(\w+\.)*exp2 +numpy\.(\w+\.)*expm1 +numpy\.(\w+\.)*fabs +numpy\.(\w+\.)*float_power +numpy\.(\w+\.)*floor +numpy\.(\w+\.)*floor_divide +numpy\.(\w+\.)*fmax +numpy\.(\w+\.)*fmin +numpy\.(\w+\.)*fmod +numpy\.(\w+\.)*frexp +numpy\.(\w+\.)*gcd +numpy\.(\w+\.)*greater +numpy\.(\w+\.)*greater_equal +numpy\.(\w+\.)*heaviside +numpy\.(\w+\.)*hypot +numpy\.(\w+\.)*invert +numpy\.(\w+\.)*isfinite +numpy\.(\w+\.)*isinf +numpy\.(\w+\.)*isnan +numpy\.(\w+\.)*isnat +numpy\.(\w+\.)*lcm +numpy\.(\w+\.)*ldexp +numpy\.(\w+\.)*left_shift +numpy\.(\w+\.)*less +numpy\.(\w+\.)*less_equal +numpy\.(\w+\.)*log +numpy\.(\w+\.)*log10 +numpy\.(\w+\.)*log1p +numpy\.(\w+\.)*log2 +numpy\.(\w+\.)*logaddexp +numpy\.(\w+\.)*logaddexp2 +numpy\.(\w+\.)*logical_and +numpy\.(\w+\.)*logical_not +numpy\.(\w+\.)*logical_or +numpy\.(\w+\.)*logical_xor +numpy\.(\w+\.)*matmul +numpy\.(\w+\.)*matvec +numpy\.(\w+\.)*maximum +numpy\.(\w+\.)*minimum +numpy\.(\w+\.)*mod +numpy\.(\w+\.)*modf +numpy\.(\w+\.)*multiply +numpy\.(\w+\.)*negative +numpy\.(\w+\.)*nextafter +numpy\.(\w+\.)*not_equal +numpy\.(\w+\.)*positive +numpy\.(\w+\.)*pow +numpy\.(\w+\.)*power +numpy\.(\w+\.)*rad2deg +numpy\.(\w+\.)*radians +numpy\.(\w+\.)*reciprocal +numpy\.(\w+\.)*remainder +numpy\.(\w+\.)*right_shift +numpy\.(\w+\.)*rint +numpy\.(\w+\.)*sign +numpy\.(\w+\.)*signbit +numpy\.(\w+\.)*sin +numpy\.(\w+\.)*sinh +numpy\.(\w+\.)*spacing +numpy\.(\w+\.)*sqrt +numpy\.(\w+\.)*square +numpy\.(\w+\.)*subtract +numpy\.(\w+\.)*tan +numpy\.(\w+\.)*tanh +numpy\.(\w+\.)*true_divide 
+numpy\.(\w+\.)*trunc +numpy\.(\w+\.)*vecdot +numpy\.(\w+\.)*vecmat +numpy\.(\w+\.)*isalnum +numpy\.(\w+\.)*isalpha +numpy\.(\w+\.)*isdecimal +numpy\.(\w+\.)*isdigit +numpy\.(\w+\.)*islower +numpy\.(\w+\.)*isnumeric +numpy\.(\w+\.)*isspace +numpy\.(\w+\.)*istitle +numpy\.(\w+\.)*isupper +numpy\.(\w+\.)*str_len +numpy\._core\._methods\.umr_bitwise_count +numpy\._core\._methods\.umr_bitwise_count +numpy\.linalg\._umath_linalg\.qr_complete +numpy\.linalg\._umath_linalg\.qr_reduced +numpy\.linalg\._umath_linalg\.solve +numpy\.linalg\._umath_linalg\.solve1 + +# ma.core._frommethod callables +numpy\.ma\.(core\.)?argmax +numpy\.ma\.(core\.)?argmin +numpy\.ma\.(core\.)?count diff --git a/tools/stubtest/mypy.ini b/tools/stubtest/mypy.ini new file mode 100644 index 000000000000..307590b0f922 --- /dev/null +++ b/tools/stubtest/mypy.ini @@ -0,0 +1,6 @@ +; Stubtest requires mypy to pass before running, which would currently fail +; on numerous stubs. To allow running stubtest independently, we ignore mypy +; errors here. + +[mypy] +ignore_errors = True From 964e6986dc976370a5819ca3e5f94f69b586d1af Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 21 Oct 2025 10:42:24 +0200 Subject: [PATCH 0704/1018] TYP: restore abstract scalar type constructor parameters --- numpy/__init__.pyi | 12 ++++++------ numpy/typing/tests/data/fail/scalars.pyi | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 9fa6aafb56bb..30b298e1d607 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3589,7 +3589,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # See https://github.com/numpy/numpy-stubs/pull/80 for more details. class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @abstractmethod - def __new__(cls) -> Self: ... + def __new__(cls, /, *args: Any, **kwargs: Any) -> Self: ... if sys.version_info >= (3, 12): def __buffer__(self, flags: int, /) -> memoryview: ... @@ -3916,8 +3916,8 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): def dtype(self) -> _dtype[Self]: ... class number(generic[_NumberItemT_co], Generic[_NBit, _NumberItemT_co]): - @abstractmethod - def __new__(cls) -> Self: ... + @abstractmethod # `SupportsIndex | str | bytes` equivs `_ConvertibleToInt & _ConvertibleToFloat` + def __new__(cls, value: SupportsIndex | str | bytes = 0, /) -> Self: ... def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... def __neg__(self) -> Self: ... @@ -4289,7 +4289,7 @@ class object_(_RealMixin, generic): class integer(_IntegralMixin, _RoundMixin, number[_NBit, int]): @abstractmethod - def __new__(cls) -> Self: ... + def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... # NOTE: `bit_count` and `__index__` are technically defined in the concrete subtypes def bit_count(self, /) -> int: ... @@ -4825,7 +4825,7 @@ ulonglong: TypeAlias = unsignedinteger[_NBitLongLong] class inexact(number[_NBit, _InexactItemT_co], Generic[_NBit, _InexactItemT_co]): @abstractmethod - def __new__(cls) -> Self: ... + def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... class floating(_RealMixin, _RoundMixin, inexact[_NBit1, float]): def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... @@ -5621,7 +5621,7 @@ class void(flexible[bytes | tuple[Any, ...]]): # type: ignore[misc] class character(flexible[_CharacterItemT_co], Generic[_CharacterItemT_co]): # type: ignore[misc] @abstractmethod - def __new__(cls) -> Self: ... + def __new__(cls, value: object = ..., /) -> Self: ... 
# NOTE: Most `np.bytes_` / `np.str_` methods return their builtin `bytes` / `str` counterpart diff --git a/numpy/typing/tests/data/fail/scalars.pyi b/numpy/typing/tests/data/fail/scalars.pyi index 02043e06e8fe..018a88e652ae 100644 --- a/numpy/typing/tests/data/fail/scalars.pyi +++ b/numpy/typing/tests/data/fail/scalars.pyi @@ -46,12 +46,12 @@ np.uint64(A()) # type: ignore[arg-type] np.void("test") # type: ignore[call-overload] np.void("test", dtype=None) # type: ignore[call-overload] -np.generic(1) # type: ignore[abstract, call-arg] -np.number(1) # type: ignore[abstract, call-arg] -np.integer(1) # type: ignore[abstract, call-arg] -np.inexact(1) # type: ignore[abstract, call-arg] -np.character("test") # type: ignore[abstract, call-arg] -np.flexible(b"test") # type: ignore[abstract, call-arg] +np.generic(1) # type: ignore[abstract] +np.number(1) # type: ignore[abstract] +np.integer(1) # type: ignore[abstract] +np.inexact(1) # type: ignore[abstract] +np.character("test") # type: ignore[abstract] +np.flexible(b"test") # type: ignore[abstract] np.float64(value=0.0) # type: ignore[call-arg] np.int64(value=0) # type: ignore[call-arg] From e618a050737c53481845c0a746373a0fb8f42760 Mon Sep 17 00:00:00 2001 From: ixgbe <1113177880@qq.com> Date: Tue, 21 Oct 2025 17:43:32 +0800 Subject: [PATCH 0705/1018] TYP: Add type annotations for ASIMD, NEON, and RVV targets (#30034) --- numpy/_core/_simd.pyi | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/numpy/_core/_simd.pyi b/numpy/_core/_simd.pyi index 70bb7077797e..0ba7d78eeb87 100644 --- a/numpy/_core/_simd.pyi +++ b/numpy/_core/_simd.pyi @@ -8,6 +8,13 @@ AVX2: ModuleType | None = ... AVX512F: ModuleType | None = ... AVX512_SKX: ModuleType | None = ... +# NOTE: these two are only defined on systems with an ARM processor +ASIMD: ModuleType | None = ... +NEON: ModuleType | None = ... + +# NOTE: This is only defined on systems with a riscv64 processor. +RVV: ModuleType | None = ... + baseline: ModuleType | None = ... @type_check_only @@ -17,6 +24,9 @@ class SimdTargets(TypedDict): FMA3: ModuleType | None AVX512F: ModuleType | None AVX512_SKX: ModuleType | None + ASIMD: ModuleType | None + NEON: ModuleType | None + RVV: ModuleType | None baseline: ModuleType | None targets: SimdTargets = ... From de6f6858045b559162f6cd0f38684d833c79d657 Mon Sep 17 00:00:00 2001 From: Parsa Shemirani <parsa@shemirani.com> Date: Tue, 21 Oct 2025 08:09:34 -0700 Subject: [PATCH 0706/1018] DOC: Clarify signed vs unsigned intptr_t vs uintptr_t in user/basics.types Clarified that uintptr_t holds pointers as an unsigned integer, as opposed to the signed intptr_t. --- doc/source/user/basics.types.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/user/basics.types.rst b/doc/source/user/basics.types.rst index 8145f6adb93b..d6914f437faa 100644 --- a/doc/source/user/basics.types.rst +++ b/doc/source/user/basics.types.rst @@ -227,7 +227,7 @@ confusion with builtin python type names, such as `numpy.bool_`. * - N/A - ``'P'`` - ``uintptr_t`` - - Guaranteed to hold pointers. Character code only (Python and C). + - Guaranteed to hold pointers (as an unsigned integer). Character code only (Python and C).
* - `numpy.int32` or `numpy.int64` - `numpy.long` From 9300e4accbeee2622e0a4f43567225afb2416bad Mon Sep 17 00:00:00 2001 From: jorenham <jhammudoglu@gmail.com> Date: Tue, 21 Oct 2025 19:04:14 +0200 Subject: [PATCH 0707/1018] MAINT: bump ``hypothesis`` to ``6.142.2`` --- requirements/test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 101cb6251251..33b5756b7362 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -2,7 +2,7 @@ Cython wheel==0.38.1 setuptools==65.5.1 ; python_version < '3.12' setuptools ; python_version >= '3.12' -hypothesis==6.137.1 +hypothesis==6.142.2 pytest==7.4.0 pytest-cov==4.1.0 meson From 97d41bf02d133bbacdd70ea425e93d179e221a16 Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Tue, 21 Oct 2025 16:29:52 -0400 Subject: [PATCH 0708/1018] CI, TST: Enable parallel threads testing in macOS CI job (#30005) --- .github/workflows/macos.yml | 9 ++++++++- .spin/cmds.py | 18 +++++++++++++++++- doc/TESTS.rst | 17 +++++++++++++++++ doc/source/dev/development_environment.rst | 1 + numpy/_core/tests/test_array_coercion.py | 1 + numpy/_core/tests/test_arrayprint.py | 4 +--- numpy/_core/tests/test_deprecations.py | 3 +++ numpy/_core/tests/test_dtype.py | 12 +++--------- numpy/_core/tests/test_multiarray.py | 1 + numpy/_core/tests/test_scalarmath.py | 8 ++------ numpy/conftest.py | 13 ++++++------- numpy/lib/tests/test_function_base.py | 4 ---- numpy/linalg/tests/test_regression.py | 1 + 13 files changed, 61 insertions(+), 31 deletions(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index d747ab959dd6..f4393e71523c 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -115,7 +115,7 @@ jobs: build_runner: - [ macos-15-intel, "macos_x86_64" ] - [ macos-14, "macos_arm64" ] - version: ["3.11", "3.14t-dev"] + version: ["3.11", "3.14t"] steps: - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 @@ -144,4 +144,11 @@ jobs: spin build -- -Duse-ilp64=true -Ddisable-optimization=true -Dallow-noblas=false - name: Test (fast tests) + if: ${{ matrix.version != '3.14t' || matrix.build_runner[0] != 'macos-14' }} run: spin test -j2 -- --timeout=600 --durations=10 + + - name: Test in multiple threads + if: ${{ matrix.version == '3.14t' && matrix.build_runner[0] == 'macos-14' }} + run: | + pip install pytest-run-parallel==0.7.0 + spin test -p 4 -- --timeout=600 --durations=10 diff --git a/.spin/cmds.py b/.spin/cmds.py index 25ae4551781b..5c0c21198d78 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -141,11 +141,24 @@ def docs(*, parent_callback, **kwargs): default=default, help="Run tests with the given markers" ) +@click.option( + "-p", + "--parallel-threads", + metavar='PARALLEL_THREADS', + default="1", + help="Run each test in the given number of parallel threads under pytest-run-parallel." " Can be set to `auto` to use all cores. Use `spin test -p <num> -- " "--skip-thread-unsafe=true` to only run tests that can run in parallel. " "pytest-run-parallel must be installed to use this option." +) @spin.util.extend_command(spin.cmds.meson.test) -def test(*, parent_callback, pytest_args, tests, markexpr, **kwargs): +def test(*, parent_callback, pytest_args, tests, markexpr, parallel_threads, **kwargs): """ By default, spin will run `-m 'not slow'`.
To run the full test suite, use `spin test -m full` + + When pytest-run-parallel is available, use `spin test -p auto` or + `spin test -p <num_threads>` to run each test in multiple parallel threads. """ # noqa: E501 if (not pytest_args) and (not tests): pytest_args = ('--pyargs', 'numpy') @@ -154,6 +167,9 @@ def test(*, parent_callback, pytest_args, tests, markexpr, **kwargs): if markexpr != "full": pytest_args = ('-m', markexpr) + pytest_args + if parallel_threads != "1": + pytest_args = ('--parallel-threads', parallel_threads) + pytest_args + kwargs['pytest_args'] = pytest_args parent_callback(**{'pytest_args': pytest_args, 'tests': tests, **kwargs}) diff --git a/doc/TESTS.rst b/doc/TESTS.rst index ee8a8b4b07e1..b74dc700b1b9 100644 --- a/doc/TESTS.rst +++ b/doc/TESTS.rst @@ -74,6 +74,22 @@ Testing a subset of NumPy:: For detailed info on testing, see :ref:`testing-builds` +Running tests in multiple threads +--------------------------------- + +To help with stress testing NumPy for thread safety, the test suite can be run under +`pytest-run-parallel`_. To install ``pytest-run-parallel``:: + + $ pip install pytest-run-parallel + +To run the test suite in multiple threads:: + + $ spin test -p auto # have pytest-run-parallel detect the number of available cores + $ spin test -p 4 # run each test under 4 threads + $ spin test -p auto -- --skip-thread-unsafe=true # run ONLY tests that are thread-safe + +When you write new tests (see below), it is worth checking that they do not fail +under ``pytest-run-parallel``, since the CI jobs make use of it. Running doctests ---------------- @@ -406,3 +422,4 @@ Documentation for ``numpy.test`` .. _Hypothesis: https://hypothesis.readthedocs.io/en/latest/ .. _vscode: https://code.visualstudio.com/docs/python/testing#_enable-a-test-framework .. _pycharm: https://www.jetbrains.com/help/pycharm/testing-your-first-python-application.html +.. _pytest-run-parallel: https://github.com/Quansight-Labs/pytest-run-parallel diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst index da8135def475..c2085a0013ef 100644 --- a/doc/source/dev/development_environment.rst +++ b/doc/source/dev/development_environment.rst @@ -92,6 +92,7 @@ one of:: $ spin test -v $ spin test numpy/random # to run the tests in a specific module $ spin test -v -t numpy/_core/tests/test_nditer.py::test_iter_c_order + $ spin test -p auto # to run tests in parallel threads using pytest-run-parallel This builds NumPy first, so the first time it may take a few minutes.
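For test authors, the option added above works like this in practice. A minimal sketch (a hypothetical test module, not part of the patch) using the same ``thread_unsafe`` marker that the test-suite changes below rely on:

import pytest
import numpy as np

def test_ufunc_is_thread_safe():
    # under `spin test -p 4` this body runs concurrently in 4 threads
    assert np.add(np.arange(3), 1).sum() == 6

@pytest.mark.thread_unsafe(reason="mutates the global print options")
def test_printoptions_are_global_state():
    # opted out of parallel runs, exactly like the tests marked below
    with np.printoptions(precision=2):
        assert "0.33" in repr(np.array([1 / 3]))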
diff --git a/numpy/_core/tests/test_array_coercion.py b/numpy/_core/tests/test_array_coercion.py index 9b2a7a5bab85..ba28e886282f 100644 --- a/numpy/_core/tests/test_array_coercion.py +++ b/numpy/_core/tests/test_array_coercion.py @@ -712,6 +712,7 @@ def __array__(self, dtype=None, copy=None): assert arr[0] is ArrayLike @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") + @pytest.mark.thread_unsafe(reason="large slow test in parallel") def test_too_large_array_error_paths(self): """Test the error paths, including for memory leaks""" arr = np.array(0, dtype="uint8") diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py index 04969cb74a4e..bad1aae78d5d 100644 --- a/numpy/_core/tests/test_arrayprint.py +++ b/numpy/_core/tests/test_arrayprint.py @@ -533,9 +533,6 @@ def test_nested_array_repr(self): ) @given(hynp.from_dtype(np.dtype("U"))) - @pytest.mark.thread_unsafe( - reason="gives unreliable results w/ hypothesis (HypothesisWorks/hypothesis#4562)" - ) def test_any_text(self, text): # This test checks that, given any value that can be represented in an # array of dtype("U") (i.e. unicode string), ... @@ -1324,6 +1321,7 @@ async def main(): loop.close() @pytest.mark.skipif(IS_WASM, reason="wasm doesn't support threads") +@pytest.mark.thread_unsafe(reason="test is already explicitly multi-threaded") def test_multithreaded_array_printing(): # the dragon4 implementation uses a static scratch space for performance # reasons this test makes sure it is set up in a thread-safe manner diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index cd9c93f6dad6..37fee7705504 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -361,6 +361,9 @@ def test_parenthesized_repeat_count(self, string): class TestAddNewdocUFunc(_DeprecationTestCase): # Deprecated in Numpy 2.2, 2024-11 + @pytest.mark.thread_unsafe( + reason="modifies and checks docstring which is global state" + ) def test_deprecated(self): doc = struct_ufunc.add_triplet.__doc__ # gh-26718 diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index e6c80d8d4212..611d5857fb15 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -2,7 +2,6 @@ import gc import operator import pickle -import random import sys import types from itertools import permutations @@ -1316,9 +1315,6 @@ def test_object_flag_not_inherited(self): @pytest.mark.slow @hypothesis.given(dtype=hynp.nested_dtypes()) - @pytest.mark.thread_unsafe( - reason="hynp.nested_dtypes thread unsafe (HypothesisWorks/hypothesis#4562)" - ) def test_make_canonical_hypothesis(self, dtype): canonical = np.result_type(dtype) self.check_canonical(dtype, canonical) @@ -1327,14 +1323,12 @@ def test_make_canonical_hypothesis(self, dtype): assert np.can_cast(two_arg_result, canonical, casting="no") @pytest.mark.slow - @pytest.mark.thread_unsafe( - reason="gives unreliable results w/ hypothesis (HypothesisWorks/hypothesis#4562)" - ) @hypothesis.given( dtype=hypothesis.extra.numpy.array_dtypes( subtype_strategy=hypothesis.extra.numpy.array_dtypes(), - min_size=5, max_size=10, allow_subarrays=True)) - def test_structured(self, dtype): + min_size=5, max_size=10, allow_subarrays=True), + random=hypothesis.strategies.randoms()) + def test_structured(self, dtype, random): # Pick 4 of the fields at random. This will leave empty space in the # dtype (since we do not canonicalize it here). 
field_subset = random.sample(dtype.names, k=4) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index c2d54dd15caf..ca34756e5882 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -1003,6 +1003,7 @@ def test_too_big_error(self): @pytest.mark.skipif(not IS_64BIT, reason="malloc may not fail on 32 bit systems") + @pytest.mark.thread_unsafe(reason="large slow test in parallel") def test_malloc_fails(self): # This test is guaranteed to fail due to a too large allocation with assert_raises(np._core._exceptions._ArrayMemoryError): diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index bbde90c08bbb..bfbc9a54cbfe 100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -871,9 +871,7 @@ def recursionlimit(n): @given(sampled_from(objecty_things), sampled_from(binary_operators_for_scalar_ints), sampled_from(types + [rational])) -@pytest.mark.thread_unsafe( - reason="gives unreliable results w/ hypothesis (HypothesisWorks/hypothesis#4562)" -) +@pytest.mark.thread_unsafe(reason="sets recursion limit globally") def test_operator_object_left(o, op, type_): try: with recursionlimit(200): @@ -885,9 +883,7 @@ def test_operator_object_left(o, op, type_): @given(sampled_from(objecty_things), sampled_from(binary_operators_for_scalar_ints), sampled_from(types + [rational])) -@pytest.mark.thread_unsafe( - reason="gives unreliable results w/ hypothesis (HypothesisWorks/hypothesis#4562)" -) +@pytest.mark.thread_unsafe(reason="sets recursion limit globally") def test_operator_object_right(o, op, type_): try: with recursionlimit(200): diff --git a/numpy/conftest.py b/numpy/conftest.py index c158a2251914..c3c96ef3bc39 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -117,7 +117,7 @@ def pytest_terminal_summary(terminalreporter, exitstatus, config): pytest.exit("GIL re-enabled during tests", returncode=1) # FIXME when yield tests are gone. -@pytest.hookimpl() +@pytest.hookimpl(tryfirst=True) def pytest_itemcollected(item): """ Check FPU precision mode was not changed during test collection. 
@@ -136,6 +136,11 @@ def pytest_itemcollected(item): _collect_results[item] = (_old_fpu_mode, mode) _old_fpu_mode = mode + # mark f2py tests as thread unsafe + if Path(item.fspath).parent == Path(__file__).parent / 'f2py' / 'tests': + item.add_marker(pytest.mark.thread_unsafe( + reason="f2py tests are thread-unsafe")) + @pytest.fixture(scope="function", autouse=True) def check_fpu_mode(request): @@ -241,9 +246,3 @@ def warnings_errors_and_rng(test=None): 'numpy/random/_examples', 'numpy/f2py/_backends/_distutils.py', ] - -def pytest_collection_modifyitems(config, items): - for item in items: - if Path(item.fspath).parent == Path(__file__).parent / 'f2py' / 'tests': - item.add_marker(pytest.mark.thread_unsafe( - reason="f2py tests are thread-unsafe")) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 57c8911be3fb..5c5f1b0315d0 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -3938,10 +3938,6 @@ def test_quantile_monotonic(self, method): quantile = np.quantile([0., 1., 2., 3.], p0, method=method) assert_equal(np.sort(quantile), quantile) - @pytest.mark.thread_unsafe( - reason="gives unreliable results w/ hypothesis " - "(HypothesisWorks/hypothesis#4562)" - ) @hypothesis.given( arr=arrays(dtype=np.float64, shape=st.integers(min_value=3, max_value=1000), diff --git a/numpy/linalg/tests/test_regression.py b/numpy/linalg/tests/test_regression.py index e02e955cfa40..053e7130da63 100644 --- a/numpy/linalg/tests/test_regression.py +++ b/numpy/linalg/tests/test_regression.py @@ -165,6 +165,7 @@ def test_matrix_rank_rtol_argument(self, rtol): res = np.linalg.matrix_rank(x, rtol=rtol) assert res.shape == (4,) + @pytest.mark.thread_unsafe(reason="test is already testing threads with openblas") def test_openblas_threading(self): # gh-27036 # Test whether matrix multiplication involving a large matrix always From 0f8499daee14db8fc7e243bb0618abbfcd5bead4 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 22 Oct 2025 01:03:36 +0200 Subject: [PATCH 0709/1018] DEV: Set correct ``PYTHONPATH`` in ``spin stubtest`` (#30039) --- .spin/cmds.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/.spin/cmds.py b/.spin/cmds.py index 5c0c21198d78..be97fe228ea8 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -544,13 +544,19 @@ def mypy(ctx): default=False, help="Concise output format", ) -def stubtest(*, concise: bool) -> None: +@meson.build_dir_option +def stubtest(*, concise: bool, build_dir: str) -> None: """🧐 Run stubtest on NumPy's .pyi stubs Requires mypy to be installed """ - ctx = click.get_current_context() - ctx.invoke(build) + click.get_current_context().invoke(build) + meson._set_pythonpath(build_dir) + print(f"{build_dir = !r}") + + import sysconfig + purellib = sysconfig.get_paths()["purelib"] + print(f"{purellib = !r}") stubtest_dir = curdir.parent / 'tools' / 'stubtest' mypy_config = stubtest_dir / 'mypy.ini' @@ -565,6 +571,7 @@ def stubtest(*, concise: bool) -> None: if concise: cmd.append('--concise') cmd.append('numpy') + spin.util.run(cmd) From 39e3c5617007deb23f510f3631ae61a414b2d3f1 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 22 Oct 2025 01:06:18 +0200 Subject: [PATCH 0710/1018] DEV: separate stubtest allowlist for py312+ (#30045) --- .spin/cmds.py | 6 ++++-- tools/stubtest/allowlist.txt | 5 ----- tools/stubtest/allowlist_py312.txt | 8 ++++++++ 3 files changed, 12 insertions(+), 7 deletions(-) create mode 100644 tools/stubtest/allowlist_py312.txt 
diff --git a/.spin/cmds.py b/.spin/cmds.py index be97fe228ea8..925991c5c804 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -560,13 +560,15 @@ def stubtest(*, concise: bool, build_dir: str) -> None: stubtest_dir = curdir.parent / 'tools' / 'stubtest' mypy_config = stubtest_dir / 'mypy.ini' - allowlist = stubtest_dir / 'allowlist.txt' + allowlists = [stubtest_dir / 'allowlist.txt'] + if sys.version_info >= (3, 12): + allowlists.append(stubtest_dir / 'allowlist_py312.txt') cmd = [ 'stubtest', '--ignore-disjoint-bases', f'--mypy-config-file={mypy_config}', - f'--allowlist={allowlist}', + *(f'--allowlist={allowlist}' for allowlist in allowlists), ] if concise: cmd.append('--concise') diff --git a/tools/stubtest/allowlist.txt b/tools/stubtest/allowlist.txt index c5d7fe2418d3..6b2c3a29bbc6 100644 --- a/tools/stubtest/allowlist.txt +++ b/tools/stubtest/allowlist.txt @@ -31,13 +31,8 @@ numpy\.f2py\.__main__ # inexpressible: the `dtype.type` class-attribute is `None` unless instantiated numpy(\..+)?\.dtype\.type -# mypy false positive "... is not a Union" errors (py314 only?) -numpy\.typing\.ArrayLike -numpy\.typing\.DTypeLike - # distutils numpy\.distutils.* -numpy\.f2py\._backends\._distutils # import errors numpy\._build_utils.* diff --git a/tools/stubtest/allowlist_py312.txt b/tools/stubtest/allowlist_py312.txt new file mode 100644 index 000000000000..867b2f1870a3 --- /dev/null +++ b/tools/stubtest/allowlist_py312.txt @@ -0,0 +1,8 @@ +# python >= 3.12 + +# false positive "... is not a Union" errors +numpy\.typing\.ArrayLike +numpy\.typing\.DTypeLike + +# only exists before Python 3.12 +numpy\.f2py\._backends\._distutils From a824e79bf505edcaecd0082b7f96568450ab20f4 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 22 Oct 2025 17:32:24 +0200 Subject: [PATCH 0711/1018] CI: Python 3.14 stable --- .github/workflows/linux.yml | 2 +- .github/workflows/windows.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 42a9601f33e1..45a9f65bd9f0 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -58,7 +58,7 @@ jobs: MESON_ARGS: "-Dallow-noblas=true -Dcpu-baseline=none -Dcpu-dispatch=none" strategy: matrix: - version: ["3.11", "3.12", "3.13", "3.14-dev", "3.14t-dev"] + version: ["3.11", "3.12", "3.13", "3.14", "3.14t"] steps: - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index b86543189941..886a70e1be2b 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -30,7 +30,7 @@ jobs: - name: Setup Python uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: - python-version: "3.14t-dev" + python-version: "3.14t" - name: Install build dependencies from PyPI run: | @@ -97,7 +97,7 @@ jobs: - name: Build and install run: | python -m pip install . 
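Taken together, the two commits above make ``spin stubtest`` roughly equivalent to this direct invocation (a sketch, with the paths and flags as they appear in the patch):

import subprocess
import sys

allowlists = ["tools/stubtest/allowlist.txt"]
if sys.version_info >= (3, 12):  # mirrors the version gate added to cmds.py
    allowlists.append("tools/stubtest/allowlist_py312.txt")

subprocess.run(
    [
        "stubtest",
        "--ignore-disjoint-bases",
        "--mypy-config-file=tools/stubtest/mypy.ini",
        *(f"--allowlist={path}" for path in allowlists),
        "numpy",
    ],
    check=False,  # stubtest signals findings through its exit status
)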
-v -Ccompile-args="-j2" -Csetup-args="-Dallow-noblas=true" 2>&1 | tee build.log - + - name: Check warnings uses: ./.github/check-warnings with: From b4c575a63af23cec0168ca392331fcc5f42054c5 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 22 Oct 2025 20:28:07 +0200 Subject: [PATCH 0712/1018] ENH: Add ``stable`` kwarg to ``chararray.argsort`` (#30000) * ENH: Add ``stable`` kwarg to ``chararray.argsort`` * TST: regression test for `chararray.argsort(stable=)` Co-authored-by: Tyler Reddy --------- Co-authored-by: Tyler Reddy --- numpy/_core/defchararray.py | 4 ++-- numpy/_core/tests/test_defchararray.py | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/numpy/_core/defchararray.py b/numpy/_core/defchararray.py index 098f4ab9ce9b..1a8750507f41 100644 --- a/numpy/_core/defchararray.py +++ b/numpy/_core/defchararray.py @@ -718,7 +718,7 @@ def __mod__(self, i): def __rmod__(self, other): return NotImplemented - def argsort(self, axis=-1, kind=None, order=None): + def argsort(self, axis=-1, kind=None, order=None, *, stable=None): """ Return the indices that sort the array lexicographically. @@ -736,7 +736,7 @@ def argsort(self, axis=-1, kind=None, order=None): dtype='|S5') """ - return self.__array__().argsort(axis, kind, order) + return self.__array__().argsort(axis, kind, order, stable=stable) argsort.__doc__ = ndarray.argsort.__doc__ def capitalize(self): diff --git a/numpy/_core/tests/test_defchararray.py b/numpy/_core/tests/test_defchararray.py index 04518bdbd671..e98632b62829 100644 --- a/numpy/_core/tests/test_defchararray.py +++ b/numpy/_core/tests/test_defchararray.py @@ -694,6 +694,11 @@ def B(self): return np.array([['efg', '456'], ['051', 'tuv']]).view(np.char.chararray) + def test_argsort(self): + arr = np.array(['abc'] * 4).view(np.char.chararray) + actual = arr.argsort(stable=True) + assert_array_equal(actual, [0, 1, 2, 3]) + def test_add(self): A, B = self.A(), self.B() AB = np.array([['abcefg', '123456'], From 4cc4a5808842f9538e11cafc0b2676cecf235ec1 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 22 Oct 2025 20:30:19 +0200 Subject: [PATCH 0713/1018] MAINT: Remove ``NDArrayOperatorsMixin.um`` class attribute ``umath`` reexport (#30007) --- numpy/lib/mixins.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/mixins.py b/numpy/lib/mixins.py index 831bb34cfb55..9a531c6b9022 100644 --- a/numpy/lib/mixins.py +++ b/numpy/lib/mixins.py @@ -1,6 +1,7 @@ """ Mixin classes for custom array types that don't inherit from ndarray. """ +from numpy._core import umath as um __all__ = ['NDArrayOperatorsMixin'] @@ -135,7 +136,6 @@ class that simply wraps a NumPy array and ensures that the result of any ArrayLike preserve a well-defined casting hierarchy. 
""" - from numpy._core import umath as um __slots__ = () # Like np.ndarray, this mixin class implements "Option 1" from the ufunc From 76e91189b23d4e0afc34130e95f4f460a3d57d95 Mon Sep 17 00:00:00 2001 From: Jake Vanderplas Date: Wed, 22 Oct 2025 11:46:57 -0700 Subject: [PATCH 0714/1018] BUG: support axis sequence in `np.trim_zeros` (#29947) --- .../upcoming_changes/29947.improvement.rst | 7 +++ numpy/lib/_function_base_impl.py | 27 +++++----- numpy/lib/tests/test_function_base.py | 53 +++++++++++++++++++ 3 files changed, 74 insertions(+), 13 deletions(-) create mode 100644 doc/release/upcoming_changes/29947.improvement.rst diff --git a/doc/release/upcoming_changes/29947.improvement.rst b/doc/release/upcoming_changes/29947.improvement.rst new file mode 100644 index 000000000000..99c67e598347 --- /dev/null +++ b/doc/release/upcoming_changes/29947.improvement.rst @@ -0,0 +1,7 @@ +Multiple axes are now supported in ``numpy.trim_zeros`` +------------------------------------------------------- +The ``axis`` argument of `numpy.trim_zeros` now accepts a sequence; for example +``np.trim_zeros(x, axis=(0, 1))`` will trim the zeros from a multi-dimensional +array ``x`` along axes 0 and 1. This fixes issue +`gh‑29945 `__ and was implemented +in pull request `gh‑29947 `__. diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 54e6645f6182..9d5553cc7a34 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -1986,6 +1986,14 @@ def trim_zeros(filt, trim='fb', axis=None): trim = trim.lower() if trim not in {"fb", "bf", "f", "b"}: raise ValueError(f"unexpected character(s) in `trim`: {trim!r}") + if axis is None: + axis_tuple = tuple(range(filt_.ndim)) + else: + axis_tuple = _nx.normalize_axis_tuple(axis, filt_.ndim, argname="axis") + + if not axis_tuple: + # No trimming requested -> return input unmodified. + return filt start, stop = _arg_trim_zeros(filt_) stop += 1 # Adjust for slicing @@ -2000,20 +2008,13 @@ def trim_zeros(filt, trim='fb', axis=None): if 'b' not in trim: stop = (None,) * filt_.ndim - if len(start) == 1: - # filt is 1D -> don't use multi-dimensional slicing to preserve + sl = tuple(slice(start[ax], stop[ax]) if ax in axis_tuple else slice(None) + for ax in range(filt_.ndim)) + if len(sl) == 1: + # filt is 1D -> avoid multi-dimensional slicing to preserve # non-array input types - sl = slice(start[0], stop[0]) - elif axis is None: - # trim all axes - sl = tuple(slice(*x) for x in zip(start, stop)) - else: - # only trim single axis - axis = normalize_axis_index(axis, filt_.ndim) - sl = (slice(None),) * axis + (slice(start[axis], stop[axis]),) + (...,) - - trimmed = filt[sl] - return trimmed + return filt[sl[0]] + return filt[sl] def _extract_dispatcher(condition, arr): diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 5c5f1b0315d0..1d4026d8562f 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1380,6 +1380,36 @@ class TestTrimZeros: c = a.astype(complex) d = a.astype(object) + def construct_input_output(self, rng, shape, axis, trim): + """Construct an input/output test pair for trim_zeros""" + # Standardize axis to a tuple. + if axis is None: + axis = tuple(range(len(shape))) + elif isinstance(axis, int): + axis = (len(shape) + axis if axis < 0 else axis,) + else: + axis = tuple(len(shape) + ax if ax < 0 else ax for ax in axis) + + # Populate a random interior slice with nonzero entries. 
+ data = np.zeros(shape) + i_start = rng.integers(low=0, high=np.array(shape) - 1) + i_end = rng.integers(low=i_start + 1, high=shape) + inner_shape = tuple(i_end - i_start) + inner_data = 1 + rng.random(inner_shape) + data[tuple(slice(i, j) for i, j in zip(i_start, i_end))] = inner_data + + # Construct the expected output of N-dimensional trim_zeros + # with the given axis and trim arguments. + if 'f' not in trim: + i_start = np.array([None for _ in shape]) + if 'b' not in trim: + i_end = np.array([None for _ in shape]) + idx = tuple(slice(i, j) if ax in axis else slice(None) + for ax, (i, j) in enumerate(zip(i_start, i_end))) + expected = data[idx] + + return data, expected + def values(self): attr_names = ('a', 'b', 'c', 'd') return (getattr(self, name) for name in attr_names) @@ -1465,6 +1495,29 @@ def test_unexpected_trim_value(self, trim): with pytest.raises(ValueError, match=r"unexpected character\(s\) in `trim`"): trim_zeros(arr, trim=trim) + @pytest.mark.parametrize("shape, axis", [ + [(5,), None], + [(5,), ()], + [(5,), 0], + [(5, 6), None], + [(5, 6), ()], + [(5, 6), 0], + [(5, 6), (-1,)], + [(5, 6, 7), None], + [(5, 6, 7), ()], + [(5, 6, 7), 1], + [(5, 6, 7), (0, 2)], + [(5, 6, 7, 8), None], + [(5, 6, 7, 8), ()], + [(5, 6, 7, 8), -2], + [(5, 6, 7, 8), (0, 1, 3)], + ]) + @pytest.mark.parametrize("trim", ['fb', 'f', 'b']) + def test_multiple_axes(self, shape, axis, trim): + rng = np.random.default_rng(4321) + data, expected = self.construct_input_output(rng, shape, axis, trim) + assert_array_equal(trim_zeros(data, axis=axis, trim=trim), expected) + class TestExtins: From 5a510b26958ee258009684b4d64a426e9d356b75 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 02:41:44 +0200 Subject: [PATCH 0715/1018] TYP: fix mypy error in `_array_api_info` --- numpy/_array_api_info.pyi | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/numpy/_array_api_info.pyi b/numpy/_array_api_info.pyi index 069ef478de92..396125143e92 100644 --- a/numpy/_array_api_info.pyi +++ b/numpy/_array_api_info.pyi @@ -1,5 +1,4 @@ from typing import ( - ClassVar, Literal, Never, TypeAlias, @@ -118,7 +117,7 @@ _EmptyDict: TypeAlias = dict[Never, Never] @final class __array_namespace_info__: - __module__: ClassVar[Literal["numpy"]] + __module__: Literal["numpy"] = "numpy" def capabilities(self) -> _Capabilities: ... def default_device(self) -> _Device: ... From 6fed6c675161a3e5bf29eab0a2f4286b11180e25 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 02:51:38 +0200 Subject: [PATCH 0716/1018] TYP: ignore false-positive mypy errors in `__init__.pyi` --- numpy/__init__.pyi | 82 ++++++++++++++++++++++++++++------------------ 1 file changed, 51 insertions(+), 31 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 30b298e1d607..8d754d917ad5 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -5002,6 +5002,10 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __getformat__(cls, typestr: L["double", "float"], /) -> str: ... # undocumented # float64-specific operator overrides + # NOTE: Mypy reports [misc] errors about "unsafely overlapping signatures" for the + # reflected methods. But since they are identical to the non-reflected versions, + # these errors appear to be false positives. + @overload def __add__(self, other: _Float64_co, /) -> float64: ... 
@overload @@ -5010,10 +5014,11 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __add__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload def __add__(self, other: complex, /) -> float64 | complex128: ... + @overload - def __radd__(self, other: _Float64_co, /) -> float64: ... + def __radd__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] @overload - def __radd__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + def __radd__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload def __radd__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload @@ -5027,10 +5032,11 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __sub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload def __sub__(self, other: complex, /) -> float64 | complex128: ... + @overload - def __rsub__(self, other: _Float64_co, /) -> float64: ... + def __rsub__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] @overload - def __rsub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + def __rsub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload def __rsub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload @@ -5044,10 +5050,11 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __mul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload def __mul__(self, other: complex, /) -> float64 | complex128: ... + @overload - def __rmul__(self, other: _Float64_co, /) -> float64: ... + def __rmul__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] @overload - def __rmul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + def __rmul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload def __rmul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload @@ -5061,10 +5068,11 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __truediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload def __truediv__(self, other: complex, /) -> float64 | complex128: ... + @overload - def __rtruediv__(self, other: _Float64_co, /) -> float64: ... + def __rtruediv__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] @overload - def __rtruediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + def __rtruediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload def __rtruediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload @@ -5078,8 +5086,9 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __floordiv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload def __floordiv__(self, other: complex, /) -> float64 | complex128: ... + @overload - def __rfloordiv__(self, other: _Float64_co, /) -> float64: ... + def __rfloordiv__(self, other: _Float64_co, /) -> float64: ... 
# type: ignore[misc] @overload def __rfloordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload @@ -5097,10 +5106,11 @@ class float64(floating[_64Bit], float): # type: ignore[misc] ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload def __pow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... + @overload - def __rpow__(self, other: _Float64_co, mod: None = None, /) -> float64: ... + def __rpow__(self, other: _Float64_co, mod: None = None, /) -> float64: ... # type: ignore[misc] @overload - def __rpow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... + def __rpow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... # type: ignore[misc] @overload def __rpow__( self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / @@ -5345,21 +5355,22 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __mod__(self, x: timedelta64, /) -> timedelta64: ... - # the L[0] makes __mod__ non-commutative, which the first two overloads reflect + # NOTE: The L[0] makes __mod__ non-commutative, which the first two overloads + # reflect. However, mypy does not seem to like this, so we ignore the errors. @overload - def __rmod__(self, x: timedelta64[None], /) -> timedelta64[None]: ... + def __rmod__(self, x: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[misc] @overload def __rmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> timedelta64[None]: ... @overload - def __rmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... + def __rmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... # type: ignore[misc] @overload - def __rmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ... + def __rmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ... # type: ignore[misc] @overload def __rmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... @overload - def __rmod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... + def __rmod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... # type: ignore[misc] @overload - def __rmod__(self, x: timedelta64, /) -> timedelta64: ... + def __rmod__(self, x: timedelta64, /) -> timedelta64: ... # type: ignore[misc] # keep in sync with __mod__ @overload @@ -5369,7 +5380,9 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __divmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... @overload - def __divmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... + def __divmod__( + self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], / + ) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... @overload def __divmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... @overload @@ -5379,19 +5392,21 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] # keep in sync with __rmod__ @overload - def __rdivmod__(self, x: timedelta64[None], /) -> tuple[int64, timedelta64[None]]: ... + def __rdivmod__(self, x: timedelta64[None], /) -> tuple[int64, timedelta64[None]]: ... 
# type: ignore[misc] @overload - def __rdivmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... + def __rdivmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... # type: ignore[misc] @overload - def __rdivmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... + def __rdivmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... # type: ignore[misc] @overload - def __rdivmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... + def __rdivmod__( # type: ignore[misc] + self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], / + ) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... @overload def __rdivmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... @overload - def __rdivmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... + def __rdivmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... # type: ignore[misc] @overload - def __rdivmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... + def __rdivmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... # type: ignore[misc] @overload def __sub__(self: timedelta64[None], b: _TD64Like_co, /) -> timedelta64[None]: ... @@ -5406,18 +5421,20 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __sub__(self, b: timedelta64[None], /) -> timedelta64[None]: ... + # NOTE: subtraction is not commutative, so __rsub__ differs from __sub__. + # This confuses mypy, so we ignore the [misc] errors it reports. @overload def __rsub__(self: timedelta64[None], a: _TD64Like_co, /) -> timedelta64[None]: ... @overload def __rsub__(self: timedelta64[dt.timedelta], a: _AnyDateOrTime, /) -> _AnyDateOrTime: ... @overload - def __rsub__(self: timedelta64[dt.timedelta], a: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item]: ... + def __rsub__(self: timedelta64[dt.timedelta], a: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item]: ... # type: ignore[misc] @overload - def __rsub__(self: timedelta64[_AnyTD64Item], a: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... + def __rsub__(self: timedelta64[_AnyTD64Item], a: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... # type: ignore[misc] @overload def __rsub__(self, a: timedelta64[None], /) -> timedelta64[None]: ... @overload - def __rsub__(self, a: datetime64[None], /) -> datetime64[None]: ... + def __rsub__(self, a: datetime64[None], /) -> datetime64[None]: ... # type: ignore[misc] @overload def __truediv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> float: ... @@ -5429,6 +5446,7 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] def __truediv__(self: timedelta64[_AnyTD64Item], b: float | floating, /) -> timedelta64[_AnyTD64Item | None]: ... @overload def __truediv__(self, b: float | floating | integer, /) -> timedelta64: ... + @overload def __rtruediv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> float: ... @overload @@ -5442,6 +5460,7 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] def __floordiv__(self: timedelta64[_AnyTD64Item], b: int | integer, /) -> timedelta64[_AnyTD64Item]: ... 
@overload def __floordiv__(self: timedelta64[_AnyTD64Item], b: float | floating, /) -> timedelta64[_AnyTD64Item | None]: ... + @overload def __rfloordiv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> int: ... @overload @@ -5557,6 +5576,7 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __sub__(self, x: datetime64, /) -> timedelta64: ... + # NOTE: mypy gets confused by the non-commutativity of subtraction here @overload def __rsub__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ... @overload @@ -5566,13 +5586,13 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __rsub__(self: datetime64[int], x: datetime64, /) -> timedelta64[int]: ... @overload - def __rsub__(self: datetime64[dt.datetime], x: datetime64[int], /) -> timedelta64[int]: ... + def __rsub__(self: datetime64[dt.datetime], x: datetime64[int], /) -> timedelta64[int]: ... # type: ignore[misc] @overload - def __rsub__(self: datetime64[dt.datetime], x: datetime64[dt.date], /) -> timedelta64[dt.timedelta]: ... + def __rsub__(self: datetime64[dt.datetime], x: datetime64[dt.date], /) -> timedelta64[dt.timedelta]: ... # type: ignore[misc] @overload - def __rsub__(self, x: datetime64[None], /) -> timedelta64[None]: ... + def __rsub__(self, x: datetime64[None], /) -> timedelta64[None]: ... # type: ignore[misc] @overload - def __rsub__(self, x: datetime64, /) -> timedelta64: ... + def __rsub__(self, x: datetime64, /) -> timedelta64: ... # type: ignore[misc] @overload def __lt__(self, other: datetime64, /) -> bool_: ... From 3e3edd6898266c8c07de436b78b8d6ec1105fa08 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 02:55:49 +0200 Subject: [PATCH 0717/1018] TYP: fix mypy `attr-defined` import error in `_core.defchararray` --- numpy/_core/defchararray.pyi | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 53ff96f450f5..62a1c59b3ae5 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -7,19 +7,10 @@ from typing import ( TypeAlias, overload, ) -from typing_extensions import TypeVar +from typing_extensions import Buffer, TypeVar import numpy as np -from numpy import ( - _OrderKACF, - _SupportsBuffer, - bytes_, - dtype, - int_, - ndarray, - object_, - str_, -) +from numpy import _OrderKACF, bytes_, dtype, int_, ndarray, object_, str_ from numpy._core.multiarray import compare_chararrays from numpy._typing import ( NDArray, @@ -108,7 +99,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): shape: _ShapeLike, itemsize: SupportsIndex | SupportsInt = 1, unicode: L[False] = False, - buffer: _SupportsBuffer | None = None, + buffer: Buffer | np.ndarray | None = None, offset: SupportsIndex = 0, strides: _ShapeLike | None = None, order: _OrderKACF = "C", @@ -119,7 +110,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): shape: _ShapeLike, itemsize: SupportsIndex | SupportsInt, unicode: L[True], - buffer: _SupportsBuffer | None = None, + buffer: Buffer | np.ndarray | None = None, offset: SupportsIndex = 0, strides: _ShapeLike | None = None, order: _OrderKACF = "C", @@ -131,7 +122,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): itemsize: SupportsIndex | SupportsInt = 1, *, unicode: L[True], - buffer: _SupportsBuffer | None = None, + buffer: Buffer | np.ndarray | None = None, offset: SupportsIndex = 0, strides: _ShapeLike | None = None, order: _OrderKACF 
= "C",

From 06fb0f5e7a0148a33b034d07d9bbbbb26fd9495b Mon Sep 17 00:00:00 2001
From: jorenham
Date: Thu, 23 Oct 2025 02:59:25 +0200
Subject: [PATCH 0718/1018] TYP: fix mypy `misc` error in `ctypeslib._ctypeslib._concrete_ndptr`

---
 numpy/ctypeslib/_ctypeslib.pyi | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/ctypeslib/_ctypeslib.pyi b/numpy/ctypeslib/_ctypeslib.pyi
index 4ab670037e2b..abcd5a4264e8 100644
--- a/numpy/ctypeslib/_ctypeslib.pyi
+++ b/numpy/ctypeslib/_ctypeslib.pyi
@@ -73,7 +73,7 @@ class _ndptr(ctypes.c_void_p, Generic[_DTypeOptionalT]):
     # In practice these 4 classvars are defined in the dynamic class
     # returned by `ndpointer`
     _dtype_: ClassVar[_DTypeOptionalT]
-    _shape_: ClassVar[None]
+    _shape_: ClassVar[_AnyShape | None]
     _ndim_: ClassVar[int | None]
     _flags_: ClassVar[list[_FlagsKind] | None]

From 9267d29c75bcd25b2811ff99dcc095a23c43c418 Mon Sep 17 00:00:00 2001
From: jorenham
Date: Thu, 23 Oct 2025 03:03:07 +0200
Subject: [PATCH 0719/1018] TYP: remove duplicate `_Ignored` type alias in `ma/core.pyi`

---
 numpy/ma/core.pyi | 2 --
 1 file changed, 2 deletions(-)

diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi
index aeb32bed01f9..f8e735e9294d 100644
--- a/numpy/ma/core.pyi
+++ b/numpy/ma/core.pyi
@@ -313,8 +313,6 @@ _ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsInd
 _ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None
 _ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None

-_Ignored: TypeAlias = object
-
 ###

 MaskType = bool_

From 964cca5dacbc03c042301c38569834af6c12befb Mon Sep 17 00:00:00 2001
From: jorenham
Date: Thu, 23 Oct 2025 03:09:06 +0200
Subject: [PATCH 0720/1018] TYP: ignore false-positive mypy errors in `lib/_index_tricks_impl.pyi`

---
 numpy/lib/_index_tricks_impl.pyi | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi
index cfd7ea0919eb..8f12e4a36d99 100644
--- a/numpy/lib/_index_tricks_impl.pyi
+++ b/numpy/lib/_index_tricks_impl.pyi
@@ -133,14 +133,16 @@ class AxisConcatenator(Generic[_AxisT_co, _MatrixT_co, _NDMinT_co, _Trans1DT_co]
     ndmin: _NDMinT_co
     trans1d: _Trans1DT_co

-    #
+    # NOTE: mypy does not understand that these default values are the same as the
+    # TypeVar defaults. Since the workaround would require us to write 16 overloads,
+    # we ignore the assignment type errors here.
     def __init__(
         self,
         /,
-        axis: _AxisT_co = 0,
-        matrix: _MatrixT_co = False,
-        ndmin: _NDMinT_co = 1,
-        trans1d: _Trans1DT_co = -1,
+        axis: _AxisT_co = 0,  # type: ignore[assignment]
+        matrix: _MatrixT_co = False,  # type: ignore[assignment]
+        ndmin: _NDMinT_co = 1,  # type: ignore[assignment]
+        trans1d: _Trans1DT_co = -1,  # type: ignore[assignment]
     ) -> None: ...

     # TODO(jorenham): annotate this

From e3b61d4177c15579f47b439ad3013a8ba619ce24 Mon Sep 17 00:00:00 2001
From: jorenham
Date: Thu, 23 Oct 2025 03:31:20 +0200
Subject: [PATCH 0721/1018] TYP: configure mypy for type-checking the stubs

---
 tools/stubtest/mypy.ini | 27 ++++++++++++++++++++++-----
 1 file changed, 22 insertions(+), 5 deletions(-)

diff --git a/tools/stubtest/mypy.ini b/tools/stubtest/mypy.ini
index 307590b0f922..1144b84434f1 100644
--- a/tools/stubtest/mypy.ini
+++ b/tools/stubtest/mypy.ini
@@ -1,6 +1,23 @@
-; Stubtest requires mypy to pass before running, which would currently fail
-; on numerous stubs. To allow running stubtest independently, we ignore mypy
-; errors here.
- [mypy] -ignore_errors = True +files = numpy/**/*.pyi +exclude = (?x)( + ^numpy/( + .+\.py$ + | _build_utils/ + | _core/code_generators/ + | distutils/ + ) + ) +namespace_packages = False + +enable_error_code = ignore-without-code, redundant-expr, truthy-bool +warn_unreachable = False +strict_bytes = True +allow_redefinition_new = True +local_partial_types = True + +; Stubtest requires mypy to pass before running, which would currently fail +; on numerous stubs. To allow running stubtest independently, we temporarily +; ignore these errors here. The goal is to eventually fix these mypy errors +; and remove the ignores. +disable_error_code = overload-cannot-match, overload-overlap, override From bce7e51d6f885d2b73a86935eb1b5f38128812c9 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 03:31:57 +0200 Subject: [PATCH 0722/1018] MAINT: remove unused stubtest allowlist entries --- tools/stubtest/allowlist.txt | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/tools/stubtest/allowlist.txt b/tools/stubtest/allowlist.txt index 6b2c3a29bbc6..77ff582c9f66 100644 --- a/tools/stubtest/allowlist.txt +++ b/tools/stubtest/allowlist.txt @@ -1,7 +1,6 @@ # intentional type-check-only deviations from runtime numpy\._typing.* numpy\.polynomial\._polytypes -numpy\.typing\.mypy_plugin\..* # internal testing code numpy\.conftest.* @@ -31,19 +30,8 @@ numpy\.f2py\.__main__ # inexpressible: the `dtype.type` class-attribute is `None` unless instantiated numpy(\..+)?\.dtype\.type -# distutils -numpy\.distutils.* - # import errors -numpy\._build_utils.* -numpy\._core\.code_generators.* -numpy\._core\.include\.numpy\.libdivide -numpy\._core\.src.* -numpy\._pyinstaller.* -numpy\.fft\.pocketfft -numpy\.random\._examples\..* -numpy\.random\.include -numpy\.random\.src.* +numpy\._pyinstaller\..* # known minor deviations from runtime numpy\.(\w+\.)*integer\.__index__ @@ -69,6 +57,7 @@ numpy\.core\.overrides numpy\.core\.records.* numpy\.core\.shape_base.* numpy\.core\.umath.* +numpy\.typing\.mypy_plugin # ufuncs numpy\.(\w+\.)*abs From e9eaffa8743cb52146f5e7f31debf17a42db2bc7 Mon Sep 17 00:00:00 2001 From: riku-sakamoto <46015196+riku-sakamoto@users.noreply.github.com> Date: Thu, 23 Oct 2025 19:12:42 +0900 Subject: [PATCH 0723/1018] BUG: allow division between object-dtype arrays and timedelta objects (#30054) Fixes a bug where dividing an object-dtype array containing Python timedelta objects by a NumPy timedelta64 scalar would raise a `_UFuncBinaryResolutionError`. The operation now falls back to the generic object ufunc loop, enabling elementwise Python evaluation consistent with other object operations. See gh-30025. 
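A minimal sketch of the fixed behavior, with values adapted from the new
tests added below:

    >>> import datetime
    >>> import numpy as np
    >>> arr = np.array([datetime.timedelta(seconds=20),
    ...                 datetime.timedelta(days=2)], dtype=object)
    >>> # raised _UFuncBinaryResolutionError before this change
    >>> [float(x) for x in arr / np.timedelta64(2, "s")]
    [10.0, 86400.0]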
* MAINT: refactor handling and tests for division between object-dtype arrays and timedelta objects --- numpy/_core/src/umath/ufunc_type_resolution.c | 7 +-- numpy/_core/tests/test_datetime.py | 48 +++++++++++++++++++ 2 files changed, 52 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/umath/ufunc_type_resolution.c b/numpy/_core/src/umath/ufunc_type_resolution.c index 8d617f3ddc6c..f5a203719b54 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.c +++ b/numpy/_core/src/umath/ufunc_type_resolution.c @@ -1252,9 +1252,10 @@ PyUFunc_DivisionTypeResolver(PyUFuncObject *ufunc, type_num2 = PyArray_DESCR(operands[1])->type_num; /* Use the default when datetime and timedelta are not involved */ - if (!PyTypeNum_ISDATETIME(type_num1) && !PyTypeNum_ISDATETIME(type_num2)) { - return PyUFunc_DefaultTypeResolver(ufunc, casting, operands, - type_tup, out_dtypes); + if ((!PyTypeNum_ISDATETIME(type_num1) && !PyTypeNum_ISDATETIME(type_num2)) || + (PyTypeNum_ISOBJECT(type_num1) || PyTypeNum_ISOBJECT(type_num2))) { + return PyUFunc_DefaultTypeResolver(ufunc, casting, operands, type_tup, + out_dtypes); } if (type_num1 == NPY_TIMEDELTA) { diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index 18b32ea42da2..c7b11149ed43 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -2700,6 +2700,54 @@ def test_timedelta_hash_big_positive(self, wk, unit): td2 = np.timedelta64(td, unit) _assert_equal_hash(td, td2) + @pytest.mark.parametrize( + "inputs, divisor, expected", + [ + ( + np.array( + [datetime.timedelta(seconds=20), datetime.timedelta(days=2)], + dtype="object", + ), + np.int64(2), + np.array( + [datetime.timedelta(seconds=10), datetime.timedelta(days=1)], + dtype="object", + ), + ), + ( + np.array( + [datetime.timedelta(seconds=20), datetime.timedelta(days=2)], + dtype="object", + ), + np.timedelta64(2, "s"), + np.array( + [10.0, 24.0 * 60.0 * 60.0], + dtype="object", + ), + ), + ( + datetime.timedelta(seconds=2), + np.array( + [datetime.timedelta(seconds=20), datetime.timedelta(days=2)], + dtype="object", + ), + np.array( + [1.0 / 10.0, 1.0 / (24.0 * 60.0 * 60.0)], + dtype="object", + ), + ), + ], + ) + def test_true_divide_object_by_timedelta( + self, + inputs: np.ndarray | type[np.generic], + divisor: np.ndarray | type[np.generic], + expected: np.ndarray, + ): + # gh-30025 + results = inputs / divisor + assert_array_equal(results, expected) + class TestDateTimeData: From ab22f74e376722229c37b605520c3bac7a8d0f82 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 13:32:32 +0200 Subject: [PATCH 0724/1018] TYP: run mypy in strict mode --- tools/stubtest/mypy.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/stubtest/mypy.ini b/tools/stubtest/mypy.ini index 1144b84434f1..422372300b40 100644 --- a/tools/stubtest/mypy.ini +++ b/tools/stubtest/mypy.ini @@ -12,7 +12,7 @@ namespace_packages = False enable_error_code = ignore-without-code, redundant-expr, truthy-bool warn_unreachable = False -strict_bytes = True +strict = True allow_redefinition_new = True local_partial_types = True From 93caf6f110c10e97c5940e157bc12dceab064056 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 13:55:02 +0200 Subject: [PATCH 0725/1018] TYP: Fix `override` mypy errors in `random.*` --- numpy/random/_mt19937.pyi | 2 +- numpy/random/_pcg64.pyi | 13 ++++--------- numpy/random/_philox.pyi | 11 +++-------- numpy/random/_sfc64.pyi | 11 +++-------- 4 files changed, 11 insertions(+), 26 deletions(-) diff 
--git a/numpy/random/_mt19937.pyi b/numpy/random/_mt19937.pyi index c8ea2dccda02..03373a6dd6ea 100644 --- a/numpy/random/_mt19937.pyi +++ b/numpy/random/_mt19937.pyi @@ -21,7 +21,7 @@ class MT19937(BitGenerator): def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... def _legacy_seeding(self, seed: _ArrayLikeInt_co) -> None: ... def jumped(self, jumps: int = 1) -> MT19937: ... - @property + @property # type: ignore[override] def state(self) -> _MT19937State: ... @state.setter def state(self, value: _MT19937State) -> None: ... diff --git a/numpy/random/_pcg64.pyi b/numpy/random/_pcg64.pyi index 6055c5d2921d..a9e81f7f181b 100644 --- a/numpy/random/_pcg64.pyi +++ b/numpy/random/_pcg64.pyi @@ -20,7 +20,7 @@ class _PCG64State(TypedDict): class PCG64(BitGenerator): def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... def jumped(self, jumps: int = 1) -> PCG64: ... - @property + @property # type: ignore[override] def state( self, ) -> _PCG64State: ... @@ -34,13 +34,8 @@ class PCG64(BitGenerator): class PCG64DXSM(BitGenerator): def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... def jumped(self, jumps: int = 1) -> PCG64DXSM: ... - @property - def state( - self, - ) -> _PCG64State: ... + @property # type: ignore[override] + def state(self) -> _PCG64State: ... @state.setter - def state( - self, - value: _PCG64State, - ) -> None: ... + def state(self, value: _PCG64State) -> None: ... def advance(self, delta: int) -> PCG64DXSM: ... diff --git a/numpy/random/_philox.pyi b/numpy/random/_philox.pyi index ab1aee31eb02..3089f11ea629 100644 --- a/numpy/random/_philox.pyi +++ b/numpy/random/_philox.pyi @@ -28,14 +28,9 @@ class Philox(BitGenerator): counter: _ArrayLikeInt_co | None = ..., key: _ArrayLikeInt_co | None = ..., ) -> None: ... - @property - def state( - self, - ) -> _PhiloxState: ... + @property # type: ignore[override] + def state(self) -> _PhiloxState: ... @state.setter - def state( - self, - value: _PhiloxState, - ) -> None: ... + def state(self, value: _PhiloxState) -> None: ... def jumped(self, jumps: int = 1) -> Philox: ... def advance(self, delta: int) -> Philox: ... diff --git a/numpy/random/_sfc64.pyi b/numpy/random/_sfc64.pyi index 1e563fdebfde..f5f3fed9c251 100644 --- a/numpy/random/_sfc64.pyi +++ b/numpy/random/_sfc64.pyi @@ -19,12 +19,7 @@ class _SFC64State(TypedDict): class SFC64(BitGenerator): def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... - @property - def state( - self, - ) -> _SFC64State: ... + @property # type: ignore[override] + def state(self) -> _SFC64State: ... @state.setter - def state( - self, - value: _SFC64State, - ) -> None: ... + def state(self, value: _SFC64State) -> None: ... From 7b46bc98e7a98cfe4ecd2211e89e4bfb503fc2d4 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 13:55:12 +0200 Subject: [PATCH 0726/1018] TYP: Fix `override` mypy errors in `chararray` --- numpy/_core/defchararray.pyi | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 62a1c59b3ae5..406d11ea0eb7 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -129,11 +129,11 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): ) -> _CharArray[str_]: ... def __array_finalize__(self, obj: object) -> None: ... - def __mul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... 
- def __rmul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... - def __mod__(self, i: Any) -> chararray[_AnyShape, _CharDTypeT_co]: ... + def __mul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] + def __rmul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] + def __mod__(self, i: Any) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] - @overload + @overload # type: ignore[override] def __eq__( self: _CharArray[str_], other: U_co, @@ -144,7 +144,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): other: S_co, ) -> NDArray[np.bool]: ... - @overload + @overload # type: ignore[override] def __ne__( self: _CharArray[str_], other: U_co, @@ -155,7 +155,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): other: S_co, ) -> NDArray[np.bool]: ... - @overload + @overload # type: ignore[override] def __ge__( self: _CharArray[str_], other: U_co, @@ -166,7 +166,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): other: S_co, ) -> NDArray[np.bool]: ... - @overload + @overload # type: ignore[override] def __le__( self: _CharArray[str_], other: U_co, @@ -177,7 +177,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): other: S_co, ) -> NDArray[np.bool]: ... - @overload + @overload # type: ignore[override] def __gt__( self: _CharArray[str_], other: U_co, @@ -188,7 +188,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): other: S_co, ) -> NDArray[np.bool]: ... - @overload + @overload # type: ignore[override] def __lt__( self: _CharArray[str_], other: U_co, @@ -199,7 +199,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): other: S_co, ) -> NDArray[np.bool]: ... - @overload + @overload # type: ignore[override] def __add__( self: _CharArray[str_], other: U_co, @@ -210,7 +210,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): other: S_co, ) -> _CharArray[bytes_]: ... - @overload + @overload # type: ignore[override] def __radd__( self: _CharArray[str_], other: U_co, @@ -346,7 +346,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): chars: S_co | None = None, ) -> _CharArray[bytes_]: ... - @overload + @overload # type: ignore[override] def partition( self: _CharArray[str_], sep: U_co, From 7d0dd1d28e8a4f3bc3184b1d427a7e7ea1fa6832 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 13:55:24 +0200 Subject: [PATCH 0727/1018] TYP: Fix `override` mypy errors in `ctypeslib` --- numpy/ctypeslib/_ctypeslib.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/ctypeslib/_ctypeslib.pyi b/numpy/ctypeslib/_ctypeslib.pyi index abcd5a4264e8..2c87833b5501 100644 --- a/numpy/ctypeslib/_ctypeslib.pyi +++ b/numpy/ctypeslib/_ctypeslib.pyi @@ -77,7 +77,7 @@ class _ndptr(ctypes.c_void_p, Generic[_DTypeOptionalT]): _ndim_: ClassVar[int | None] _flags_: ClassVar[list[_FlagsKind] | None] - @overload + @overload # type: ignore[override] @classmethod def from_param(cls: type[_ndptr[None]], obj: NDArray[Any]) -> _ctypes[Any]: ... 
@overload From 97836e4762868010eb1d8bf02c8d9d5bd8b67ce8 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 13:55:35 +0200 Subject: [PATCH 0728/1018] TYP: Fix `override` mypy errors in `ma.*` --- numpy/ma/core.pyi | 68 +++++++++++++++++++++++------------------------ 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index f8e735e9294d..e613c26ee829 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -606,7 +606,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): return_scalar: bool = False, ) -> MaskedArray[_ShapeT, _DTypeT]: ... - @overload # () + @overload # type: ignore[override] # () def view(self, /, dtype: None = None, type: None = None, fill_value: _ScalarLike_co | None = None) -> Self: ... @overload # (dtype: DTypeT) def view( @@ -662,7 +662,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @property def shape(self) -> _ShapeT_co: ... - @shape.setter + @shape.setter # type: ignore[override] def shape(self: MaskedArray[_ShapeT, Any], shape: _ShapeT, /) -> None: ... def __setmask__(self, mask: _ArrayLikeBool_co, copy: bool = False) -> None: ... @@ -707,7 +707,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def filled(self, /, fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... def compressed(self) -> ndarray[tuple[int], _DTypeT_co]: ... - @overload + @overload # type: ignore[override] def compress( self, condition: _ArrayLikeBool_co, @@ -748,7 +748,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __lt__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] # Keep in sync with `ndarray.__add__` - @overload + @overload # type: ignore[override] def __add__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload def __add__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] @@ -796,7 +796,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __add__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... # Keep in sync with `ndarray.__radd__` - @overload # signature equivalent to __add__ + @overload # type: ignore[override] # signature equivalent to __add__ def __radd__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload def __radd__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] @@ -844,7 +844,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __radd__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... # Keep in sync with `ndarray.__sub__` - @overload + @overload # type: ignore[override] def __sub__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload def __sub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] @@ -882,7 +882,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __sub__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... # Keep in sync with `ndarray.__rsub__` - @overload + @overload # type: ignore[override] def __rsub__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload def __rsub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... 
# type: ignore[overload-overlap] @@ -920,7 +920,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __rsub__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... # Keep in sync with `ndarray.__mul__` - @overload + @overload # type: ignore[override] def __mul__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload def __mul__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] @@ -962,7 +962,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __mul__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... # Keep in sync with `ndarray.__rmul__` - @overload # signature equivalent to __mul__ + @overload # type: ignore[override] # signature equivalent to __mul__ def __rmul__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload def __rmul__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] @@ -1004,7 +1004,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __rmul__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... # Keep in sync with `ndarray.__truediv__` - @overload + @overload # type: ignore[override] def __truediv__(self: _MaskedArrayInt_co | _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload def __truediv__(self: _MaskedArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... @@ -1036,7 +1036,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __truediv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... # Keep in sync with `ndarray.__rtruediv__` - @overload + @overload # type: ignore[override] def __rtruediv__(self: _MaskedArrayInt_co | _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload def __rtruediv__(self: _MaskedArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... @@ -1066,7 +1066,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __rtruediv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... # Keep in sync with `ndarray.__floordiv__` - @overload + @overload # type: ignore[override] def __floordiv__(self: _MaskedArray[_RealNumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_RealNumberT]]: ... @overload def __floordiv__(self: _MaskedArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] @@ -1096,7 +1096,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __floordiv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... # Keep in sync with `ndarray.__rfloordiv__` - @overload + @overload # type: ignore[override] def __rfloordiv__(self: _MaskedArray[_RealNumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_RealNumberT]]: ... @overload def __rfloordiv__(self: _MaskedArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] @@ -1124,7 +1124,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __rfloordiv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
# Keep in sync with `ndarray.__pow__` (minus the `mod` parameter) - @overload + @overload # type: ignore[override] def __pow__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload def __pow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] @@ -1156,7 +1156,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __pow__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... # Keep in sync with `ndarray.__rpow__` (minus the `mod` parameter) - @overload + @overload # type: ignore[override] def __rpow__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload def __rpow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] @@ -1189,10 +1189,10 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # @property # type: ignore[misc] - def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... + def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... # type: ignore[override] get_imag: Any @property # type: ignore[misc] - def real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... + def real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... # type: ignore[override] get_real: Any # keep in sync with `np.ma.count` @@ -1290,12 +1290,12 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): copy: bool | None = None, ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... - def resize(self, newshape: Never, refcheck: bool = True, order: bool = False) -> NoReturn: ... + def resize(self, newshape: Never, refcheck: bool = True, order: bool = False) -> NoReturn: ... # type: ignore[override] def put(self, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... def ids(self) -> tuple[int, int]: ... def iscontiguous(self) -> bool: ... - @overload + @overload # type: ignore[override] def all( self, axis: None = None, @@ -1340,7 +1340,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... - @overload + @overload # type: ignore[override] def any( self, axis: None = None, @@ -1423,7 +1423,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def dot(self, b: ArrayLike, out: _ArrayT, strict: bool = False) -> _ArrayT: ... - @overload + @overload # type: ignore[override] def sum( self, /, @@ -1460,7 +1460,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... - @overload + @overload # type: ignore[override] def prod( self, /, @@ -1499,7 +1499,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... - @overload + @overload # type: ignore[override] def mean( self, axis: _ShapeLike | None = None, @@ -1534,7 +1534,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def anom(self, axis: SupportsIndex | None, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, dtype]: ... 
- @overload + @overload # type: ignore[override] def var( self, axis: _ShapeLike | None = None, @@ -1566,7 +1566,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): mean: _ArrayLikeNumber_co | _NoValueType = ..., ) -> _ArrayT: ... - @overload + @overload # type: ignore[override] def std( self, axis: _ShapeLike | None = None, @@ -1606,7 +1606,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def round(self, /, decimals: SupportsIndex = 0, *, out: _ArrayT) -> _ArrayT: ... - def argsort( + def argsort( # type: ignore[override] self, axis: SupportsIndex | _NoValueType = ..., kind: _SortKind | None = None, @@ -1851,7 +1851,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): ) -> _MaskedArray[intp]: ... # Keep in-sync with np.ma.take - @overload + @overload # type: ignore[override] def take( # type: ignore[overload-overlap] self: _MaskedArray[_ScalarT], indices: _IntLike_co, @@ -1943,7 +1943,7 @@ class mvoid(MaskedArray[_ShapeT_co, _DTypeT_co]): def __iter__(self): ... def __len__(self): ... def filled(self, fill_value=None): ... - def tolist(self): ... + def tolist(self): ... # type: ignore[override] def isMaskedArray(x): ... isarray = isMaskedArray @@ -1955,19 +1955,19 @@ class MaskedConstant(MaskedArray[tuple[()], dtype[float64]]): # these overrides are no-ops @override - def __iadd__(self, other: _Ignored, /) -> Self: ... + def __iadd__(self, other: _Ignored, /) -> Self: ... # type: ignore[override] @override - def __isub__(self, other: _Ignored, /) -> Self: ... + def __isub__(self, other: _Ignored, /) -> Self: ... # type: ignore[override] @override - def __imul__(self, other: _Ignored, /) -> Self: ... + def __imul__(self, other: _Ignored, /) -> Self: ... # type: ignore[override] @override def __ifloordiv__(self, other: _Ignored, /) -> Self: ... @override - def __itruediv__(self, other: _Ignored, /) -> Self: ... + def __itruediv__(self, other: _Ignored, /) -> Self: ... # type: ignore[override] @override - def __ipow__(self, other: _Ignored, /) -> Self: ... + def __ipow__(self, other: _Ignored, /) -> Self: ... # type: ignore[override] @override - def __deepcopy__(self, /, memo: _Ignored) -> Self: ... + def __deepcopy__(self, /, memo: _Ignored) -> Self: ... # type: ignore[override] @override def copy(self, /, *args: _Ignored, **kwargs: _Ignored) -> Self: ... From 5238c595468273ae1b8c12d3c96974da8c91a19d Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 13:55:44 +0200 Subject: [PATCH 0729/1018] TYP: Fix `override` mypy errors in `__init__.pyi` --- numpy/__init__.pyi | 73 +++++++++++++++++++++++++--------------------- 1 file changed, 39 insertions(+), 34 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 8d754d917ad5..4c530888a95d 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2222,7 +2222,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def resize(self, /, *new_shape: SupportsIndex, refcheck: builtins.bool = ...) -> None: ... - def setflags(self, write: builtins.bool = ..., align: builtins.bool = ..., uic: builtins.bool = ...) -> None: ... + def setflags(self, write: builtins.bool = ..., align: builtins.bool = ..., uic: builtins.bool = ...) -> None: ... # type: ignore[override] def squeeze( self, @@ -5006,7 +5006,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] # reflected methods. But since they are identical to the non-reflected versions, # these errors appear to be false positives. 
- @overload + @overload # type: ignore[override] def __add__(self, other: _Float64_co, /) -> float64: ... @overload def __add__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @@ -5015,7 +5015,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __add__(self, other: complex, /) -> float64 | complex128: ... - @overload + @overload # type: ignore[override] def __radd__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] @overload def __radd__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @@ -5024,7 +5024,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __radd__(self, other: complex, /) -> float64 | complex128: ... - @overload + @overload # type: ignore[override] def __sub__(self, other: _Float64_co, /) -> float64: ... @overload def __sub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @@ -5033,7 +5033,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __sub__(self, other: complex, /) -> float64 | complex128: ... - @overload + @overload # type: ignore[override] def __rsub__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] @overload def __rsub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @@ -5042,7 +5042,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rsub__(self, other: complex, /) -> float64 | complex128: ... - @overload + @overload # type: ignore[override] def __mul__(self, other: _Float64_co, /) -> float64: ... @overload def __mul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @@ -5051,7 +5051,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __mul__(self, other: complex, /) -> float64 | complex128: ... - @overload + @overload # type: ignore[override] def __rmul__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] @overload def __rmul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @@ -5060,7 +5060,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rmul__(self, other: complex, /) -> float64 | complex128: ... - @overload + @overload # type: ignore[override] def __truediv__(self, other: _Float64_co, /) -> float64: ... @overload def __truediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @@ -5069,7 +5069,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __truediv__(self, other: complex, /) -> float64 | complex128: ... - @overload + @overload # type: ignore[override] def __rtruediv__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] @overload def __rtruediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @@ -5078,7 +5078,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rtruediv__(self, other: complex, /) -> float64 | complex128: ... - @overload + @overload # type: ignore[override] def __floordiv__(self, other: _Float64_co, /) -> float64: ... @overload def __floordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @@ -5096,7 +5096,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rfloordiv__(self, other: complex, /) -> float64 | complex128: ... - @overload + @overload # type: ignore[override] def __pow__(self, other: _Float64_co, mod: None = None, /) -> float64: ... 
@overload def __pow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... @@ -5107,7 +5107,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __pow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... - @overload + @overload # type: ignore[override] def __rpow__(self, other: _Float64_co, mod: None = None, /) -> float64: ... # type: ignore[misc] @overload def __rpow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... # type: ignore[misc] @@ -5153,59 +5153,63 @@ class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]): def __complex__(self, /) -> complex: ... def __abs__(self, /) -> floating[_NBit1 | _NBit2]: ... # type: ignore[override] - @overload + @overload # type: ignore[override] def __add__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... @overload def __add__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload def __add__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - @overload + + @overload # type: ignore[override] def __radd__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... @overload def __radd__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload def __radd__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - @overload + @overload # type: ignore[override] def __sub__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... @overload def __sub__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload def __sub__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - @overload + + @overload # type: ignore[override] def __rsub__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... @overload def __rsub__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload def __rsub__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - @overload + @overload # type: ignore[override] def __mul__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... @overload def __mul__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload def __mul__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - @overload + + @overload # type: ignore[override] def __rmul__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... @overload def __rmul__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload def __rmul__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - @overload + @overload # type: ignore[override] def __truediv__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... @overload def __truediv__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload def __truediv__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - @overload + + @overload # type: ignore[override] def __rtruediv__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... 
@overload def __rtruediv__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload def __rtruediv__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - @overload + @overload # type: ignore[override] def __pow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBit1, _NBit2]: ... @overload def __pow__( @@ -5215,7 +5219,8 @@ class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]): def __pow__( self, other: number[_NBit], mod: None = None, / ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - @overload + + @overload # type: ignore[override] def __rpow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBit1, _NBit2]: ... @overload def __rpow__(self, other: complex, mod: None = None, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... @@ -5242,37 +5247,37 @@ class complex128(complexfloating[_64Bit, _64Bit], complex): def __getnewargs__(self, /) -> tuple[float, float]: ... # complex128-specific operator overrides - @overload + @overload # type: ignore[override] def __add__(self, other: _Complex128_co, /) -> complex128: ... @overload def __add__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... - def __radd__(self, other: _Complex128_co, /) -> complex128: ... + def __radd__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] - @overload + @overload # type: ignore[override] def __sub__(self, other: _Complex128_co, /) -> complex128: ... @overload def __sub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... - def __rsub__(self, other: _Complex128_co, /) -> complex128: ... + def __rsub__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] - @overload + @overload # type: ignore[override] def __mul__(self, other: _Complex128_co, /) -> complex128: ... @overload def __mul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... - def __rmul__(self, other: _Complex128_co, /) -> complex128: ... + def __rmul__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] - @overload + @overload # type: ignore[override] def __truediv__(self, other: _Complex128_co, /) -> complex128: ... @overload def __truediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... - def __rtruediv__(self, other: _Complex128_co, /) -> complex128: ... + def __rtruediv__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] - @overload + @overload # type: ignore[override] def __pow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... @overload def __pow__( self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... - def __rpow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... + def __rpow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... # type: ignore[override] csingle: TypeAlias = complex64 cdouble: TypeAlias = complex128 @@ -5668,7 +5673,7 @@ class ufunc: @property def __qualname__(self) -> LiteralString: ... @property - def __doc__(self) -> str: ... + def __doc__(self) -> str: ... # type: ignore[override] @property def nin(self) -> int: ... 
@property @@ -5995,7 +6000,7 @@ class memmap(ndarray[_ShapeT_co, _DTypeT_co]): def __array_finalize__(self, obj: object) -> None: ... def __array_wrap__( self, - array: memmap[_ShapeT_co, _DTypeT_co], + array: memmap[_ShapeT_co, _DTypeT_co], # type: ignore[override] context: tuple[ufunc, tuple[Any, ...], int] | None = None, return_scalar: builtins.bool = False, ) -> Any: ... From 096975101e8d132370b1669aaac831c7d9922571 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 13:56:00 +0200 Subject: [PATCH 0730/1018] TYP: Enable `override` mypy error code --- tools/stubtest/mypy.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/stubtest/mypy.ini b/tools/stubtest/mypy.ini index 422372300b40..f44e10cb883f 100644 --- a/tools/stubtest/mypy.ini +++ b/tools/stubtest/mypy.ini @@ -20,4 +20,4 @@ local_partial_types = True ; on numerous stubs. To allow running stubtest independently, we temporarily ; ignore these errors here. The goal is to eventually fix these mypy errors ; and remove the ignores. -disable_error_code = overload-cannot-match, overload-overlap, override +disable_error_code = overload-cannot-match, overload-overlap From bddee9203fecf6e411be2cd4a7a0081ce92b8dad Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 14:21:40 +0200 Subject: [PATCH 0731/1018] TYP: Fix `overload-cannot-match` mypy errors in `ctypeslib` --- numpy/ctypeslib/_ctypeslib.pyi | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/ctypeslib/_ctypeslib.pyi b/numpy/ctypeslib/_ctypeslib.pyi index 2c87833b5501..8881141f8ed5 100644 --- a/numpy/ctypeslib/_ctypeslib.pyi +++ b/numpy/ctypeslib/_ctypeslib.pyi @@ -185,7 +185,7 @@ def as_ctypes(obj: intc) -> ctypes.c_int: ... @overload def as_ctypes(obj: long) -> ctypes.c_long: ... @overload -def as_ctypes(obj: longlong) -> ctypes.c_longlong: ... +def as_ctypes(obj: longlong) -> ctypes.c_longlong: ... # type: ignore[overload-cannot-match] @overload def as_ctypes(obj: ubyte) -> ctypes.c_ubyte: ... @overload @@ -195,7 +195,7 @@ def as_ctypes(obj: uintc) -> ctypes.c_uint: ... @overload def as_ctypes(obj: ulong) -> ctypes.c_ulong: ... @overload -def as_ctypes(obj: ulonglong) -> ctypes.c_ulonglong: ... +def as_ctypes(obj: ulonglong) -> ctypes.c_ulonglong: ... # type: ignore[overload-cannot-match] @overload def as_ctypes(obj: single) -> ctypes.c_float: ... @overload @@ -215,7 +215,7 @@ def as_ctypes(obj: NDArray[intc]) -> ctypes.Array[ctypes.c_int]: ... @overload def as_ctypes(obj: NDArray[long]) -> ctypes.Array[ctypes.c_long]: ... @overload -def as_ctypes(obj: NDArray[longlong]) -> ctypes.Array[ctypes.c_longlong]: ... +def as_ctypes(obj: NDArray[longlong]) -> ctypes.Array[ctypes.c_longlong]: ... # type: ignore[overload-cannot-match] @overload def as_ctypes(obj: NDArray[ubyte]) -> ctypes.Array[ctypes.c_ubyte]: ... @overload @@ -225,7 +225,7 @@ def as_ctypes(obj: NDArray[uintc]) -> ctypes.Array[ctypes.c_uint]: ... @overload def as_ctypes(obj: NDArray[ulong]) -> ctypes.Array[ctypes.c_ulong]: ... @overload -def as_ctypes(obj: NDArray[ulonglong]) -> ctypes.Array[ctypes.c_ulonglong]: ... +def as_ctypes(obj: NDArray[ulonglong]) -> ctypes.Array[ctypes.c_ulonglong]: ... # type: ignore[overload-cannot-match] @overload def as_ctypes(obj: NDArray[single]) -> ctypes.Array[ctypes.c_float]: ... 
@overload From 3c70d6ccf757e8f3bf7e3856a7f5a2d3820848c8 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 14:23:57 +0200 Subject: [PATCH 0732/1018] TYP: Fix `overload-cannot-match` mypy errors in `__init__.pyi` --- numpy/__init__.pyi | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 4c530888a95d..86cae3b3e48f 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -5334,7 +5334,7 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __add__(self: timedelta64[_AnyTD64Item], x: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... @overload - def __add__(self, x: timedelta64[None], /) -> timedelta64[None]: ... + def __add__(self, x: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[overload-cannot-match] __radd__ = __add__ @overload @@ -5424,7 +5424,7 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __sub__(self: timedelta64[_AnyTD64Item], b: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... @overload - def __sub__(self, b: timedelta64[None], /) -> timedelta64[None]: ... + def __sub__(self, b: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[overload-cannot-match] # NOTE: subtraction is not commutative, so __rsub__ differs from __sub__. # This confuses mypy, so we ignore the [misc] errors it reports. @@ -5437,7 +5437,7 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __rsub__(self: timedelta64[_AnyTD64Item], a: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... # type: ignore[misc] @overload - def __rsub__(self, a: timedelta64[None], /) -> timedelta64[None]: ... + def __rsub__(self, a: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[overload-cannot-match] @overload def __rsub__(self, a: datetime64[None], /) -> datetime64[None]: ... # type: ignore[misc] From 1fe0bf986d067d5e89bfa9175c2d2260374f6122 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 14:24:33 +0200 Subject: [PATCH 0733/1018] TYP: Fix `overload-cannot-match` mypy errors in `linalg.matmul` --- numpy/linalg/_linalg.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index cc22e0f9dc39..60320b021c71 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -539,10 +539,10 @@ def cross( @overload def matmul(x1: _ArrayLike[_NumberT], x2: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... @overload -def matmul(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... -@overload def matmul(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload +def matmul(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... +@overload def matmul(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload def matmul(x1: _ArrayLikeComplex_co, x2: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... 
From 1c74b87f0ee89bf03f07e62c69e99fda1a04c3d6 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 14:24:45 +0200 Subject: [PATCH 0734/1018] TYP: Enable `overload-cannot-match` mypy error code --- tools/stubtest/mypy.ini | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/stubtest/mypy.ini b/tools/stubtest/mypy.ini index f44e10cb883f..ded91d7d56f3 100644 --- a/tools/stubtest/mypy.ini +++ b/tools/stubtest/mypy.ini @@ -13,6 +13,7 @@ namespace_packages = False enable_error_code = ignore-without-code, redundant-expr, truthy-bool warn_unreachable = False strict = True +strict_bytes = True allow_redefinition_new = True local_partial_types = True @@ -20,4 +21,5 @@ local_partial_types = True ; on numerous stubs. To allow running stubtest independently, we temporarily ; ignore these errors here. The goal is to eventually fix these mypy errors ; and remove the ignores. -disable_error_code = overload-cannot-match, overload-overlap +; See also https://github.com/numpy/numpy/issues/27032 +disable_error_code = overload-overlap From 3aafa2f4c2b75c00b3a55e70933a19802c654594 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 17:40:17 +0200 Subject: [PATCH 0735/1018] TYP: Annotate ``ma.array()`` --- numpy/ma/core.pyi | 56 +++++++++++++++++++++++++++++++++++++---------- 1 file changed, 44 insertions(+), 12 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index aeb32bed01f9..b795aa48f9b0 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1978,19 +1978,51 @@ masked_singleton: Final[MaskedConstant] = ... masked_array: TypeAlias = MaskedArray +# keep in sync with `MaskedArray.__new__` +@overload def array( - data, - dtype=None, - copy=False, - order=None, - mask=..., - fill_value=None, - keep_mask=True, - hard_mask=False, - shrink=True, - subok=True, - ndmin=0, -): ... + data: _ArrayLike[_ScalarT], + dtype: None = None, + copy: bool = False, + order: _OrderKACF | None = None, + mask: _ArrayLikeBool_co = nomask, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool = False, + shrink: bool = True, + subok: bool = True, + ndmin: int = 0, +) -> _MaskedArray[_ScalarT]: ... +@overload +def array( + data: object, + dtype: _DTypeLike[_ScalarT], + copy: bool = False, + order: _OrderKACF | None = None, + mask: _ArrayLikeBool_co = nomask, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool = False, + shrink: bool = True, + subok: bool = True, + ndmin: int = 0, +) -> _MaskedArray[_ScalarT]: ... +@overload +def array( + data: object, + dtype: DTypeLike | None = None, + copy: bool = False, + order: _OrderKACF | None = None, + mask: _ArrayLikeBool_co = nomask, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool = False, + shrink: bool = True, + subok: bool = True, + ndmin: int = 0, +) -> _MaskedArray[_ScalarT]: ... + +# def is_masked(x: object) -> bool: ... 
class _extrema_operation(_MaskedUFunc): From 9c2ca3a3e78124314433a9944feedaadd701002c Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 17:57:51 +0200 Subject: [PATCH 0736/1018] TYP: Annotate ``ma.asarray()`` and ``ma.asanyarray()`` --- numpy/ma/core.pyi | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index b795aa48f9b0..7fef6e29bd24 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -285,6 +285,7 @@ _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=Tr _DTypeT = TypeVar("_DTypeT", bound=dtype) _DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) _ArrayT = TypeVar("_ArrayT", bound=ndarray[Any, Any]) +_MArrayT = TypeVar("_MArrayT", bound=MaskedArray[Any, Any]) _ScalarT = TypeVar("_ScalarT", bound=generic) _ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) _NumberT = TypeVar("_NumberT", bound=number) @@ -2022,6 +2023,24 @@ def array( ndmin: int = 0, ) -> _MaskedArray[_ScalarT]: ... +# keep in sync with `array` +@overload +def asarray(a: _ArrayLike[_ScalarT], dtype: None = None, order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +@overload +def asarray(a: object, dtype: _DTypeLike[_ScalarT], order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +@overload +def asarray(a: object, dtype: DTypeLike | None = None, order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... + +# keep in sync with `asarray` (but note the additional first overload) +@overload +def asanyarray(a: _MArrayT, dtype: None = None) -> _MArrayT: ... +@overload +def asanyarray(a: _ArrayLike[_ScalarT], dtype: None = None) -> _MaskedArray[_ScalarT]: ... +@overload +def asanyarray(a: object, dtype: _DTypeLike[_ScalarT]) -> _MaskedArray[_ScalarT]: ... +@overload +def asanyarray(a: object, dtype: DTypeLike | None = None) -> _MaskedArray[_ScalarT]: ... + # def is_masked(x: object) -> bool: ... @@ -2365,8 +2384,6 @@ def allequal(a: ArrayLike, b: ArrayLike, fill_value: bool = True) -> bool: ... def allclose(a: ArrayLike, b: ArrayLike, masked_equal: bool = True, rtol: float = 1e-5, atol: float = 1e-8) -> bool: ... -def asarray(a, dtype=None, order=None): ... -def asanyarray(a, dtype=None): ... def fromflex(fxarray): ... class _convert2ma: From 144b3cc43eb1f1ec70d98ae307af4720195cfe39 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 23 Oct 2025 18:27:45 +0200 Subject: [PATCH 0737/1018] CI, TYP: stubtest (#30043) * CI: stubtest * CI: upgrade hypothesis before running stubtest to work around a mypy bug * CI: run stubtest using `spin stubtest` --- .github/workflows/stubtest.yml | 64 ++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 .github/workflows/stubtest.yml diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml new file mode 100644 index 000000000000..13554bfc427c --- /dev/null +++ b/.github/workflows/stubtest.yml @@ -0,0 +1,64 @@ +name: stubtest +permissions: read-all + +# Stubtest depends on different branches and paths than mypy does, so we have a separate workflow. 
+ +on: + pull_request: + branches: + - "main" + - "maintenance/2.**" + # Stubtest requires numpy>=2.4 + - "!maintenance/2.[0-3].x" + paths: + - ".github/workflows/stubtest.yml" + - "numpy/**" + - "!numpy/**/tests/**" + - "requirements/test_requirements.txt" + - "tools/stubtest/**" + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + mypy: + # To enable this workflow on a fork, comment out: + if: github.repository == 'numpy/numpy' + + name: stubtest + runs-on: ${{ matrix.os }}-latest + strategy: + fail-fast: false + matrix: + # TODO: consider including macos and windows + os: [ubuntu] + py: ["3.11", "3.14"] + + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + + - uses: astral-sh/setup-uv@2ddd2b9cb38ad8efd50337e8ab201519a34c9f24 # v7.1.1 + with: + python-version: ${{ matrix.py }} + activate-environment: true + cache-dependency-glob: | + requirements/build_requirements.txt + requirements/test_requirements.txt + + - name: uv pip install + run: >- + uv pip install + -r requirements/build_requirements.txt + -r requirements/test_requirements.txt + + - name: spin build + run: spin build -j2 -- -Dallow-noblas=true -Ddisable-optimization=true --vsenv + + - name: spin stubtest + run: spin stubtest From 5fbdeca14c5bc9eb38c6467ee37559ae20442bf2 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 23 Oct 2025 18:31:28 +0200 Subject: [PATCH 0738/1018] CI: Skip test runs if all changes are docs or stubs (#30050) --- .github/workflows/compiler_sanitizers.yml | 4 ++++ .github/workflows/cygwin.yml | 4 ++++ .github/workflows/emscripten.yml | 4 ++++ .github/workflows/linux-ppc64le.yml | 4 ++++ .github/workflows/linux.yml | 4 ++++ .github/workflows/linux_blas.yml | 7 +++++-- .github/workflows/linux_qemu.yml | 4 ++++ .github/workflows/linux_simd.yml | 4 ++++ .github/workflows/macos.yml | 7 +++++-- .github/workflows/windows.yml | 4 ++++ 10 files changed, 42 insertions(+), 4 deletions(-) diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index 2721d77eb4a2..a86ff5ae8b53 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -8,6 +8,10 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' defaults: run: diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index 9d95da102fee..153f18d2956d 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -4,6 +4,10 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index e152870efd7d..159f6b719fde 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -5,6 +5,10 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' env: FORCE_COLOR: 3 diff --git a/.github/workflows/linux-ppc64le.yml b/.github/workflows/linux-ppc64le.yml index a593bcd774a2..9b23c1939b00 100644 --- a/.github/workflows/linux-ppc64le.yml +++ b/.github/workflows/linux-ppc64le.yml @@ -5,6 +5,10 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' workflow_dispatch: permissions: diff --git 
a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 45a9f65bd9f0..5a9fb69c5178 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -14,6 +14,10 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' defaults: run: diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index 98d45ab8c435..c282365ab012 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -40,6 +40,10 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' defaults: run: @@ -213,7 +217,7 @@ jobs: - name: Test run: spin test -j auto -- numpy/linalg --timeout=600 --durations=10 - + netlib-debian: if: github.repository == 'numpy/numpy' runs-on: ubuntu-latest @@ -404,4 +408,3 @@ jobs: - name: Test run: spin test -- numpy/linalg - diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index c11eca8dc5ab..5fd10b73b9be 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -14,6 +14,10 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' workflow_dispatch: defaults: diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index cfc45b8a6da4..de77a8410f18 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -31,6 +31,10 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' defaults: run: diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index f4393e71523c..399bec631713 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -5,7 +5,10 @@ on: branches: - main - maintenance/** - + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' permissions: contents: read # to fetch code (actions/checkout) @@ -146,7 +149,7 @@ jobs: - name: Test (fast tests) if: ${{ matrix.version != '3.14t' || matrix.build_runner[0] != 'macos-14' }} run: spin test -j2 -- --timeout=600 --durations=10 - + - name: Test in multiple threads if: ${{ matrix.version == '3.14t' && matrix.build_runner[0] == 'macos-14' }} run: | diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 886a70e1be2b..96c175c07725 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -5,6 +5,10 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} From 9bb5dd875302f6b8dcbb9166058fa5de6b87eb6b Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 23 Oct 2025 18:35:14 +0200 Subject: [PATCH 0739/1018] TYP, STY: ``polynomial``: reformat the stubs (#30052) [skip azp] [skip circle] [skip cirrus] --- numpy/polynomial/__init__.pyi | 18 +- numpy/polynomial/_polybase.pyi | 35 +-- numpy/polynomial/_polytypes.pyi | 358 ++++++---------------------- numpy/polynomial/chebyshev.pyi | 33 +-- numpy/polynomial/hermite.pyi | 9 +- numpy/polynomial/hermite_e.pyi | 9 +- numpy/polynomial/polynomial.pyi | 30 +-- numpy/polynomial/polyutils.pyi | 406 ++++++++------------------------ 8 files changed, 222 insertions(+), 676 deletions(-) diff --git a/numpy/polynomial/__init__.pyi b/numpy/polynomial/__init__.pyi index 6fb0fb5ec7fa..ad005a2dbe38 100644 --- a/numpy/polynomial/__init__.pyi +++ b/numpy/polynomial/__init__.pyi @@ -10,12 +10,18 @@ from .polynomial import Polynomial __all__ = [ "set_default_printstyle", - "polynomial", 
"Polynomial", - "chebyshev", "Chebyshev", - "legendre", "Legendre", - "hermite", "Hermite", - "hermite_e", "HermiteE", - "laguerre", "Laguerre", + "polynomial", + "Polynomial", + "chebyshev", + "Chebyshev", + "legendre", + "Legendre", + "hermite", + "Hermite", + "hermite_e", + "HermiteE", + "laguerre", + "Laguerre", ] def set_default_printstyle(style: Literal["ascii", "unicode"]) -> None: ... diff --git a/numpy/polynomial/_polybase.pyi b/numpy/polynomial/_polybase.pyi index fcece8376170..b16b06c8a734 100644 --- a/numpy/polynomial/_polybase.pyi +++ b/numpy/polynomial/_polybase.pyi @@ -53,15 +53,12 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): _symbol: str @property def symbol(self, /) -> str: ... - @property @abc.abstractmethod def domain(self) -> _Array2[np.float64 | Any]: ... - @property @abc.abstractmethod def window(self) -> _Array2[np.float64 | Any]: ... - @property @abc.abstractmethod def basis_name(self) -> _NameT_co: ... @@ -77,6 +74,7 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): symbol: str = "x", ) -> None: ... + # @overload def __call__(self, /, arg: _PolyT) -> _PolyT: ... @overload @@ -90,19 +88,21 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): @overload def __call__(self, /, arg: _ArrayLikeCoefObject_co) -> npt.NDArray[np.object_]: ... - def __format__(self, fmt_str: str, /) -> str: ... - def __eq__(self, x: object, /) -> bool: ... - def __ne__(self, x: object, /) -> bool: ... + # unary ops def __neg__(self, /) -> Self: ... def __pos__(self, /) -> Self: ... + + # binary ops def __add__(self, x: _AnyOther, /) -> Self: ... def __sub__(self, x: _AnyOther, /) -> Self: ... def __mul__(self, x: _AnyOther, /) -> Self: ... + def __pow__(self, x: _AnyOther, /) -> Self: ... def __truediv__(self, x: _AnyOther, /) -> Self: ... def __floordiv__(self, x: _AnyOther, /) -> Self: ... def __mod__(self, x: _AnyOther, /) -> Self: ... def __divmod__(self, x: _AnyOther, /) -> _Tuple2[Self]: ... - def __pow__(self, x: _AnyOther, /) -> Self: ... + + # reflected binary ops def __radd__(self, x: _AnyOther, /) -> Self: ... def __rsub__(self, x: _AnyOther, /) -> Self: ... def __rmul__(self, x: _AnyOther, /) -> Self: ... @@ -110,22 +110,29 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): def __rfloordiv__(self, x: _AnyOther, /) -> Self: ... def __rmod__(self, x: _AnyOther, /) -> Self: ... def __rdivmod__(self, x: _AnyOther, /) -> _Tuple2[Self]: ... + + # iterable and sized def __len__(self, /) -> int: ... def __iter__(self, /) -> Iterator[np.float64 | Any]: ... + + # pickling def __getstate__(self, /) -> dict[str, Any]: ... def __setstate__(self, dict: dict[str, Any], /) -> None: ... + # def has_samecoef(self, /, other: ABCPolyBase) -> bool: ... def has_samedomain(self, /, other: ABCPolyBase) -> bool: ... def has_samewindow(self, /, other: ABCPolyBase) -> bool: ... def has_sametype(self, /, other: object) -> TypeIs[Self]: ... + # def copy(self, /) -> Self: ... def degree(self, /) -> int: ... def cutdeg(self, /, deg: int) -> Self: ... def trim(self, /, tol: _FloatLike_co = 0) -> Self: ... def truncate(self, /, size: _AnyInt) -> Self: ... + # @overload def convert( self, @@ -152,8 +159,8 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): window: _SeriesLikeCoef_co | None = None, ) -> Self: ... + # def mapparms(self, /) -> _Tuple2[Any]: ... - def integ( self, /, @@ -161,11 +168,8 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): k: _CoefLike_co | _SeriesLikeCoef_co = [], lbnd: _CoefLike_co | None = None, ) -> Self: ... - def deriv(self, /, m: SupportsIndex = 1) -> Self: ... 
- def roots(self, /) -> _CoefSeries: ... - def linspace( self, /, @@ -173,6 +177,7 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): domain: _SeriesLikeCoef_co | None = None, ) -> _Tuple2[_Series[np.float64 | np.complex128]]: ... + # @overload @classmethod def fit( @@ -211,12 +216,14 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): deg: int | _SeriesLikeInt_co, domain: _SeriesLikeCoef_co | None, rcond: _FloatLike_co, - full: Literal[True], /, + full: Literal[True], + /, w: _SeriesLikeCoef_co | None = None, window: _SeriesLikeCoef_co | None = None, symbol: str = "x", ) -> tuple[Self, Sequence[np.inexact | np.int32]]: ... + # @classmethod def fromroots( cls, @@ -225,7 +232,6 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): window: _SeriesLikeCoef_co | None = None, symbol: str = "x", ) -> Self: ... - @classmethod def identity( cls, @@ -233,7 +239,6 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): window: _SeriesLikeCoef_co | None = None, symbol: str = "x", ) -> Self: ... - @classmethod def basis( cls, @@ -242,7 +247,6 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): window: _SeriesLikeCoef_co | None = None, symbol: str = "x", ) -> Self: ... - @classmethod def cast( cls, @@ -250,7 +254,6 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): domain: _SeriesLikeCoef_co | None = None, window: _SeriesLikeCoef_co | None = None, ) -> Self: ... - @classmethod def _str_term_unicode(cls, /, i: str, arg_str: str) -> str: ... @classmethod diff --git a/numpy/polynomial/_polytypes.pyi b/numpy/polynomial/_polytypes.pyi index 91cb27e3a923..58482aea1be9 100644 --- a/numpy/polynomial/_polytypes.pyi +++ b/numpy/polynomial/_polytypes.pyi @@ -1,6 +1,6 @@ -# ruff: noqa: PYI046, PYI047 +# ruff: noqa: PYI046 -from collections.abc import Callable, Sequence +from collections.abc import Sequence from typing import ( Any, Literal, @@ -40,15 +40,12 @@ _ScalarT = TypeVar("_ScalarT", bound=np.number | np.bool | np.object_) class _SupportsCoefOps(Protocol[_T_contra]): def __eq__(self, x: object, /) -> bool: ... def __ne__(self, x: object, /) -> bool: ... - def __neg__(self, /) -> Self: ... def __pos__(self, /) -> Self: ... - def __add__(self, x: _T_contra, /) -> Self: ... def __sub__(self, x: _T_contra, /) -> Self: ... def __mul__(self, x: _T_contra, /) -> Self: ... def __pow__(self, x: _T_contra, /) -> Self | float: ... - def __radd__(self, x: _T_contra, /) -> Self: ... def __rsub__(self, x: _T_contra, /) -> Self: ... def __rmul__(self, x: _T_contra, /) -> Self: ... @@ -75,41 +72,15 @@ _CoefObjectLike_co: TypeAlias = np.object_ | _SupportsCoefOps[Any] _CoefLike_co: TypeAlias = _NumberLike_co | _CoefObjectLike_co # The term "series" is used here to refer to 1-d arrays of numeric scalars. 
-_SeriesLikeBool_co: TypeAlias = ( - _SupportsArray[np.dtype[np.bool]] - | Sequence[bool | np.bool] -) -_SeriesLikeInt_co: TypeAlias = ( - _SupportsArray[np.dtype[np.integer | np.bool]] - | Sequence[_IntLike_co] -) -_SeriesLikeFloat_co: TypeAlias = ( - _SupportsArray[np.dtype[np.floating | np.integer | np.bool]] - | Sequence[_FloatLike_co] -) -_SeriesLikeComplex_co: TypeAlias = ( - _SupportsArray[np.dtype[np.inexact | np.integer | np.bool]] - | Sequence[_ComplexLike_co] -) -_SeriesLikeObject_co: TypeAlias = ( - _SupportsArray[np.dtype[np.object_]] - | Sequence[_CoefObjectLike_co] -) -_SeriesLikeCoef_co: TypeAlias = ( - _SupportsArray[np.dtype[np.number | np.bool | np.object_]] - | Sequence[_CoefLike_co] -) +_SeriesLikeBool_co: TypeAlias = _SupportsArray[np.dtype[np.bool]] | Sequence[bool | np.bool] +_SeriesLikeInt_co: TypeAlias = _SupportsArray[np.dtype[np.integer | np.bool]] | Sequence[_IntLike_co] +_SeriesLikeFloat_co: TypeAlias = _SupportsArray[np.dtype[np.floating | np.integer | np.bool]] | Sequence[_FloatLike_co] +_SeriesLikeComplex_co: TypeAlias = _SupportsArray[np.dtype[np.number | np.bool]] | Sequence[_ComplexLike_co] +_SeriesLikeObject_co: TypeAlias = _SupportsArray[np.dtype[np.object_]] | Sequence[_CoefObjectLike_co] +_SeriesLikeCoef_co: TypeAlias = _SupportsArray[np.dtype[np.number | np.bool | np.object_]] | Sequence[_CoefLike_co] -_ArrayLikeCoefObject_co: TypeAlias = ( - _CoefObjectLike_co - | _SeriesLikeObject_co - | _NestedSequence[_SeriesLikeObject_co] -) -_ArrayLikeCoef_co: TypeAlias = ( - npt.NDArray[np.number | np.bool | np.object_] - | _ArrayLikeNumber_co - | _ArrayLikeCoefObject_co -) +_ArrayLikeCoefObject_co: TypeAlias = _CoefObjectLike_co | _SeriesLikeObject_co | _NestedSequence[_SeriesLikeObject_co] +_ArrayLikeCoef_co: TypeAlias = npt.NDArray[np.number | np.bool | np.object_] | _ArrayLikeNumber_co | _ArrayLikeCoefObject_co _Line: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] @@ -122,19 +93,9 @@ class _FuncLine(Protocol): @overload def __call__(self, /, off: float, scl: float) -> _Line[np.float64]: ... @overload - def __call__( - self, - /, - off: complex, - scl: complex, - ) -> _Line[np.complex128]: ... + def __call__(self, /, off: complex, scl: complex) -> _Line[np.complex128]: ... @overload - def __call__( - self, - /, - off: _SupportsCoefOps[Any], - scl: _SupportsCoefOps[Any], - ) -> _Line[np.object_]: ... + def __call__(self, /, off: _SupportsCoefOps[Any], scl: _SupportsCoefOps[Any]) -> _Line[np.object_]: ... @type_check_only class _FuncFromRoots(Protocol): @@ -148,33 +109,13 @@ class _FuncFromRoots(Protocol): @type_check_only class _FuncBinOp(Protocol): @overload - def __call__( - self, - /, - c1: _SeriesLikeBool_co, - c2: _SeriesLikeBool_co, - ) -> NoReturn: ... + def __call__(self, /, c1: _SeriesLikeBool_co, c2: _SeriesLikeBool_co) -> NoReturn: ... @overload - def __call__( - self, - /, - c1: _SeriesLikeFloat_co, - c2: _SeriesLikeFloat_co, - ) -> _FloatSeries: ... + def __call__(self, /, c1: _SeriesLikeFloat_co, c2: _SeriesLikeFloat_co) -> _FloatSeries: ... @overload - def __call__( - self, - /, - c1: _SeriesLikeComplex_co, - c2: _SeriesLikeComplex_co, - ) -> _ComplexSeries: ... + def __call__(self, /, c1: _SeriesLikeComplex_co, c2: _SeriesLikeComplex_co) -> _ComplexSeries: ... @overload - def __call__( - self, - /, - c1: _SeriesLikeCoef_co, - c2: _SeriesLikeCoef_co, - ) -> _ObjectSeries: ... + def __call__(self, /, c1: _SeriesLikeCoef_co, c2: _SeriesLikeCoef_co) -> _ObjectSeries: ... 
@type_check_only class _FuncUnOp(Protocol): @@ -197,29 +138,11 @@ class _FuncPoly2Ortho(Protocol): @type_check_only class _FuncPow(Protocol): @overload - def __call__( - self, - /, - c: _SeriesLikeFloat_co, - pow: _IntLike_co, - maxpower: _IntLike_co | None = ..., - ) -> _FloatSeries: ... + def __call__(self, /, c: _SeriesLikeFloat_co, pow: _IntLike_co, maxpower: _IntLike_co | None = ...) -> _FloatSeries: ... @overload - def __call__( - self, - /, - c: _SeriesLikeComplex_co, - pow: _IntLike_co, - maxpower: _IntLike_co | None = ..., - ) -> _ComplexSeries: ... + def __call__(self, /, c: _SeriesLikeComplex_co, pow: _IntLike_co, maxpower: _IntLike_co | None = ...) -> _ComplexSeries: ... @overload - def __call__( - self, - /, - c: _SeriesLikeCoef_co, - pow: _IntLike_co, - maxpower: _IntLike_co | None = ..., - ) -> _ObjectSeries: ... + def __call__(self, /, c: _SeriesLikeCoef_co, pow: _IntLike_co, maxpower: _IntLike_co | None = ...) -> _ObjectSeries: ... @type_check_only class _FuncDer(Protocol): @@ -228,27 +151,27 @@ class _FuncDer(Protocol): self, /, c: _ArrayLikeFloat_co, - m: SupportsIndex = ..., - scl: _FloatLike_co = ..., - axis: SupportsIndex = ..., + m: SupportsIndex = 1, + scl: _FloatLike_co = 1, + axis: SupportsIndex = 0, ) -> _FloatArray: ... @overload def __call__( self, /, c: _ArrayLikeComplex_co, - m: SupportsIndex = ..., - scl: _ComplexLike_co = ..., - axis: SupportsIndex = ..., + m: SupportsIndex = 1, + scl: _ComplexLike_co = 1, + axis: SupportsIndex = 0, ) -> _ComplexArray: ... @overload def __call__( self, /, c: _ArrayLikeCoef_co, - m: SupportsIndex = ..., - scl: _CoefLike_co = ..., - axis: SupportsIndex = ..., + m: SupportsIndex = 1, + scl: _CoefLike_co = 1, + axis: SupportsIndex = 0, ) -> _ObjectArray: ... @type_check_only @@ -258,136 +181,64 @@ class _FuncInteg(Protocol): self, /, c: _ArrayLikeFloat_co, - m: SupportsIndex = ..., - k: _FloatLike_co | _SeriesLikeFloat_co = ..., - lbnd: _FloatLike_co = ..., - scl: _FloatLike_co = ..., - axis: SupportsIndex = ..., + m: SupportsIndex = 1, + k: _FloatLike_co | _SeriesLikeFloat_co = [], + lbnd: _FloatLike_co = 0, + scl: _FloatLike_co = 1, + axis: SupportsIndex = 0, ) -> _FloatArray: ... @overload def __call__( self, /, c: _ArrayLikeComplex_co, - m: SupportsIndex = ..., - k: _ComplexLike_co | _SeriesLikeComplex_co = ..., - lbnd: _ComplexLike_co = ..., - scl: _ComplexLike_co = ..., - axis: SupportsIndex = ..., + m: SupportsIndex = 1, + k: _ComplexLike_co | _SeriesLikeComplex_co = [], + lbnd: _ComplexLike_co = 0, + scl: _ComplexLike_co = 1, + axis: SupportsIndex = 0, ) -> _ComplexArray: ... @overload def __call__( self, /, c: _ArrayLikeCoef_co, - m: SupportsIndex = ..., - k: _CoefLike_co | _SeriesLikeCoef_co = ..., - lbnd: _CoefLike_co = ..., - scl: _CoefLike_co = ..., - axis: SupportsIndex = ..., + m: SupportsIndex = 1, + k: _CoefLike_co | _SeriesLikeCoef_co = [], + lbnd: _CoefLike_co = 0, + scl: _CoefLike_co = 1, + axis: SupportsIndex = 0, ) -> _ObjectArray: ... @type_check_only class _FuncVal(Protocol): @overload - def __call__( - self, - /, - x: _FloatLike_co, - c: _SeriesLikeFloat_co, - tensor: bool = ..., - ) -> np.floating: ... + def __call__(self, /, x: _FloatLike_co, c: _SeriesLikeFloat_co, tensor: bool = True) -> np.floating: ... @overload - def __call__( - self, - /, - x: _NumberLike_co, - c: _SeriesLikeComplex_co, - tensor: bool = ..., - ) -> np.complexfloating: ... + def __call__(self, /, x: _NumberLike_co, c: _SeriesLikeComplex_co, tensor: bool = True) -> np.complexfloating: ... 
@overload - def __call__( - self, - /, - x: _ArrayLikeFloat_co, - c: _ArrayLikeFloat_co, - tensor: bool = ..., - ) -> _FloatArray: ... + def __call__(self, /, x: _ArrayLikeFloat_co, c: _ArrayLikeFloat_co, tensor: bool = True) -> _FloatArray: ... @overload - def __call__( - self, - /, - x: _ArrayLikeComplex_co, - c: _ArrayLikeComplex_co, - tensor: bool = ..., - ) -> _ComplexArray: ... + def __call__(self, /, x: _ArrayLikeComplex_co, c: _ArrayLikeComplex_co, tensor: bool = True) -> _ComplexArray: ... @overload - def __call__( - self, - /, - x: _ArrayLikeCoef_co, - c: _ArrayLikeCoef_co, - tensor: bool = ..., - ) -> _ObjectArray: ... + def __call__(self, /, x: _ArrayLikeCoef_co, c: _ArrayLikeCoef_co, tensor: bool = True) -> _ObjectArray: ... @overload - def __call__( - self, - /, - x: _CoefLike_co, - c: _SeriesLikeObject_co, - tensor: bool = ..., - ) -> _SupportsCoefOps[Any]: ... + def __call__(self, /, x: _CoefLike_co, c: _SeriesLikeObject_co, tensor: bool = True) -> _SupportsCoefOps[Any]: ... @type_check_only class _FuncVal2D(Protocol): @overload - def __call__( - self, - /, - x: _FloatLike_co, - y: _FloatLike_co, - c: _SeriesLikeFloat_co, - ) -> np.floating: ... + def __call__(self, /, x: _FloatLike_co, y: _FloatLike_co, c: _SeriesLikeFloat_co) -> np.floating: ... @overload - def __call__( - self, - /, - x: _NumberLike_co, - y: _NumberLike_co, - c: _SeriesLikeComplex_co, - ) -> np.complexfloating: ... + def __call__(self, /, x: _NumberLike_co, y: _NumberLike_co, c: _SeriesLikeComplex_co) -> np.complexfloating: ... @overload - def __call__( - self, - /, - x: _ArrayLikeFloat_co, - y: _ArrayLikeFloat_co, - c: _ArrayLikeFloat_co, - ) -> _FloatArray: ... + def __call__(self, /, x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, c: _ArrayLikeFloat_co) -> _FloatArray: ... @overload - def __call__( - self, - /, - x: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co, - c: _ArrayLikeComplex_co, - ) -> _ComplexArray: ... + def __call__(self, /, x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, c: _ArrayLikeComplex_co) -> _ComplexArray: ... @overload - def __call__( - self, - /, - x: _ArrayLikeCoef_co, - y: _ArrayLikeCoef_co, - c: _ArrayLikeCoef_co, - ) -> _ObjectArray: ... + def __call__(self, /, x: _ArrayLikeCoef_co, y: _ArrayLikeCoef_co, c: _ArrayLikeCoef_co) -> _ObjectArray: ... @overload - def __call__( - self, - /, - x: _CoefLike_co, - y: _CoefLike_co, - c: _SeriesLikeCoef_co, - ) -> _SupportsCoefOps[Any]: ... + def __call__(self, /, x: _CoefLike_co, y: _CoefLike_co, c: _SeriesLikeCoef_co) -> _SupportsCoefOps[Any]: ... @type_check_only class _FuncVal3D(Protocol): @@ -398,7 +249,7 @@ class _FuncVal3D(Protocol): x: _FloatLike_co, y: _FloatLike_co, z: _FloatLike_co, - c: _SeriesLikeFloat_co + c: _SeriesLikeFloat_co, ) -> np.floating: ... @overload def __call__( @@ -446,78 +297,29 @@ class _FuncVal3D(Protocol): c: _SeriesLikeCoef_co, ) -> _SupportsCoefOps[Any]: ... -_AnyValF: TypeAlias = Callable[ - [npt.ArrayLike, npt.ArrayLike, bool], - _CoefArray, -] - @type_check_only class _FuncVander(Protocol): @overload - def __call__( - self, - /, - x: _ArrayLikeFloat_co, - deg: SupportsIndex, - ) -> _FloatArray: ... + def __call__(self, /, x: _ArrayLikeFloat_co, deg: SupportsIndex) -> _FloatArray: ... @overload - def __call__( - self, - /, - x: _ArrayLikeComplex_co, - deg: SupportsIndex, - ) -> _ComplexArray: ... + def __call__(self, /, x: _ArrayLikeComplex_co, deg: SupportsIndex) -> _ComplexArray: ... @overload - def __call__( - self, - /, - x: _ArrayLikeCoef_co, - deg: SupportsIndex, - ) -> _ObjectArray: ... 
+ def __call__(self, /, x: _ArrayLikeCoef_co, deg: SupportsIndex) -> _ObjectArray: ... @overload - def __call__( - self, - /, - x: npt.ArrayLike, - deg: SupportsIndex, - ) -> _CoefArray: ... + def __call__(self, /, x: npt.ArrayLike, deg: SupportsIndex) -> _CoefArray: ... _AnyDegrees: TypeAlias = Sequence[SupportsIndex] @type_check_only class _FuncVander2D(Protocol): @overload - def __call__( - self, - /, - x: _ArrayLikeFloat_co, - y: _ArrayLikeFloat_co, - deg: _AnyDegrees, - ) -> _FloatArray: ... + def __call__(self, /, x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, deg: _AnyDegrees) -> _FloatArray: ... @overload - def __call__( - self, - /, - x: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co, - deg: _AnyDegrees, - ) -> _ComplexArray: ... + def __call__(self, /, x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, deg: _AnyDegrees) -> _ComplexArray: ... @overload - def __call__( - self, - /, - x: _ArrayLikeCoef_co, - y: _ArrayLikeCoef_co, - deg: _AnyDegrees, - ) -> _ObjectArray: ... + def __call__(self, /, x: _ArrayLikeCoef_co, y: _ArrayLikeCoef_co, deg: _AnyDegrees) -> _ObjectArray: ... @overload - def __call__( - self, - /, - x: npt.ArrayLike, - y: npt.ArrayLike, - deg: _AnyDegrees, - ) -> _CoefArray: ... + def __call__(self, /, x: npt.ArrayLike, y: npt.ArrayLike, deg: _AnyDegrees) -> _CoefArray: ... @type_check_only class _FuncVander3D(Protocol): @@ -596,7 +398,6 @@ class _FuncFit(Protocol): full: Literal[True], w: _SeriesLikeFloat_co | None = ..., ) -> tuple[_FloatArray, _FullFitResult]: ... - @overload def __call__( self, @@ -631,7 +432,6 @@ class _FuncFit(Protocol): full: Literal[True], w: _SeriesLikeFloat_co | None = ..., ) -> tuple[_ComplexArray, _FullFitResult]: ... - @overload def __call__( self, @@ -670,17 +470,9 @@ class _FuncFit(Protocol): @type_check_only class _FuncRoots(Protocol): @overload - def __call__( - self, - /, - c: _SeriesLikeFloat_co, - ) -> _Series[np.float64]: ... + def __call__(self, /, c: _SeriesLikeFloat_co) -> _Series[np.float64]: ... @overload - def __call__( - self, - /, - c: _SeriesLikeComplex_co, - ) -> _Series[np.complex128]: ... + def __call__(self, /, c: _SeriesLikeComplex_co) -> _Series[np.complex128]: ... @overload def __call__(self, /, c: _SeriesLikeCoef_co) -> _ObjectSeries: ... @@ -689,27 +481,15 @@ _Companion: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] @type_check_only class _FuncCompanion(Protocol): @overload - def __call__( - self, - /, - c: _SeriesLikeFloat_co, - ) -> _Companion[np.float64]: ... + def __call__(self, /, c: _SeriesLikeFloat_co) -> _Companion[np.float64]: ... @overload - def __call__( - self, - /, - c: _SeriesLikeComplex_co, - ) -> _Companion[np.complex128]: ... + def __call__(self, /, c: _SeriesLikeComplex_co) -> _Companion[np.complex128]: ... @overload def __call__(self, /, c: _SeriesLikeCoef_co) -> _Companion[np.object_]: ... @type_check_only class _FuncGauss(Protocol): - def __call__( - self, - /, - deg: SupportsIndex, - ) -> _Tuple2[_Series[np.float64]]: ... + def __call__(self, /, deg: SupportsIndex) -> _Tuple2[_Series[np.float64]]: ... 
@type_check_only class _FuncWeight(Protocol): diff --git a/numpy/polynomial/chebyshev.pyi b/numpy/polynomial/chebyshev.pyi index 35043a001f10..d70842020ebe 100644 --- a/numpy/polynomial/chebyshev.pyi +++ b/numpy/polynomial/chebyshev.pyi @@ -82,16 +82,12 @@ __all__ = [ ] _NumberOrObjectT = TypeVar("_NumberOrObjectT", bound=np.number | np.object_) +_CoefScalarT = TypeVar("_CoefScalarT", bound=np.number | np.bool | np.object_) + def _cseries_to_zseries(c: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... def _zseries_to_cseries(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... -def _zseries_mul( - z1: npt.NDArray[_NumberOrObjectT], - z2: npt.NDArray[_NumberOrObjectT], -) -> _Series[_NumberOrObjectT]: ... -def _zseries_div( - z1: npt.NDArray[_NumberOrObjectT], - z2: npt.NDArray[_NumberOrObjectT], -) -> _Series[_NumberOrObjectT]: ... +def _zseries_mul(z1: npt.NDArray[_NumberOrObjectT], z2: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... +def _zseries_div(z1: npt.NDArray[_NumberOrObjectT], z2: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... def _zseries_der(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... def _zseries_int(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... @@ -129,8 +125,7 @@ chebweight: Final[_FuncWeight] = ... chebpts1: Final[_FuncPts] = ... chebpts2: Final[_FuncPts] = ... -# keep in sync with `Chebyshev.interpolate` -_RT = TypeVar("_RT", bound=np.number | np.bool | np.object_) +# keep in sync with `Chebyshev.interpolate` (minus `domain` parameter) @overload def chebinterpolate( func: np.ufunc, @@ -139,16 +134,16 @@ def chebinterpolate( ) -> npt.NDArray[np.float64 | np.complex128 | np.object_]: ... @overload def chebinterpolate( - func: Callable[[npt.NDArray[np.float64]], _RT], + func: Callable[[npt.NDArray[np.float64]], _CoefScalarT], deg: _IntLike_co, args: tuple[()] = (), -) -> npt.NDArray[_RT]: ... +) -> npt.NDArray[_CoefScalarT]: ... @overload def chebinterpolate( - func: Callable[Concatenate[npt.NDArray[np.float64], ...], _RT], + func: Callable[Concatenate[npt.NDArray[np.float64], ...], _CoefScalarT], deg: _IntLike_co, args: Iterable[Any], -) -> npt.NDArray[_RT]: ... +) -> npt.NDArray[_CoefScalarT]: ... class Chebyshev(ABCPolyBase[L["T"]]): basis_name: ClassVar[L["T"]] = "T" # pyright: ignore[reportIncompatibleMethodOverride] @@ -168,10 +163,7 @@ class Chebyshev(ABCPolyBase[L["T"]]): @classmethod def interpolate( cls, - func: Callable[ - Concatenate[npt.NDArray[np.float64], ...], - _CoefSeries, - ], + func: Callable[Concatenate[npt.NDArray[np.float64], ...], _CoefSeries], deg: _IntLike_co, domain: _SeriesLikeCoef_co | None = None, *, @@ -181,10 +173,7 @@ class Chebyshev(ABCPolyBase[L["T"]]): @classmethod def interpolate( cls, - func: Callable[ - Concatenate[npt.NDArray[np.float64], ...], - _CoefSeries, - ], + func: Callable[Concatenate[npt.NDArray[np.float64], ...], _CoefSeries], deg: _IntLike_co, domain: _SeriesLikeCoef_co | None, args: Iterable[Any], diff --git a/numpy/polynomial/hermite.pyi b/numpy/polynomial/hermite.pyi index 9d8a63a6b95d..04b8238735dd 100644 --- a/numpy/polynomial/hermite.pyi +++ b/numpy/polynomial/hermite.pyi @@ -1,6 +1,7 @@ from typing import Any, ClassVar, Final, Literal as L, TypeVar import numpy as np +from numpy._typing import _Shape from ._polybase import ABCPolyBase from ._polytypes import ( @@ -62,6 +63,8 @@ __all__ = [ "hermweight", ] +_ShapeT = TypeVar("_ShapeT", bound=_Shape) + poly2herm: Final[_FuncPoly2Ortho] = ... 
herm2poly: Final[_FuncUnOp] = ... @@ -92,11 +95,7 @@ hermfit: Final[_FuncFit] = ... hermcompanion: Final[_FuncCompanion] = ... hermroots: Final[_FuncRoots] = ... -_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) -def _normed_hermite_n( - x: np.ndarray[_ShapeT, np.dtype[np.float64]], - n: int | np.intp, -) -> np.ndarray[_ShapeT, np.dtype[np.float64]]: ... +def _normed_hermite_n(x: np.ndarray[_ShapeT, np.dtype[np.float64]], n: int) -> np.ndarray[_ShapeT, np.dtype[np.float64]]: ... hermgauss: Final[_FuncGauss] = ... hermweight: Final[_FuncWeight] = ... diff --git a/numpy/polynomial/hermite_e.pyi b/numpy/polynomial/hermite_e.pyi index b93975f246da..b996de52c6da 100644 --- a/numpy/polynomial/hermite_e.pyi +++ b/numpy/polynomial/hermite_e.pyi @@ -1,6 +1,7 @@ from typing import Any, ClassVar, Final, Literal as L, TypeVar import numpy as np +from numpy._typing import _Shape from ._polybase import ABCPolyBase from ._polytypes import ( @@ -62,6 +63,8 @@ __all__ = [ "hermeweight", ] +_ShapeT = TypeVar("_ShapeT", bound=_Shape) + poly2herme: Final[_FuncPoly2Ortho] = ... herme2poly: Final[_FuncUnOp] = ... @@ -92,11 +95,7 @@ hermefit: Final[_FuncFit] = ... hermecompanion: Final[_FuncCompanion] = ... hermeroots: Final[_FuncRoots] = ... -_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) -def _normed_hermite_e_n( - x: np.ndarray[_ShapeT, np.dtype[np.float64]], - n: int | np.intp, -) -> np.ndarray[_ShapeT, np.dtype[np.float64]]: ... +def _normed_hermite_e_n(x: np.ndarray[_ShapeT, np.dtype[np.float64]], n: int) -> np.ndarray[_ShapeT, np.dtype[np.float64]]: ... hermegauss: Final[_FuncGauss] = ... hermeweight: Final[_FuncWeight] = ... diff --git a/numpy/polynomial/polynomial.pyi b/numpy/polynomial/polynomial.pyi index ddd10fa4282e..86f288468a15 100644 --- a/numpy/polynomial/polynomial.pyi +++ b/numpy/polynomial/polynomial.pyi @@ -84,35 +84,15 @@ polyval2d: Final[_FuncVal2D] = ... polyval3d: Final[_FuncVal3D] = ... @overload -def polyvalfromroots( - x: _FloatLike_co, - r: _FloatLike_co, - tensor: bool = True, -) -> np.float64 | Any: ... +def polyvalfromroots(x: _FloatLike_co, r: _FloatLike_co, tensor: bool = True) -> np.float64 | Any: ... @overload -def polyvalfromroots( - x: _NumberLike_co, - r: _NumberLike_co, - tensor: bool = True, -) -> np.complex128 | Any: ... +def polyvalfromroots(x: _NumberLike_co, r: _NumberLike_co, tensor: bool = True) -> np.complex128 | Any: ... @overload -def polyvalfromroots( - x: _ArrayLikeFloat_co, - r: _ArrayLikeFloat_co, - tensor: bool = True, -) -> npt.NDArray[np.float64 | Any]: ... +def polyvalfromroots(x: _ArrayLikeFloat_co, r: _ArrayLikeFloat_co, tensor: bool = True) -> npt.NDArray[np.float64 | Any]: ... @overload -def polyvalfromroots( - x: _ArrayLikeNumber_co, - r: _ArrayLikeNumber_co, - tensor: bool = True, -) -> npt.NDArray[np.complex128 | Any]: ... +def polyvalfromroots(x: _ArrayLikeNumber_co, r: _ArrayLikeNumber_co, tensor: bool = True) -> npt.NDArray[np.complex128 | Any]: ... @overload -def polyvalfromroots( - x: _ArrayLikeCoef_co, - r: _ArrayLikeCoef_co, - tensor: bool = True, -) -> npt.NDArray[np.object_ | Any]: ... +def polyvalfromroots(x: _ArrayLikeCoef_co, r: _ArrayLikeCoef_co, tensor: bool = True) -> npt.NDArray[np.object_ | Any]: ... polygrid2d: Final[_FuncVal2D] = ... polygrid3d: Final[_FuncVal3D] = ... 
diff --git a/numpy/polynomial/polyutils.pyi b/numpy/polynomial/polyutils.pyi index fd818ce0c90d..5487bfa4c2ec 100644 --- a/numpy/polynomial/polyutils.pyi +++ b/numpy/polynomial/polyutils.pyi @@ -13,7 +13,6 @@ from numpy._typing import ( from ._polytypes import ( _AnyInt, - _AnyValF, _Array2, _ArrayLikeCoef_co, _CoefArray, @@ -36,190 +35,96 @@ from ._polytypes import ( _Tuple2, ) -__all__: Final[Sequence[str]] = [ - "as_series", - "format_float", - "getdomain", - "mapdomain", - "mapparms", - "trimcoef", - "trimseq", -] +__all__ = ["as_series", "format_float", "getdomain", "mapdomain", "mapparms", "trimcoef", "trimseq"] +_SeqT = TypeVar("_SeqT", bound=_CoefArray | Sequence[_CoefLike_co]) + +_AnyValF: TypeAlias = Callable[[npt.ArrayLike, npt.ArrayLike, bool], _CoefArray] _AnyLineF: TypeAlias = Callable[[_CoefLike_co, _CoefLike_co], _CoefArray] _AnyMulF: TypeAlias = Callable[[npt.ArrayLike, npt.ArrayLike], _CoefArray] _AnyVanderF: TypeAlias = Callable[[npt.ArrayLike, SupportsIndex], _CoefArray] @overload -def as_series( - alist: npt.NDArray[np.integer] | _FloatArray, - trim: bool = True, -) -> list[_FloatSeries]: ... -@overload -def as_series( - alist: _ComplexArray, - trim: bool = True, -) -> list[_ComplexSeries]: ... -@overload -def as_series( - alist: _ObjectArray, - trim: bool = True, -) -> list[_ObjectSeries]: ... -@overload -def as_series( # type: ignore[overload-overlap] - alist: Iterable[_FloatArray | npt.NDArray[np.integer]], - trim: bool = True, -) -> list[_FloatSeries]: ... -@overload -def as_series( - alist: Iterable[_ComplexArray], - trim: bool = True, -) -> list[_ComplexSeries]: ... -@overload -def as_series( - alist: Iterable[_ObjectArray], - trim: bool = True, -) -> list[_ObjectSeries]: ... -@overload -def as_series( # type: ignore[overload-overlap] - alist: Iterable[_SeriesLikeFloat_co | float], - trim: bool = True, -) -> list[_FloatSeries]: ... -@overload -def as_series( - alist: Iterable[_SeriesLikeComplex_co | complex], - trim: bool = True, -) -> list[_ComplexSeries]: ... -@overload -def as_series( - alist: Iterable[_SeriesLikeCoef_co | object], - trim: bool = True, -) -> list[_ObjectSeries]: ... +def as_series(alist: npt.NDArray[np.integer] | _FloatArray, trim: bool = True) -> list[_FloatSeries]: ... +@overload +def as_series(alist: _ComplexArray, trim: bool = True) -> list[_ComplexSeries]: ... +@overload +def as_series(alist: _ObjectArray, trim: bool = True) -> list[_ObjectSeries]: ... +@overload +def as_series(alist: Iterable[_FloatArray | npt.NDArray[np.integer]], trim: bool = True) -> list[_FloatSeries]: ... +@overload +def as_series(alist: Iterable[_ComplexArray], trim: bool = True) -> list[_ComplexSeries]: ... +@overload +def as_series(alist: Iterable[_ObjectArray], trim: bool = True) -> list[_ObjectSeries]: ... +@overload +def as_series(alist: Iterable[_SeriesLikeFloat_co | float], trim: bool = True) -> list[_FloatSeries]: ... +@overload +def as_series(alist: Iterable[_SeriesLikeComplex_co | complex], trim: bool = True) -> list[_ComplexSeries]: ... +@overload +def as_series(alist: Iterable[_SeriesLikeCoef_co | object], trim: bool = True) -> list[_ObjectSeries]: ... -_T_seq = TypeVar("_T_seq", bound=_CoefArray | Sequence[_CoefLike_co]) -def trimseq(seq: _T_seq) -> _T_seq: ... +# +def trimseq(seq: _SeqT) -> _SeqT: ... +# @overload -def trimcoef( # type: ignore[overload-overlap] - c: npt.NDArray[np.integer] | _FloatArray, - tol: _FloatLike_co = 0, -) -> _FloatSeries: ... 
+def trimcoef(c: npt.NDArray[np.integer] | _FloatArray, tol: _FloatLike_co = 0) -> _FloatSeries: ... @overload -def trimcoef( - c: _ComplexArray, - tol: _FloatLike_co = 0, -) -> _ComplexSeries: ... +def trimcoef(c: _ComplexArray, tol: _FloatLike_co = 0) -> _ComplexSeries: ... @overload -def trimcoef( - c: _ObjectArray, - tol: _FloatLike_co = 0, -) -> _ObjectSeries: ... +def trimcoef(c: _ObjectArray, tol: _FloatLike_co = 0) -> _ObjectSeries: ... @overload -def trimcoef( # type: ignore[overload-overlap] - c: _SeriesLikeFloat_co | float, - tol: _FloatLike_co = 0, -) -> _FloatSeries: ... +def trimcoef(c: _SeriesLikeFloat_co | float, tol: _FloatLike_co = 0) -> _FloatSeries: ... @overload -def trimcoef( - c: _SeriesLikeComplex_co | complex, - tol: _FloatLike_co = 0, -) -> _ComplexSeries: ... +def trimcoef(c: _SeriesLikeComplex_co | complex, tol: _FloatLike_co = 0) -> _ComplexSeries: ... @overload -def trimcoef( - c: _SeriesLikeCoef_co | object, - tol: _FloatLike_co = 0, -) -> _ObjectSeries: ... +def trimcoef(c: _SeriesLikeCoef_co | object, tol: _FloatLike_co = 0) -> _ObjectSeries: ... +# @overload -def getdomain( # type: ignore[overload-overlap] - x: _FloatArray | npt.NDArray[np.integer], -) -> _Array2[np.float64]: ... +def getdomain(x: _FloatArray | npt.NDArray[np.integer]) -> _Array2[np.float64]: ... @overload -def getdomain( - x: _ComplexArray, -) -> _Array2[np.complex128]: ... +def getdomain(x: _ComplexArray) -> _Array2[np.complex128]: ... @overload -def getdomain( - x: _ObjectArray, -) -> _Array2[np.object_]: ... +def getdomain(x: _ObjectArray) -> _Array2[np.object_]: ... @overload -def getdomain( # type: ignore[overload-overlap] - x: _SeriesLikeFloat_co | float, -) -> _Array2[np.float64]: ... +def getdomain(x: _SeriesLikeFloat_co | float) -> _Array2[np.float64]: ... @overload -def getdomain( - x: _SeriesLikeComplex_co | complex, -) -> _Array2[np.complex128]: ... +def getdomain(x: _SeriesLikeComplex_co | complex) -> _Array2[np.complex128]: ... @overload -def getdomain( - x: _SeriesLikeCoef_co | object, -) -> _Array2[np.object_]: ... +def getdomain(x: _SeriesLikeCoef_co | object) -> _Array2[np.object_]: ... +# @overload -def mapparms( # type: ignore[overload-overlap] - old: npt.NDArray[np.floating | np.integer], - new: npt.NDArray[np.floating | np.integer], -) -> _Tuple2[np.floating]: ... +def mapparms(old: npt.NDArray[np.floating | np.integer], new: npt.NDArray[np.floating | np.integer]) -> _Tuple2[np.floating]: ... @overload -def mapparms( - old: npt.NDArray[np.number], - new: npt.NDArray[np.number], -) -> _Tuple2[np.complexfloating]: ... +def mapparms(old: npt.NDArray[np.number], new: npt.NDArray[np.number]) -> _Tuple2[np.complexfloating]: ... @overload -def mapparms( - old: npt.NDArray[np.object_ | np.number], - new: npt.NDArray[np.object_ | np.number], -) -> _Tuple2[object]: ... -@overload -def mapparms( # type: ignore[overload-overlap] - old: Sequence[float], - new: Sequence[float], -) -> _Tuple2[float]: ... -@overload -def mapparms( - old: Sequence[complex], - new: Sequence[complex], -) -> _Tuple2[complex]: ... -@overload -def mapparms( - old: _SeriesLikeFloat_co, - new: _SeriesLikeFloat_co, -) -> _Tuple2[np.floating]: ... -@overload -def mapparms( - old: _SeriesLikeComplex_co, - new: _SeriesLikeComplex_co, -) -> _Tuple2[np.complexfloating]: ... -@overload -def mapparms( - old: _SeriesLikeCoef_co, - new: _SeriesLikeCoef_co, -) -> _Tuple2[object]: ... +def mapparms(old: npt.NDArray[np.object_ | np.number], new: npt.NDArray[np.object_ | np.number]) -> _Tuple2[object]: ... 
+@overload +def mapparms(old: Sequence[float], new: Sequence[float]) -> _Tuple2[float]: ... +@overload +def mapparms(old: Sequence[complex], new: Sequence[complex]) -> _Tuple2[complex]: ... +@overload +def mapparms(old: _SeriesLikeFloat_co, new: _SeriesLikeFloat_co) -> _Tuple2[np.floating]: ... +@overload +def mapparms(old: _SeriesLikeComplex_co, new: _SeriesLikeComplex_co) -> _Tuple2[np.complexfloating]: ... +@overload +def mapparms(old: _SeriesLikeCoef_co, new: _SeriesLikeCoef_co) -> _Tuple2[object]: ... +# @overload -def mapdomain( # type: ignore[overload-overlap] - x: _FloatLike_co, - old: _SeriesLikeFloat_co, - new: _SeriesLikeFloat_co, -) -> np.floating: ... +def mapdomain(x: _FloatLike_co, old: _SeriesLikeFloat_co, new: _SeriesLikeFloat_co) -> np.floating: ... @overload -def mapdomain( - x: _NumberLike_co, - old: _SeriesLikeComplex_co, - new: _SeriesLikeComplex_co, -) -> np.complexfloating: ... +def mapdomain(x: _NumberLike_co, old: _SeriesLikeComplex_co, new: _SeriesLikeComplex_co) -> np.complexfloating: ... @overload -def mapdomain( # type: ignore[overload-overlap] +def mapdomain( x: npt.NDArray[np.floating | np.integer], old: npt.NDArray[np.floating | np.integer], new: npt.NDArray[np.floating | np.integer], ) -> _FloatSeries: ... @overload -def mapdomain( - x: npt.NDArray[np.number], - old: npt.NDArray[np.number], - new: npt.NDArray[np.number], -) -> _ComplexSeries: ... +def mapdomain(x: npt.NDArray[np.number], old: npt.NDArray[np.number], new: npt.NDArray[np.number]) -> _ComplexSeries: ... @overload def mapdomain( x: npt.NDArray[np.object_ | np.number], @@ -227,34 +132,16 @@ def mapdomain( new: npt.NDArray[np.object_ | np.number], ) -> _ObjectSeries: ... @overload -def mapdomain( # type: ignore[overload-overlap] - x: _SeriesLikeFloat_co, - old: _SeriesLikeFloat_co, - new: _SeriesLikeFloat_co, -) -> _FloatSeries: ... +def mapdomain(x: _SeriesLikeFloat_co, old: _SeriesLikeFloat_co, new: _SeriesLikeFloat_co) -> _FloatSeries: ... @overload -def mapdomain( - x: _SeriesLikeComplex_co, - old: _SeriesLikeComplex_co, - new: _SeriesLikeComplex_co, -) -> _ComplexSeries: ... +def mapdomain(x: _SeriesLikeComplex_co, old: _SeriesLikeComplex_co, new: _SeriesLikeComplex_co) -> _ComplexSeries: ... @overload -def mapdomain( - x: _SeriesLikeCoef_co, - old: _SeriesLikeCoef_co, - new: _SeriesLikeCoef_co, -) -> _ObjectSeries: ... +def mapdomain(x: _SeriesLikeCoef_co, old: _SeriesLikeCoef_co, new: _SeriesLikeCoef_co) -> _ObjectSeries: ... @overload -def mapdomain( - x: _CoefLike_co, - old: _SeriesLikeCoef_co, - new: _SeriesLikeCoef_co, -) -> object: ... +def mapdomain(x: _CoefLike_co, old: _SeriesLikeCoef_co, new: _SeriesLikeCoef_co) -> object: ... -def _nth_slice( - i: SupportsIndex, - ndim: SupportsIndex, -) -> tuple[slice | None, ...]: ... +# +def _nth_slice(i: SupportsIndex, ndim: SupportsIndex) -> tuple[slice | None, ...]: ... # keep in sync with `vander_nd_flat` @overload @@ -310,168 +197,68 @@ def _vander_nd_flat( # keep in sync with `._polytypes._FuncFromRoots` @overload -def _fromroots( # type: ignore[overload-overlap] - line_f: _AnyLineF, - mul_f: _AnyMulF, - roots: _SeriesLikeFloat_co, -) -> _FloatSeries: ... +def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeFloat_co) -> _FloatSeries: ... @overload -def _fromroots( - line_f: _AnyLineF, - mul_f: _AnyMulF, - roots: _SeriesLikeComplex_co, -) -> _ComplexSeries: ... +def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeComplex_co) -> _ComplexSeries: ... 
@overload -def _fromroots( - line_f: _AnyLineF, - mul_f: _AnyMulF, - roots: _SeriesLikeObject_co, -) -> _ObjectSeries: ... +def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeObject_co) -> _ObjectSeries: ... @overload -def _fromroots( - line_f: _AnyLineF, - mul_f: _AnyMulF, - roots: _SeriesLikeCoef_co, -) -> _CoefSeries: ... +def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeCoef_co) -> _CoefSeries: ... # keep in sync with `_gridnd` @overload -def _valnd( - val_f: _AnyValF, - c: _SeriesLikeFloat_co, - *args: _FloatLike_co, -) -> np.floating: ... -@overload -def _valnd( - val_f: _AnyValF, - c: _SeriesLikeComplex_co, - *args: _NumberLike_co, -) -> np.complexfloating: ... -@overload -def _valnd( - val_f: _AnyValF, - c: _ArrayLikeFloat_co, - *args: _ArrayLikeFloat_co, -) -> _FloatArray: ... +def _valnd(val_f: _AnyValF, c: _SeriesLikeFloat_co, *args: _FloatLike_co) -> np.floating: ... @overload -def _valnd( - val_f: _AnyValF, - c: _ArrayLikeComplex_co, - *args: _ArrayLikeComplex_co, -) -> _ComplexArray: ... +def _valnd(val_f: _AnyValF, c: _SeriesLikeComplex_co, *args: _NumberLike_co) -> np.complexfloating: ... @overload -def _valnd( - val_f: _AnyValF, - c: _SeriesLikeObject_co, - *args: _CoefObjectLike_co, -) -> _SupportsCoefOps[Any]: ... +def _valnd(val_f: _AnyValF, c: _ArrayLikeFloat_co, *args: _ArrayLikeFloat_co) -> _FloatArray: ... @overload -def _valnd( - val_f: _AnyValF, - c: _ArrayLikeCoef_co, - *args: _ArrayLikeCoef_co, -) -> _ObjectArray: ... +def _valnd(val_f: _AnyValF, c: _ArrayLikeComplex_co, *args: _ArrayLikeComplex_co) -> _ComplexArray: ... +@overload +def _valnd(val_f: _AnyValF, c: _SeriesLikeObject_co, *args: _CoefObjectLike_co) -> _SupportsCoefOps[Any]: ... +@overload +def _valnd(val_f: _AnyValF, c: _ArrayLikeCoef_co, *args: _ArrayLikeCoef_co) -> _ObjectArray: ... # keep in sync with `_valnd` @overload -def _gridnd( - val_f: _AnyValF, - c: _SeriesLikeFloat_co, - *args: _FloatLike_co, -) -> np.floating: ... -@overload -def _gridnd( - val_f: _AnyValF, - c: _SeriesLikeComplex_co, - *args: _NumberLike_co, -) -> np.complexfloating: ... -@overload -def _gridnd( - val_f: _AnyValF, - c: _ArrayLikeFloat_co, - *args: _ArrayLikeFloat_co, -) -> _FloatArray: ... +def _gridnd(val_f: _AnyValF, c: _SeriesLikeFloat_co, *args: _FloatLike_co) -> np.floating: ... @overload -def _gridnd( - val_f: _AnyValF, - c: _ArrayLikeComplex_co, - *args: _ArrayLikeComplex_co, -) -> _ComplexArray: ... +def _gridnd(val_f: _AnyValF, c: _SeriesLikeComplex_co, *args: _NumberLike_co) -> np.complexfloating: ... @overload -def _gridnd( - val_f: _AnyValF, - c: _SeriesLikeObject_co, - *args: _CoefObjectLike_co, -) -> _SupportsCoefOps[Any]: ... +def _gridnd(val_f: _AnyValF, c: _ArrayLikeFloat_co, *args: _ArrayLikeFloat_co) -> _FloatArray: ... @overload -def _gridnd( - val_f: _AnyValF, - c: _ArrayLikeCoef_co, - *args: _ArrayLikeCoef_co, -) -> _ObjectArray: ... +def _gridnd(val_f: _AnyValF, c: _ArrayLikeComplex_co, *args: _ArrayLikeComplex_co) -> _ComplexArray: ... +@overload +def _gridnd(val_f: _AnyValF, c: _SeriesLikeObject_co, *args: _CoefObjectLike_co) -> _SupportsCoefOps[Any]: ... +@overload +def _gridnd(val_f: _AnyValF, c: _ArrayLikeCoef_co, *args: _ArrayLikeCoef_co) -> _ObjectArray: ... # keep in sync with `_polytypes._FuncBinOp` @overload -def _div( # type: ignore[overload-overlap] - mul_f: _AnyMulF, - c1: _SeriesLikeFloat_co, - c2: _SeriesLikeFloat_co, -) -> _Tuple2[_FloatSeries]: ... 
-@overload -def _div( - mul_f: _AnyMulF, - c1: _SeriesLikeComplex_co, - c2: _SeriesLikeComplex_co, -) -> _Tuple2[_ComplexSeries]: ... -@overload -def _div( - mul_f: _AnyMulF, - c1: _SeriesLikeObject_co, - c2: _SeriesLikeObject_co, -) -> _Tuple2[_ObjectSeries]: ... -@overload -def _div( - mul_f: _AnyMulF, - c1: _SeriesLikeCoef_co, - c2: _SeriesLikeCoef_co, -) -> _Tuple2[_CoefSeries]: ... +def _div(mul_f: _AnyMulF, c1: _SeriesLikeFloat_co, c2: _SeriesLikeFloat_co) -> _Tuple2[_FloatSeries]: ... +@overload +def _div(mul_f: _AnyMulF, c1: _SeriesLikeComplex_co, c2: _SeriesLikeComplex_co) -> _Tuple2[_ComplexSeries]: ... +@overload +def _div(mul_f: _AnyMulF, c1: _SeriesLikeObject_co, c2: _SeriesLikeObject_co) -> _Tuple2[_ObjectSeries]: ... +@overload +def _div(mul_f: _AnyMulF, c1: _SeriesLikeCoef_co, c2: _SeriesLikeCoef_co) -> _Tuple2[_CoefSeries]: ... _add: Final[_FuncBinOp] = ... _sub: Final[_FuncBinOp] = ... # keep in sync with `_polytypes._FuncPow` @overload -def _pow( - mul_f: _AnyMulF, - c: _SeriesLikeFloat_co, - pow: _AnyInt, - maxpower: _AnyInt | None, -) -> _FloatSeries: ... +def _pow(mul_f: _AnyMulF, c: _SeriesLikeFloat_co, pow: _AnyInt, maxpower: _AnyInt | None) -> _FloatSeries: ... @overload -def _pow( - mul_f: _AnyMulF, - c: _SeriesLikeComplex_co, - pow: _AnyInt, - maxpower: _AnyInt | None, -) -> _ComplexSeries: ... -@overload -def _pow( - mul_f: _AnyMulF, - c: _SeriesLikeObject_co, - pow: _AnyInt, - maxpower: _AnyInt | None, -) -> _ObjectSeries: ... +def _pow(mul_f: _AnyMulF, c: _SeriesLikeComplex_co, pow: _AnyInt, maxpower: _AnyInt | None) -> _ComplexSeries: ... @overload -def _pow( - mul_f: _AnyMulF, - c: _SeriesLikeCoef_co, - pow: _AnyInt, - maxpower: _AnyInt | None, -) -> _CoefSeries: ... +def _pow(mul_f: _AnyMulF, c: _SeriesLikeObject_co, pow: _AnyInt, maxpower: _AnyInt | None) -> _ObjectSeries: ... +@overload +def _pow(mul_f: _AnyMulF, c: _SeriesLikeCoef_co, pow: _AnyInt, maxpower: _AnyInt | None) -> _CoefSeries: ... # keep in sync with `_polytypes._FuncFit` @overload -def _fit( # type: ignore[overload-overlap] +def _fit( vander_f: _AnyVanderF, x: _SeriesLikeFloat_co, y: _ArrayLikeFloat_co, @@ -522,5 +309,8 @@ def _fit( w: _SeriesLikeCoef_co | None = None, ) -> tuple[_CoefSeries, Sequence[np.inexact | np.int32]]: ... +# def _as_int(x: SupportsIndex, desc: str) -> int: ... + +# def format_float(x: _FloatLike_co, parens: bool = False) -> str: ... 
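[Editor's note] For readers skimming the reformatted stubs above: the one-line ``def trimseq(seq: _SeqT) -> _SeqT: ...`` signature relies on a bound ``TypeVar`` so that the input's concrete type is preserved in the return type. Below is a hedged, runnable sketch of the same pattern; the helper name and the exact bound are illustrative — it mirrors ``polyutils.trimseq`` rather than reproducing it::

    from collections.abc import Sequence
    from typing import TypeVar

    import numpy as np
    import numpy.typing as npt

    # Mirrors `_SeqT = TypeVar("_SeqT", bound=_CoefArray | Sequence[_CoefLike_co])`
    # from the patch above, with simpler stand-in bounds.
    _SeqT = TypeVar("_SeqT", bound=npt.NDArray[np.number] | Sequence[complex])

    def trim_trailing_zeros(seq: _SeqT) -> _SeqT:
        """Drop trailing zero coefficients, keeping at least one entry."""
        i = len(seq)
        while i > 1 and seq[i - 1] == 0:
            i -= 1
        return seq[:i]  # slicing preserves the concrete sequence/array type

    coefs = np.array([3.0, 0.0, 2.0, 0.0, 0.0])
    assert trim_trailing_zeros(coefs).tolist() == [3.0, 0.0, 2.0]
    assert trim_trailing_zeros([1 + 2j, 0, 0]) == [1 + 2j]

Because the TypeVar is bound rather than the parameter being typed as the union directly, a type checker infers a ``float64`` array in for a ``float64`` array out, and a plain list in for a plain list out — the property the reformatted stubs assert for ``trimseq``.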
From b6811bab48feab327498b91095fc41ea6848a8d8 Mon Sep 17 00:00:00 2001
From: jorenham
Date: Thu, 23 Oct 2025 19:29:10 +0200
Subject: [PATCH 0740/1018] CI: Ignore stubtest distutils errors on py311

---
 .spin/cmds.py                      | 4 +++-
 tools/stubtest/allowlist_py311.txt | 3 +++
 2 files changed, 6 insertions(+), 1 deletion(-)
 create mode 100644 tools/stubtest/allowlist_py311.txt

diff --git a/.spin/cmds.py b/.spin/cmds.py
index 925991c5c804..453faa77d932 100644
--- a/.spin/cmds.py
+++ b/.spin/cmds.py
@@ -561,7 +561,9 @@ def stubtest(*, concise: bool, build_dir: str) -> None:
     stubtest_dir = curdir.parent / 'tools' / 'stubtest'
     mypy_config = stubtest_dir / 'mypy.ini'
     allowlists = [stubtest_dir / 'allowlist.txt']
-    if sys.version_info >= (3, 12):
+    if sys.version_info < (3, 12):
+        allowlists.append(stubtest_dir / 'allowlist_py311.txt')
+    else:
         allowlists.append(stubtest_dir / 'allowlist_py312.txt')
 
     cmd = [
diff --git a/tools/stubtest/allowlist_py311.txt b/tools/stubtest/allowlist_py311.txt
new file mode 100644
index 000000000000..4413f164f582
--- /dev/null
+++ b/tools/stubtest/allowlist_py311.txt
@@ -0,0 +1,3 @@
+# python == 3.11.*
+
+numpy\.distutils\..*

From 33d52b80c194264d8c2e7a5225100b9177f22349 Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Thu, 23 Oct 2025 11:33:24 -0600
Subject: [PATCH 0741/1018] BUG: fix int left shift UB in CPU feature detection
 (#30058)

---
 numpy/_core/src/common/npy_cpu_features.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c
index 46ac4a9d8362..91dafa96de0a 100644
--- a/numpy/_core/src/common/npy_cpu_features.c
+++ b/numpy/_core/src/common/npy_cpu_features.c
@@ -554,7 +554,9 @@ npy__cpu_init_features(void)
     // Skylake-X
     npy__cpu_have[NPY_CPU_FEATURE_AVX512DQ] = (reg[1] & (1 << 17)) != 0;
     npy__cpu_have[NPY_CPU_FEATURE_AVX512BW] = (reg[1] & (1 << 30)) != 0;
-    npy__cpu_have[NPY_CPU_FEATURE_AVX512VL] = (reg[1] & (1 << 31)) != 0;
+    // cast and use of unsigned int literal silences UBSan warning:
+    // "runtime error: left shift of 1 by 31 places cannot be represented in type 'int'"
+    npy__cpu_have[NPY_CPU_FEATURE_AVX512VL] = (reg[1] & (int)(1u << 31)) != 0;
     // Cascade Lake
     npy__cpu_have[NPY_CPU_FEATURE_AVX512VNNI] = (reg[2] & (1 << 11)) != 0;
     // Cannon Lake

From ca79fc7861e711a437639539ecfce149fe90de37 Mon Sep 17 00:00:00 2001
From: Joren Hammudoglu
Date: Fri, 24 Oct 2025 03:12:08 +0200
Subject: [PATCH 0742/1018] TYP: ``polynomial.polyutils``: fix callable type
 signatures (#30060)

---
 numpy/polynomial/polyutils.pyi | 53 ++++++++++++++--------------------
 1 file changed, 22 insertions(+), 31 deletions(-)

diff --git a/numpy/polynomial/polyutils.pyi b/numpy/polynomial/polyutils.pyi
index 5487bfa4c2ec..79ca317c12b0 100644
--- a/numpy/polynomial/polyutils.pyi
+++ b/numpy/polynomial/polyutils.pyi
@@ -1,5 +1,14 @@
 from collections.abc import Callable, Iterable, Sequence
-from typing import Any, Final, Literal, SupportsIndex, TypeAlias, TypeVar, overload
+from typing import (
+    Final,
+    Literal,
+    Protocol,
+    SupportsIndex,
+    TypeAlias,
+    TypeVar,
+    overload,
+    type_check_only,
+)
 
 import numpy as np
 import numpy.typing as npt
@@ -17,7 +26,6 @@ from ._polytypes import (
     _ArrayLikeCoef_co,
     _CoefArray,
     _CoefLike_co,
-    _CoefObjectLike_co,
     _CoefSeries,
     _ComplexArray,
     _ComplexSeries,
@@ -31,18 +39,23 @@ from ._polytypes import (
     _SeriesLikeFloat_co,
     _SeriesLikeInt_co,
     _SeriesLikeObject_co,
-    _SupportsCoefOps,
     _Tuple2,
 )
 
 __all__ = ["as_series", "format_float", "getdomain", "mapdomain",
           "mapparms", "trimcoef", "trimseq"]
 
+_T = TypeVar("_T")
 _SeqT = TypeVar("_SeqT", bound=_CoefArray | Sequence[_CoefLike_co])
 
-_AnyValF: TypeAlias = Callable[[npt.ArrayLike, npt.ArrayLike, bool], _CoefArray]
-_AnyLineF: TypeAlias = Callable[[_CoefLike_co, _CoefLike_co], _CoefArray]
-_AnyMulF: TypeAlias = Callable[[npt.ArrayLike, npt.ArrayLike], _CoefArray]
-_AnyVanderF: TypeAlias = Callable[[npt.ArrayLike, SupportsIndex], _CoefArray]
+_AnyLineF: TypeAlias = Callable[[float, float], _CoefArray]
+_AnyMulF: TypeAlias = Callable[[np.ndarray | list[int], np.ndarray], _CoefArray]
+_AnyVanderF: TypeAlias = Callable[[np.ndarray, int], _CoefArray]
+
+@type_check_only
+class _ValFunc(Protocol[_T]):
+    def __call__(self, x: np.ndarray, c: _T, /, *, tensor: bool = True) -> _T: ...
+
+###
 
 @overload
 def as_series(alist: npt.NDArray[np.integer] | _FloatArray, trim: bool = True) -> list[_FloatSeries]: ...
@@ -206,32 +219,10 @@ def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeObject_co)
 def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeCoef_co) -> _CoefSeries: ...
 
 # keep in sync with `_gridnd`
-@overload
-def _valnd(val_f: _AnyValF, c: _SeriesLikeFloat_co, *args: _FloatLike_co) -> np.floating: ...
-@overload
-def _valnd(val_f: _AnyValF, c: _SeriesLikeComplex_co, *args: _NumberLike_co) -> np.complexfloating: ...
-@overload
-def _valnd(val_f: _AnyValF, c: _ArrayLikeFloat_co, *args: _ArrayLikeFloat_co) -> _FloatArray: ...
-@overload
-def _valnd(val_f: _AnyValF, c: _ArrayLikeComplex_co, *args: _ArrayLikeComplex_co) -> _ComplexArray: ...
-@overload
-def _valnd(val_f: _AnyValF, c: _SeriesLikeObject_co, *args: _CoefObjectLike_co) -> _SupportsCoefOps[Any]: ...
-@overload
-def _valnd(val_f: _AnyValF, c: _ArrayLikeCoef_co, *args: _ArrayLikeCoef_co) -> _ObjectArray: ...
+def _valnd(val_f: _ValFunc[_T], c: _T, *args: npt.ArrayLike) -> _T: ...
 
 # keep in sync with `_valnd`
-@overload
-def _gridnd(val_f: _AnyValF, c: _SeriesLikeFloat_co, *args: _FloatLike_co) -> np.floating: ...
-@overload
-def _gridnd(val_f: _AnyValF, c: _SeriesLikeComplex_co, *args: _NumberLike_co) -> np.complexfloating: ...
-@overload
-def _gridnd(val_f: _AnyValF, c: _ArrayLikeFloat_co, *args: _ArrayLikeFloat_co) -> _FloatArray: ...
-@overload
-def _gridnd(val_f: _AnyValF, c: _ArrayLikeComplex_co, *args: _ArrayLikeComplex_co) -> _ComplexArray: ...
-@overload
-def _gridnd(val_f: _AnyValF, c: _SeriesLikeObject_co, *args: _CoefObjectLike_co) -> _SupportsCoefOps[Any]: ...
-@overload
-def _gridnd(val_f: _AnyValF, c: _ArrayLikeCoef_co, *args: _ArrayLikeCoef_co) -> _ObjectArray: ...
+def _gridnd(val_f: _ValFunc[_T], c: _T, *args: npt.ArrayLike) -> _T: ...
 
 # keep in sync with `_polytypes._FuncBinOp`
 @overload

From 852091a09cc58e2eddd41f713665e449a209b1fa Mon Sep 17 00:00:00 2001
From: Joren Hammudoglu
Date: Fri, 24 Oct 2025 03:12:54 +0200
Subject: [PATCH 0743/1018] TYP: ``polynomial``: Simplify ``chebpts{1,2}``
 function stubs (#30065)

---
 numpy/polynomial/_polytypes.pyi | 4 ----
 numpy/polynomial/chebyshev.pyi  | 6 +++---
 2 files changed, 3 insertions(+), 7 deletions(-)

diff --git a/numpy/polynomial/_polytypes.pyi b/numpy/polynomial/_polytypes.pyi
index 58482aea1be9..b5a603b6ca85 100644
--- a/numpy/polynomial/_polytypes.pyi
+++ b/numpy/polynomial/_polytypes.pyi
@@ -499,7 +499,3 @@ class _FuncWeight(Protocol):
     def __call__(self, /, x: _ArrayLikeComplex_co) -> npt.NDArray[np.complex128]: ...
     @overload
     def __call__(self, /, x: _ArrayLikeCoef_co) -> _ObjectArray: ...
-
-@type_check_only
-class _FuncPts(Protocol):
-    def __call__(self, /, npts: _AnyInt) -> _Series[np.float64]: ...
diff --git a/numpy/polynomial/chebyshev.pyi b/numpy/polynomial/chebyshev.pyi
index d70842020ebe..1cfb27829b2e 100644
--- a/numpy/polynomial/chebyshev.pyi
+++ b/numpy/polynomial/chebyshev.pyi
@@ -1,3 +1,4 @@
+from _typeshed import ConvertibleToInt
 from collections.abc import Callable, Iterable
 from typing import (
     Any,
@@ -29,7 +30,6 @@ from ._polytypes import (
     _FuncLine,
     _FuncPoly2Ortho,
     _FuncPow,
-    _FuncPts,
     _FuncRoots,
     _FuncUnOp,
     _FuncVal,
@@ -122,8 +122,8 @@ chebcompanion: Final[_FuncCompanion] = ...
 chebroots: Final[_FuncRoots] = ...
 chebgauss: Final[_FuncGauss] = ...
 chebweight: Final[_FuncWeight] = ...
-chebpts1: Final[_FuncPts] = ...
-chebpts2: Final[_FuncPts] = ...
+def chebpts1(npts: ConvertibleToInt) -> np.ndarray[tuple[int], np.dtype[np.float64]]: ...
+def chebpts2(npts: ConvertibleToInt) -> np.ndarray[tuple[int], np.dtype[np.float64]]: ...
 
 # keep in sync with `Chebyshev.interpolate` (minus `domain` parameter)
 @overload

From df32a985df1459e2e1ab4aab94d56341ccb24b37 Mon Sep 17 00:00:00 2001
From: Joren Hammudoglu
Date: Fri, 24 Oct 2025 03:14:18 +0200
Subject: [PATCH 0744/1018] TYP: ``numpy.ma``: Annotate 27 functions related to
 masks and fill-values (#30067)

* TYP: Annotate ``ma.make_mask()`` and ``ma.make_mask_descr()``
* TYP: Annotate ``ma.make_mask_none()``
* TYP: ``ma.core``: move some of the classes to the top (for the sake of sanity)
* TYP: ``ma.core``: introduce a type alias for boolean mask arrays
* TYP: Annotate ``ma.getmaskarray()``
* TYP: Annotate ``ma.getdata()``
* TYP: update type-tests for ``ma.getmask()`` to use the literal `False` bool type
* TYP: Annotate ``ma.mask_or()``
* TYP: Annotate ``ma.flatten_mask()``
* TYP: Annotate ``ma.flatten_structured_array()``
* TYP: Annotate ``ma.masked_invalid()``
* TYP: Annotate ``ma.masked_where()``
* TYP: Annotate ``ma.masked_object()`` and ``ma.masked_values()``
* TYP: Annotate ``ma.fix_invalid()``
* TYP: Annotate ``ma.*_fill_value()``

---
 numpy/ma/core.pyi                     | 414 +++++++++++++++++++++++---
 numpy/typing/tests/data/reveal/ma.pyi |  10 +-
 2 files changed, 373 insertions(+), 51 deletions(-)

diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi
index e3ef71adb09f..78114931cbac 100644
--- a/numpy/ma/core.pyi
+++ b/numpy/ma/core.pyi
@@ -91,6 +91,7 @@ from numpy._typing import (
     _CharLike_co,
     _DTypeLike,
     _DTypeLikeBool,
+    _DTypeLikeVoid,
     _IntLike_co,
     _NestedSequence,
     _ScalarLike_co,
@@ -281,6 +282,7 @@ __all__ = [
 ]
 
 _ShapeT = TypeVar("_ShapeT", bound=_Shape)
+_ShapeOrAnyT = TypeVar("_ShapeOrAnyT", bound=_Shape, default=_AnyShape)
 _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True)
 _DTypeT = TypeVar("_DTypeT", bound=dtype)
 _DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True)
@@ -314,31 +316,19 @@ _ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsInd
 _ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None
 _ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None
 
+_NoMaskType: TypeAlias = np.bool_[Literal[False]]  # type of `np.False_`
+_MaskArray: TypeAlias = np.ndarray[_ShapeOrAnyT, np.dtype[np.bool_]]
+
 ###
 
-MaskType = bool_
-nomask: bool_[Literal[False]]
+MaskType = np.bool_
+
+nomask: Final[_NoMaskType] = ...
 
 class MaskedArrayFutureWarning(FutureWarning): ...
 class MAError(Exception): ...
 class MaskError(MAError): ...
-def default_fill_value(obj): ...
-def minimum_fill_value(obj): ...
-def maximum_fill_value(obj): ...
-def set_fill_value(a, fill_value): ...
-def common_fill_value(a, b): ...
-@overload
-def filled(a: ndarray[_ShapeT_co, _DTypeT_co], fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
-@overload
-def filled(a: _ArrayLike[_ScalarT_co], fill_value: _ScalarLike_co | None = None) -> NDArray[_ScalarT_co]: ...
-@overload
-def filled(a: ArrayLike, fill_value: _ScalarLike_co | None = None) -> NDArray[Any]: ...
-def getdata(a, subok=True): ...
-get_data = getdata
-
-def fix_invalid(a, mask=..., copy=True, fill_value=None): ...
-
 class _MaskedUFunc:
     f: Any
     __doc__: Any
@@ -367,6 +357,15 @@ class _DomainedBinaryOperation(_MaskedUFunc):
     def __init__(self, dbfunc, domain, fillx=..., filly=...): ...
     def __call__(self, a, b, *args, **kwargs): ...
 
+class _MaskedPrintOption:
+    def __init__(self, display): ...
+    def display(self): ...
+    def set_display(self, s): ...
+    def enabled(self): ...
+    def enable(self, shrink=1): ...
+
+masked_print_option: Final[_MaskedPrintOption] = ...
+
 exp: _MaskedUnaryOperation
 conjugate: _MaskedUnaryOperation
 sin: _MaskedUnaryOperation
@@ -422,50 +421,371 @@ remainder: _DomainedBinaryOperation
 fmod: _DomainedBinaryOperation
 mod: _DomainedBinaryOperation
 
-def make_mask_descr(ndtype): ...
+# `obj` can be anything (even `object()`), and is too "flexible", so we can't
+# meaningfully annotate it, or its return type.
+def default_fill_value(obj: object) -> Any: ...
+def minimum_fill_value(obj: object) -> Any: ...
+def maximum_fill_value(obj: object) -> Any: ...
+
+#
+@overload  # returns `a.fill_value` if `a` is a `MaskedArray`
+def get_fill_value(a: _MaskedArray[_ScalarT]) -> _ScalarT: ...
+@overload  # otherwise returns `default_fill_value(a)`
+def get_fill_value(a: object) -> Any: ...
+
+# this is a noop if `a` isn't a `MaskedArray`, so we only accept `MaskedArray` input
+def set_fill_value(a: MaskedArray, fill_value: _ScalarLike_co) -> None: ...
+
+# the return type depends on the *values* of `a` and `b` (which cannot be known
+# statically), which is why we need to return an awkward `_ | None`
+@overload
+def common_fill_value(a: _MaskedArray[_ScalarT], b: MaskedArray) -> _ScalarT | None: ...
 @overload
-def getmask(a: _ScalarLike_co) -> bool_: ...
+def common_fill_value(a: object, b: object) -> Any: ...
+
+# keep in sync with `fix_invalid`, but return `ndarray` instead of `MaskedArray`
 @overload
-def getmask(a: MaskedArray[_ShapeT_co, Any]) -> np.ndarray[_ShapeT_co, dtype[bool_]] | bool_: ...
+def filled(a: ndarray[_ShapeT, _DTypeT], fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT, _DTypeT]: ...
 @overload
-def getmask(a: ArrayLike) -> NDArray[bool_] | bool_: ...
+def filled(a: _ArrayLike[_ScalarT], fill_value: _ScalarLike_co | None = None) -> NDArray[_ScalarT]: ...
+@overload
+def filled(a: ArrayLike, fill_value: _ScalarLike_co | None = None) -> NDArray[Incomplete]: ...
+
+# keep in sync with `filled`, but return `MaskedArray` instead of `ndarray`
+@overload
+def fix_invalid(
+    a: np.ndarray[_ShapeT, _DTypeT],
+    mask: _ArrayLikeBool_co = nomask,
+    copy: bool = True,
+    fill_value: _ScalarLike_co | None = None,
+) -> MaskedArray[_ShapeT, _DTypeT]: ...
+@overload
+def fix_invalid(
+    a: _ArrayLike[_ScalarT],
+    mask: _ArrayLikeBool_co = nomask,
+    copy: bool = True,
+    fill_value: _ScalarLike_co | None = None,
+) -> _MaskedArray[_ScalarT]: ...
+@overload
+def fix_invalid(
+    a: ArrayLike,
+    mask: _ArrayLikeBool_co = nomask,
+    copy: bool = True,
+    fill_value: _ScalarLike_co | None = None,
+) -> _MaskedArray[Incomplete]: ...
+
+#
+@overload
+def getdata(a: np.ndarray[_ShapeT, _DTypeT], subok: bool = True) -> np.ndarray[_ShapeT, _DTypeT]: ...
+@overload
+def getdata(a: _ArrayLike[_ScalarT], subok: bool = True) -> NDArray[_ScalarT]: ...
+@overload
+def getdata(a: ArrayLike, subok: bool = True) -> NDArray[Incomplete]: ...
+
+get_data = getdata
+
+#
+@overload
+def getmask(a: _ScalarLike_co) -> _NoMaskType: ...
+@overload
+def getmask(a: MaskedArray[_ShapeT, Any]) -> _MaskArray[_ShapeT] | _NoMaskType: ...
+@overload
+def getmask(a: ArrayLike) -> _MaskArray | _NoMaskType: ...
 
 get_mask = getmask
 
-def getmaskarray(arr): ...
+# like `getmask`, but instead of `nomask` returns `make_mask_none(arr, arr.dtype?)`
+@overload
+def getmaskarray(arr: _ScalarLike_co) -> _MaskArray[tuple[()]]: ...
+@overload
+def getmaskarray(arr: np.ndarray[_ShapeT, Any]) -> _MaskArray[_ShapeT]: ...
 
 # It's sufficient for `m` to have dtype with type: `type[np.bool_]`,
 # which isn't necessarily a ndarray. Please open an issue if this causes issues.
 def is_mask(m: object) -> TypeIs[NDArray[bool_]]: ...
 
-def make_mask(m, copy=False, shrink=True, dtype=...): ...
-def make_mask_none(newshape, dtype=None): ...
-def mask_or(m1, m2, copy=False, shrink=True): ...
-def flatten_mask(mask): ...
-def masked_where(condition, a, copy=True): ...
-def masked_greater(x, value, copy=True): ...
-def masked_greater_equal(x, value, copy=True): ...
-def masked_less(x, value, copy=True): ...
-def masked_less_equal(x, value, copy=True): ...
-def masked_not_equal(x, value, copy=True): ...
-def masked_equal(x, value, copy=True): ...
-def masked_inside(x, v1, v2, copy=True): ...
-def masked_outside(x, v1, v2, copy=True): ...
-def masked_object(x, value, copy=True, shrink=True): ...
-def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True): ...
-def masked_invalid(a, copy=True): ...
+#
+@overload
+def make_mask_descr(ndtype: _VoidDTypeLike) -> np.dtype[np.void]: ...
+@overload
+def make_mask_descr(ndtype: _DTypeLike[np.generic] | str | type) -> np.dtype[np.bool_]: ...
 
-class _MaskedPrintOption:
-    def __init__(self, display): ...
-    def display(self): ...
-    def set_display(self, s): ...
-    def enabled(self): ...
-    def enable(self, shrink=1): ...
+#
+@overload  # m is nomask
+def make_mask(
+    m: _NoMaskType,
+    copy: bool = False,
+    shrink: bool = True,
+    dtype: _DTypeLikeBool = ...,
+) -> _NoMaskType: ...
+@overload  # m: ndarray, shrink=True (default), dtype: bool-like (default)
+def make_mask(
+    m: np.ndarray[_ShapeT],
+    copy: bool = False,
+    shrink: Literal[True] = True,
+    dtype: _DTypeLikeBool = ...,
+) -> _MaskArray[_ShapeT] | _NoMaskType: ...
+@overload  # m: ndarray, shrink=False (kwarg), dtype: bool-like (default)
+def make_mask(
+    m: np.ndarray[_ShapeT],
+    copy: bool = False,
+    *,
+    shrink: Literal[False],
+    dtype: _DTypeLikeBool = ...,
+) -> _MaskArray[_ShapeT]: ...
+@overload  # m: ndarray, dtype: void-like
+def make_mask(
+    m: np.ndarray[_ShapeT],
+    copy: bool = False,
+    shrink: bool = True,
+    *,
+    dtype: _DTypeLikeVoid,
+) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ...
+@overload  # m: array-like, shrink=True (default), dtype: bool-like (default)
+def make_mask(
+    m: ArrayLike,
+    copy: bool = False,
+    shrink: Literal[True] = True,
+    dtype: _DTypeLikeBool = ...,
+) -> _MaskArray | _NoMaskType: ...
+@overload  # m: array-like, shrink=False (kwarg), dtype: bool-like (default)
+def make_mask(
+    m: ArrayLike,
+    copy: bool = False,
+    *,
+    shrink: Literal[False],
+    dtype: _DTypeLikeBool = ...,
+) -> _MaskArray: ...
+@overload  # m: array-like, dtype: void-like
+def make_mask(
+    m: ArrayLike,
+    copy: bool = False,
+    shrink: bool = True,
+    *,
+    dtype: _DTypeLikeVoid,
+) -> NDArray[np.void]: ...
+@overload  # fallback
+def make_mask(
+    m: ArrayLike,
+    copy: bool = False,
+    shrink: bool = True,
+    *,
+    dtype: DTypeLike = ...,
+) -> NDArray[Incomplete] | _NoMaskType: ...
 
-masked_print_option: _MaskedPrintOption
+#
+@overload  # known shape, dtype: unstructured (default)
+def make_mask_none(newshape: _ShapeT, dtype: np.dtype | type | str | None = None) -> _MaskArray[_ShapeT]: ...
+@overload  # known shape, dtype: structured
+def make_mask_none(newshape: _ShapeT, dtype: _VoidDTypeLike) -> np.ndarray[_ShapeT, dtype[np.void]]: ...
+@overload  # unknown shape, dtype: unstructured (default)
+def make_mask_none(newshape: _ShapeLike, dtype: np.dtype | type | str | None = None) -> _MaskArray: ...
+@overload  # unknown shape, dtype: structured
+def make_mask_none(newshape: _ShapeLike, dtype: _VoidDTypeLike) -> NDArray[np.void]: ...
 
-def flatten_structured_array(a): ...
+#
+@overload  # nomask, scalar-like, shrink=True (default)
+def mask_or(
+    m1: _NoMaskType | Literal[False],
+    m2: _ScalarLike_co,
+    copy: bool = False,
+    shrink: Literal[True] = True,
+) -> _NoMaskType: ...
+@overload  # nomask, scalar-like, shrink=False (kwarg)
+def mask_or(
+    m1: _NoMaskType | Literal[False],
+    m2: _ScalarLike_co,
+    copy: bool = False,
+    *,
+    shrink: Literal[False],
+) -> _MaskArray[tuple[()]]: ...
+@overload  # scalar-like, nomask, shrink=True (default)
+def mask_or(
+    m1: _ScalarLike_co,
+    m2: _NoMaskType | Literal[False],
+    copy: bool = False,
+    shrink: Literal[True] = True,
+) -> _NoMaskType: ...
+@overload  # scalar-like, nomask, shrink=False (kwarg)
+def mask_or(
+    m1: _ScalarLike_co,
+    m2: _NoMaskType | Literal[False],
+    copy: bool = False,
+    *,
+    shrink: Literal[False],
+) -> _MaskArray[tuple[()]]: ...
+@overload  # ndarray, ndarray | nomask, shrink=True (default)
+def mask_or(
+    m1: np.ndarray[_ShapeT, np.dtype[_ScalarT]],
+    m2: np.ndarray[_ShapeT, np.dtype[_ScalarT]] | _NoMaskType | Literal[False],
+    copy: bool = False,
+    shrink: Literal[True] = True,
+) -> _MaskArray[_ShapeT] | _NoMaskType: ...
+@overload  # ndarray, ndarray | nomask, shrink=False (kwarg)
+def mask_or(
+    m1: np.ndarray[_ShapeT, np.dtype[_ScalarT]],
+    m2: np.ndarray[_ShapeT, np.dtype[_ScalarT]] | _NoMaskType | Literal[False],
+    copy: bool = False,
+    *,
+    shrink: Literal[False],
+) -> _MaskArray[_ShapeT]: ...
+@overload  # ndarray | nomask, ndarray, shrink=True (default)
+def mask_or(
+    m1: np.ndarray[_ShapeT, np.dtype[_ScalarT]] | _NoMaskType | Literal[False],
+    m2: np.ndarray[_ShapeT, np.dtype[_ScalarT]],
+    copy: bool = False,
+    shrink: Literal[True] = True,
+) -> _MaskArray[_ShapeT] | _NoMaskType: ...
+@overload  # ndarray | nomask, ndarray, shrink=False (kwarg)
+def mask_or(
+    m1: np.ndarray[_ShapeT, np.dtype[_ScalarT]] | _NoMaskType | Literal[False],
+    m2: np.ndarray[_ShapeT, np.dtype[_ScalarT]],
+    copy: bool = False,
+    *,
+    shrink: Literal[False],
+) -> _MaskArray[_ShapeT]: ...
+
+#
+@overload
+def flatten_mask(mask: np.ndarray[_ShapeT]) -> _MaskArray[_ShapeT]: ...
+@overload
+def flatten_mask(mask: ArrayLike) -> _MaskArray: ...
+
+# NOTE: we currently don't know the field types of `void` dtypes, so it's not possible
+# to know the output dtype of the returned array.
+@overload
+def flatten_structured_array(a: MaskedArray[_ShapeT, np.dtype[np.void]]) -> MaskedArray[_ShapeT]: ...
+@overload
+def flatten_structured_array(a: np.ndarray[_ShapeT, np.dtype[np.void]]) -> np.ndarray[_ShapeT]: ...
+@overload  # for some reason this accepts unstructured array-likes, hence this fallback overload
+def flatten_structured_array(a: ArrayLike) -> np.ndarray: ...
+
+# keep in sync with the other `masked_*` functions
+@overload  # known array with known shape and dtype
+def masked_invalid(a: ndarray[_ShapeT, _DTypeT], copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ...
+@overload  # array-like of known scalar-type
+def masked_invalid(a: _ArrayLike[_ScalarT], copy: bool = True) -> _MaskedArray[_ScalarT]: ...
+@overload  # unknown array-like
+def masked_invalid(a: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
 
+# keep in sync with the other `masked_*` functions
+@overload  # known array with known shape and dtype
+def masked_where(
+    condition: _ArrayLikeBool_co, a: ndarray[_ShapeT, _DTypeT], copy: bool = True
+) -> MaskedArray[_ShapeT, _DTypeT]: ...
+@overload  # array-like of known scalar-type
+def masked_where(condition: _ArrayLikeBool_co, a: _ArrayLike[_ScalarT], copy: bool = True) -> _MaskedArray[_ScalarT]: ...
+@overload  # unknown array-like
+def masked_where(condition: _ArrayLikeBool_co, a: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
+
+# keep in sync with the other `masked_*` functions
+@overload  # known array with known shape and dtype
+def masked_greater(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ...
+@overload  # array-like of known scalar-type
+def masked_greater(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ...
+@overload  # unknown array-like
+def masked_greater(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
+
+# keep in sync with the other `masked_*` functions
+@overload  # known array with known shape and dtype
+def masked_greater_equal(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ...
+@overload  # array-like of known scalar-type
+def masked_greater_equal(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ...
+@overload  # unknown array-like
+def masked_greater_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
+
+# keep in sync with the other `masked_*` functions
+@overload  # known array with known shape and dtype
+def masked_less(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ...
+@overload  # array-like of known scalar-type
+def masked_less(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ...
+@overload  # unknown array-like
+def masked_less(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
+
+# keep in sync with the other `masked_*` functions
+@overload  # known array with known shape and dtype
+def masked_less_equal(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ...
+@overload  # array-like of known scalar-type
+def masked_less_equal(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ...
+@overload  # unknown array-like
+def masked_less_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
+
+# keep in sync with the other `masked_*` functions
+@overload  # known array with known shape and dtype
+def masked_not_equal(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ...
+@overload  # array-like of known scalar-type
+def masked_not_equal(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ...
+@overload  # unknown array-like
+def masked_not_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
+
+# keep in sync with the other `masked_*` functions
+@overload  # known array with known shape and dtype
+def masked_equal(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ...
+@overload  # array-like of known scalar-type
+def masked_equal(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ...
+@overload  # unknown array-like
+def masked_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
+
+# keep in sync with the other `masked_*` functions
+@overload  # known array with known shape and dtype
+def masked_inside(x: ndarray[_ShapeT, _DTypeT], v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ...
+@overload  # array-like of known scalar-type
+def masked_inside(x: _ArrayLike[_ScalarT], v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ...
+@overload  # unknown array-like
+def masked_inside(x: ArrayLike, v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
+
+# keep in sync with the other `masked_*` functions
+@overload  # known array with known shape and dtype
+def masked_outside(x: ndarray[_ShapeT, _DTypeT], v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ...
+@overload  # array-like of known scalar-type
+def masked_outside(x: _ArrayLike[_ScalarT], v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ...
+@overload  # unknown array-like
+def masked_outside(x: ArrayLike, v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
+
+# only intended for object arrays, so we assume that's how it's always used in practice
+@overload
+def masked_object(
+    x: np.ndarray[_ShapeT, np.dtype[np.object_]],
+    value: object,
+    copy: bool = True,
+    shrink: bool = True,
+) -> MaskedArray[_ShapeT, np.dtype[np.object_]]: ...
+@overload
+def masked_object(
+    x: _ArrayLikeObject_co,
+    value: object,
+    copy: bool = True,
+    shrink: bool = True,
+) -> _MaskedArray[np.object_]: ...
+
+# keep roughly in sync with `filled`
+@overload
+def masked_values(
+    x: np.ndarray[_ShapeT, _DTypeT],
+    value: _ScalarLike_co,
+    rtol: float = 1e-5,
+    atol: float = 1e-8,
+    copy: bool = True,
+    shrink: bool = True
+) -> MaskedArray[_ShapeT, _DTypeT]: ...
+@overload
+def masked_values(
+    x: _ArrayLike[_ScalarT],
+    value: _ScalarLike_co,
+    rtol: float = 1e-5,
+    atol: float = 1e-8,
+    copy: bool = True,
+    shrink: bool = True
+) -> _MaskedArray[_ScalarT]: ...
+@overload
+def masked_values(
+    x: ArrayLike,
+    value: _ScalarLike_co,
+    rtol: float = 1e-5,
+    atol: float = 1e-8,
+    copy: bool = True,
+    shrink: bool = True
+) -> _MaskedArray[Incomplete]: ...
 
 # TODO: Support non-boolean mask dtypes, such as `np.void`. This will require adding an
 # additional generic type parameter to (at least) `MaskedArray` and `MaskedIterator` to
diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi
index a6857ef0a3dd..e47e690ff0e7 100644
--- a/numpy/typing/tests/data/reveal/ma.pyi
+++ b/numpy/typing/tests/data/reveal/ma.pyi
@@ -6,7 +6,9 @@ from numpy._typing import NDArray, _AnyShape
 
 _ScalarT = TypeVar("_ScalarT", bound=generic)
 _ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True)
+
 MaskedArray: TypeAlias = np.ma.MaskedArray[_AnyShape, dtype[_ScalarT]]
+_NoMaskType: TypeAlias = np.bool[Literal[False]]
 _Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]]
 
 class MaskedArraySubclass(MaskedArray[_ScalarT_co]): ...
@@ -347,13 +349,13 @@ assert_type(np.ma.allclose(AR_f4, MAR_f4, rtol=.4, atol=.3), bool)
 assert_type(MAR_2d_f4.ravel(), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]])
 assert_type(MAR_1d.ravel(order="A"), np.ma.MaskedArray[tuple[int], np.dtype[Any]])
 
-assert_type(np.ma.getmask(MAR_f4), NDArray[np.bool] | np.bool)
+assert_type(np.ma.getmask(MAR_f4), NDArray[np.bool] | _NoMaskType)
 # PyRight detects this one correctly, but mypy doesn't:
 # `Revealed type is "Union[numpy.ndarray[Any, Any], numpy.bool[Any]]"`
 assert_type(np.ma.getmask(MAR_1d), np.ndarray[tuple[int], np.dtype[np.bool]] | np.bool)  # type: ignore[assert-type]
-assert_type(np.ma.getmask(MAR_2d_f4), np.ndarray[tuple[int, int], np.dtype[np.bool]] | np.bool)
-assert_type(np.ma.getmask([1, 2]), NDArray[np.bool] | np.bool)
-assert_type(np.ma.getmask(np.int64(1)), np.bool)
+assert_type(np.ma.getmask(MAR_2d_f4), np.ndarray[tuple[int, int], np.dtype[np.bool]] | _NoMaskType)
+assert_type(np.ma.getmask([1, 2]), NDArray[np.bool] | _NoMaskType)
+assert_type(np.ma.getmask(np.int64(1)), _NoMaskType)
 
 assert_type(np.ma.is_mask(MAR_1d), bool)
 assert_type(np.ma.is_mask(AR_b), bool)

From 1d053b3482b178ed057474402ae94c80701796e0 Mon Sep 17 00:00:00 2001
From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com>
Date: Fri, 24 Oct 2025 11:17:29 -0400
Subject: [PATCH 0745/1018] TST: Remove unneeded test__datasource thread-unsafe
 markers (#30064)

---
 numpy/lib/tests/test__datasource.py | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/numpy/lib/tests/test__datasource.py b/numpy/lib/tests/test__datasource.py
index 4c866511a5de..2dd19410bbf0 100644
--- a/numpy/lib/tests/test__datasource.py
+++ b/numpy/lib/tests/test__datasource.py
@@ -88,7 +88,6 @@ def invalid_httpfile():
     return http_fakefile
 
 
-@pytest.mark.thread_unsafe(reason="mkdtemp thread-unsafe")
 class TestDataSourceOpen:
     def test_ValidHTTP(self, tmp_path):
         ds = datasource.DataSource(tmp_path)
@@ -157,7 +156,6 @@ def test_ValidBz2File(self, tmp_path):
         assert_equal(magic_line, result)
 
 
-@pytest.mark.thread_unsafe(reason="mkdtemp thread-unsafe")
 class TestDataSourceExists:
     def test_ValidHTTP(self, tmp_path):
         ds = datasource.DataSource(tmp_path)
@@ -184,7 +182,6 @@ def test_InvalidFile(self, tmp_path):
         assert_equal(ds.exists(tmpfile), False)
 
 
-@pytest.mark.thread_unsafe(reason="mkdtemp thread-unsafe")
 class TestDataSourceAbspath:
    def test_ValidHTTP(self, tmp_path):
         ds = datasource.DataSource(tmp_path)
@@ -247,7 +244,6 @@ def test_windows_os_sep(self, tmp_path):
             os.sep = orig_os_sep
 
 
-@pytest.mark.thread_unsafe(reason="mkdtemp thread-unsafe")
 class TestRepositoryAbspath:
     def test_ValidHTTP(self, tmp_path):
         repos = datasource.Repository(valid_baseurl(), tmp_path)
@@ -275,7 +271,6 @@ def test_windows_os_sep(self, tmp_path):
             os.sep = orig_os_sep
-@pytest.mark.thread_unsafe(reason="mkdtemp thread-unsafe")
 class TestRepositoryExists:
     def test_ValidFile(self, tmp_path):
         # Create local temp file
@@ -305,7 +300,6 @@ def test_CachedHTTPFile(self, tmp_path):
         assert_(repos.exists(tmpfile))
 
 
-@pytest.mark.thread_unsafe(reason="mkdtemp thread-unsafe")
 class TestOpenFunc:
     def test_DataSourceOpen(self, tmp_path):
         local_file = valid_textfile(tmp_path)

From 07ef0177960daf11bd66541077a7bc98d76b883a Mon Sep 17 00:00:00 2001
From: "Christine P. Chai"
Date: Fri, 24 Oct 2025 14:36:42 -0700
Subject: [PATCH 0746/1018] DOC: Correct a typo: an 1d -> a 1d [skip
 actions][skip azp][skip cirrus]

---
 numpy/lib/_function_base_impl.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py
index 9d5553cc7a34..b712269dda9c 100644
--- a/numpy/lib/_function_base_impl.py
+++ b/numpy/lib/_function_base_impl.py
@@ -706,7 +706,7 @@ def piecewise(x, condlist, funclist, *args, **kw):
         is the default value, used wherever all conditions are false.
     funclist : list of callables, f(x,*args,**kw), or scalars
         Each function is evaluated over `x` wherever its corresponding
-        condition is True. It should take a 1d array as input and give an 1d
+        condition is True. It should take a 1d array as input and give a 1d
         array or a scalar value as output. If, instead of a callable, a
         scalar is provided then a constant function (``lambda x: scalar``) is
         assumed.

From 41cebd0f891f54d3090d4544d3f7a932ec0d4782 Mon Sep 17 00:00:00 2001
From: abhi210 <27881020+Abhi210@users.noreply.github.com>
Date: Fri, 24 Oct 2025 12:59:15 +0530
Subject: [PATCH 0747/1018] This PR references #11521. It removes deprecated
 functionality and cleans up outdated code paths:

- Removed the deprecated `style` parameter from `array2string` in
  `numpy/_core/arrayprint.py`, along with its related documentation and tests.
- Replaced the deprecation warning for `np.sum` called on generators with a
  `TypeError` in `numpy/_core/fromnumeric.py`, since this behavior is now
  unsupported.
- Removed a leftover docstring that was intended to be deleted in PR #26268.

---
 .../upcoming_changes/30068.expired.rst      |  8 ++
 numpy/_core/arrayprint.py                   | 18 +----
 numpy/_core/arrayprint.pyi                  | 79 -------------------
 numpy/_core/fromnumeric.py                  | 27 +------
 numpy/_core/tests/test_arrayprint.py        |  5 --
 numpy/_core/tests/test_deprecations.py      |  6 --
 numpy/typing/tests/data/fail/arrayprint.pyi | 11 ++-
 7 files changed, 19 insertions(+), 135 deletions(-)
 create mode 100644 doc/release/upcoming_changes/30068.expired.rst

diff --git a/doc/release/upcoming_changes/30068.expired.rst b/doc/release/upcoming_changes/30068.expired.rst
new file mode 100644
index 000000000000..b651371f7061
--- /dev/null
+++ b/doc/release/upcoming_changes/30068.expired.rst
@@ -0,0 +1,8 @@
+``numpy.array2string`` and ``numpy.sum`` deprecations finalized
+----------------------------------------------------------------
+
+The following long-deprecated APIs have been removed or converted to errors:
+
+* The ``style`` parameter has been removed from ``numpy.array2string``. This argument has had no effect since NumPy 1.14.0.
+
+* Calling ``np.sum(generator)`` directly on a generator object now raises a ``TypeError``. This behavior was deprecated in NumPy 1.15.0. Use ``np.sum(np.fromiter(generator))`` or the Python ``sum`` builtin instead.
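[Editor's note: a minimal sketch of the migration described in the release note
above. The variable names are illustrative only, and the exact exception
message is whatever the patch below raises:

    import numpy as np

    gen = (i * i for i in range(5))

    # Before this patch, np.sum(generator) emitted a DeprecationWarning and
    # summed the generator; it now raises TypeError.
    try:
        np.sum(gen)
    except TypeError:
        pass

    # Supported replacements: materialize the generator first, or use the
    # Python builtin sum().
    total = np.sum(np.fromiter((i * i for i in range(5)), dtype=np.int64))
    total_py = sum(i * i for i in range(5))
    assert total == total_py == 30

    # The removed array2string 'style' argument can simply be dropped:
    np.array2string(np.array(1.0))

End of editor's note.]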
diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py
index 9eda8db40dfc..ec17d3063c09 100644
--- a/numpy/_core/arrayprint.py
+++ b/numpy/_core/arrayprint.py
@@ -619,7 +619,7 @@ def _array2string(a, options, separator=' ', prefix=""):
 def _array2string_dispatcher(
         a, max_line_width=None, precision=None,
         suppress_small=None, separator=None, prefix=None,
-        style=None, formatter=None, threshold=None,
+        formatter=None, threshold=None,
         edgeitems=None, sign=None, floatmode=None, suffix=None,
         *, legacy=None):
     return (a,)
@@ -628,7 +628,7 @@ def _array2string_dispatcher(
 @array_function_dispatch(_array2string_dispatcher, module='numpy')
 def array2string(a, max_line_width=None, precision=None,
                  suppress_small=None, separator=' ', prefix="",
-                 style=np._NoValue, formatter=None, threshold=None,
+                 formatter=None, threshold=None,
                  edgeitems=None, sign=None, floatmode=None, suffix="",
                  *, legacy=None):
     """
@@ -663,10 +663,6 @@ def array2string(a, max_line_width=None, precision=None,
        wrapping is forced at the column ``max_line_width - len(suffix)``.
        It should be noted that the content of prefix and suffix strings are
        not included in the output.
-    style : _NoValue, optional
-        Has no effect, do not use.
-
-        .. deprecated:: 1.14.0
     formatter : dict of callables, optional
         If not None, the keys should indicate the type(s) that the respective
        formatting function applies to. Callables should return a string.
@@ -786,16 +782,8 @@ def array2string(a, max_line_width=None, precision=None,
     options.update(overrides)
 
     if options['legacy'] <= 113:
-        if style is np._NoValue:
-            style = repr
-
         if a.shape == () and a.dtype.names is None:
-            return style(a.item())
-    elif style is not np._NoValue:
-        # Deprecation 11-9-2017  v1.14
-        warnings.warn("'style' argument is deprecated and no longer functional"
-                      " except in 1.13 'legacy' mode",
-                      DeprecationWarning, stacklevel=2)
+            return repr(a.item())
 
     if options['legacy'] > 113:
         options['linewidth'] -= len(suffix)
diff --git a/numpy/_core/arrayprint.pyi b/numpy/_core/arrayprint.pyi
index 57e2e1248c5e..6834565da97f 100644
--- a/numpy/_core/arrayprint.pyi
+++ b/numpy/_core/arrayprint.pyi
@@ -10,13 +10,10 @@ from typing import (
     SupportsIndex,
     TypeAlias,
     TypedDict,
-    overload,
     type_check_only,
 )
-from typing_extensions import deprecated
 
 import numpy as np
-from numpy._globals import _NoValueType
 from numpy._typing import NDArray, _CharLike_co, _FloatLike_co
 
 __all__ = [
@@ -94,7 +91,6 @@ def set_printoptions(
 def get_printoptions() -> _FormatOptions: ...
 
 # public numpy export
-@overload  # no style
 def array2string(
     a: NDArray[Any],
     max_line_width: int | None = None,
     precision: SupportsIndex | None = None,
     suppress_small: bool | None = None,
     separator: str = " ",
     prefix: str = "",
-    style: _NoValueType = ...,
     formatter: _FormatDict | None = None,
     threshold: int | None = None,
     edgeitems: int | None = None,
     sign: _Sign | None = None,
     floatmode: _FloatMode | None = None,
     suffix: str = "",
     *,
     legacy: _Legacy | None = None,
 ) -> str: ...
-@overload  # style= (positional), legacy="1.13"
-def array2string(
-    a: NDArray[Any],
-    max_line_width: int | None,
-    precision: SupportsIndex | None,
-    suppress_small: bool | None,
-    separator: str,
-    prefix: str,
-    style: _ReprFunc,
-    formatter: _FormatDict | None = None,
-    threshold: int | None = None,
-    edgeitems: int | None = None,
-    sign: _Sign | None = None,
-    floatmode: _FloatMode | None = None,
-    suffix: str = "",
-    *,
-    legacy: Literal["1.13"],
-) -> str: ...
-@overload  # style= (keyword), legacy="1.13"
-def array2string(
-    a: NDArray[Any],
-    max_line_width: int | None = None,
-    precision: SupportsIndex | None = None,
-    suppress_small: bool | None = None,
-    separator: str = " ",
-    prefix: str = "",
-    *,
-    style: _ReprFunc,
-    formatter: _FormatDict | None = None,
-    threshold: int | None = None,
-    edgeitems: int | None = None,
-    sign: _Sign | None = None,
-    floatmode: _FloatMode | None = None,
-    suffix: str = "",
-    legacy: Literal["1.13"],
-) -> str: ...
-@overload  # style= (positional), legacy!="1.13"
-@deprecated("'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode")
-def array2string(
-    a: NDArray[Any],
-    max_line_width: int | None,
-    precision: SupportsIndex | None,
-    suppress_small: bool | None,
-    separator: str,
-    prefix: str,
-    style: _ReprFunc,
-    formatter: _FormatDict | None = None,
-    threshold: int | None = None,
-    edgeitems: int | None = None,
-    sign: _Sign | None = None,
-    floatmode: _FloatMode | None = None,
-    suffix: str = "",
-    *,
-    legacy: _LegacyNoStyle | None = None,
-) -> str: ...
-@overload  # style= (keyword), legacy="1.13"
-@deprecated("'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode")
-def array2string(
-    a: NDArray[Any],
-    max_line_width: int | None = None,
-    precision: SupportsIndex | None = None,
-    suppress_small: bool | None = None,
-    separator: str = " ",
-    prefix: str = "",
-    *,
-    style: _ReprFunc,
-    formatter: _FormatDict | None = None,
-    threshold: int | None = None,
-    edgeitems: int | None = None,
-    sign: _Sign | None = None,
-    floatmode: _FloatMode | None = None,
-    suffix: str = "",
-    legacy: _LegacyNoStyle | None = None,
-) -> str: ...
 
 def format_float_scientific(
     x: _FloatLike_co,
diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py
index 90493ef77626..33fb9ec4b39f 100644
--- a/numpy/_core/fromnumeric.py
+++ b/numpy/_core/fromnumeric.py
@@ -4,7 +4,6 @@
 import functools
 import math
 import types
-import warnings
 
 import numpy as np
 from numpy._utils import set_module
@@ -755,8 +754,6 @@ def partition(a, kth, axis=-1, kind='introselect', order=None):
         provided with a sequence of k-th it will partition all elements
         indexed by k-th of them into their sorted position at once.
 
-        .. deprecated:: 1.22.0
-            Passing booleans as index is deprecated.
     axis : int or None, optional
         Axis along which to sort. If None, the array is flattened before
         sorting. The default is -1, which sorts along the last axis.
@@ -868,8 +865,6 @@ def argpartition(a, kth, axis=-1, kind='introselect', order=None):
         sequence of k-th it will partition all of them into their sorted
         position at once.
 
-        .. deprecated:: 1.22.0
-            Passing booleans as index is deprecated.
     axis : int or None, optional
         Axis along which to sort. The default is -1 (the last axis). If
         None, the flattened array is used.
@@ -2008,15 +2003,6 @@ def nonzero(a):
     To group the indices by element, rather than dimension, use `argwhere`,
     which returns a row for each non-zero element.
 
-    .. note::
-
-       When called on a zero-d array or scalar, ``nonzero(a)`` is treated
-       as ``nonzero(atleast_1d(a))``.
-
-       .. deprecated:: 1.17.0
-
-          Use `atleast_1d` explicitly if this behavior is deliberate.
-
     Parameters
     ----------
     a : array_like
@@ -2430,19 +2416,12 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
     """
     if isinstance(a, _gentype):
         # 2018-02-25, 1.15.0
-        warnings.warn(
-            "Calling np.sum(generator) is deprecated, and in the future will "
-            "give a different result. Use np.sum(np.fromiter(generator)) or "
+        raise TypeError(
+            "Calling np.sum(generator) is deprecated. "
+            "Use np.sum(np.fromiter(generator)) or "
             "the python sum builtin instead.",
-            DeprecationWarning, stacklevel=2
         )
-        res = _sum_(a)
-        if out is not None:
-            out[...] = res
-            return out
-        return res
-
     return _wrapreduction(
         a, np.add, 'sum', axis, dtype, out,
         keepdims=keepdims, initial=initial, where=where
diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py
index bad1aae78d5d..009a42fdb1b9 100644
--- a/numpy/_core/tests/test_arrayprint.py
+++ b/numpy/_core/tests/test_arrayprint.py
@@ -735,11 +735,6 @@ def test_0d_arrays(self):
         # str is unaffected
         assert_equal(str(x), "1")
 
-        # check `style` arg raises
-        pytest.warns(DeprecationWarning, np.array2string,
-                     np.array(1.), style=repr)
-        # but not in legacy mode
-        np.array2string(np.array(1.), style=repr, legacy='1.13')
         # gh-10934 style was broken in legacy mode, check it works
         np.array2string(np.array(1.), legacy='1.13')
 
diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py
index 37fee7705504..7cb1fee9b890 100644
--- a/numpy/_core/tests/test_deprecations.py
+++ b/numpy/_core/tests/test_deprecations.py
@@ -145,12 +145,6 @@ def test_bincount_bad_list(self, badlist):
         self.assert_deprecated(lambda: np.bincount(badlist))
 
 
-class TestGeneratorSum(_DeprecationTestCase):
-    # 2018-02-25, 1.15.0
-    def test_generator_sum(self):
-        self.assert_deprecated(np.sum, args=((i for i in range(5)),))
-
-
 class BuiltInRoundComplexDType(_DeprecationTestCase):
     # 2020-03-31 1.19.0
     deprecated_types = [np.csingle, np.cdouble, np.clongdouble]
diff --git a/numpy/typing/tests/data/fail/arrayprint.pyi b/numpy/typing/tests/data/fail/arrayprint.pyi
index 224a4105b8a6..3c9dc9330a2b 100644
--- a/numpy/typing/tests/data/fail/arrayprint.pyi
+++ b/numpy/typing/tests/data/fail/arrayprint.pyi
@@ -8,9 +8,8 @@ AR: npt.NDArray[np.float64]
 func1: Callable[[Any], str]
 func2: Callable[[np.integer], str]
 
-np.array2string(AR, style=None)  # type: ignore[call-overload]
-np.array2string(AR, legacy="1.14")  # type: ignore[call-overload]
-np.array2string(AR, sign="*")  # type: ignore[call-overload]
-np.array2string(AR, floatmode="default")  # type: ignore[call-overload]
-np.array2string(AR, formatter={"A": func1})  # type: ignore[call-overload]
-np.array2string(AR, formatter={"float": func2})  # type: ignore[call-overload]
+np.array2string(AR, legacy="1.14")  # type: ignore[arg-type]
+np.array2string(AR, sign="*")  # type: ignore[arg-type]
+np.array2string(AR, floatmode="default")  # type: ignore[arg-type]
+np.array2string(AR, formatter={"A": func1})  # type: ignore[arg-type]
+np.array2string(AR, formatter={"float": func2})  # type: ignore[arg-type]

From 9e42499c6088e895b99fb593a2d4b194948d62fb Mon Sep 17 00:00:00 2001
From: Warren Weckesser
Date: Sat, 25 Oct 2025 03:43:48 -0400
Subject: [PATCH 0748/1018] DOC: Fix a couple typos in generalized-ufuncs.rst.
 (#30074)

[skip actions] [skip azp] [skip cirrus]

---
 doc/source/reference/c-api/generalized-ufuncs.rst | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/doc/source/reference/c-api/generalized-ufuncs.rst b/doc/source/reference/c-api/generalized-ufuncs.rst
index b4750688b5e6..c55336092c71 100644
--- a/doc/source/reference/c-api/generalized-ufuncs.rst
+++ b/doc/source/reference/c-api/generalized-ufuncs.rst
@@ -239,7 +239,7 @@ In this case, the ufunc author might define the function like this:
 
 .. code-block:: c
 
-    int minmax_process_core_dims(PyUFuncObject ufunc,
+    int minmax_process_core_dims(PyUFuncObject *ufunc,
                                  npy_intp *core_dim_sizes)
     {
         npy_intp n = core_dim_sizes[0];
@@ -267,7 +267,7 @@ dimension size will result in an exception being raised. With the
 can set the output size to whatever is appropriate for the ufunc.
 
 In the array passed to the "hook" function, core dimensions that
-were not determined by the input are indicating by having the value -1
+were not determined by the input are indicated by having the value -1
 in the ``core_dim_sizes`` array. The function can replace the -1 with
 whatever value is appropriate for the ufunc, based on the core
 dimensions that occurred in the input arrays.

From 8afe11889d9216807cfa0644991f36df31279e64 Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Sat, 25 Oct 2025 10:33:14 -0600
Subject: [PATCH 0749/1018] BUG: avoid negating INT_MIN in PyArray_Round
 implementation (#30071)

---
 numpy/_core/src/multiarray/calculation.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/numpy/_core/src/multiarray/calculation.c b/numpy/_core/src/multiarray/calculation.c
index 0d855281d57a..b95b37987f8e 100644
--- a/numpy/_core/src/multiarray/calculation.c
+++ b/numpy/_core/src/multiarray/calculation.c
@@ -652,7 +652,15 @@ PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out)
     else {
         op1 = n_ops.true_divide;
         op2 = n_ops.multiply;
-        decimals = -decimals;
+        if (decimals == INT_MIN) {
+            // not technically correct but it doesn't matter because no one in
+            // this millennium is using floating point numbers with enough
+            // accuracy for this to matter
+            decimals = INT_MAX;
+        }
+        else {
+            decimals = -decimals;
+        }
     }
     if (!out) {
         if (PyArray_ISINTEGER(a)) {

From 0959a545c464211191686120dad8a6e38453d84c Mon Sep 17 00:00:00 2001
From: Matti Picus
Date: Sun, 26 Oct 2025 12:30:31 +0200
Subject: [PATCH 0750/1018] BUG: prefer passing a pointer to the helper
 function to avoid punning aliasing (#30077)

---
 .../multiarray/lowlevel_strided_loops.c.src  | 20 ++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)

diff --git a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src
index 368d299d5d01..050207ea188c 100644
--- a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src
+++ b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src
@@ -958,7 +958,9 @@ npy_uint64 _npy_halfbits_to_doublebits(npy_uint16 h){
  * Check various modes of failure to accurately cast src_value to dst
  */
 static GCC_CAST_OPT_LEVEL int
-@prefix@_check_same_value_@name1@_to_@name2@(@rtype1@ src_value) {
+@prefix@_check_same_value_@name1@_to_@name2@(@rtype1@ *src_valueP) {
+
+    @rtype1@ src_value = *src_valueP;
     /* 1. NaN/Infs always work for float to float and otherwise never */
 #if (@is_float1@ || @is_emu_half1@ || @is_double1@ || @is_native_half1@)
@@ -1097,10 +1099,10 @@ static GCC_CAST_OPT_LEVEL int
         dst_value[0] = _CONVERT_FN(src_value[0]);
         dst_value[1] = _CONVERT_FN(src_value[1]);
 #       if @same_value@
-        if (@prefix@_check_same_value_@name1@_to_@name2@(*(@rtype1@ *)&src_value[0]) < 0) {
+        if (@prefix@_check_same_value_@name1@_to_@name2@((@rtype1@ *)&src_value[0]) < 0) {
             return -1;
         }
-        if (@prefix@_check_same_value_@name1@_to_@name2@(*(@rtype1@ *)&src_value[1]) < 0) {
+        if (@prefix@_check_same_value_@name1@_to_@name2@((@rtype1@ *)&src_value[1]) < 0) {
             return -1;
        }
 #       endif //same_value
@@ -1110,7 +1112,7 @@ static GCC_CAST_OPT_LEVEL int
 #       else
         dst_value = _CONVERT_FN(src_value[0]);
 #       if @same_value@
-        if (@prefix@_check_same_value_@name1@_to_@name2@(*(@rtype1@ *)&src_value[0]) < 0) {
+        if (@prefix@_check_same_value_@name1@_to_@name2@((@rtype1@ *)&src_value[0]) < 0) {
             return -1;
         }
         if (src_value[1] != 0) {
@@ -1125,7 +1127,7 @@ static GCC_CAST_OPT_LEVEL int
 #       else
         *(_TYPE2 *)dst = _CONVERT_FN(src_value[0]);
 #       if @same_value@
-        if (@prefix@_check_same_value_@name1@_to_@name2@(*(@rtype1@ *)&src_value[0]) < 0) {
+        if (@prefix@_check_same_value_@name1@_to_@name2@((@rtype1@ *)&src_value[0]) < 0) {
             return -1;
         }
         if (src_value[1] != 0) {
@@ -1140,14 +1142,14 @@ static GCC_CAST_OPT_LEVEL int
 #   if !@aligned@
     dst_value[0] = _CONVERT_FN(src_value);
 #   if @same_value@
-    if (@prefix@_check_same_value_@name1@_to_@name2@(*(@rtype1@ *)&src_value) < 0) {
+    if (@prefix@_check_same_value_@name1@_to_@name2@((@rtype1@ *)&src_value) < 0) {
         return -1;
     }
 #   endif //same_value
 #   else //!aligned
     dst_value[0] = _CONVERT_FN(*(_TYPE1 *)src);
 #   if @same_value@
-    if (@prefix@_check_same_value_@name1@_to_@name2@(*(@rtype1@ *)src) < 0) {
+    if (@prefix@_check_same_value_@name1@_to_@name2@((@rtype1@ *)src) < 0) {
         return -1;
    }
 #   endif //same_value
@@ -1157,7 +1159,7 @@ static GCC_CAST_OPT_LEVEL int
     dst_value = _CONVERT_FN(src_value);
 #   if !@is_bool2@
 #   if @same_value@
-    if (@prefix@_check_same_value_@name1@_to_@name2@(*(@rtype1@ *)&src_value) < 0) {
+    if (@prefix@_check_same_value_@name1@_to_@name2@((@rtype1@ *)&src_value) < 0) {
         return -1;
     }
 #   endif //same_value
@@ -1166,7 +1168,7 @@ static GCC_CAST_OPT_LEVEL int
     *(_TYPE2 *)dst = _CONVERT_FN(*(_TYPE1 *)src);
 #   if !@is_bool2@
 #   if @same_value@
-    if (@prefix@_check_same_value_@name1@_to_@name2@(*((@rtype1@ *)src)) < 0) {
+    if (@prefix@_check_same_value_@name1@_to_@name2@(((@rtype1@ *)src)) < 0) {
        return -1;
     }
 #   endif //same_value

From 0218a25f39d957a39c7f392cfefb97a143f2ef5c Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 27 Oct 2025 12:30:04 -0600
Subject: [PATCH 0751/1018] MAINT: Bump actions/upload-artifact from 4.6.2 to
 5.0.0 (#30080)

Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.6.2 to 5.0.0.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](https://github.com/actions/upload-artifact/compare/ea165f8d65b6e75b540449e92b4886f43607fa02...330a01c490aca151604b8cf639adc76d48f6c5d4)

---
updated-dependencies:
- dependency-name: actions/upload-artifact
  dependency-version: 5.0.0
  dependency-type: direct:production
  update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 .github/workflows/cygwin.yml      | 2 +-
 .github/workflows/mypy_primer.yml | 6 +++---
 .github/workflows/scorecards.yml  | 2 +-
 .github/workflows/wheels.yml      | 2 +-
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml
index 153f18d2956d..c010cd5c9783 100644
--- a/.github/workflows/cygwin.yml
+++ b/.github/workflows/cygwin.yml
@@ -67,7 +67,7 @@ jobs:
           cd tools
           /usr/bin/python3.9 -m pytest --pyargs numpy -n2 -m "not slow"
       - name: Upload wheel if tests fail
-        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
        if: failure()
         with:
           name: numpy-cygwin-wheel
diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml
index 8dfeac05d4ba..d8e4a7eb5817 100644
--- a/.github/workflows/mypy_primer.yml
+++ b/.github/workflows/mypy_primer.yml
@@ -74,7 +74,7 @@ jobs:
         run: |
           echo ${{ github.event.pull_request.number }} | tee pr_number.txt
       - name: Upload mypy_primer diff + PR number
-        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
         if: ${{ matrix.shard-index == 0 }}
         with:
           name: mypy_primer_diffs-${{ matrix.shard-index }}
@@ -82,7 +82,7 @@ jobs:
             diff_${{ matrix.shard-index }}.txt
             pr_number.txt
       - name: Upload mypy_primer diff
-        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
         if: ${{ matrix.shard-index != 0 }}
         with:
           name: mypy_primer_diffs-${{ matrix.shard-index }}
@@ -96,7 +96,7 @@ jobs:
       contents: read
     steps:
       - name: Merge artifacts
-        uses: actions/upload-artifact/merge@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+        uses: actions/upload-artifact/merge@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
         with:
           name: mypy_primer_diffs
           pattern: mypy_primer_diffs-*
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index 9587fc4e026d..5e31a49b08bd 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -42,7 +42,7 @@ jobs:
       # Upload the results as artifacts (optional). Commenting out will disable
       # uploads of run results in SARIF format to the repository Actions tab.
- name: "Upload artifact" - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: SARIF file path: results.sarif diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index e53e4bbefc57..8049b79edef0 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -98,7 +98,7 @@ jobs: env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} - - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} path: ./wheelhouse/*.whl From b1b9bbd5882c5cb17547d4b1254e6fddb9f87d12 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Oct 2025 19:38:33 +0000 Subject: [PATCH 0752/1018] MAINT: Bump astral-sh/setup-uv from 7.1.1 to 7.1.2 Bumps [astral-sh/setup-uv](https://github.com/astral-sh/setup-uv) from 7.1.1 to 7.1.2. - [Release notes](https://github.com/astral-sh/setup-uv/releases) - [Commits](https://github.com/astral-sh/setup-uv/compare/2ddd2b9cb38ad8efd50337e8ab201519a34c9f24...85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41) --- updated-dependencies: - dependency-name: astral-sh/setup-uv dependency-version: 7.1.2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/mypy.yml | 2 +- .github/workflows/stubtest.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 753b75330aea..5fbc4afba8e9 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -58,7 +58,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@2ddd2b9cb38ad8efd50337e8ab201519a34c9f24 # v7.1.1 + - uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2 with: python-version: ${{ matrix.os_python[1] }} activate-environment: true diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml index 13554bfc427c..f5b2fbfbbbae 100644 --- a/.github/workflows/stubtest.yml +++ b/.github/workflows/stubtest.yml @@ -43,7 +43,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@2ddd2b9cb38ad8efd50337e8ab201519a34c9f24 # v7.1.1 + - uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2 with: python-version: ${{ matrix.py }} activate-environment: true From 46503eec3f0c8ffd7ff59edb2637e2f51225e864 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Oct 2025 13:53:50 -0600 Subject: [PATCH 0753/1018] MAINT: Bump github/codeql-action from 4.30.9 to 4.31.0 (#30081) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 4.30.9 to 4.31.0. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/16140ae1a102900babc80a33c44059580f687047...4e94bd11f71e507f7f87df81788dff88d1dacbfb) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.31.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 4c033a26e15c..b147f3e78c1a 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@16140ae1a102900babc80a33c44059580f687047 # v4.30.9 + uses: github/codeql-action/init@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v4.31.0 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@16140ae1a102900babc80a33c44059580f687047 # v4.30.9 + uses: github/codeql-action/autobuild@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v4.31.0 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@16140ae1a102900babc80a33c44059580f687047 # v4.30.9 + uses: github/codeql-action/analyze@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v4.31.0 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 5e31a49b08bd..e0d5de712c0f 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@16140ae1a102900babc80a33c44059580f687047 # v2.1.27 + uses: github/codeql-action/upload-sarif@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v2.1.27 with: sarif_file: results.sarif From 6cf1fddea79029c238dbd3c165b45132d2f1f867 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Mon, 27 Oct 2025 19:16:46 -0400 Subject: [PATCH 0754/1018] DOC: Fix the first small 'process_core_dims()' example. (#30083) [skip actions] [skip azp] [skip cirrus] --- doc/source/reference/c-api/generalized-ufuncs.rst | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/doc/source/reference/c-api/generalized-ufuncs.rst b/doc/source/reference/c-api/generalized-ufuncs.rst index c55336092c71..755ab2141cbd 100644 --- a/doc/source/reference/c-api/generalized-ufuncs.rst +++ b/doc/source/reference/c-api/generalized-ufuncs.rst @@ -244,8 +244,9 @@ In this case, the ufunc author might define the function like this: { npy_intp n = core_dim_sizes[0]; if (n == 0) { - PyExc_SetString("minmax requires the core dimension " - "to be at least 1."); + PyErr_SetString(PyExc_ValueError, + "minmax requires the core dimension to " + "be at least 1."); return -1; } return 0; From a3a2f2fc2283f0b643e65b38b28592b63a9a786d Mon Sep 17 00:00:00 2001 From: Marten van Kerkwijk Date: Tue, 28 Oct 2025 06:29:19 -0400 Subject: [PATCH 0755/1018] MAINT: replace use of asanyarray with ... 
 to keep arrays (#29951)

With gh-28576, it has become possible to ensure that when calling a
ufunc, the output is guaranteed to be an array. This PR uses that to
replace np.asanyarray calls in some functions. I'm not sure this is
complete -- these were just the parts of the code for which tests
failed if np.asanyarray(scalar) is made to return a read-only array
(see gh-29876). I do think these are all small improvements, though.
---
 numpy/_core/_methods.py               | 13 ++++++-------
 numpy/lib/_function_base_impl.py      | 10 +++++-----
 numpy/lib/_ufunclike_impl.py          |  2 +-
 numpy/lib/tests/test_function_base.py |  2 +-
 numpy/linalg/_linalg.py               |  8 ++------
 5 files changed, 15 insertions(+), 20 deletions(-)

diff --git a/numpy/_core/_methods.py b/numpy/_core/_methods.py
index b013fd952beb..1c29831bca20 100644
--- a/numpy/_core/_methods.py
+++ b/numpy/_core/_methods.py
@@ -185,15 +185,14 @@ def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *,
     # Compute sum of squared deviations from mean
     # Note that x may not be inexact and that we need it to be an array,
     # not a scalar.
-    x = asanyarray(arr - arrmean)
-
+    x = um.subtract(arr, arrmean, out=...)
     if issubclass(arr.dtype.type, (nt.floating, nt.integer)):
-        x = um.multiply(x, x, out=x)
+        x = um.square(x, out=x)
     # Fast-paths for built-in complex types
-    elif x.dtype in _complex_to_float:
-        xv = x.view(dtype=(_complex_to_float[x.dtype], (2,)))
-        um.multiply(xv, xv, out=xv)
-        x = um.add(xv[..., 0], xv[..., 1], out=x.real).real
+    elif (_float_dtype := _complex_to_float.get(x.dtype)) is not None:
+        xv = x.view(dtype=(_float_dtype, (2,)))
+        um.square(xv, out=xv)
+        x = um.add(xv[..., 0], xv[..., 1], out=x.real)
     # Most general case; includes handling object arrays containing imaginary
     # numbers and complex types with non-native byteorder
     else:
diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py
index b712269dda9c..29288388f9ed 100644
--- a/numpy/lib/_function_base_impl.py
+++ b/numpy/lib/_function_base_impl.py
@@ -41,6 +41,7 @@
     arctan2,
     cos,
     exp,
+    floor,
     frompyfunc,
     less_equal,
     minimum,
@@ -4568,9 +4569,8 @@ def _lerp(a, b, t, out=None):
     out : array_like
         Output array.
     """
-    diff_b_a = subtract(b, a)
-    # asanyarray is a stop-gap until gh-13105
-    lerp_interpolation = asanyarray(add(a, diff_b_a * t, out=out))
+    diff_b_a = b - a
+    lerp_interpolation = add(a, diff_b_a * t, out=... if out is None else out)
     subtract(b, diff_b_a * (1 - t), out=lerp_interpolation, where=t >= 0.5,
              casting='unsafe', dtype=type(lerp_interpolation.dtype))
     if lerp_interpolation.ndim == 0 and out is None:
@@ -4662,8 +4662,8 @@ def _get_indexes(arr, virtual_indexes, valid_values_count):
     (previous_indexes, next_indexes): Tuple
         A Tuple of virtual_indexes neighbouring indexes
     """
-    previous_indexes = np.asanyarray(np.floor(virtual_indexes))
-    next_indexes = np.asanyarray(previous_indexes + 1)
+    previous_indexes = floor(virtual_indexes, out=...)
+    next_indexes = add(previous_indexes, 1, out=...)
    indexes_above_bounds = virtual_indexes >= valid_values_count - 1
     # When indexes is above max index, take the max value of the array
     if indexes_above_bounds.any():
diff --git a/numpy/lib/_ufunclike_impl.py b/numpy/lib/_ufunclike_impl.py
index 695aab1b8922..79cec5aa08b6 100644
--- a/numpy/lib/_ufunclike_impl.py
+++ b/numpy/lib/_ufunclike_impl.py
@@ -57,7 +57,7 @@ def fix(x, out=None):
 
     """
     # promote back to an array if flattened
-    res = nx.asanyarray(nx.ceil(x, out=out))
+    res = nx.ceil(x, out=... if out is None else out)
     res = nx.floor(x, out=res, where=nx.greater_equal(x, 0))
 
     # when no out argument is passed and no subclasses are involved, flatten
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 1d4026d8562f..57228e363e71 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -3925,7 +3925,7 @@ def test_fraction(self):
         q = np.quantile(x, .5)
         assert_equal(q, 1.75)
-        assert_equal(type(q), np.float64)
+        assert isinstance(q, float)
 
         q = np.quantile(x, Fraction(1, 2))
         assert_equal(q, Fraction(7, 4))
diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py
index 8dde643781b8..c3cbf3d0ef98 100644
--- a/numpy/linalg/_linalg.py
+++ b/numpy/linalg/_linalg.py
@@ -2016,18 +2016,14 @@ def cond(x, p=None):
         r = r.astype(result_t, copy=False)
 
     # Convert nans to infs unless the original array had nan entries
-    r = asarray(r)
     nan_mask = isnan(r)
     if nan_mask.any():
         nan_mask &= ~isnan(x).any(axis=(-2, -1))
         if r.ndim > 0:
             r[nan_mask] = inf
         elif nan_mask:
-            r[()] = inf
-
-    # Convention is to return scalars instead of 0d arrays
-    if r.ndim == 0:
-        r = r[()]
+            # Convention is to return scalars instead of 0d arrays.
+            r = r.dtype.type(inf)
 
     return r
 

From 2d73f33a0263584f1966be6ebb843be571ed47b4 Mon Sep 17 00:00:00 2001
From: Joren Hammudoglu
Date: Tue, 28 Oct 2025 17:56:42 +0100
Subject: [PATCH 0756/1018] TYP: ``numpy.ma``: Annotate the callable wrapper
 classes (#30084)

* TYP: stub ``ma.isMaskedArray``

* TYP: stub (subclasses of) `ma.core._MaskedUFunc`

* TYP: work around false positive stubtest errors for `ma.isarray` and `ma.isMA`

* TYP: stub `ma.masked_print_option`

* TYP: stub `ma.core._frommethod` (partially)

* TYP: "dot" the remaining callable wrapper instances
---
 numpy/ma/core.pyi | 391 +++++++++++++++++++++++++++++-----------------
 1 file changed, 244 insertions(+), 147 deletions(-)

diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi
index 78114931cbac..ae66384098d8 100644
--- a/numpy/ma/core.pyi
+++ b/numpy/ma/core.pyi
@@ -2,9 +2,10 @@
 import datetime as dt
 
 from _typeshed import Incomplete
-from collections.abc import Sequence
+from collections.abc import Callable, Sequence
 from typing import (
     Any,
+    Concatenate,
     Final,
     Generic,
     Literal,
@@ -16,9 +17,10 @@
     SupportsIndex,
     SupportsInt,
     TypeAlias,
+    final,
     overload,
 )
-from typing_extensions import TypeIs, TypeVar, override
+from typing_extensions import ParamSpec, TypeIs, TypeVar, override
 
 import numpy as np
 from numpy import (
@@ -292,6 +294,15 @@
 _ScalarT = TypeVar("_ScalarT", bound=generic)
 _ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True)
 _NumberT = TypeVar("_NumberT", bound=number)
 _RealNumberT = TypeVar("_RealNumberT", bound=floating | integer)
+_UFuncT_co = TypeVar(
+    "_UFuncT_co",
+    # the `| Callable` simplifies self-binding to the ufunc's callable signature
+    bound=np.ufunc | Callable[..., object],
+    default=np.ufunc,
+    covariant=True,
+)
+_Pss = ParamSpec("_Pss")
+_T = TypeVar("_T")
 
 _Ignored: TypeAlias = object
 
@@ -319,6 +330,10 @@
 _NoMaskType: TypeAlias = np.bool_[Literal[False]]  # type of `np.False_`
 _MaskArray: TypeAlias = np.ndarray[_ShapeOrAnyT, np.dtype[np.bool_]]
 
+_FillValue: TypeAlias = complex | None  # int | float | complex | None
+_FillValueCallable: TypeAlias = Callable[[np.dtype | ArrayLike], _FillValue]
+_DomainCallable: TypeAlias = Callable[..., NDArray[np.bool_]]
+
 ###
 
 MaskType = np.bool_
@@ -329,97 +344,171 @@
 class MaskedArrayFutureWarning(FutureWarning): ...
 class MAError(Exception): ...
 class MaskError(MAError): ...
 
-class _MaskedUFunc:
-    f: Any
-    __doc__: Any
-    __name__: Any
-    def __init__(self, ufunc): ...
-
-class _MaskedUnaryOperation(_MaskedUFunc):
-    fill: Any
-    domain: Any
-    def __init__(self, mufunc, fill=..., domain=...): ...
-    def __call__(self, a, *args, **kwargs): ...
-
-class _MaskedBinaryOperation(_MaskedUFunc):
-    fillx: Any
-    filly: Any
-    def __init__(self, mbfunc, fillx=..., filly=...): ...
-    def __call__(self, a, b, *args, **kwargs): ...
-    def reduce(self, target, axis=0, dtype=None): ...
-    def outer(self, a, b): ...
-    def accumulate(self, target, axis=0): ...
-
-class _DomainedBinaryOperation(_MaskedUFunc):
-    domain: Any
-    fillx: Any
-    filly: Any
-    def __init__(self, dbfunc, domain, fillx=..., filly=...): ...
-    def __call__(self, a, b, *args, **kwargs): ...
+# not generic at runtime
+class _MaskedUFunc(Generic[_UFuncT_co]):
+    f: _UFuncT_co  # readonly
+    def __init__(self, /, ufunc: _UFuncT_co) -> None: ...
+
+# not generic at runtime
+class _MaskedUnaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]):
+    fill: Final[_FillValue]
+    domain: Final[_DomainCallable | None]
+
+    def __init__(self, /, mufunc: _UFuncT_co, fill: _FillValue = 0, domain: _DomainCallable | None = None) -> None: ...
+
+    # NOTE: Overloaded callable signatures might not work on pyright; this is a
+    # long-standing issue that is unique to pyright:
+    # https://github.com/microsoft/pyright/issues/9663
+    # https://github.com/microsoft/pyright/issues/10849
+    # https://github.com/microsoft/pyright/issues/10899
+    # https://github.com/microsoft/pyright/issues/11049
+    def __call__(
+        self: _MaskedUnaryOperation[Callable[Concatenate[Any, _Pss], _T]],
+        /,
+        a: ArrayLike,
+        *args: _Pss.args,
+        **kwargs: _Pss.kwargs,
+    ) -> _T: ...
+
+# not generic at runtime
+class _MaskedBinaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]):
+    fillx: Final[_FillValue]
+    filly: Final[_FillValue]
+
+    def __init__(self, /, mbfunc: _UFuncT_co, fillx: _FillValue = 0, filly: _FillValue = 0) -> None: ...
+
+    # NOTE: See the comment in `_MaskedUnaryOperation.__call__`
+    def __call__(
+        self: _MaskedBinaryOperation[Callable[Concatenate[Any, Any, _Pss], _T]],
+        /,
+        a: ArrayLike,
+        b: ArrayLike,
+        *args: _Pss.args,
+        **kwargs: _Pss.kwargs,
+    ) -> _T: ...
+
+    # NOTE: We cannot meaningfully annotate the return (d)types of these methods until
+    # the signatures of the corresponding `numpy.ufunc` methods are specified.
+    def reduce(self, /, target: ArrayLike, axis: SupportsIndex = 0, dtype: DTypeLike | None = None) -> Incomplete: ...
+    def outer(self, /, a: ArrayLike, b: ArrayLike) -> _MaskedArray[Incomplete]: ...
+    def accumulate(self, /, target: ArrayLike, axis: SupportsIndex = 0) -> _MaskedArray[Incomplete]: ...
+
+# not generic at runtime
+class _DomainedBinaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]):
+    domain: Final[_DomainCallable]
+    fillx: Final[_FillValue]
+    filly: Final[_FillValue]
+
+    def __init__(
+        self,
+        /,
+        dbfunc: _UFuncT_co,
+        domain: _DomainCallable,
+        fillx: _FillValue = 0,
+        filly: _FillValue = 0,
+    ) -> None: ...
+
+    # NOTE: See the comment in `_MaskedUnaryOperation.__call__`
+    def __call__(
+        self: _DomainedBinaryOperation[Callable[Concatenate[Any, Any, _Pss], _T]],
+        /,
+        a: ArrayLike,
+        b: ArrayLike,
+        *args: _Pss.args,
+        **kwargs: _Pss.kwargs,
+    ) -> _T: ...
+ +# not generic at runtime +class _extrema_operation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): + compare: Final[_MaskedBinaryOperation] + fill_value_func: Final[_FillValueCallable] + def __init__( + self, + /, + ufunc: _UFuncT_co, + compare: _MaskedBinaryOperation, + fill_value: _FillValueCallable, + ) -> None: ... + + # NOTE: This class is only used internally for `maximum` and `minimum`, so we are + # able to annotate the `__call__` method specifically for those two functions. + @overload + def __call__(self, /, a: _ArrayLike[_ScalarT], b: _ArrayLike[_ScalarT]) -> _MaskedArray[_ScalarT]: ... + @overload + def __call__(self, /, a: ArrayLike, b: ArrayLike) -> _MaskedArray[Incomplete]: ... + + # NOTE: We cannot meaningfully annotate the return (d)types of these methods until + # the signatures of the corresponding `numpy.ufunc` methods are specified. + def reduce(self, /, target: ArrayLike, axis: SupportsIndex | _NoValueType = ...) -> Incomplete: ... + def outer(self, /, a: ArrayLike, b: ArrayLike) -> _MaskedArray[Incomplete]: ... + +@final class _MaskedPrintOption: - def __init__(self, display): ... - def display(self): ... - def set_display(self, s): ... - def enabled(self): ... - def enable(self, shrink=1): ... + _display: str + _enabled: bool | Literal[0, 1] + def __init__(self, /, display: str) -> None: ... + def display(self, /) -> str: ... + def set_display(self, /, s: str) -> None: ... + def enabled(self, /) -> bool: ... + def enable(self, /, shrink: bool | Literal[0, 1] = 1) -> None: ... masked_print_option: Final[_MaskedPrintOption] = ... -exp: _MaskedUnaryOperation -conjugate: _MaskedUnaryOperation -sin: _MaskedUnaryOperation -cos: _MaskedUnaryOperation -arctan: _MaskedUnaryOperation -arcsinh: _MaskedUnaryOperation -sinh: _MaskedUnaryOperation -cosh: _MaskedUnaryOperation -tanh: _MaskedUnaryOperation -abs: _MaskedUnaryOperation -absolute: _MaskedUnaryOperation -angle: _MaskedUnaryOperation -fabs: _MaskedUnaryOperation -negative: _MaskedUnaryOperation -floor: _MaskedUnaryOperation -ceil: _MaskedUnaryOperation -around: _MaskedUnaryOperation -logical_not: _MaskedUnaryOperation -sqrt: _MaskedUnaryOperation -log: _MaskedUnaryOperation -log2: _MaskedUnaryOperation -log10: _MaskedUnaryOperation -tan: _MaskedUnaryOperation -arcsin: _MaskedUnaryOperation -arccos: _MaskedUnaryOperation -arccosh: _MaskedUnaryOperation -arctanh: _MaskedUnaryOperation - -add: _MaskedBinaryOperation -subtract: _MaskedBinaryOperation -multiply: _MaskedBinaryOperation -arctan2: _MaskedBinaryOperation -equal: _MaskedBinaryOperation -not_equal: _MaskedBinaryOperation -less_equal: _MaskedBinaryOperation -greater_equal: _MaskedBinaryOperation -less: _MaskedBinaryOperation -greater: _MaskedBinaryOperation -logical_and: _MaskedBinaryOperation +exp: _MaskedUnaryOperation = ... +conjugate: _MaskedUnaryOperation = ... +sin: _MaskedUnaryOperation = ... +cos: _MaskedUnaryOperation = ... +arctan: _MaskedUnaryOperation = ... +arcsinh: _MaskedUnaryOperation = ... +sinh: _MaskedUnaryOperation = ... +cosh: _MaskedUnaryOperation = ... +tanh: _MaskedUnaryOperation = ... +abs: _MaskedUnaryOperation = ... +absolute: _MaskedUnaryOperation = ... +angle: _MaskedUnaryOperation = ... +fabs: _MaskedUnaryOperation = ... +negative: _MaskedUnaryOperation = ... +floor: _MaskedUnaryOperation = ... +ceil: _MaskedUnaryOperation = ... +around: _MaskedUnaryOperation = ... +logical_not: _MaskedUnaryOperation = ... +sqrt: _MaskedUnaryOperation = ... +log: _MaskedUnaryOperation = ... +log2: _MaskedUnaryOperation = ... 
+log10: _MaskedUnaryOperation = ... +tan: _MaskedUnaryOperation = ... +arcsin: _MaskedUnaryOperation = ... +arccos: _MaskedUnaryOperation = ... +arccosh: _MaskedUnaryOperation = ... +arctanh: _MaskedUnaryOperation = ... + +add: _MaskedBinaryOperation = ... +subtract: _MaskedBinaryOperation = ... +multiply: _MaskedBinaryOperation = ... +arctan2: _MaskedBinaryOperation = ... +equal: _MaskedBinaryOperation = ... +not_equal: _MaskedBinaryOperation = ... +less_equal: _MaskedBinaryOperation = ... +greater_equal: _MaskedBinaryOperation = ... +less: _MaskedBinaryOperation = ... +greater: _MaskedBinaryOperation = ... +logical_and: _MaskedBinaryOperation = ... def alltrue(target: ArrayLike, axis: SupportsIndex | None = 0, dtype: _DTypeLikeBool | None = None) -> Incomplete: ... -logical_or: _MaskedBinaryOperation +logical_or: _MaskedBinaryOperation = ... def sometrue(target: ArrayLike, axis: SupportsIndex | None = 0, dtype: _DTypeLikeBool | None = None) -> Incomplete: ... -logical_xor: _MaskedBinaryOperation -bitwise_and: _MaskedBinaryOperation -bitwise_or: _MaskedBinaryOperation -bitwise_xor: _MaskedBinaryOperation -hypot: _MaskedBinaryOperation - -divide: _DomainedBinaryOperation -true_divide: _DomainedBinaryOperation -floor_divide: _DomainedBinaryOperation -remainder: _DomainedBinaryOperation -fmod: _DomainedBinaryOperation -mod: _DomainedBinaryOperation +logical_xor: _MaskedBinaryOperation = ... +bitwise_and: _MaskedBinaryOperation = ... +bitwise_or: _MaskedBinaryOperation = ... +bitwise_xor: _MaskedBinaryOperation = ... +hypot: _MaskedBinaryOperation = ... + +divide: _DomainedBinaryOperation = ... +true_divide: _DomainedBinaryOperation = ... +floor_divide: _DomainedBinaryOperation = ... +remainder: _DomainedBinaryOperation = ... +fmod: _DomainedBinaryOperation = ... +mod: _DomainedBinaryOperation = ... # `obj` can be anything (even `object()`), and is too "flexible", so we can't # meaningfully annotate it, or its return type. @@ -2266,9 +2355,9 @@ class mvoid(MaskedArray[_ShapeT_co, _DTypeT_co]): def filled(self, fill_value=None): ... def tolist(self): ... # type: ignore[override] -def isMaskedArray(x): ... -isarray = isMaskedArray -isMA = isMaskedArray +def isMaskedArray(x: object) -> TypeIs[MaskedArray]: ... +def isarray(x: object) -> TypeIs[MaskedArray]: ... # alias to isMaskedArray +def isMA(x: object) -> TypeIs[MaskedArray]: ... # alias to isMaskedArray # 0D float64 array class MaskedConstant(MaskedArray[tuple[()], dtype[float64]]): @@ -2362,16 +2451,6 @@ def asanyarray(a: object, dtype: DTypeLike | None = None) -> _MaskedArray[_Scala # def is_masked(x: object) -> bool: ... -class _extrema_operation(_MaskedUFunc): - compare: Any - fill_value_func: Any - def __init__(self, ufunc, compare, fill_value): ... - # NOTE: in practice `b` has a default value, but users should - # explicitly provide a value here as the default is deprecated - def __call__(self, a, b): ... - def reduce(self, target, axis=...): ... - def outer(self, a, b): ... - @overload def min( obj: _ArrayLike[_ScalarT], @@ -2474,38 +2553,58 @@ def ptp( keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... +@final class _frommethod: - __name__: Any - __doc__: Any - reversed: Any - def __init__(self, methodname, reversed=...): ... - def getdoc(self): ... - def __call__(self, a, *args, **params): ... 
- -all: _frommethod -anomalies: _frommethod -anom: _frommethod -any: _frommethod -compress: _frommethod -cumprod: _frommethod -cumsum: _frommethod -copy: _frommethod -diagonal: _frommethod -harden_mask: _frommethod -ids: _frommethod -mean: _frommethod -nonzero: _frommethod -prod: _frommethod -product: _frommethod -ravel: _frommethod -repeat: _frommethod -soften_mask: _frommethod -std: _frommethod -sum: _frommethod -swapaxes: _frommethod -trace: _frommethod -var: _frommethod - + __name__: str + __doc__: str | None + reversed: Final[bool] + + def __init__(self, /, methodname: str, reversed: bool = False) -> None: ... + # TODO: specializable callable signatures + def __call__(self, /, a: Incomplete, *args: Incomplete, **params: Incomplete) -> Incomplete: ... + def getdoc(self, /) -> str | None: ... + +# (self, /) +ids: _frommethod = ... +nonzero: _frommethod = ... +harden_mask: _frommethod = ... +soften_mask: _frommethod = ... +shrink_mask: _frommethod = ... +# (self, /, order='C') +ravel: _frommethod = ... +# (self, /, *args, **params) +copy: _frommethod = ... +diagonal: _frommethod = ... +repeat: _frommethod = ... +swapaxes: _frommethod = ... +# (self, /, axis=None, dtype=None) +anomalies: _frommethod = ... +anom: _frommethod = ... # == anomalies +# (self, /, axis=None, out=None, keepdims=) +all: _frommethod = ... +any: _frommethod = ... +# (condition, self, /, axis=None, out=None) +# NOTE: this is the only `_frommethod` instance with `reversed=True` +compress: _frommethod = ... +# (self, /, axis=None, dtype=None, out=None) +cumprod: _frommethod = ... +cumsum: _frommethod = ... +# (self, /, axis=None, dtype=None, out=None, keepdims=) +mean: _frommethod = ... +prod: _frommethod = ... +product: _frommethod = ... +sum: _frommethod = ... +# (self, /, offset=0, axis1=0, axis2=1, dtype=None, out=None) +trace: _frommethod = ... +# (self, /, axis=None, dtype=None, out=None, ddof=0, keepdims=, mean=) +std: _frommethod = ... +var: _frommethod = ... + +# (a, b) +minimum: _extrema_operation = ... +maximum: _extrema_operation = ... + +# NOTE: this is a `_frommethod` instance at runtime @overload def count(a: ArrayLike, axis: None = None, keepdims: Literal[False] | _NoValueType = ...) -> int: ... @overload @@ -2515,6 +2614,7 @@ def count(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: Literal[Tru @overload def count(a: ArrayLike, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... +# NOTE: this is a `_frommethod` instance at runtime @overload def argmin( a: ArrayLike, @@ -2552,7 +2652,7 @@ def argmin( keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... -# +# keep in sync with `argmin` @overload def argmax( a: ArrayLike, @@ -2590,9 +2690,6 @@ def argmax( keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... -minimum: _extrema_operation -maximum: _extrema_operation - @overload def take( a: _ArrayLike[_ScalarT], @@ -2709,19 +2806,19 @@ class _convert2ma: def __call__(self, /, *args: object, **params: object) -> Any: ... def getdoc(self, /, np_ret: str, np_ma_ret: str) -> str | None: ... -arange: _convert2ma -clip: _convert2ma -empty: _convert2ma -empty_like: _convert2ma -frombuffer: _convert2ma -fromfunction: _convert2ma -identity: _convert2ma -indices: _convert2ma -ones: _convert2ma -ones_like: _convert2ma -squeeze: _convert2ma -zeros: _convert2ma -zeros_like: _convert2ma +arange: _convert2ma = ... +clip: _convert2ma = ... +empty: _convert2ma = ... +empty_like: _convert2ma = ... +frombuffer: _convert2ma = ... +fromfunction: _convert2ma = ... +identity: _convert2ma = ... 
+indices: _convert2ma = ... +ones: _convert2ma = ... +ones_like: _convert2ma = ... +squeeze: _convert2ma = ... +zeros: _convert2ma = ... +zeros_like: _convert2ma = ... def append(a, b, axis=None): ... def dot(a, b, strict=False, out=None): ... From e6478c508435e5dcaba256561eed4e117121a9d9 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 28 Oct 2025 19:14:20 +0100 Subject: [PATCH 0757/1018] BUG: Fix resize when it contains references (#29970) --- numpy/_core/src/multiarray/shape.c | 29 ++++++++++++++++++---------- numpy/_core/tests/test_multiarray.py | 14 ++++++++++++++ 2 files changed, 33 insertions(+), 10 deletions(-) diff --git a/numpy/_core/src/multiarray/shape.c b/numpy/_core/src/multiarray/shape.c index 340fe7289ac8..83de9f19a574 100644 --- a/numpy/_core/src/multiarray/shape.c +++ b/numpy/_core/src/multiarray/shape.c @@ -121,6 +121,15 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, "no memory handler found but OWNDATA flag set"); return NULL; } + if (newnbytes < oldnbytes) { + /* Clear now removed data (if dtype has references) */ + if (PyArray_ClearBuffer( + PyArray_DESCR(self), PyArray_BYTES(self) + newnbytes, + elsize, oldsize-newsize, PyArray_ISALIGNED(self)) < 0) { + return NULL; + } + } + new_data = PyDataMem_UserRENEW(PyArray_DATA(self), newnbytes == 0 ? elsize : newnbytes, handler); @@ -130,17 +139,17 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, return NULL; } ((PyArrayObject_fields *)self)->data = new_data; - } - if (newnbytes > oldnbytes && PyArray_ISWRITEABLE(self)) { - /* Fill new memory with zeros (PyLong zero for object arrays) */ - npy_intp stride = elsize; - npy_intp size = newsize - oldsize; - char *data = PyArray_BYTES(self) + oldnbytes; - int aligned = PyArray_ISALIGNED(self); - if (PyArray_ZeroContiguousBuffer(PyArray_DESCR(self), data, - stride, size, aligned) < 0) { - return NULL; + if (newnbytes > oldnbytes && PyArray_ISWRITEABLE(self)) { + /* Fill new memory with zeros (PyLong zero for object arrays) */ + npy_intp stride = elsize; + npy_intp size = newsize - oldsize; + char *data = PyArray_BYTES(self) + oldnbytes; + int aligned = PyArray_ISALIGNED(self); + if (PyArray_ZeroContiguousBuffer(PyArray_DESCR(self), data, + stride, size, aligned) < 0) { + return NULL; + } } } diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index ca34756e5882..c62fb5b4e905 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -6312,6 +6312,20 @@ def test_obj_obj(self): assert_array_equal(a['k'][-5:], 0) assert_array_equal(a['k'][:-5], 1) + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + @pytest.mark.parametrize("dtype", ["O", "O,O"]) + def test_obj_obj_shrinking(self, dtype): + # check that memory is freed when shrinking an array. + test_obj = object() + expected = sys.getrefcount(test_obj) + a = np.array([test_obj, test_obj, test_obj], dtype=dtype) + assert a.size == 3 + a.resize((2, 1)) # two elements, not three! 
+ assert a.size == 2 + del a + # if all is well, then we reclaimed all references + assert sys.getrefcount(test_obj) == expected + def test_empty_view(self): # check that sizes containing a zero don't trigger a reallocate for # already empty arrays From b205e91730e5be25a1da917a7d765b077a9f800f Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 28 Oct 2025 22:27:24 +0100 Subject: [PATCH 0758/1018] BUG: Fix ``ma.core._frommethod`` function signatures --- numpy/ma/core.py | 61 ++++++++++++++++++------------------- numpy/ma/tests/test_core.py | 14 +++++++++ 2 files changed, 44 insertions(+), 31 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 621dbd94640b..7803556fb660 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -7047,7 +7047,7 @@ def ptp(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): ############################################################################## -class _frommethod: +def _frommethod(methodname: str, reversed: bool = False): """ Define functions from existing MaskedArray methods. @@ -7055,44 +7055,47 @@ class _frommethod: ---------- methodname : str Name of the method to transform. - + reversed : bool, optional + Whether to reverse the first two arguments of the method. Default is False. """ + method = getattr(MaskedArray, methodname) + assert callable(method) - def __init__(self, methodname, reversed=False): - self.__name__ = methodname - self.__qualname__ = methodname - self.__doc__ = self.getdoc() - self.reversed = reversed + signature = inspect.signature(method) + params = list(signature.parameters.values()) + params[0] = params[0].replace(name="a") # rename 'self' to 'a' - def getdoc(self): - "Return the doc of the function (from the doc of the method)." - meth = getattr(MaskedArray, self.__name__, None) or\ - getattr(np, self.__name__, None) - signature = self.__name__ + get_object_signature(meth) - if meth is not None: - doc = f""" {signature} -{getattr(meth, '__doc__', None)}""" - return doc + if reversed: + assert len(params) >= 2 + params[0], params[1] = params[1], params[0] + + def wrapper(a, b, *args, **params): + return getattr(asanyarray(b), methodname)(a, *args, **params) - def __call__(self, a, *args, **params): - if self.reversed: - args = list(args) - a, args[0] = args[0], a + else: + def wrapper(a, *args, **params): + return getattr(asanyarray(a), methodname)(*args, **params) + + wrapper.__signature__ = signature.replace(parameters=params) + wrapper.__name__ = wrapper.__qualname__ = methodname - marr = asanyarray(a) - method_name = self.__name__ - method = getattr(type(marr), method_name, None) - if method is None: - # use the corresponding np function - method = getattr(np, method_name) + # __doc__ is None when using `python -OO ...` + if method.__doc__ is not None: + str_signature = f"{methodname}{signature}" + # TODO: For methods with a docstring "Parameters" section, that do not already + # mention `a` (see e.g. `MaskedArray.var.__doc__`), it should be inserted there. 
+ wrapper.__doc__ = f" {str_signature}\n{method.__doc__}" - return method(marr, *args, **params) + return wrapper all = _frommethod('all') anomalies = anom = _frommethod('anom') any = _frommethod('any') +argmax = _frommethod('argmax') +argmin = _frommethod('argmin') compress = _frommethod('compress', reversed=True) +count = _frommethod('count') cumprod = _frommethod('cumprod') cumsum = _frommethod('cumsum') copy = _frommethod('copy') @@ -7116,7 +7119,6 @@ def __call__(self, a, *args, **params): trace = _frommethod('trace') var = _frommethod('var') -count = _frommethod('count') def take(a, indices, axis=None, out=None, mode='raise'): """ @@ -7205,9 +7207,6 @@ def power(a, b, third=None): return result -argmin = _frommethod('argmin') -argmax = _frommethod('argmax') - def argsort(a, axis=np._NoValue, kind=None, order=None, endwith=True, fill_value=None, *, stable=None): "Function version of the eponymous method." diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 2da03158d317..bf7b94d720ce 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -6,6 +6,7 @@ __author__ = "Pierre GF Gerard-Marchant" import copy +import inspect import itertools import operator import pickle @@ -5963,3 +5964,16 @@ def test_uint_fill_value_and_filled(): # And this ensures things like filled work: np.testing.assert_array_equal( a.filled(), np.array([999999, 1]).astype("uint16"), strict=True) + + +@pytest.mark.parametrize( + ('fn', 'signature'), + [ + (np.ma.nonzero, "(a)"), + (np.ma.anomalies, "(a, axis=None, dtype=None)"), + (np.ma.cumsum, "(a, axis=None, dtype=None, out=None)"), + (np.ma.compress, "(condition, a, axis=None, out=None)"), + ] +) +def test_frommethod_signature(fn, signature): + assert str(inspect.signature(fn)) == signature From aaf43cab66ef41b22f8e7533d83d3483c78502e4 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 29 Oct 2025 00:19:13 +0100 Subject: [PATCH 0759/1018] TYP: remove unused stubtest allowlist entries for ``numpy.ma`` --- tools/stubtest/allowlist.txt | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tools/stubtest/allowlist.txt b/tools/stubtest/allowlist.txt index 77ff582c9f66..2cfa30d3c0b1 100644 --- a/tools/stubtest/allowlist.txt +++ b/tools/stubtest/allowlist.txt @@ -182,8 +182,3 @@ numpy\.linalg\._umath_linalg\.qr_complete numpy\.linalg\._umath_linalg\.qr_reduced numpy\.linalg\._umath_linalg\.solve numpy\.linalg\._umath_linalg\.solve1 - -# ma.core._frommethod callables -numpy\.ma\.(core\.)?argmax -numpy\.ma\.(core\.)?argmin -numpy\.ma\.(core\.)?count From f0e732b6c06626675bb7680e1d0e3e979d55ab1d Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 29 Oct 2025 02:21:58 +0100 Subject: [PATCH 0760/1018] TYP: Annotate the ``ma.core._frommethod`` functions --- numpy/_core/fromnumeric.pyi | 5 + numpy/lib/_function_base_impl.pyi | 1 + numpy/ma/core.pyi | 435 ++++++++++++++++++++++++++---- 3 files changed, 388 insertions(+), 53 deletions(-) diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index d0e0a2a1f67c..a2c4f47ecb2f 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -269,6 +269,7 @@ def choose( mode: _ModeKind = "raise", ) -> _ArrayT: ... +# keep in sync with `ma.core.repeat` @overload def repeat( a: _ArrayLike[_ScalarT], @@ -294,6 +295,7 @@ def repeat( axis: SupportsIndex, ) -> NDArray[Any]: ... +# def put( a: NDArray[Any], ind: _ArrayLikeInt_co, @@ -301,6 +303,7 @@ def put( mode: _ModeKind = "raise", ) -> None: ... 
+# keep in sync with `ma.core.swapaxes` @overload def swapaxes( a: _ArrayLike[_ScalarT], @@ -507,6 +510,7 @@ def squeeze( axis: _ShapeLike | None = None, ) -> NDArray[Any]: ... +# keep in sync with `ma.core.diagonal` @overload def diagonal( a: _ArrayLike[_ScalarT], @@ -522,6 +526,7 @@ def diagonal( axis2: SupportsIndex = 1, # >= 2D array ) -> NDArray[Any]: ... +# keep in sync with `ma.core.trace` @overload def trace( a: ArrayLike, # >= 2D array diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 651ece6b3447..bad00641072c 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -251,6 +251,7 @@ def select( default: ArrayLike = 0, ) -> NDArray[Any]: ... +# keep roughly in sync with `ma.core.copy` @overload def copy( a: _ArrayT, diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index ae66384098d8..8e6309504445 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1,6 +1,7 @@ # pyright: reportIncompatibleMethodOverride=false import datetime as dt +import types from _typeshed import Incomplete from collections.abc import Callable, Sequence from typing import ( @@ -1117,6 +1118,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def filled(self, /, fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... def compressed(self) -> ndarray[tuple[int], _DTypeT_co]: ... + # keep roughly in sync with `ma.core.compress`, but swap the first two arguments @overload # type: ignore[override] def compress( self, @@ -1615,6 +1617,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def count(self, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... + # keep roughly in sync with `ma.core.ravel` def ravel(self, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... # Keep in sync with `ndarray.reshape` @@ -1705,6 +1708,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def ids(self) -> tuple[int, int]: ... def iscontiguous(self) -> bool: ... + # Keep in sync with `ma.core.all` @overload # type: ignore[override] def all( self, @@ -1750,6 +1754,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... + # Keep in sync with `ma.core.any` @overload # type: ignore[override] def any( self, @@ -1795,9 +1800,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... - def nonzero(self) -> tuple[_Array1D[intp], ...]: ... - - # Keep in sync with `ndarray.trace` + # Keep in sync with `ndarray.trace` and `ma.core.trace` @overload def trace( self, # >= 2D MaskedArray @@ -1833,6 +1836,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def dot(self, b: ArrayLike, out: _ArrayT, strict: bool = False) -> _ArrayT: ... + # Keep in sync with `ma.core.sum` @overload # type: ignore[override] def sum( self, @@ -1862,7 +1866,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... - # Keep in sync with `ndarray.cumsum` + # Keep in sync with `ndarray.cumsum` and `ma.core.cumsum` @overload # out: None (default) def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> _MaskedArray[Any]: ... @overload # out: ndarray @@ -1870,6 +1874,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... 
+ # Keep in sync with `ma.core.prod` @overload # type: ignore[override] def prod( self, @@ -1899,9 +1904,9 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... - product: Any + product = prod - # Keep in sync with `ndarray.cumprod` + # Keep in sync with `ndarray.cumprod` and `ma.core.cumprod` @overload # out: None (default) def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> _MaskedArray[Any]: ... @overload # out: ndarray @@ -1909,6 +1914,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + # Keep in sync with `ma.core.mean` @overload # type: ignore[override] def mean( self, @@ -1937,6 +1943,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... + # keep roughly in sync with `ma.core.anom` @overload def anom(self, axis: SupportsIndex | None = None, dtype: None = None) -> Self: ... @overload @@ -1944,6 +1951,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def anom(self, axis: SupportsIndex | None, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, dtype]: ... + # keep in sync with `std` and `ma.core.var` @overload # type: ignore[override] def var( self, @@ -1976,6 +1984,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): mean: _ArrayLikeNumber_co | _NoValueType = ..., ) -> _ArrayT: ... + # keep in sync with `var` and `ma.core.std` @overload # type: ignore[override] def std( self, @@ -2299,6 +2308,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): diagonal: Any flatten: Any + # keep in sync with `ndarray.repeat` @overload def repeat( self, @@ -2314,11 +2324,12 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): squeeze: Any + # keep in sync with `ndarray.swapaxes` def swapaxes( self, axis1: SupportsIndex, axis2: SupportsIndex, - / + /, ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... # @@ -2553,52 +2564,370 @@ def ptp( keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... -@final -class _frommethod: - __name__: str - __doc__: str | None - reversed: Final[bool] - - def __init__(self, /, methodname: str, reversed: bool = False) -> None: ... - # TODO: specializable callable signatures - def __call__(self, /, a: Incomplete, *args: Incomplete, **params: Incomplete) -> Incomplete: ... - def getdoc(self, /) -> str | None: ... - -# (self, /) -ids: _frommethod = ... -nonzero: _frommethod = ... -harden_mask: _frommethod = ... -soften_mask: _frommethod = ... -shrink_mask: _frommethod = ... -# (self, /, order='C') -ravel: _frommethod = ... -# (self, /, *args, **params) -copy: _frommethod = ... -diagonal: _frommethod = ... -repeat: _frommethod = ... -swapaxes: _frommethod = ... -# (self, /, axis=None, dtype=None) -anomalies: _frommethod = ... -anom: _frommethod = ... # == anomalies -# (self, /, axis=None, out=None, keepdims=) -all: _frommethod = ... -any: _frommethod = ... -# (condition, self, /, axis=None, out=None) -# NOTE: this is the only `_frommethod` instance with `reversed=True` -compress: _frommethod = ... -# (self, /, axis=None, dtype=None, out=None) -cumprod: _frommethod = ... -cumsum: _frommethod = ... -# (self, /, axis=None, dtype=None, out=None, keepdims=) -mean: _frommethod = ... -prod: _frommethod = ... -product: _frommethod = ... -sum: _frommethod = ... -# (self, /, offset=0, axis1=0, axis2=1, dtype=None, out=None) -trace: _frommethod = ... 
-# (self, /, axis=None, dtype=None, out=None, ddof=0, keepdims=, mean=) -std: _frommethod = ... -var: _frommethod = ... +# we cannot meaningfully annotate `frommethod` further, because the callable signature +# of the return type fully depends on the *value* of `methodname` and `reversed` in +# a way that cannot be expressed in the Python type system. +def _frommethod(methodname: str, reversed: bool = False) -> types.FunctionType: ... + +# NOTE: The following `*_mask` functions will accept any array-like input runtime, but +# since their use-cases are specific to masks, they only accept `MaskedArray` inputs. + +# keep in sync with `MaskedArray.harden_mask` +def harden_mask(a: _MArrayT) -> _MArrayT: ... +# keep in sync with `MaskedArray.soften_mask` +def soften_mask(a: _MArrayT) -> _MArrayT: ... +# keep in sync with `MaskedArray.shrink_mask` +def shrink_mask(a: _MArrayT) -> _MArrayT: ... + +# keep in sync with `MaskedArray.ids` +def ids(a: ArrayLike) -> tuple[int, int]: ... + +# keep in sync with `ndarray.nonzero` +def nonzero(a: ArrayLike) -> tuple[ndarray[tuple[int], np.dtype[intp]], ...]: ... + +# keep first overload in sync with `MaskedArray.ravel` +@overload +def ravel(a: np.ndarray[Any, _DTypeT], order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT]: ... +@overload +def ravel(a: _ArrayLike[_ScalarT], order: _OrderKACF = "C") -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... +@overload +def ravel(a: ArrayLike, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... + +# keep roughly in sync with `lib._function_base_impl.copy` +@overload +def copy(a: _MArrayT, order: _OrderKACF = "C") -> _MArrayT: ... +@overload +def copy(a: np.ndarray[_ShapeT, _DTypeT], order: _OrderKACF = "C") -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload +def copy(a: _ArrayLike[_ScalarT], order: _OrderKACF = "C") -> _MaskedArray[_ScalarT]: ... +@overload +def copy(a: ArrayLike, order: _OrderKACF = "C") -> _MaskedArray[Incomplete]: ... + +# keep in sync with `_core.fromnumeric.diagonal` +@overload +def diagonal( + a: _ArrayLike[_ScalarT], + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, +) -> NDArray[_ScalarT]: ... +@overload +def diagonal( + a: ArrayLike, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, +) -> NDArray[Incomplete]: ... + +# keep in sync with `_core.fromnumeric.repeat` +@overload +def repeat(a: _ArrayLike[_ScalarT], repeats: _ArrayLikeInt_co, axis: None = None) -> MaskedArray[tuple[int], dtype[_ScalarT]]: ... +@overload +def repeat(a: _ArrayLike[_ScalarT], repeats: _ArrayLikeInt_co, axis: SupportsIndex) -> _MaskedArray[_ScalarT]: ... +@overload +def repeat(a: ArrayLike, repeats: _ArrayLikeInt_co, axis: None = None) -> MaskedArray[tuple[int], dtype[Incomplete]]: ... +@overload +def repeat(a: ArrayLike, repeats: _ArrayLikeInt_co, axis: SupportsIndex) -> _MaskedArray[Incomplete]: ... + +# keep in sync with `_core.fromnumeric.swapaxes` +@overload +def swapaxes(a: _ArrayLike[_ScalarT], axis1: SupportsIndex, axis2: SupportsIndex) -> _MaskedArray[_ScalarT]: ... +@overload +def swapaxes(a: ArrayLike, axis1: SupportsIndex, axis2: SupportsIndex) -> _MaskedArray[Incomplete]: ... + +# NOTE: The `MaskedArray.anom` definition is specific to `MaskedArray`, so we need +# additional overloads to cover the array-like input here. +@overload # a: MaskedArray, dtype=None +def anom(a: _MArrayT, axis: SupportsIndex | None = None, dtype: None = None) -> _MArrayT: ... 
+@overload # a: array-like, dtype=None +def anom(a: _ArrayLike[_ScalarT], axis: SupportsIndex | None = None, dtype: None = None) -> _MaskedArray[_ScalarT]: ... +@overload # a: unknown array-like, dtype: dtype-like (positional) +def anom(a: ArrayLike, axis: SupportsIndex | None, dtype: _DTypeLike[_ScalarT]) -> _MaskedArray[_ScalarT]: ... +@overload # a: unknown array-like, dtype: dtype-like (keyword) +def anom(a: ArrayLike, axis: SupportsIndex | None = None, *, dtype: _DTypeLike[_ScalarT]) -> _MaskedArray[_ScalarT]: ... +@overload # a: unknown array-like, dtype: unknown dtype-like (positional) +def anom(a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike) -> _MaskedArray[Incomplete]: ... +@overload # a: unknown array-like, dtype: unknown dtype-like (keyword) +def anom(a: ArrayLike, axis: SupportsIndex | None = None, *, dtype: DTypeLike) -> _MaskedArray[Incomplete]: ... + +anomalies = anom + +# Keep in sync with `any` and `MaskedArray.all` +@overload +def all(a: ArrayLike, axis: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ...) -> np.bool: ... +@overload +def all(a: ArrayLike, axis: _ShapeLike | None, out: None, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... +@overload +def all(a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, *, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... +@overload +def all( + a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, keepdims: bool | _NoValueType = ... +) -> np.bool | _MaskedArray[np.bool]: ... +@overload +def all(a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT, keepdims: bool | _NoValueType = ...) -> _ArrayT: ... +@overload +def all(a: ArrayLike, axis: _ShapeLike | None = None, *, out: _ArrayT, keepdims: bool | _NoValueType = ...) -> _ArrayT: ... + +# Keep in sync with `all` and `MaskedArray.any` +@overload +def any(a: ArrayLike, axis: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ...) -> np.bool: ... +@overload +def any(a: ArrayLike, axis: _ShapeLike | None, out: None, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... +@overload +def any(a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, *, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... +@overload +def any( + a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, keepdims: bool | _NoValueType = ... +) -> np.bool | _MaskedArray[np.bool]: ... +@overload +def any(a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT, keepdims: bool | _NoValueType = ...) -> _ArrayT: ... +@overload +def any(a: ArrayLike, axis: _ShapeLike | None = None, *, out: _ArrayT, keepdims: bool | _NoValueType = ...) -> _ArrayT: ... + +# NOTE: The `MaskedArray.compress` definition uses its `DTypeT_co` type parameter, +# which wouldn't work here for array-like inputs, so we need additional overloads. +@overload +def compress( + condition: _ArrayLikeBool_co, a: _ArrayLike[_ScalarT], axis: None = None, out: None = None +) -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... +@overload +def compress( + condition: _ArrayLikeBool_co, a: _ArrayLike[_ScalarT], axis: _ShapeLike | None = None, out: None = None +) -> MaskedArray[_AnyShape, np.dtype[_ScalarT]]: ... +@overload +def compress(condition: _ArrayLikeBool_co, a: ArrayLike, axis: None = None, out: None = None) -> MaskedArray[tuple[int]]: ... +@overload +def compress( + condition: _ArrayLikeBool_co, a: ArrayLike, axis: _ShapeLike | None = None, out: None = None +) -> _MaskedArray[Incomplete]: ... 
+@overload +def compress(condition: _ArrayLikeBool_co, a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... +@overload +def compress(condition: _ArrayLikeBool_co, a: ArrayLike, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + +# Keep in sync with `cumprod` and `MaskedArray.cumsum` +@overload # out: None (default) +def cumsum( + a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None +) -> _MaskedArray[Incomplete]: ... +@overload # out: ndarray (positional) +def cumsum(a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... +@overload # out: ndarray (kwarg) +def cumsum(a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + +# Keep in sync with `cumsum` and `MaskedArray.cumsum` +@overload # out: None (default) +def cumprod( + a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None +) -> _MaskedArray[Incomplete]: ... +@overload # out: ndarray (positional) +def cumprod(a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... +@overload # out: ndarray (kwarg) +def cumprod(a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + +# Keep in sync with `sum`, `prod`, `product`, and `MaskedArray.mean` +@overload +def mean( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., +) -> Incomplete: ... +@overload +def mean( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def mean( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +# Keep in sync with `mean`, `prod`, `product`, and `MaskedArray.sum` +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., +) -> Incomplete: ... +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +# Keep in sync with `product` and `MaskedArray.prod` +@overload +def prod( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., +) -> Incomplete: ... +@overload +def prod( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def prod( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +# Keep in sync with `prod` and `MaskedArray.prod` +@overload +def product( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., +) -> Incomplete: ... 
+@overload +def product( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def product( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +# Keep in sync with `MaskedArray.trace` and `_core.fromnumeric.trace` +@overload +def trace( + a: ArrayLike, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + out: None = None, +) -> Incomplete: ... +@overload +def trace( + a: ArrayLike, + offset: SupportsIndex, + axis1: SupportsIndex, + axis2: SupportsIndex, + dtype: DTypeLike | None, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def trace( + a: ArrayLike, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, +) -> _ArrayT: ... + +# keep in sync with `std` and `MaskedArray.var` +@overload +def std( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> Incomplete: ... +@overload +def std( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def std( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> _ArrayT: ... + +# keep in sync with `std` and `MaskedArray.var` +@overload +def var( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> Incomplete: ... +@overload +def var( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def var( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> _ArrayT: ... # (a, b) minimum: _extrema_operation = ... From 959d38fdf1004b601f1e375ea78ffd3debda321d Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 29 Oct 2025 02:48:41 +0100 Subject: [PATCH 0761/1018] DOC: fix cross-reference warning for ambiguous `.ravel` --- doc/source/user/basics.copies.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/user/basics.copies.rst b/doc/source/user/basics.copies.rst index ec373673d815..6d8e78488e7e 100644 --- a/doc/source/user/basics.copies.rst +++ b/doc/source/user/basics.copies.rst @@ -123,8 +123,8 @@ shape attribute of the array. For example:: AttributeError: Incompatible shape for in-place modification. Use `.reshape()` to make a copy with the desired shape. -Taking the example of another operation, :func:`.ravel` returns a contiguous -flattened view of the array wherever possible. 
On the other hand, +Taking the example of another operation, :func:`numpy.ravel` returns a +contiguous flattened view of the array wherever possible. On the other hand, :meth:`.ndarray.flatten` always returns a flattened copy of the array. However, to guarantee a view in most cases, ``x.reshape(-1)`` may be preferable. From d01d4a709d773c484afd62f4cb3d294330f6d377 Mon Sep 17 00:00:00 2001 From: star1327p Date: Tue, 28 Oct 2025 19:48:30 -0700 Subject: [PATCH 0762/1018] DOC: Correct grammatical usage like a/an [skip actions][skip azp][skip cirrus] --- numpy/_core/src/common/simd/simd.h | 2 +- numpy/_core/src/multiarray/scalartypes.c.src | 2 +- numpy/_core/src/umath/fast_loop_macros.h | 2 +- numpy/_core/tests/test_ufunc.py | 2 +- numpy/distutils/exec_command.py | 2 +- numpy/f2py/symbolic.py | 4 ++-- numpy/lib/_index_tricks_impl.py | 6 +++--- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/numpy/_core/src/common/simd/simd.h b/numpy/_core/src/common/simd/simd.h index fe4ca4da92f5..fd1be6e0c867 100644 --- a/numpy/_core/src/common/simd/simd.h +++ b/numpy/_core/src/common/simd/simd.h @@ -23,7 +23,7 @@ extern "C" { /* * clang commit an aggressive optimization behaviour when flag `-ftrapping-math` * isn't fully supported that's present at -O1 or greater. When partially loading a - * vector register for a operations that requires to fill up the remaining lanes + * vector register for an operation that requires to fill up the remaining lanes * with certain value for example divide operation needs to fill the remaining value * with non-zero integer to avoid fp exception divide-by-zero. * clang optimizer notices that the entire register is not needed for the store diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 14d8bf7585e4..2d63dd6e3602 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -160,7 +160,7 @@ gentype_generic_method(PyObject *self, PyObject *args, PyObject *kwds, * valid self (a generic scalar) and an other item. * May fill self_item and/or other_arr (but not both) with non-NULL values. * - * Why this dance? When the other object is a exactly Python scalar something + * Why this dance? When the other object is exactly a Python scalar something * awkward happens historically in NumPy. * NumPy doesn't define a result, but the ufunc would cast to `astype(object)` * which is the same as `scalar.item()`. And that operation converts e.g. 
diff --git a/numpy/_core/src/umath/fast_loop_macros.h b/numpy/_core/src/umath/fast_loop_macros.h index 5143f414606e..42c2c9d8d04f 100644 --- a/numpy/_core/src/umath/fast_loop_macros.h +++ b/numpy/_core/src/umath/fast_loop_macros.h @@ -16,7 +16,7 @@ /* * largest simd vector size in bytes numpy supports - * it is currently a extremely large value as it is only used for memory + * it is currently an extremely large value as it is only used for memory * overlap checks */ #if NPY_SIMD > 0 diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 2b01d37989cc..0211449c577f 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -1922,7 +1922,7 @@ def test_identityless_reduction_nonreorderable(self): assert_raises(ValueError, np.divide.reduce, a, axis=(0, 1)) def test_reduce_zero_axis(self): - # If we have a n x m array and do a reduction with axis=1, then we are + # If we have an n x m array and do a reduction with axis=1, then we are # doing n reductions, and each reduction takes an m-element array. For # a reduction operation without an identity, then: # n > 0, m > 0: fine diff --git a/numpy/distutils/exec_command.py b/numpy/distutils/exec_command.py index 2d06585a1497..c701465d9ade 100644 --- a/numpy/distutils/exec_command.py +++ b/numpy/distutils/exec_command.py @@ -114,7 +114,7 @@ def get_pythonexe(): return pythonexe def find_executable(exe, path=None, _cache={}): - """Return full path of a executable or None. + """Return full path of an executable or None. Symbolic links are not followed. """ diff --git a/numpy/f2py/symbolic.py b/numpy/f2py/symbolic.py index 820ae2ec9b9f..409b84da531c 100644 --- a/numpy/f2py/symbolic.py +++ b/numpy/f2py/symbolic.py @@ -155,14 +155,14 @@ def ewarn(message): class Expr: - """Represents a Fortran expression as a op-data pair. + """Represents a Fortran expression as an op-data pair. Expr instances are hashable and sortable. """ @staticmethod def parse(s, language=Language.C): - """Parse a Fortran expression to a Expr. + """Parse a Fortran expression to an Expr. """ return fromstring(s, language=language) diff --git a/numpy/lib/_index_tricks_impl.py b/numpy/lib/_index_tricks_impl.py index 666171f7c2aa..1d9358d6f8d2 100644 --- a/numpy/lib/_index_tricks_impl.py +++ b/numpy/lib/_index_tricks_impl.py @@ -472,9 +472,9 @@ class RClass(AxisConcatenator): Optional character strings placed as the first element of the index expression can be used to change the output. The strings 'r' or 'c' result in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row) - matrix is produced. If the result is 1-D and 'c' is specified, then a N x 1 - (column) matrix is produced. If the result is 2-D then both provide the - same matrix result. + matrix is produced. If the result is 1-D and 'c' is specified, then + an N x 1 (column) matrix is produced. + If the result is 2-D then both provide the same matrix result. A string integer specifies which axis to stack multiple comma separated arrays along. A string of two comma-separated integers allows indication From d8e5d849789f501d890250430e48fdbcb43879b1 Mon Sep 17 00:00:00 2001 From: "Christine P. 
Chai" Date: Tue, 28 Oct 2025 20:21:18 -0700 Subject: [PATCH 0763/1018] Update numpy/lib/_index_tricks_impl.py [skip actions][skip azp][skip cirrus] Co-authored-by: Joren Hammudoglu --- numpy/lib/_index_tricks_impl.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_index_tricks_impl.py b/numpy/lib/_index_tricks_impl.py index 1d9358d6f8d2..40fef85b1853 100644 --- a/numpy/lib/_index_tricks_impl.py +++ b/numpy/lib/_index_tricks_impl.py @@ -473,8 +473,8 @@ class RClass(AxisConcatenator): expression can be used to change the output. The strings 'r' or 'c' result in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row) matrix is produced. If the result is 1-D and 'c' is specified, then - an N x 1 (column) matrix is produced. - If the result is 2-D then both provide the same matrix result. + an N x 1 (column) matrix is produced. + If the result is 2-D then both provide the same matrix result. A string integer specifies which axis to stack multiple comma separated arrays along. A string of two comma-separated integers allows indication From b20568e62a39c79ee1a8eecdd1a36ccb35dbbbc0 Mon Sep 17 00:00:00 2001 From: Riku Sakamoto Date: Thu, 30 Oct 2025 00:26:04 +0900 Subject: [PATCH 0764/1018] CI: Update ARM job (armhf_test) to use Ubuntu 24.04 Update the GitHub Actions image for the `armhf_test` job to Ubuntu 24.04-arm. (See gh-30086) --- .github/workflows/linux.yml | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 5a9fb69c5178..b749c60f3cc3 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -166,7 +166,7 @@ jobs: # running on aarch64 (ARM 64-bit) GitHub runners. needs: [smoke_test] if: github.repository == 'numpy/numpy' - runs-on: ubuntu-22.04-arm + runs-on: ubuntu-24.04-arm steps: - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: @@ -177,11 +177,9 @@ jobs: - name: Creates new container run: | docker run --name the_container --interactive \ - -v $(pwd):/numpy arm32v7/ubuntu:22.04 /bin/linux32 /bin/bash -c " + -v $(pwd):/numpy arm32v7/ubuntu:24.04 /bin/linux32 /bin/bash -c " apt update && - apt install -y ninja-build cmake git python3 python-is-python3 python3-dev python3-pip python3-venv && - python -m pip install -r /numpy/requirements/build_requirements.txt && - python -m pip install -r /numpy/requirements/test_requirements.txt + apt install -y ninja-build cmake git python3 python-is-python3 python3-dev python3-pip python3-venv " docker commit the_container the_container @@ -190,7 +188,12 @@ jobs: docker run --rm -e "TERM=xterm-256color" \ -v $(pwd):/numpy the_container \ /bin/script -e -q -c "/bin/linux32 /bin/bash --noprofile --norc -eo pipefail -c ' - cd /numpy && spin build + cd /numpy && + python -m venv venv && + source venv/bin/activate && + python -m pip install -r /numpy/requirements/build_requirements.txt && + python -m pip install -r /numpy/requirements/test_requirements.txt && + spin build '" - name: Meson Log @@ -202,7 +205,7 @@ jobs: docker run --rm -e "TERM=xterm-256color" \ -v $(pwd):/numpy the_container \ /bin/script -e -q -c "/bin/linux32 /bin/bash --noprofile --norc -eo pipefail -c ' - cd /numpy && spin test -m full -- --timeout=600 --durations=10 + cd /numpy && source venv/bin/activate && spin test -m full -- --timeout=600 --durations=10 '" benchmark: From fa84b04c51ae903caf8bcff66e47f74b514582af Mon Sep 17 00:00:00 2001 From: Koki Watanabe 
<56009584+math-hiyoko@users.noreply.github.com> Date: Thu, 30 Oct 2025 10:48:47 +0900 Subject: [PATCH 0765/1018] ENH: np.unique: support hash based unique for complex dtype (#29537) * save: editting * enh: implement unique_complex * fix: spin build * enh: impl unique_numeric for integer, float and complex * fix: case when complex is nan * fix: case when order is not guaranteed * fix: lint * fix: type of data * fix: error * enh: change template type for unordered_set * enh: set function * enh: refactoring * enh: refactoring * enh: refactor test * enh: apply inline * doc: performance and change * enh: test check -0.0 and -np.nan * enh: use fnv-hash for complex * fix: add const * fix: use memcpy * fix: use static_cast * fix: remove support for float * fix: change variable name * fix: inline * fix: template type * fix: padding buffer * fix: padding buffer * fix: use buffer * fix: add constexpr * fix: copy of long double * fix: valie_real -> value_imag * fix: reinterpret value pointer * fix: use NPY_BITSOF_* * fix: use fnv magic prime * enh: refactoring * doc: fix docs * doc: fix docs * enh: add comment * fix: use sizeof to hash complex * fix: use reinterpret_cast * fix: impl hash_complex_large * fix: use constexpr * fix: define hash_complex_clongdouble * fix: use NPY_SIZEOF_LONGDOUBLE * fix: use IEEEl2bitsrep * fix: remove designated initializers * fix: comment * fix: small fix * fix: change case for handling bit expression * fix: refactoring --- doc/release/upcoming_changes/29537.change.rst | 7 + .../upcoming_changes/29537.performance.rst | 9 + numpy/_core/meson.build | 1 + numpy/_core/src/multiarray/unique.cpp | 218 ++++++++++++++++-- numpy/lib/tests/test_arraysetops.py | 27 ++- 5 files changed, 233 insertions(+), 29 deletions(-) create mode 100644 doc/release/upcoming_changes/29537.change.rst create mode 100644 doc/release/upcoming_changes/29537.performance.rst diff --git a/doc/release/upcoming_changes/29537.change.rst b/doc/release/upcoming_changes/29537.change.rst new file mode 100644 index 000000000000..63abbbb5a347 --- /dev/null +++ b/doc/release/upcoming_changes/29537.change.rst @@ -0,0 +1,7 @@ +``unique_values`` for complex dtypes may return unsorted data +------------------------------------------------------------- +np.unique now supports hash-based duplicate removal for complex dtypes. +This enhancement extends the hash-table algorithm +to all complex types ('c') and their extended-precision variants. +The hash-based method provides faster extraction of unique values +but does not guarantee that the result will be sorted. diff --git a/doc/release/upcoming_changes/29537.performance.rst b/doc/release/upcoming_changes/29537.performance.rst new file mode 100644 index 000000000000..8c78dc202a2e --- /dev/null +++ b/doc/release/upcoming_changes/29537.performance.rst @@ -0,0 +1,9 @@ +Performance improvements to ``np.unique`` for complex dtypes +------------------------------------------------------------ +The hash-based algorithm for unique extraction now also supports +complex dtypes, offering noticeable performance gains. + +In our benchmarks on complex128 arrays with 200,000 elements, +the hash-based approach was about 1.4–1.5× faster +than the sort-based baseline when about 20% of the values were unique, +and about 5× faster when only 0.2% of the values were unique.
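For illustration, a minimal sketch of the behaviour these notes describe (the array values here are arbitrary; only the set of unique values is guaranteed, not their order)::

    import numpy as np

    a = np.array([1 + 1j, 2 - 1j, 1 + 1j, 2 - 1j, 3 + 0j], dtype=np.complex128)
    u = np.unique(a)   # hash-based for complex dtypes, so order is not guaranteed
    u = np.sort(u)     # sort explicitly whenever a deterministic order is needed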
diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index a3d8416cdffa..dc07586bcf8e 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1282,6 +1282,7 @@ unique_hash_so = static_library( include_directories: [ 'include', 'src/common', + 'src/npymath', ], dependencies: [ py_dep, diff --git a/numpy/_core/src/multiarray/unique.cpp b/numpy/_core/src/multiarray/unique.cpp index 979fb8fae0a4..910c5f7c731e 100644 --- a/numpy/_core/src/multiarray/unique.cpp +++ b/numpy/_core/src/multiarray/unique.cpp @@ -17,6 +17,8 @@ extern "C" { #include "fnv.h" #include "npy_argparse.h" + #include "numpy/npy_math.h" + #include "numpy/halffloat.h" } // This is to use RAII pattern to handle cpp exceptions while avoiding memory leaks. @@ -34,11 +36,135 @@ FinalAction<F> finally(F f) { } template <typename T> +size_t hash_integer(const T *value, npy_bool equal_nan) { + return std::hash<T>{}(*value); +} + +template <typename S, typename T, S (*real)(T), S (*imag)(T)> +size_t hash_complex(const T *value, npy_bool equal_nan) { + S value_real = real(*value); + S value_imag = imag(*value); + int hasnan = npy_isnan(value_real) || npy_isnan(value_imag); + if (equal_nan && hasnan) { + return 0; + } + + // Now, equal_nan is false or neither of the values is NaN. + // So we don't need to worry about NaN here. + const unsigned char* value_bytes = reinterpret_cast<const unsigned char *>(value); + size_t hash = npy_fnv1a(value_bytes, sizeof(T)); + + return hash; +} + +size_t hash_complex_clongdouble(const npy_clongdouble *value, npy_bool equal_nan) { + npy_longdouble value_real = npy_creall(*value); + npy_longdouble value_imag = npy_cimagl(*value); + int hasnan = npy_isnan(value_real) || npy_isnan(value_imag); + if (equal_nan && hasnan) { + return 0; + } + + // Now, equal_nan is false or neither of the values is NaN. + // So we don't need to worry about NaN here. + // Some floating-point complex dtypes (e.g., npy_complex256) include undefined or + // unused bits in their binary representation + // (see: https://github.com/numpy/numpy/blob/main/numpy/_core/src/npymath/npy_math_private.h#L254-L261). + // Because hashing the raw bit pattern would make the hash depend on those + // undefined bits, we extract the mantissa, exponent, and sign components + // explicitly and pack them into a buffer to ensure the hash is well-defined.
+ #if defined(HAVE_LDOUBLE_INTEL_EXTENDED_12_BYTES_LE) || \ + defined(HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE) || \ + defined(HAVE_LDOUBLE_MOTOROLA_EXTENDED_12_BYTES_BE) + constexpr size_t SIZEOF_LDOUBLE_MAN = sizeof(ldouble_man_t); + constexpr size_t SIZEOF_LDOUBLE_EXP = sizeof(ldouble_exp_t); + constexpr size_t SIZEOF_LDOUBLE_SIGN = sizeof(ldouble_sign_t); + constexpr size_t SIZEOF_BUFFER = 2 * (SIZEOF_LDOUBLE_MAN + SIZEOF_LDOUBLE_MAN + SIZEOF_LDOUBLE_EXP + SIZEOF_LDOUBLE_SIGN); + unsigned char buffer[SIZEOF_BUFFER]; + + union IEEEl2bitsrep bits_real{value_real}, bits_imag{value_imag}; + size_t offset = 0; + + for (const IEEEl2bitsrep &bits: {bits_real, bits_imag}) { + ldouble_man_t manh = GET_LDOUBLE_MANH(bits); + ldouble_man_t manl = GET_LDOUBLE_MANL(bits); + ldouble_exp_t exp = GET_LDOUBLE_EXP(bits); + ldouble_sign_t sign = GET_LDOUBLE_SIGN(bits); + + std::memcpy(buffer + offset, &manh, SIZEOF_LDOUBLE_MAN); + offset += SIZEOF_LDOUBLE_MAN; + std::memcpy(buffer + offset, &manl, SIZEOF_LDOUBLE_MAN); + offset += SIZEOF_LDOUBLE_MAN; + std::memcpy(buffer + offset, &exp, SIZEOF_LDOUBLE_EXP); + offset += SIZEOF_LDOUBLE_EXP; + std::memcpy(buffer + offset, &sign, SIZEOF_LDOUBLE_SIGN); + offset += SIZEOF_LDOUBLE_SIGN; + } + #else + constexpr size_t SIZEOF_BUFFER = NPY_SIZEOF_CLONGDOUBLE; + const unsigned char* buffer = reinterpret_cast<const unsigned char *>(value); + #endif + + size_t hash = npy_fnv1a(buffer, SIZEOF_BUFFER); + + return hash; +} + +template <typename T> +int equal_integer(const T *lhs, const T *rhs, npy_bool equal_nan) { + return *lhs == *rhs; +} + +template <typename S, typename T, S (*real)(T), S (*imag)(T)> +int equal_complex(const T *lhs, const T *rhs, npy_bool equal_nan) { + S lhs_real = real(*lhs); + S lhs_imag = imag(*lhs); + int lhs_isnan = npy_isnan(lhs_real) || npy_isnan(lhs_imag); + S rhs_real = real(*rhs); + S rhs_imag = imag(*rhs); + int rhs_isnan = npy_isnan(rhs_real) || npy_isnan(rhs_imag); + + if (lhs_isnan && rhs_isnan) { + return equal_nan; + } + if (lhs_isnan || rhs_isnan) { + return false; + } + // Now both lhs and rhs are not NaN. + return (lhs_real == rhs_real) && (lhs_imag == rhs_imag); +} + +template <typename T> +void copy_integer(char *data, T *value) { + std::copy_n(value, 1, (T *)data); + return; +} + +template < + typename S, + typename T, + S (*real)(T), + S (*imag)(T), + void (*setreal)(T *, const S), + void (*setimag)(T *, const S) +> +void copy_complex(char *data, T *value) { + setreal((T *)data, real(*value)); + setimag((T *)data, imag(*value)); + return; +} + +template < + typename T, + size_t (*hash_func)(const T *, npy_bool), + int (*equal_func)(const T *, const T *, npy_bool), + void (*copy_func)(char *, T *) +> static PyObject* -unique_integer(PyArrayObject *self, npy_bool equal_nan) +unique_numeric(PyArrayObject *self, npy_bool equal_nan) { /* - * Returns a new NumPy array containing the unique values of the input array of integer. + * Returns a new NumPy array containing the unique values of the numeric (integer or complex) input array. * This function uses hashing to identify uniqueness efficiently. */ NPY_ALLOW_C_API_DEF; @@ -52,18 +178,27 @@ unique_integer(PyArrayObject *self, npy_bool equal_nan) // number of elements in the input array npy_intp isize = PyArray_SIZE(self); + auto hash = [equal_nan](const T *value) -> size_t { + return hash_func(value, equal_nan); + }; + auto equal = [equal_nan](const T *lhs, const T *rhs) -> bool { + return equal_func(lhs, rhs, equal_nan); + }; + // Reserve hashset capacity in advance to minimize reallocations and collisions.
// We use min(isize, HASH_TABLE_INITIAL_BUCKETS) as the initial bucket count: // - Reserving for all elements (isize) may over-allocate when there are few unique values. // - Using a moderate upper bound HASH_TABLE_INITIAL_BUCKETS(1024) keeps memory usage reasonable (4 KiB for pointers). // See discussion: https://github.com/numpy/numpy/pull/28767#discussion_r2064267631 - std::unordered_set<T> hashset(std::min(isize, (npy_intp)HASH_TABLE_INITIAL_BUCKETS)); + std::unordered_set<T *, decltype(hash), decltype(equal)> hashset( + std::min(isize, (npy_intp)HASH_TABLE_INITIAL_BUCKETS), hash, equal + ); // Input array is one-dimensional, enabling efficient iteration using strides. char *idata = PyArray_BYTES(self); npy_intp istride = PyArray_STRIDES(self)[0]; for (npy_intp i = 0; i < isize; i++, idata += istride) { - hashset.insert(*(T *)idata); + hashset.insert((T *)idata); } npy_intp length = hashset.size(); @@ -95,7 +230,7 @@ unique_integer(PyArrayObject *self, npy_bool equal_nan) npy_intp ostride = PyArray_STRIDES((PyArrayObject *)res_obj)[0]; // Output array is one-dimensional, enabling efficient iteration using strides. for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { - *(T *)odata = *it; + copy_func(odata, *it); } return res_obj; @@ -311,25 +446,60 @@ unique_vstring(PyArrayObject *self, npy_bool equal_nan) // this map contains the functions used for each item size. typedef std::function<PyObject *(PyArrayObject *, npy_bool)> function_type; std::unordered_map<int, function_type> unique_funcs = { - {NPY_BYTE, unique_integer<npy_byte>}, - {NPY_UBYTE, unique_integer<npy_ubyte>}, - {NPY_SHORT, unique_integer<npy_short>}, - {NPY_USHORT, unique_integer<npy_ushort>}, - {NPY_INT, unique_integer<npy_int>}, - {NPY_UINT, unique_integer<npy_uint>}, - {NPY_LONG, unique_integer<npy_long>}, - {NPY_ULONG, unique_integer<npy_ulong>}, - {NPY_LONGLONG, unique_integer<npy_longlong>}, - {NPY_ULONGLONG, unique_integer<npy_ulonglong>}, - {NPY_INT8, unique_integer<npy_int8>}, - {NPY_INT16, unique_integer<npy_int16>}, - {NPY_INT32, unique_integer<npy_int32>}, - {NPY_INT64, unique_integer<npy_int64>}, - {NPY_UINT8, unique_integer<npy_uint8>}, - {NPY_UINT16, unique_integer<npy_uint16>}, - {NPY_UINT32, unique_integer<npy_uint32>}, - {NPY_UINT64, unique_integer<npy_uint64>}, - {NPY_DATETIME, unique_integer<npy_datetime>}, + {NPY_BYTE, unique_numeric<npy_byte, hash_integer<npy_byte>, equal_integer<npy_byte>, copy_integer<npy_byte>>}, + {NPY_UBYTE, unique_numeric<npy_ubyte, hash_integer<npy_ubyte>, equal_integer<npy_ubyte>, copy_integer<npy_ubyte>>}, + {NPY_SHORT, unique_numeric<npy_short, hash_integer<npy_short>, equal_integer<npy_short>, copy_integer<npy_short>>}, + {NPY_USHORT, unique_numeric<npy_ushort, hash_integer<npy_ushort>, equal_integer<npy_ushort>, copy_integer<npy_ushort>>}, + {NPY_INT, unique_numeric<npy_int, hash_integer<npy_int>, equal_integer<npy_int>, copy_integer<npy_int>>}, + {NPY_UINT, unique_numeric<npy_uint, hash_integer<npy_uint>, equal_integer<npy_uint>, copy_integer<npy_uint>>}, + {NPY_LONG, unique_numeric<npy_long, hash_integer<npy_long>, equal_integer<npy_long>, copy_integer<npy_long>>}, + {NPY_ULONG, unique_numeric<npy_ulong, hash_integer<npy_ulong>, equal_integer<npy_ulong>, copy_integer<npy_ulong>>}, + {NPY_LONGLONG, unique_numeric<npy_longlong, hash_integer<npy_longlong>, equal_integer<npy_longlong>, copy_integer<npy_longlong>>}, + {NPY_ULONGLONG, unique_numeric<npy_ulonglong, hash_integer<npy_ulonglong>, equal_integer<npy_ulonglong>, copy_integer<npy_ulonglong>>}, + {NPY_CFLOAT, unique_numeric< + npy_cfloat, + hash_complex<npy_float, npy_cfloat, npy_crealf, npy_cimagf>, + equal_complex<npy_float, npy_cfloat, npy_crealf, npy_cimagf>, + copy_complex<npy_float, npy_cfloat, npy_crealf, npy_cimagf, npy_csetrealf, npy_csetimagf> + > + }, + {NPY_CDOUBLE, unique_numeric< + npy_cdouble, + hash_complex<npy_double, npy_cdouble, npy_creal, npy_cimag>, + equal_complex<npy_double, npy_cdouble, npy_creal, npy_cimag>, + copy_complex<npy_double, npy_cdouble, npy_creal, npy_cimag, npy_csetreal, npy_csetimag> + > + }, + {NPY_CLONGDOUBLE, unique_numeric< + npy_clongdouble, + hash_complex_clongdouble, + equal_complex<npy_longdouble, npy_clongdouble, npy_creall, npy_cimagl>, + copy_complex<npy_longdouble, npy_clongdouble, npy_creall, npy_cimagl, npy_csetreall, npy_csetimagl> + > + }, + {NPY_INT8, unique_numeric<npy_int8, hash_integer<npy_int8>, equal_integer<npy_int8>, copy_integer<npy_int8>>}, + {NPY_INT16, unique_numeric<npy_int16, hash_integer<npy_int16>, equal_integer<npy_int16>, copy_integer<npy_int16>>}, + {NPY_INT32, unique_numeric<npy_int32, hash_integer<npy_int32>, equal_integer<npy_int32>, copy_integer<npy_int32>>}, + {NPY_INT64, unique_numeric<npy_int64, hash_integer<npy_int64>, equal_integer<npy_int64>, copy_integer<npy_int64>>}, + {NPY_UINT8, unique_numeric<npy_uint8, hash_integer<npy_uint8>, equal_integer<npy_uint8>, copy_integer<npy_uint8>>}, + {NPY_UINT16, unique_numeric<npy_uint16, hash_integer<npy_uint16>, equal_integer<npy_uint16>, copy_integer<npy_uint16>>}, + {NPY_UINT32, unique_numeric<npy_uint32, hash_integer<npy_uint32>, equal_integer<npy_uint32>, copy_integer<npy_uint32>>}, + {NPY_UINT64, unique_numeric<npy_uint64, hash_integer<npy_uint64>, equal_integer<npy_uint64>, copy_integer<npy_uint64>>}, + {NPY_DATETIME, unique_numeric<npy_datetime, hash_integer<npy_datetime>, equal_integer<npy_datetime>, copy_integer<npy_datetime>>}, +
{NPY_COMPLEX64, unique_numeric< + npy_complex64, + hash_complex<npy_float, npy_complex64, npy_crealf, npy_cimagf>, + equal_complex<npy_float, npy_complex64, npy_crealf, npy_cimagf>, + copy_complex<npy_float, npy_complex64, npy_crealf, npy_cimagf, npy_csetrealf, npy_csetimagf> + > + }, + {NPY_COMPLEX128, unique_numeric< + npy_complex128, + hash_complex<npy_double, npy_complex128, npy_creal, npy_cimag>, + equal_complex<npy_double, npy_complex128, npy_creal, npy_cimag>, + copy_complex<npy_double, npy_complex128, npy_creal, npy_cimag, npy_csetreal, npy_csetimag> + > + }, {NPY_STRING, unique_string}, {NPY_UNICODE, unique_string}, {NPY_VSTRING, unique_vstring}, diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index 86d93a569d98..9d5a4d693fa5 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -726,7 +726,10 @@ def test_unique_1d(self): # test for ticket #2799 aa = [1. + 0.j, 1 - 1.j, 1] - assert_array_equal(np.unique(aa), [1. - 1.j, 1. + 0.j]) + assert_array_equal( + np.sort(np.unique(aa)), + [1. - 1.j, 1.], + ) # test for ticket #4785 a = [(1, 2), (1, 2), (2, 3)] @@ -761,7 +764,8 @@ def test_unique_1d(self): ua_idx = [2, 0, 1] ua_inv = [1, 2, 0, 2] ua_cnt = [1, 1, 2] - assert_equal(np.unique(a), ua) + # order of unique values is not guaranteed + assert_equal(np.sort(np.unique(a)), np.sort(ua)) assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) @@ -772,7 +776,8 @@ def test_unique_1d(self): ua_idx = [2, 0, 3] ua_inv = [1, 2, 0, 2, 2] ua_cnt = [1, 1, 3] - assert_equal(np.unique(a), ua) + # order of unique values is not guaranteed + assert_equal(np.sort(np.unique(a)), np.sort(ua)) assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) @@ -1199,7 +1204,13 @@ def test_unique_nanequals(self): assert_array_equal(not_unq, np.array([1, np.nan, np.nan, np.nan])) def test_unique_array_api_functions(self): - arr = np.array([np.nan, 1, 4, 1, 3, 4, np.nan, 5, 1]) + arr = np.array( + [ + np.nan, 1.0, 0.0, 4.0, -np.nan, + -0.0, 1.0, 3.0, 4.0, np.nan, + 5.0, -0.0, 1.0, -np.nan, 0.0, + ], + ) for res_unique_array_api, res_unique in [ ( @@ -1226,8 +1237,14 @@ def test_unique_array_api_functions(self): ) ]: assert len(res_unique_array_api) == len(res_unique) + if not isinstance(res_unique_array_api, tuple): + res_unique_array_api = (res_unique_array_api,) + if not isinstance(res_unique, tuple): + res_unique = (res_unique,) + for actual, expected in zip(res_unique_array_api, res_unique): - assert_array_equal(actual, expected) + # Order of output is not guaranteed + assert_equal(np.sort(actual), np.sort(expected)) def test_unique_inverse_shape(self): # Regression test for https://github.com/numpy/numpy/issues/25552 From 32887277983a3e8ff0f24413ba48e6da1f2b4337 Mon Sep 17 00:00:00 2001 From: Abhishek Tiwari Date: Thu, 30 Oct 2025 09:40:22 +0530 Subject: [PATCH 0766/1018] Update doc/release/upcoming_changes/30068.expired.rst Co-authored-by: Joren Hammudoglu --- doc/release/upcoming_changes/30068.expired.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/release/upcoming_changes/30068.expired.rst b/doc/release/upcoming_changes/30068.expired.rst index b651371f7061..a5363771c4b7 100644 --- a/doc/release/upcoming_changes/30068.expired.rst +++ b/doc/release/upcoming_changes/30068.expired.rst @@ -1,5 +1,5 @@ ``numpy.array2string`` and ``numpy.sum`` deprecations finalized ---------------------------------------------- +--------------------------------------------------------------- The following long-deprecated APIs have been removed or converted to errors: From dc3549896bdc187d96275534b371c50778b6bf6f Mon Sep 17
00:00:00 2001 From: mattip Date: Thu, 30 Oct 2025 07:41:17 +0200 Subject: [PATCH 0767/1018] BLD: use blobless checkout on CircleCI [skip actions][skip azp][skip cirrus] --- .circleci/config.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 1760eca727dd..62c29203dac1 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -17,7 +17,8 @@ jobs: build: <<: *defaults steps: - - checkout + - checkout: + method: blobless - run: name: check skip @@ -110,7 +111,8 @@ jobs: deploy: <<: *defaults steps: - - checkout + - checkout: + method: blobless - attach_workspace: at: ~/repo From 823c9a85f3c33052beb0292d66319faa1139f431 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 30 Oct 2025 11:25:07 +0100 Subject: [PATCH 0768/1018] ENH: Make FPE blas check a runtime check for all arm systems If an ARM system uses SME, some BLAS versions just set FPEs spuriously. The culprit is really Accelerate, so we might limit this to macOS as well (and then with an SME check also -- see changes prior to this commit). However, some OpenBLAS versions also caused this, although OpenBLAS is likely to clear the FPEs on their side. Closes gh-29820 --- numpy/_core/src/common/blas_utils.c | 108 ++++++------------ numpy/_core/src/common/blas_utils.h | 10 +- numpy/_core/src/multiarray/multiarraymodule.c | 19 ++- numpy/testing/_private/utils.py | 9 +- 4 files changed, 59 insertions(+), 87 deletions(-) diff --git a/numpy/_core/src/common/blas_utils.c b/numpy/_core/src/common/blas_utils.c index 409d3818ae0f..1c2ffdebbcac 100644 --- a/numpy/_core/src/common/blas_utils.c +++ b/numpy/_core/src/common/blas_utils.c @@ -1,6 +1,9 @@ +#include <Python.h> + #include "numpy/npy_math.h" // npy_get_floatstatus_barrier #include "numpy/numpyconfig.h" // NPY_VISIBILITY_HIDDEN #include "blas_utils.h" +#include "npy_cblas.h" #include <stdbool.h> #include <stdlib.h> @@ -11,92 +14,50 @@ #endif #if NPY_BLAS_CHECK_FPE_SUPPORT - -/* Return whether we're running on macOS 15.4 or later +/* + * Static variable to cache runtime check of BLAS FPE support. */ -static inline bool -is_macOS_version_15_4_or_later(void){ -#if !defined(__APPLE__) - return false; -#else - char *osProductVersion = NULL; - size_t size = 0; - bool ret = false; - - // Query how large OS version string should be - if(-1 == sysctlbyname("kern.osproductversion", NULL, &size, NULL, 0)){ - goto cleanup; - } - - osProductVersion = malloc(size + 1); - - // Get the OS version string - if(-1 == sysctlbyname("kern.osproductversion", osProductVersion, &size, NULL, 0)){ - goto cleanup; - } - - osProductVersion[size] = '\0'; - - // Parse the version string - int major = 0, minor = 0; - if(2 > sscanf(osProductVersion, "%d.%d", &major, &minor)) { - goto cleanup; - } - - if (major > 15 || (major == 15 && minor >= 4)) { - ret = true; - } + static bool blas_supports_fpe = true; -cleanup: - if(osProductVersion){ - free(osProductVersion); - } - - return ret; -#endif -} -/* ARM Scalable Matrix Extension (SME) raises all floating-point error flags +/* + * ARM Scalable Matrix Extension (SME) raises all floating-point error flags * when it's used regardless of values or operations. As a consequence, * when SME is used, all FPE state is lost and special handling is needed. * * For NumPy, SME is not currently used directly, but can be used via * BLAS / LAPACK libraries. This function does a runtime check for whether * BLAS / LAPACK can use SME and special handling around FPE is required.
+ * + * This may be an Accelerate bug (at least OpenBLAS considers it that way), + * so on ARM systems we do a runtime check for whether + * FPEs are raised spuriously. */ -static inline bool -BLAS_can_use_ARM_SME(void) +static inline int +set_BLAS_causes_spurious_FPEs(void) { -#if defined(__APPLE__) && defined(__aarch64__) && defined(ACCELERATE_NEW_LAPACK) - // ARM SME can be used by Apple's Accelerate framework for BLAS / LAPACK - // - macOS 15.4+ - // - Apple silicon M4+ - - // Does OS / Accelerate support ARM SME? - if(!is_macOS_version_15_4_or_later()){ - return false; + // These are all small, so use a single scratch allocation to not worry + // about error handling. + double *x = PyMem_Malloc(20*20*3*sizeof(double)); + if (x == NULL) { + PyErr_NoMemory(); + return -1; } + double *y = x + 20*20; + double *res = y + 20*20; - // Does hardware support SME? - int has_SME = 0; - size_t size = sizeof(has_SME); - if(-1 == sysctlbyname("hw.optional.arm.FEAT_SME", &has_SME, &size, NULL, 0)){ - return false; - } + npy_clear_floatstatus_barrier((char *)x); - if(has_SME){ - return true; - } -#endif + CBLAS_FUNC(cblas_dgemm)( + CblasRowMajor, CblasNoTrans, CblasNoTrans, 20, 20, 20, 1., + x, 20, y, 20, 0., res, 20); + PyMem_Free(x); - // default assume SME is not used - return false; + int fpe_status = npy_get_floatstatus_barrier((char *)x); + // Entries were all zero, so we shouldn't see any FPEs + blas_supports_fpe = fpe_status == 0; + return 0; } -/* Static variable to cache runtime check of BLAS FPE support. - */ -static bool blas_supports_fpe = true; - #endif // NPY_BLAS_CHECK_FPE_SUPPORT @@ -110,19 +71,20 @@ npy_blas_supports_fpe(void) #endif } -NPY_VISIBILITY_HIDDEN void +NPY_VISIBILITY_HIDDEN int npy_blas_init(void) { #if NPY_BLAS_CHECK_FPE_SUPPORT - blas_supports_fpe = !BLAS_can_use_ARM_SME(); + return set_BLAS_causes_spurious_FPEs(); #endif + return 0; } NPY_VISIBILITY_HIDDEN int npy_get_floatstatus_after_blas(void) { #if NPY_BLAS_CHECK_FPE_SUPPORT - if(!blas_supports_fpe){ + if (!blas_supports_fpe){ // BLAS does not support FPE and we need to return FPE state. // Instead of clearing and then grabbing state, just return // that no flags are set. diff --git a/numpy/_core/src/common/blas_utils.h b/numpy/_core/src/common/blas_utils.h index 34d6321c2920..115e60576557 100644 --- a/numpy/_core/src/common/blas_utils.h +++ b/numpy/_core/src/common/blas_utils.h @@ -2,10 +2,14 @@ #include <stdbool.h> -/* NPY_BLAS_CHECK_FPE_SUPPORT controls whether we need a runtime check +/* + * NPY_BLAS_CHECK_FPE_SUPPORT controls whether we need a runtime check * for floating-point error (FPE) support in BLAS. + * The known culprit right now is SME, likely only on macOS, but that is not + * quite clear. + * This checks always on all ARM (it is a small check overall). */ -#if defined(__APPLE__) && defined(__aarch64__) && defined(ACCELERATE_NEW_LAPACK) +#if defined(__aarch64__) #define NPY_BLAS_CHECK_FPE_SUPPORT 1 #else #define NPY_BLAS_CHECK_FPE_SUPPORT 0 @@ -13,7 +17,7 @@ /* Initialize BLAS environment, if needed */ -NPY_VISIBILITY_HIDDEN void +NPY_VISIBILITY_HIDDEN int npy_blas_init(void); /* Runtime check if BLAS supports floating-point errors.
diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 4ab3f5bae02c..44ac8a678bbb 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -4448,6 +4448,17 @@ _set_numpy_warn_if_no_mem_policy(PyObject *NPY_UNUSED(self), PyObject *arg) } +static PyObject * +_blas_supports_fpe(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { + if (npy_blas_supports_fpe()) { + Py_RETURN_TRUE; + } + else { + Py_RETURN_FALSE; + } +} + + static PyObject * _reload_guard(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { #if !defined(PYPY_VERSION) @@ -4688,6 +4699,8 @@ static struct PyMethodDef array_module_methods[] = { METH_NOARGS, NULL}, {"_set_madvise_hugepage", (PyCFunction)_set_madvise_hugepage, METH_O, NULL}, + {"_blas_supports_fpe", (PyCFunction)_blas_supports_fpe, + METH_NOARGS, NULL}, {"_reload_guard", (PyCFunction)_reload_guard, METH_NOARGS, "Give a warning on reload and big warning in sub-interpreters."}, @@ -4904,9 +4917,9 @@ _multiarray_umath_exec(PyObject *m) { return -1; } -#if NPY_BLAS_CHECK_FPE_SUPPORT - npy_blas_init(); -#endif + if (npy_blas_init() < 0) { + return -1; + } #if defined(MS_WIN64) && defined(__GNUC__) PyErr_WarnEx(PyExc_Warning, diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 9be98f9d2fbe..967d67e14a13 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -90,14 +90,7 @@ class KnownFailureException(Exception): IS_PYPY = sys.implementation.name == 'pypy' IS_PYSTON = hasattr(sys, "pyston_version_info") HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None and not IS_PYSTON -BLAS_SUPPORTS_FPE = True -if platform.system() == 'Darwin' or platform.machine() == 'arm64': - try: - blas = np.__config__.CONFIG['Build Dependencies']['blas'] - if blas['name'] == 'accelerate': - BLAS_SUPPORTS_FPE = False - except KeyError: - pass +BLAS_SUPPORTS_FPE = np._core._multiarray_umath._blas_supports_fpe() HAS_LAPACK64 = numpy.linalg._umath_linalg._ilp64 From 7bc099dc9f3ce73d08caf001af57cb825691fa81 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 30 Oct 2025 11:48:53 +0100 Subject: [PATCH 0769/1018] Guard for HAVE_CBLAS and always compile the helpers --- numpy/_core/meson.build | 2 +- numpy/_core/src/common/blas_utils.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index dc07586bcf8e..6dcbaea0cf1a 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1109,6 +1109,7 @@ endforeach # ------------------------------ src_multiarray_umath_common = [ 'src/common/array_assign.c', + 'src/common/blas_utils.c', 'src/common/gil_utils.c', 'src/common/mem_overlap.c', 'src/common/npy_argparse.c', @@ -1123,7 +1124,6 @@ src_multiarray_umath_common = [ ] if have_blas src_multiarray_umath_common += [ - 'src/common/blas_utils.c', 'src/common/cblasfuncs.c', 'src/common/python_xerbla.c', ] diff --git a/numpy/_core/src/common/blas_utils.h b/numpy/_core/src/common/blas_utils.h index 115e60576557..3f5bb735281d 100644 --- a/numpy/_core/src/common/blas_utils.h +++ b/numpy/_core/src/common/blas_utils.h @@ -9,7 +9,7 @@ * quite clear. * This checks always on all ARM (it is a small check overall). 
*/ -#if defined(__aarch64__) +#if defined(__aarch64__) && defined(HAVE_CBLAS) #define NPY_BLAS_CHECK_FPE_SUPPORT 1 #else #define NPY_BLAS_CHECK_FPE_SUPPORT 0 From fbd08f47676bea39aae58fe7b89dd72642be4644 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 30 Oct 2025 12:31:19 +0100 Subject: [PATCH 0770/1018] ooops, needs to be a calloc of course. --- numpy/_core/src/common/blas_utils.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/common/blas_utils.c b/numpy/_core/src/common/blas_utils.c index 1c2ffdebbcac..365289067b88 100644 --- a/numpy/_core/src/common/blas_utils.c +++ b/numpy/_core/src/common/blas_utils.c @@ -37,13 +37,13 @@ set_BLAS_causes_spurious_FPEs(void) { // These are all small, so use a single scratch allocation to not worry // about error handling. - double *x = PyMem_Malloc(20*20*3*sizeof(double)); + double *x = PyMem_Calloc(20 * 20 * 3, sizeof(double)); if (x == NULL) { PyErr_NoMemory(); return -1; } - double *y = x + 20*20; - double *res = y + 20*20; + double *y = x + 20 * 20; + double *res = y + 20 * 20; npy_clear_floatstatus_barrier((char *)x); From 56243c991894dfbae2a6f9142927441e98c0f3f3 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Thu, 30 Oct 2025 13:41:35 +0200 Subject: [PATCH 0771/1018] BLD: update scipy-openblas, use -Dpkg_config_path (#30049) --- .github/workflows/linux_blas.yml | 5 +---- requirements/ci32_requirements.txt | 2 +- requirements/ci_requirements.txt | 4 ++-- 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index c282365ab012..7b7647044002 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -89,16 +89,13 @@ jobs: fi mkdir -p ./.openblas python -c"import scipy_openblas32 as ob32; print(ob32.get_pkg_config())" > ./.openblas/scipy-openblas.pc - echo "PKG_CONFIG_PATH=${{ github.workspace }}/.openblas" >> $GITHUB_ENV - ld_library_path=$(python -c"import scipy_openblas32 as ob32; print(ob32.get_lib_dir())") - echo "LD_LIBRARY_PATH=$ld_library_path" >> $GITHUB_ENV - name: Build shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' env: TERM: xterm-256color run: - spin build -- --werror -Dallow-noblas=false + spin build -- --werror -Dallow-noblas=false -Dpkg_config_path=${PWD}/.openblas - name: Check build-internal dependencies run: diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index 4cb678d5d047..19e6b119b73f 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,3 @@ spin==0.15 # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.30.0.1 +scipy-openblas32==0.3.30.0.6 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index aab147bf8c17..605132ee720a 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ spin==0.15 # Keep this in sync with ci32_requirements.txt -scipy-openblas32==0.3.30.0.1 -scipy-openblas64==0.3.30.0.1 +scipy-openblas32==0.3.30.0.6 +scipy-openblas64==0.3.30.0.6 From 2f291c2899d59a974a96a875c1f4ffd7c7215a00 Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Thu, 30 Oct 2025 13:27:27 -0400 Subject: [PATCH 0772/1018] TST: Add thread-safe testing guidelines (#30101) * DOC: Add parallel testing guidelines * DOC: Fix code example * STY: Fix header length --- doc/TESTS.rst | 97 +++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 71 insertions(+),
26 deletions(-) diff --git a/doc/TESTS.rst b/doc/TESTS.rst index b74dc700b1b9..e57cf7711da8 100644 --- a/doc/TESTS.rst +++ b/doc/TESTS.rst @@ -88,8 +88,17 @@ To run the test suite in multiple threads:: $ spin test -p 4 # run each test under 4 threads $ spin test -p auto -- --skip-thread-unsafe=true # run ONLY tests that are thread-safe -When you write new tests (see below), it is worth testing to make sure they do not fail -under ``pytest-run-parallel``, since the CI jobs makes use of it. +When you write new tests, it is worth testing to make sure they do not fail +under ``pytest-run-parallel``, since the CI jobs make use of it. Some tips on how to +write thread-safe tests can be found `here <#writing-thread-safe-tests>`_. + +.. note:: + + Ideally you should run ``pytest-run-parallel`` using a `free-threaded build of Python + `_ that is 3.14 or + higher. If you decide to use a version of Python that is not free-threaded, you will + need to set the environment variables ``PYTHON_CONTEXT_AWARE_WARNINGS`` and + ``PYTHON_THREAD_INHERIT_CONTEXT`` to 1. Running doctests ---------------- @@ -225,36 +234,34 @@ Similarly for methods:: def test_simple(self): assert_(zzz() == 'Hello from zzz') -Easier setup and teardown functions / methods ---------------------------------------------- - -Testing looks for module-level or class method-level setup and teardown -functions by name; thus:: - - def setup_module(): - """Module-level setup""" - print('doing setup') - - def teardown_module(): - """Module-level teardown""" - print('doing teardown') +Setup and teardown methods +-------------------------- +NumPy originally used xunit setup and teardown, a feature of `pytest`. We now encourage +the usage of setup and teardown methods that are called explicitly by the tests that +need them:: class TestMe: - def setup_method(self): - """Class-level setup""" + def setup(self): print('doing setup') + return 1 - def teardown_method(): - """Class-level teardown""" + def teardown(self): print('doing teardown') + def test_xyz(self): + x = self.setup() + assert x == 1 + self.teardown() + +This approach is thread-safe, ensuring tests can run under ``pytest-run-parallel``. +Using pytest setup fixtures (such as xunit setup methods) is generally not thread-safe +and will likely cause thread-safety test failures. -Setup and teardown functions to functions and methods are known as "fixtures", -and they should be used sparingly. ``pytest`` supports more general fixture at various scopes which may be used -automatically via special arguments. For example, the special argument name -``tmpdir`` is used in test to create a temporary directory. +automatically via special arguments. For example, the special argument name +``tmp_path`` is used in tests to create temporary directories. However, +fixtures should be used sparingly. Parametric tests ---------------- @@ -397,9 +404,9 @@ Tests on random data Tests on random data are good, but since test failures are meant to expose new bugs or regressions, a test that passes most of the time but fails occasionally with no code changes is not helpful. Make the random data -deterministic by setting the random number seed before generating it. Use -either Python's ``random.seed(some_number)`` or NumPy's -``numpy.random.seed(some_number)``, depending on the source of random numbers. +deterministic by setting the random number seed before generating it. +Use ``rng = numpy.random.RandomState(some_number)`` to set a seed on a +local instance of `numpy.random.RandomState`. 
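A minimal sketch of this pattern (the test name, seed, and tolerance are illustrative only)::

    import numpy as np

    def test_normal_draws_have_small_mean():
        # A local, seeded generator keeps the test deterministic and avoids
        # mutating global random state that other threads may share.
        rng = np.random.RandomState(1234)
        data = rng.normal(loc=0.0, scale=1.0, size=1000)
        assert abs(data.mean()) < 0.1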
Alternatively, you can use `Hypothesis`_ to generate arbitrary data. Hypothesis manages both Python's and Numpy's random seeds for you, and @@ -410,6 +417,44 @@ The advantages over random generation include tools to replay and share failures without requiring a fixed seed, reporting *minimal* examples for each failure, and better-than-naive-random techniques for triggering bugs. +Writing thread-safe tests +------------------------- + +Writing thread-safe tests may require some trial-and-error. Generally you should +follow the guidelines stated so far, especially when it comes to `setup methods +<#setup-and-teardown-methods>`_ and `seeding random data <#tests-on-random-data>`_. +Explicit setup and the usage of local RNG are thread-safe practices. Here are tips +for some other common problems you may run into. + +Using ``pytest.mark.parametrize`` with mutable arguments may occasionally cause +thread-safety issues, because every thread receives the same parametrized objects. +To fix this, make a ``copy()`` of the argument before mutating it:: + + @pytest.mark.parametrize('shape', [[3, 3], [10, 10], [25, 25]]) + def test_solve(shape): + dims = shape.copy() + # mutate only the copy, not the shared parametrized object + ... + +If you are testing something that is inherently thread-unsafe, you can label your +test with ``pytest.mark.thread_unsafe`` so that it will run under a single thread +and not cause test failures:: + + @pytest.mark.thread_unsafe(reason="reason this test is thread-unsafe") + def test_thread_unsafe(): + ... + +Some examples of what should be labeled as thread-unsafe: + +- Usage of ``sys.stdout`` and ``sys.stderr`` +- Mutation of global data, like docstrings, modules, garbage collectors, etc. +- Tests that require a lot of memory, since they could cause crashes. + +Additionally, some ``pytest`` fixtures are thread-unsafe, such as ``monkeypatch`` and +``capsys``. However, ``pytest-run-parallel`` will automatically mark these as +thread-unsafe if you decide to use them. Some fixtures have been patched to be +thread-safe, like ``tmp_path``. Documentation for ``numpy.test`` -------------------------------- From d8eb2256ee4f21c90e8498c6008e08c960b3d174 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 30 Oct 2025 13:00:33 -0600 Subject: [PATCH 0773/1018] MAINT: Bump github/codeql-action from 4.31.0 to 4.31.2 (#30106) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 4.31.0 to 4.31.2. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/4e94bd11f71e507f7f87df81788dff88d1dacbfb...0499de31b99561a6d14a36a5f662c2a54f91beee) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.31.2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index b147f3e78c1a..45b88b851312 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL - uses: github/codeql-action/init@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v4.31.0 + uses: github/codeql-action/init@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v4.31.0 + uses: github/codeql-action/autobuild@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v4.31.0 + uses: github/codeql-action/analyze@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index e0d5de712c0f..0a65e932b12d 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v2.1.27 + uses: github/codeql-action/upload-sarif@0499de31b99561a6d14a36a5f662c2a54f91beee # v2.1.27 with: sarif_file: results.sarif From 40f5f6fff96934999850337e9212f2b5470513fe Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 30 Oct 2025 16:20:30 +0100 Subject: [PATCH 0774/1018] BUG: ufunc method signatures Co-authored-by: Sebastian Berg --- numpy/_core/_add_newdocs.py | 28 +++++++++++++++++++++++-- numpy/_core/function_base.py | 16 +++++++------- numpy/_core/tests/test_function_base.py | 2 +- numpy/_core/tests/test_ufunc.py | 16 ++++++++++++++ 4 files changed, 52 insertions(+), 10 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 80f82736aeaf..98bdec4e3c74 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -4799,7 +4799,7 @@ add_newdoc('numpy._core.multiarray', 'get_handler_name', """ - get_handler_name(a: ndarray) -> str,None + get_handler_name(a: ndarray) -> str | None Return the name of the memory handler used by `a`. If not provided, return the name of the memory handler that will be used to allocate data for the @@ -5171,6 +5171,9 @@ add_newdoc('numpy._core', 'ufunc', ('reduce', """ + reduce($self, array, /, axis=0, dtype=None, out=None, **kwargs) + -- + reduce(array, axis=0, dtype=None, out=None, keepdims=False, initial=<no value>, where=True) Reduces `array`'s dimension by one, by applying ufunc along one axis. @@ -5297,6 +5300,9 @@ add_newdoc('numpy._core', 'ufunc', ('accumulate', """ + accumulate($self, array, /, axis=0, dtype=None, out=None) + -- + accumulate(array, axis=0, dtype=None, out=None) Accumulate the result of applying the operator to all elements. @@ -5375,6 +5381,9 @@ add_newdoc('numpy._core', 'ufunc', ('reduceat', """ + reduceat($self, array, /, indices, axis=0, dtype=None, out=None) + -- + reduceat(array, indices, axis=0, dtype=None, out=None) Performs a (local) reduce with specified slices over a single axis.
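As a concrete illustration of the reduceat semantics documented above (the values are arbitrary), each pair of adjacent indices selects a slice that is reduced on its own::

    import numpy as np

    x = np.arange(8)                # [0, 1, 2, 3, 4, 5, 6, 7]
    np.add.reduceat(x, [0, 4, 6])   # sums x[0:4], x[4:6], x[6:8]
    # -> array([ 6,  9, 13])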
@@ -5483,6 +5492,9 @@ add_newdoc('numpy._core', 'ufunc', ('outer', r""" + outer($self, A, B, /, **kwargs) + -- + outer(A, B, /, **kwargs) Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`. @@ -5554,6 +5566,9 @@ add_newdoc('numpy._core', 'ufunc', ('at', """ + at($self, a, indices, b=None, /) + -- + at(a, indices, b=None, /) Performs unbuffered in place operation on operand 'a' for elements @@ -5605,6 +5620,9 @@ add_newdoc('numpy._core', 'ufunc', ('resolve_dtypes', """ + resolve_dtypes($self, dtypes, *, signature=None, casting=None, reduction=False) + -- + resolve_dtypes(dtypes, *, signature=None, casting=None, reduction=False) Find the dtypes NumPy will use for the operation. Both input and @@ -5677,6 +5695,9 @@ add_newdoc('numpy._core', 'ufunc', ('_resolve_dtypes_and_context', """ + _resolve_dtypes_and_context($self, dtypes, *, signature=None, casting=None, reduction=False) + -- + _resolve_dtypes_and_context(dtypes, *, signature=None, casting=None, reduction=False) See `numpy.ufunc.resolve_dtypes` for parameter information. This @@ -5700,6 +5721,9 @@ add_newdoc('numpy._core', 'ufunc', ('_get_strided_loop', """ + _get_strided_loop($self, call_info, /, *, fixed_strides=None) + -- + _get_strided_loop(call_info, /, *, fixed_strides=None) This function fills in the ``call_info`` capsule to include all @@ -6453,7 +6477,7 @@ add_newdoc('numpy._core.multiarray', 'dtype', ('__gt__', """ - __ge__(value, /) + __gt__(value, /) Return ``self > value``. diff --git a/numpy/_core/function_base.py b/numpy/_core/function_base.py index 12ab2a7ef546..7fcc6b4f770a 100644 --- a/numpy/_core/function_base.py +++ b/numpy/_core/function_base.py @@ -1,4 +1,5 @@ import functools +import inspect import operator import types import warnings @@ -477,6 +478,9 @@ def _add_docstring(obj, doc, warn_on_python): "Prefer to attach it directly to the source.", UserWarning, stacklevel=3) + + doc = inspect.cleandoc(doc) + try: add_docstring(obj, doc) except Exception: @@ -494,10 +498,10 @@ def add_newdoc(place, obj, doc, warn_on_python=True): ---------- place : str The absolute name of the module to import from - obj : str or None + obj : str | None The name of the object to add documentation to, typically a class or function name. - doc : {str, Tuple[str, str], List[Tuple[str, str]]} + doc : str | tuple[str, str] | list[tuple[str, str]] If a string, the documentation to apply to `obj` If a tuple, then the first element is interpreted as an attribute @@ -534,12 +538,10 @@ def add_newdoc(place, obj, doc, warn_on_python=True): if isinstance(doc, str): if "${ARRAY_FUNCTION_LIKE}" in doc: doc = overrides.get_array_function_like_doc(new, doc) - _add_docstring(new, doc.strip(), warn_on_python) + _add_docstring(new, doc, warn_on_python) elif isinstance(doc, tuple): attr, docstring = doc - _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python) + _add_docstring(getattr(new, attr), docstring, warn_on_python) elif isinstance(doc, list): for attr, docstring in doc: - _add_docstring( - getattr(new, attr), docstring.strip(), warn_on_python - ) + _add_docstring(getattr(new, attr), docstring, warn_on_python) diff --git a/numpy/_core/tests/test_function_base.py b/numpy/_core/tests/test_function_base.py index 3a8552de2d36..c6e10397b3ff 100644 --- a/numpy/_core/tests/test_function_base.py +++ b/numpy/_core/tests/test_function_base.py @@ -492,7 +492,7 @@ def test_add_doc(self): # test that np.add_newdoc did attach a docstring successfully: tgt = "Current flat index into the array." 
assert_equal(np._core.flatiter.index.__doc__[:len(tgt)], tgt) - assert_(len(np._core.ufunc.identity.__doc__) > 300) + assert_(len(np._core.ufunc.identity.__doc__) > 250) assert_(len(np.lib._index_tricks_impl.mgrid.__doc__) > 300) @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 0211449c577f..a29fae0539e4 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -1,4 +1,5 @@ import ctypes as ct +import inspect import itertools import pickle import sys @@ -2976,6 +2977,21 @@ def test_ufunc_input_floatingpoint_error(bad_offset): np.add(arr, arr, dtype=np.intp, casting="unsafe") +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +@pytest.mark.parametrize( + "methodname", + ["__call__", "accumulate", "at", "outer", "reduce", "reduceat", "resolve_dtypes"], +) +def test_ufunc_method_signatures(methodname: str): + method = getattr(np.ufunc, methodname) + + try: + _ = inspect.signature(method) + except ValueError as e: + pytest.fail(e.args[0]) + + def test_trivial_loop_invalid_cast(): # This tests the fast-path "invalid cast", see gh-19904. with pytest.raises(TypeError, From 9d2f82eda826f952d06c971803fd6d004f6f33ec Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 30 Oct 2025 21:48:27 +0100 Subject: [PATCH 0775/1018] TYP: update the `ufunc` method stubs to match their new signatures --- numpy/__init__.pyi | 43 +- numpy/_typing/_ufunc.pyi | 487 +++++++++++----------- numpy/typing/tests/data/reveal/ufuncs.pyi | 38 +- 3 files changed, 304 insertions(+), 264 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 86cae3b3e48f..6f4163626c3c 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -6,7 +6,7 @@ import ctypes as ct import array as _array import datetime as dt from abc import abstractmethod -from types import EllipsisType, GetSetDescriptorType, ModuleType, TracebackType, MappingProxyType, GenericAlias +from types import EllipsisType, ModuleType, TracebackType, MappingProxyType, GenericAlias from decimal import Decimal from fractions import Fraction from uuid import UUID @@ -5671,7 +5671,7 @@ class ufunc: @property def __name__(self) -> LiteralString: ... @property - def __qualname__(self) -> LiteralString: ... + def __qualname__(self) -> LiteralString: ... # pyright: ignore[reportIncompatibleVariableOverride] @property def __doc__(self) -> str: ... # type: ignore[override] @property @@ -5700,18 +5700,43 @@ class ufunc: @property def signature(self) -> LiteralString | None: ... - def __call__(self, *args: Any, **kwargs: Any) -> Any: ... + def __call__(self, /, *args: Any, **kwargs: Any) -> Any: ... + # The next four methods will always exist, but they will just # raise a ValueError ufuncs with that don't accept two input # arguments and return one output argument. Because of that we # can't type them very precisely. - def reduce(self, /, *args: Any, **kwargs: Any) -> Any: ... - def accumulate(self, /, *args: Any, **kwargs: Any) -> NDArray[Any]: ... - def reduceat(self, /, *args: Any, **kwargs: Any) -> NDArray[Any]: ... - def outer(self, *args: Any, **kwargs: Any) -> Any: ... - # Similarly at won't be defined for ufuncs that return multiple + def accumulate( + self, + array: ArrayLike, + /, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: ndarray | EllipsisType | None = None, + ) -> NDArray[Incomplete]: ... 
+ def reduce( + self, + array: ArrayLike, + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: ndarray | EllipsisType | None = None, + **kwargs: Incomplete, + ) -> Incomplete: ... + def reduceat( + self, + array: ArrayLike, + /, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: ndarray | EllipsisType | None = None, + ) -> NDArray[Incomplete]: ... + def outer(self, A: ArrayLike, B: ArrayLike, /, **kwargs: Incomplete) -> NDArray[Incomplete]: ... + + # Similarly `at` won't be defined for ufuncs that return multiple # outputs, so we can't type it very precisely. - def at(self, /, *args: Any, **kwargs: Any) -> None: ... + def at(self, a: ndarray, indices: _ArrayLikeInt_co, b: ArrayLike | None = None, /) -> None: ... # def resolve_dtypes( diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi index bcb423e58110..9d3fa0e5335c 100644 --- a/numpy/_typing/_ufunc.pyi +++ b/numpy/_typing/_ufunc.pyi @@ -6,12 +6,14 @@ four private subclasses, one for each combination of `~ufunc.nin` and `~ufunc.nout`. """ # noqa: PYI021 +from _typeshed import Incomplete from types import EllipsisType from typing import ( Any, Generic, Literal, LiteralString, + Never, NoReturn, Protocol, SupportsIndex, @@ -49,7 +51,7 @@ _Signature = TypeVar("_Signature", bound=LiteralString, covariant=True) _NIn = TypeVar("_NIn", bound=int, covariant=True) _NOut = TypeVar("_NOut", bound=int, covariant=True) _ReturnType_co = TypeVar("_ReturnType_co", covariant=True) -_ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) +_ArrayT = TypeVar("_ArrayT", bound=np.ndarray) @type_check_only class _SupportsArrayUFunc(Protocol): @@ -69,6 +71,11 @@ class _UFunc3Kwargs(TypedDict, total=False): subok: bool signature: _3Tuple[str | None] | str | None +@type_check_only +class _ReduceKwargs(TypedDict, total=False): + initial: Incomplete # = + where: _ArrayLikeBool_co | None # = True + # NOTE: `reduce`, `accumulate`, `reduceat` and `outer` raise a ValueError for # ufuncs that don't accept two input arguments and return one output argument. # In such cases the respective methods return `NoReturn` @@ -86,7 +93,7 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @property def __name__(self) -> _NameType: ... @property - def __qualname__(self) -> _NameType: ... + def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] @property def ntypes(self) -> _NTypes: ... @property @@ -105,62 +112,57 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i self, x1: _ScalarLike_co, /, - out: EllipsisType | None = ..., + out: None = None, *, - where: _ArrayLikeBool_co | None = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _2Tuple[str | None] = ..., - ) -> Any: ... + ) -> Incomplete: ... @overload def __call__( self, x1: ArrayLike, /, - out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, *, - where: _ArrayLikeBool_co | None = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _2Tuple[str | None] = ..., - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... 
@overload def __call__( self, x1: _SupportsArrayUFunc, /, - out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, *, - where: _ArrayLikeBool_co | None = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _2Tuple[str | None] = ..., - ) -> Any: ... + ) -> Incomplete: ... - def at( - self, - a: _SupportsArrayUFunc, - indices: _ArrayLikeInt_co, - /, - ) -> None: ... + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] - def reduce(self, *args: object, **kwargs: object) -> NoReturn: ... - def accumulate(self, *args: object, **kwargs: object) -> NoReturn: ... - def reduceat(self, *args: object, **kwargs: object) -> NoReturn: ... - def outer(self, *args: object, **kwargs: object) -> NoReturn: ... + def at(self, a: np.ndarray | _SupportsArrayUFunc, indices: _ArrayLikeInt_co, /) -> None: ... # type: ignore[override] @type_check_only class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] @property def __name__(self) -> _NameType: ... @property - def __qualname__(self) -> _NameType: ... + def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] @property def ntypes(self) -> _NTypes: ... @property @@ -184,121 +186,141 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> Any: ... + ) -> Incomplete: ... @overload # (array-like, array) -> array def __call__( self, x1: ArrayLike, - x2: NDArray[Any], + x2: np.ndarray, /, - out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... @overload # (array, array-like) -> array def __call__( self, - x1: NDArray[Any], + x1: np.ndarray, x2: ArrayLike, /, - out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... @overload # (array-like, array-like, out=array) -> array def __call__( self, x1: ArrayLike, x2: ArrayLike, /, - out: NDArray[Any] | tuple[NDArray[Any]], + out: np.ndarray | tuple[np.ndarray], *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... @overload # (array-like, array-like) -> array | scalar def __call__( self, x1: ArrayLike, x2: ArrayLike, /, - out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any] | Any: ... + ) -> NDArray[Incomplete] | Incomplete: ... - def at( + def accumulate( self, - a: NDArray[Any], - indices: _ArrayLikeInt_co, - b: ArrayLike, + array: ArrayLike, /, - ) -> None: ... 
+ axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: np.ndarray | EllipsisType | None = None, + ) -> NDArray[Incomplete]: ... + @overload # type: ignore[override] + def reduce( # out=None (default), keepdims=False (default) + self, + array: ArrayLike, + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: Literal[False] = False, + **kwargs: Unpack[_ReduceKwargs], + ) -> Incomplete: ... + @overload # out=ndarray or out=... def reduce( self, array: ArrayLike, - axis: _ShapeLike | None = ..., - dtype: DTypeLike | None = ..., - out: NDArray[Any] | EllipsisType | None = ..., - keepdims: bool = ..., - initial: Any = ..., - where: _ArrayLikeBool_co = ..., - ) -> Any: ... - - def accumulate( + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + *, + out: np.ndarray | EllipsisType, + keepdims: bool = False, + **kwargs: Unpack[_ReduceKwargs], + ) -> NDArray[Incomplete]: ... + @overload # keepdims=True + def reduce( self, array: ArrayLike, - axis: SupportsIndex = ..., - dtype: DTypeLike | None = ..., - out: NDArray[Any] | EllipsisType | None = ..., - ) -> NDArray[Any]: ... + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: np.ndarray | EllipsisType | None = None, + *, + keepdims: Literal[True], + **kwargs: Unpack[_ReduceKwargs], + ) -> NDArray[Incomplete]: ... def reduceat( self, array: ArrayLike, + /, indices: _ArrayLikeInt_co, - axis: SupportsIndex = ..., - dtype: DTypeLike | None = ..., - out: NDArray[Any] | EllipsisType | None = ..., - ) -> NDArray[Any]: ... + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: np.ndarray | EllipsisType | None = None, + ) -> NDArray[Incomplete]: ... - @overload # (scalar, scalar) -> scalar - def outer( + @overload # type: ignore[override] + def outer( # (scalar, scalar) -> scalar self, A: _ScalarLike_co, B: _ScalarLike_co, /, *, - out: EllipsisType | None = None, + out: None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> Any: ... + ) -> Incomplete: ... @overload # (array-like, array) -> array def outer( self, A: ArrayLike, - B: NDArray[Any], + B: np.ndarray, /, *, - out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... @overload # (array, array-like) -> array def outer( self, - A: NDArray[Any], + A: np.ndarray, B: ArrayLike, /, *, - out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... @overload # (array-like, array-like, out=array) -> array def outer( self, @@ -306,10 +328,10 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i B: ArrayLike, /, *, - out: NDArray[Any] | tuple[NDArray[Any]], + out: np.ndarray | tuple[np.ndarray] | EllipsisType, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... 
@overload # (array-like, array-like) -> array | scalar def outer( self, @@ -317,17 +339,25 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i B: ArrayLike, /, *, - out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, + out: None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any] | Any: ... + ) -> NDArray[Incomplete] | Incomplete: ... + + def at( # type: ignore[override] + self, + a: np.ndarray | _SupportsArrayUFunc, + indices: _ArrayLikeInt_co, + b: ArrayLike, + /, + ) -> None: ... @type_check_only class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] @property def __name__(self) -> _NameType: ... @property - def __qualname__(self) -> _NameType: ... + def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] @property def ntypes(self) -> _NTypes: ... @property @@ -350,58 +380,58 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i /, *, out: EllipsisType | None = ..., - where: _ArrayLikeBool_co | None = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _3Tuple[str | None] = ..., - ) -> _2Tuple[Any]: ... + ) -> _2Tuple[Incomplete]: ... @overload def __call__( self, x1: ArrayLike, - out1: NDArray[Any] | EllipsisType | None = ..., - out2: NDArray[Any] | None = ..., + out1: np.ndarray | EllipsisType | None = ..., + out2: np.ndarray | None = ..., /, *, - out: _2Tuple[NDArray[Any]] | EllipsisType = ..., - where: _ArrayLikeBool_co | None = ..., + out: _2Tuple[np.ndarray] | EllipsisType = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _3Tuple[str | None] = ..., - ) -> _2Tuple[NDArray[Any]]: ... + ) -> _2Tuple[NDArray[Incomplete]]: ... @overload def __call__( self, x1: _SupportsArrayUFunc, - out1: NDArray[Any] | EllipsisType | None = ..., - out2: NDArray[Any] | None = ..., + out1: np.ndarray | EllipsisType | None = ..., + out2: np.ndarray | None = ..., /, *, - out: _2Tuple[NDArray[Any]] | EllipsisType = ..., - where: _ArrayLikeBool_co | None = ..., + out: _2Tuple[np.ndarray] | EllipsisType = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _3Tuple[str | None] = ..., - ) -> _2Tuple[Any]: ... + ) -> _2Tuple[Incomplete]: ... - def at(self, *args: object, **kwargs: object) -> NoReturn: ... - def reduce(self, *args: object, **kwargs: object) -> NoReturn: ... - def accumulate(self, *args: object, **kwargs: object) -> NoReturn: ... - def reduceat(self, *args: object, **kwargs: object) -> NoReturn: ... - def outer(self, *args: object, **kwargs: object) -> NoReturn: ... + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, /) -> NoReturn: ... 
# type: ignore[override] @type_check_only class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] @property def __name__(self) -> _NameType: ... @property - def __qualname__(self) -> _NameType: ... + def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] @property def ntypes(self) -> _NTypes: ... @property @@ -425,43 +455,43 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i /, *, out: EllipsisType | None = ..., - where: _ArrayLikeBool_co | None = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _4Tuple[str | None] = ..., - ) -> _2Tuple[Any]: ... + ) -> _2Tuple[Incomplete]: ... @overload def __call__( self, x1: ArrayLike, x2: ArrayLike, - out1: NDArray[Any] | EllipsisType | None = ..., - out2: NDArray[Any] | None = ..., + out1: np.ndarray | EllipsisType | None = ..., + out2: np.ndarray | None = ..., /, *, - out: _2Tuple[NDArray[Any]] | EllipsisType = ..., - where: _ArrayLikeBool_co | None = ..., + out: _2Tuple[np.ndarray] | EllipsisType = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _4Tuple[str | None] = ..., - ) -> _2Tuple[NDArray[Any]]: ... + ) -> _2Tuple[NDArray[Incomplete]]: ... - def at(self, *args: object, **kwargs: object) -> NoReturn: ... - def reduce(self, *args: object, **kwargs: object) -> NoReturn: ... - def accumulate(self, *args: object, **kwargs: object) -> NoReturn: ... - def reduceat(self, *args: object, **kwargs: object) -> NoReturn: ... - def outer(self, *args: object, **kwargs: object) -> NoReturn: ... + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, b: Never, /) -> NoReturn: ... # type: ignore[override] @type_check_only class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature]): # type: ignore[misc] @property def __name__(self) -> _NameType: ... @property - def __qualname__(self) -> _NameType: ... + def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] @property def ntypes(self) -> _NTypes: ... @property @@ -482,36 +512,36 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] x1: ArrayLike, x2: ArrayLike, /, - out: EllipsisType | None = ..., + out: EllipsisType | None = None, *, + dtype: DTypeLike | None = None, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _3Tuple[str | None] = ..., axes: list[_2Tuple[SupportsIndex]] = ..., - ) -> Any: ... + ) -> Incomplete: ... 
@overload def __call__( self, x1: ArrayLike, x2: ArrayLike, /, - out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType, + out: np.ndarray | tuple[np.ndarray] | EllipsisType, *, + dtype: DTypeLike | None = None, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _3Tuple[str | None] = ..., axes: list[_2Tuple[SupportsIndex]] = ..., - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... - def at(self, *args: object, **kwargs: object) -> NoReturn: ... - def reduce(self, *args: object, **kwargs: object) -> NoReturn: ... - def accumulate(self, *args: object, **kwargs: object) -> NoReturn: ... - def reduceat(self, *args: object, **kwargs: object) -> NoReturn: ... - def outer(self, *args: object, **kwargs: object) -> NoReturn: ... + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, b: Never, /) -> NoReturn: ... # type: ignore[override] @type_check_only class _PyFunc_Kwargs_Nargs2(TypedDict, total=False): @@ -569,7 +599,7 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, x1: _ScalarLike_co, /, - out: EllipsisType | None = ..., + out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], ) -> _ReturnType_co: ... @overload @@ -577,7 +607,7 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, x1: ArrayLike, /, - out: EllipsisType | None = ..., + out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], ) -> _ReturnType_co | NDArray[np.object_]: ... @overload @@ -593,15 +623,16 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, x1: _SupportsArrayUFunc, /, - out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], - ) -> Any: ... + ) -> Incomplete: ... - def at(self, a: _SupportsArrayUFunc, ixs: _ArrayLikeInt_co, /) -> None: ... - def reduce(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def accumulate(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def reduceat(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def outer(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + + def at(self, a: np.ndarray | _SupportsArrayUFunc, indices: _ArrayLikeInt_co, /) -> None: ... # type: ignore[override] @type_check_only class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: ignore[misc] @@ -624,7 +655,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno x1: _ScalarLike_co, x2: _ScalarLike_co, /, - out: EllipsisType | None = ..., + out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ReturnType_co: ... 
@overload @@ -651,151 +682,127 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno x1: _SupportsArrayUFunc, x2: _SupportsArrayUFunc | ArrayLike, /, - out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> Any: ... + ) -> Incomplete: ... @overload def __call__( self, x1: ArrayLike, x2: _SupportsArrayUFunc, /, - out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> Any: ... + ) -> Incomplete: ... - def at(self, a: _SupportsArrayUFunc, ixs: _ArrayLikeInt_co, b: ArrayLike, /) -> None: ... - - @overload - def reduce( + @overload # type: ignore[override] + def accumulate( self, + array: ArrayLike, /, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: EllipsisType | None = None, + ) -> NDArray[np.object_]: ... + @overload + def accumulate( + self, array: ArrayLike, - axis: _ShapeLike | None, - dtype: DTypeLike | None, + /, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + *, out: _ArrayT, - keepdims: bool = ..., - initial: _ScalarLike_co = ..., - where: _ArrayLikeBool_co = ..., ) -> _ArrayT: ... - @overload - def reduce( + + @overload # type: ignore[override] + def reduce( # out=array self, - /, array: ArrayLike, - axis: _ShapeLike | None = ..., - dtype: DTypeLike | None = ..., + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, *, out: _ArrayT | tuple[_ArrayT], - keepdims: bool = ..., - initial: _ScalarLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool = False, + **kwargs: Unpack[_ReduceKwargs], ) -> _ArrayT: ... - @overload + @overload # out=... def reduce( self, + array: ArrayLike, /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + *, + out: EllipsisType, + keepdims: bool = False, + **kwargs: Unpack[_ReduceKwargs], + ) -> NDArray[np.object_]: ... + @overload # keepdims=True + def reduce( + self, array: ArrayLike, - axis: _ShapeLike | None = ..., - dtype: DTypeLike | None = ..., - out: EllipsisType | None = ..., + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: EllipsisType | None = None, *, keepdims: Literal[True], - initial: _ScalarLike_co = ..., - where: _ArrayLikeBool_co = ..., + **kwargs: Unpack[_ReduceKwargs], ) -> NDArray[np.object_]: ... @overload def reduce( self, - /, array: ArrayLike, - axis: _ShapeLike | None = ..., - dtype: DTypeLike | None = ..., - out: EllipsisType | None = ..., - keepdims: bool = ..., - initial: _ScalarLike_co = ..., - where: _ArrayLikeBool_co = ..., + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: EllipsisType | None = None, + keepdims: bool = False, + **kwargs: Unpack[_ReduceKwargs], ) -> _ReturnType_co | NDArray[np.object_]: ... - @overload + @overload # type: ignore[override] def reduceat( self, - /, array: ArrayLike, - indices: _ArrayLikeInt_co, - axis: SupportsIndex, - dtype: DTypeLike | None, - out: _ArrayT, - ) -> _ArrayT: ... - @overload - def reduceat( - self, /, - array: ArrayLike, indices: _ArrayLikeInt_co, - axis: SupportsIndex = ..., - dtype: DTypeLike | None = ..., + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, *, out: _ArrayT | tuple[_ArrayT], ) -> _ArrayT: ... 
@overload def reduceat( self, - /, array: ArrayLike, + /, indices: _ArrayLikeInt_co, - axis: SupportsIndex = ..., - dtype: DTypeLike | None = ..., - out: EllipsisType | None = ..., + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: EllipsisType | None = None, ) -> NDArray[np.object_]: ... @overload def reduceat( self, - /, array: _SupportsArrayUFunc, - indices: _ArrayLikeInt_co, - axis: SupportsIndex = ..., - dtype: DTypeLike | None = ..., - out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., - ) -> Any: ... - - @overload - def accumulate( - self, /, - array: ArrayLike, - axis: SupportsIndex, - dtype: DTypeLike | None, - out: _ArrayT, - ) -> _ArrayT: ... - @overload - def accumulate( - self, - /, - array: ArrayLike, - axis: SupportsIndex = ..., - dtype: DTypeLike | None = ..., - *, - out: _ArrayT | tuple[_ArrayT], - ) -> _ArrayT: ... - @overload - def accumulate( - self, - /, - array: ArrayLike, - axis: SupportsIndex = ..., - dtype: DTypeLike | None = ..., - out: EllipsisType | None = ..., - ) -> NDArray[np.object_]: ... + indices: _ArrayLikeInt_co, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, + ) -> Incomplete: ... - @overload + @overload # type: ignore[override] def outer( self, A: _ScalarLike_co, B: _ScalarLike_co, /, *, - out: EllipsisType | None = ..., + out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ReturnType_co: ... @overload @@ -805,7 +812,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno B: ArrayLike, /, *, - out: EllipsisType | None = ..., + out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ReturnType_co | NDArray[np.object_]: ... @overload @@ -825,9 +832,9 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno B: _SupportsArrayUFunc | ArrayLike, /, *, - out: EllipsisType | None = ..., + out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> Any: ... + ) -> Incomplete: ... @overload def outer( self, @@ -835,9 +842,17 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno B: _SupportsArrayUFunc | ArrayLike, /, *, - out: EllipsisType | None = ..., + out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> Any: ... + ) -> Incomplete: ... + + def at( # type: ignore[override] + self, + a: np.ndarray | _SupportsArrayUFunc, + indices: _ArrayLikeInt_co, + b: ArrayLike, + /, + ) -> None: ... @type_check_only class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # type: ignore[misc] @@ -893,15 +908,15 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ x3: _SupportsArrayUFunc | ArrayLike, /, *xs: _SupportsArrayUFunc | ArrayLike, - out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], - ) -> Any: ... + ) -> Incomplete: ... - def at(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def reduce(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def accumulate(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def reduceat(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def outer(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... 
# type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, /, *args: Never) -> NoReturn: ... # type: ignore[override] @type_check_only class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]): # type: ignore[misc] @@ -949,12 +964,12 @@ class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]) x1: _SupportsArrayUFunc | ArrayLike, /, *xs: _SupportsArrayUFunc | ArrayLike, - out: _2PTuple[NDArray[Any]] | EllipsisType | None = ..., + out: _2PTuple[np.ndarray] | EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], - ) -> Any: ... + ) -> Incomplete: ... - def at(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def reduce(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def accumulate(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def reduceat(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def outer(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, /, *args: Never) -> NoReturn: ... # type: ignore[override] diff --git a/numpy/typing/tests/data/reveal/ufuncs.pyi b/numpy/typing/tests/data/reveal/ufuncs.pyi index 0bfe4df9ad8d..eda92f2117c6 100644 --- a/numpy/typing/tests/data/reveal/ufuncs.pyi +++ b/numpy/typing/tests/data/reveal/ufuncs.pyi @@ -99,44 +99,44 @@ assert_type(np.bitwise_count(i8), Any) assert_type(np.bitwise_count(AR_i8), npt.NDArray[Any]) def test_absolute_outer_invalid() -> None: - assert_type(np.absolute.outer(), NoReturn) + assert_type(np.absolute.outer(AR_f8, AR_f8), NoReturn) # type: ignore[arg-type] def test_frexp_outer_invalid() -> None: - assert_type(np.frexp.outer(), NoReturn) + assert_type(np.frexp.outer(AR_f8, AR_f8), NoReturn) # type: ignore[arg-type] def test_divmod_outer_invalid() -> None: - assert_type(np.divmod.outer(), NoReturn) + assert_type(np.divmod.outer(AR_f8, AR_f8), NoReturn) # type: ignore[arg-type] def test_matmul_outer_invalid() -> None: - assert_type(np.matmul.outer(), NoReturn) + assert_type(np.matmul.outer(AR_f8, AR_f8), NoReturn) # type: ignore[arg-type] def test_absolute_reduceat_invalid() -> None: - assert_type(np.absolute.reduceat(), NoReturn) + assert_type(np.absolute.reduceat(AR_f8, AR_i8), NoReturn) # type: ignore[arg-type] def test_frexp_reduceat_invalid() -> None: - assert_type(np.frexp.reduceat(), NoReturn) + assert_type(np.frexp.reduceat(AR_f8, AR_i8), NoReturn) # type: ignore[arg-type] def test_divmod_reduceat_invalid() -> None: - assert_type(np.divmod.reduceat(), NoReturn) + assert_type(np.divmod.reduceat(AR_f8, AR_i8), NoReturn) # type: ignore[arg-type] def test_matmul_reduceat_invalid() -> None: - assert_type(np.matmul.reduceat(), NoReturn) + assert_type(np.matmul.reduceat(AR_f8, AR_i8), NoReturn) # type: ignore[arg-type] def test_absolute_reduce_invalid() -> None: - assert_type(np.absolute.reduce(), NoReturn) + assert_type(np.absolute.reduce(AR_f8), NoReturn) # type: ignore[arg-type] def test_frexp_reduce_invalid() -> None: - assert_type(np.frexp.reduce(), NoReturn) + 
assert_type(np.frexp.reduce(AR_f8), NoReturn) # type: ignore[arg-type] def test_divmod_reduce_invalid() -> None: - assert_type(np.divmod.reduce(), NoReturn) + assert_type(np.divmod.reduce(AR_f8), NoReturn) # type: ignore[arg-type] def test_matmul_reduce_invalid() -> None: - assert_type(np.matmul.reduce(), NoReturn) + assert_type(np.matmul.reduce(AR_f8), NoReturn) # type: ignore[arg-type] def test_absolute_accumulate_invalid() -> None: - assert_type(np.absolute.accumulate(), NoReturn) + assert_type(np.absolute.accumulate(AR_f8), NoReturn) # type: ignore[arg-type] def test_frexp_accumulate_invalid() -> None: - assert_type(np.frexp.accumulate(), NoReturn) + assert_type(np.frexp.accumulate(AR_f8), NoReturn) # type: ignore[arg-type] def test_divmod_accumulate_invalid() -> None: - assert_type(np.divmod.accumulate(), NoReturn) + assert_type(np.divmod.accumulate(AR_f8), NoReturn) # type: ignore[arg-type] def test_matmul_accumulate_invalid() -> None: - assert_type(np.matmul.accumulate(), NoReturn) + assert_type(np.matmul.accumulate(AR_f8), NoReturn) # type: ignore[arg-type] def test_frexp_at_invalid() -> None: - assert_type(np.frexp.at(), NoReturn) + assert_type(np.frexp.at(AR_f8, i8), NoReturn) # type: ignore[arg-type] def test_divmod_at_invalid() -> None: - assert_type(np.divmod.at(), NoReturn) + assert_type(np.divmod.at(AR_f8, i8, AR_f8), NoReturn) # type: ignore[arg-type] def test_matmul_at_invalid() -> None: - assert_type(np.matmul.at(), NoReturn) + assert_type(np.matmul.at(AR_f8, i8, AR_f8), NoReturn) # type: ignore[arg-type] From 3150dec424f75b3504a21e7fc990ffbb3075abab Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Thu, 30 Oct 2025 23:00:31 -0400 Subject: [PATCH 0776/1018] DOC: Add a plot to the 'unwrap' docstring. [skip actions] [skip azp] [skip cirrus] --- numpy/lib/_function_base_impl.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 29288388f9ed..44db48f7a00c 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -1786,6 +1786,7 @@ def unwrap(p, discont=None, axis=-1, *, period=2 * pi): Examples -------- >>> import numpy as np + >>> phase = np.linspace(0, np.pi, num=5) >>> phase[3:] += np.pi >>> phase @@ -1803,6 +1804,23 @@ def unwrap(p, discont=None, axis=-1, *, period=2 * pi): array([-180., -140., -100., -60., -20., 20., 60., 100., 140., 180., 220., 260., 300., 340., 380., 420., 460., 500., 540.]) + + This example plots the unwrapping of the wrapped input signal `w`. + First generate `w`, then apply `unwrap` to get `u`. + + >>> t = np.linspace(0, 25, 801) + >>> w = np.mod(1.5 * np.sin(1.1 * t + 0.26) * (1 - t / 6 + (t / 23) ** 3), 2.0) - 1 + >>> u = np.unwrap(w, period=2.0) + + Plot `w` and `u`. 
+
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot(t, w, label='w (a signal wrapped to [-1, 1])')
+    >>> plt.plot(t, u, linewidth=2.5, alpha=0.5, label='unwrap(w, period=2)')
+    >>> plt.xlabel('t')
+    >>> plt.grid(alpha=0.6)
+    >>> plt.legend(framealpha=1, shadow=True)
+    >>> plt.show()
     """
     p = asarray(p)
     nd = p.ndim

From e535e3c3b64fd77c7c816b7cb06da67a5434825b Mon Sep 17 00:00:00 2001
From: Abhishek Tiwari
Date: Fri, 31 Oct 2025 12:46:53 +0530
Subject: [PATCH 0777/1018] Update numpy/_core/tests/test_arrayprint.py

Co-authored-by: Matti Picus
---
 doc/release/upcoming_changes/30068.expired.rst | 7 +++++--
 numpy/_core/tests/test_arrayprint.py           | 2 +-
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/doc/release/upcoming_changes/30068.expired.rst b/doc/release/upcoming_changes/30068.expired.rst
index a5363771c4b7..56678465e4b9 100644
--- a/doc/release/upcoming_changes/30068.expired.rst
+++ b/doc/release/upcoming_changes/30068.expired.rst
@@ -3,6 +3,9 @@
 The following long-deprecated APIs have been removed or converted to errors:
 
-* The ``style`` parameter has been removed from ``numpy.array2string``. This argument had no effect since Numpy 1.14.0.
+* The ``style`` parameter has been removed from ``numpy.array2string``.
+  This argument has had no effect since NumPy 1.14.0.
 
-* Calling ``np.sum(generator)`` directly on a generator object now raises a `TypeError`. This behavior was deprecated in NumPy 1.15.0. Use ``np.sum(np.fromiter(generator))`` or the python ``sum`` builtin instead.
+* Calling ``np.sum(generator)`` directly on a generator object now raises a `TypeError`.
+  This behavior was deprecated in NumPy 1.15.0. Use ``np.sum(np.fromiter(generator))``
+  or the Python ``sum`` builtin instead.

diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py
index 009a42fdb1b9..06d1306dd408 100644
--- a/numpy/_core/tests/test_arrayprint.py
+++ b/numpy/_core/tests/test_arrayprint.py
@@ -735,7 +735,7 @@ def test_0d_arrays(self):
         # str is unaffected
         assert_equal(str(x), "1")
 
-        # gh-10934 style was broken in legacy mode, check it works
+        # check it works
        
 np.array2string(np.array(1.), legacy='1.13')
 
     def test_float_spacing(self):

From 5d92c7ac7953e060bea20803e45cc8810796cb48 Mon Sep 17 00:00:00 2001
From: Joren Hammudoglu
Date: Fri, 31 Oct 2025 17:14:18 +0100
Subject: [PATCH 0778/1018] BUG, TYP: Fix ``ma.core._convert2ma`` function signatures (#30099)

* BUG: Fix ``ma.core._convert2ma`` function signatures
* TYP: Annotate `ma.core.arange`
* TYP: Annotate `ma.core.clip`
* TYP: Annotate `ma.core.{empty, zeros, ones}`
* TYP: Annotate `ma.core.{empty, zeros, ones}_like`
* TYP: fix incorrect `ma.empty` call in a type-test
* TYP: Annotate `ma.core.identity`
* TYP: Annotate `ma.core.indices`
* TYP: Annotate `ma.core.squeeze`
* TYP: Update stubs for `ma.core._convert2ma`
* BUG: Doctest bug workaround
---
 numpy/ma/core.py                             | 123 +++--
 numpy/ma/core.pyi                            | 531 ++++++++++++++++++-
 numpy/ma/tests/test_core.py                  |  17 +
 numpy/typing/tests/data/pass/recfunctions.py |  11 +-
 4 files changed, 596 insertions(+), 86 deletions(-)

diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 7803556fb660..95bec501dc73 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -134,6 +134,7 @@ def doc_note(initialdoc, note):
     return ''.join(notesplit[:1] + [notedoc] + notesplit[1:])
 
 
+# TODO: remove/deprecate once `ma.extras._fromnxfunction.getdoc` no longer uses it
 def get_object_signature(obj):
     """
     Get the signature from obj
@@ -8740,78 +8741,76 @@ def fromflex(fxarray):
     return
masked_array(fxarray['_data'], mask=fxarray['_mask']) -class _convert2ma: +def _convert2ma(funcname: str, np_ret: str, np_ma_ret: str, + params: dict[str, str] | None = None): + """Convert function from numpy to numpy.ma.""" + func = getattr(np, funcname) + params = params or {} - """ - Convert functions from numpy to numpy.ma. + @functools.wraps(func, assigned=set(functools.WRAPPER_ASSIGNMENTS) - {"__module__"}) + def wrapper(*args, **kwargs): + common_params = kwargs.keys() & params.keys() + extras = params | {p: kwargs.pop(p) for p in common_params} - Parameters - ---------- - _methodname : string - Name of the method to transform. + result = func.__call__(*args, **kwargs).view(MaskedArray) - """ - __doc__ = None + if "fill_value" in common_params: + result.fill_value = extras["fill_value"] + if "hardmask" in common_params: + result._hardmask = bool(extras["hardmask"]) - def __init__(self, funcname, np_ret, np_ma_ret, params=None): - self._func = getattr(np, funcname) - self.__doc__ = self.getdoc(np_ret, np_ma_ret) - self._extras = params or {} + return result - def getdoc(self, np_ret, np_ma_ret): - "Return the doc of the function (from the doc of the method)." - doc = getattr(self._func, '__doc__', None) - sig = get_object_signature(self._func) - if doc: - doc = self._replace_return_type(doc, np_ret, np_ma_ret) - # Add the signature of the function at the beginning of the doc - if sig: - sig = f"{self._func.__name__}{sig}\n" - doc = sig + doc - return doc + # workaround for a doctest bug in Python 3.11 that incorrectly assumes `__code__` + # exists on wrapped functions + del wrapper.__wrapped__ - def _replace_return_type(self, doc, np_ret, np_ma_ret): - """ - Replace documentation of ``np`` function's return type. + # `arange`, `empty`, `empty_like`, `frombuffer`, and `zeros` have no signature + try: + signature = inspect.signature(func) + except ValueError: + signature = inspect.Signature([ + inspect.Parameter('args', inspect.Parameter.VAR_POSITIONAL), + inspect.Parameter('kwargs', inspect.Parameter.VAR_KEYWORD), + ]) + + if params: + sig_params = list(signature.parameters.values()) + + # pop `**kwargs` if present + sig_kwargs = None + if sig_params[-1].kind is inspect.Parameter.VAR_KEYWORD: + sig_kwargs = sig_params.pop() + + # add new keyword-only parameters + for param_name, default in params.items(): + new_param = inspect.Parameter( + param_name, + inspect.Parameter.KEYWORD_ONLY, + default=default, + ) + sig_params.append(new_param) - Replaces it with the proper type for the ``np.ma`` function. + # re-append `**kwargs` if it was present + if sig_kwargs: + sig_params.append(sig_kwargs) - Parameters - ---------- - doc : str - The documentation of the ``np`` method. - np_ret : str - The return type string of the ``np`` method that we want to - replace. (e.g. "out : ndarray") - np_ma_ret : str - The return type string of the ``np.ma`` method. - (e.g. "out : MaskedArray") - """ - if np_ret not in doc: - raise RuntimeError( - f"Failed to replace `{np_ret}` with `{np_ma_ret}`. " - f"The documentation string for return type, {np_ret}, is not " - f"found in the docstring for `np.{self._func.__name__}`. " - f"Fix the docstring for `np.{self._func.__name__}` or " - "update the expected string for return type." 
- ) + signature = signature.replace(parameters=sig_params) - return doc.replace(np_ret, np_ma_ret) + wrapper.__signature__ = signature - def __call__(self, *args, **params): - # Find the common parameters to the call and the definition - _extras = self._extras - common_params = set(params).intersection(_extras) - # Drop the common parameters from the call - for p in common_params: - _extras[p] = params.pop(p) - # Get the result - result = self._func.__call__(*args, **params).view(MaskedArray) - if "fill_value" in common_params: - result.fill_value = _extras.get("fill_value", None) - if "hardmask" in common_params: - result._hardmask = bool(_extras.get("hard_mask", False)) - return result + # __doc__ is None when using `python -OO ...` + if func.__doc__ is not None: + assert np_ret in func.__doc__, ( + f"Failed to replace `{np_ret}` with `{np_ma_ret}`. " + f"The documentation string for return type, {np_ret}, is not " + f"found in the docstring for `np.{func.__name__}`. " + f"Fix the docstring for `np.{func.__name__}` or " + "update the expected string for return type." + ) + wrapper.__doc__ = inspect.cleandoc(func.__doc__).replace(np_ret, np_ma_ret) + + return wrapper arange = _convert2ma( diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 8e6309504445..1656db47fe0b 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -18,10 +18,11 @@ from typing import ( SupportsIndex, SupportsInt, TypeAlias, + Unpack, final, overload, ) -from typing_extensions import ParamSpec, TypeIs, TypeVar, override +from typing_extensions import Buffer, ParamSpec, TypeIs, TypeVar, override import numpy as np from numpy import ( @@ -30,6 +31,7 @@ from numpy import ( _HasDTypeWithRealAndImag, _ModeKind, _OrderACF, + _OrderCF, _OrderKACF, _PartitionKind, _SortKind, @@ -67,6 +69,7 @@ from numpy import ( unsignedinteger, void, ) +from numpy._core.fromnumeric import _UFuncKwargs # type-check only from numpy._globals import _NoValueType from numpy._typing import ( ArrayLike, @@ -100,6 +103,8 @@ from numpy._typing import ( _ScalarLike_co, _Shape, _ShapeLike, + _SupportsArrayFunc, + _SupportsDType, ) from numpy._typing._dtype_like import _VoidDTypeLike @@ -295,6 +300,7 @@ _ScalarT = TypeVar("_ScalarT", bound=generic) _ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) _NumberT = TypeVar("_NumberT", bound=number) _RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) +_ArangeScalarT = TypeVar("_ArangeScalarT", bound=_ArangeScalar) _UFuncT_co = TypeVar( "_UFuncT_co", # the `| Callable` simplifies self-binding to the ufunc's callable signature @@ -327,6 +333,7 @@ _ConvertibleToFloat: TypeAlias = SupportsFloat | SupportsIndex | _CharLike_co _ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co _ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None _ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None +_ArangeScalar: TypeAlias = floating | integer | datetime64 | timedelta64 _NoMaskType: TypeAlias = np.bool_[Literal[False]] # type of `np.False_` _MaskArray: TypeAlias = np.ndarray[_ShapeOrAnyT, np.dtype[np.bool_]] @@ -3130,24 +3137,508 @@ def allclose(a: ArrayLike, b: ArrayLike, masked_equal: bool = True, rtol: float def fromflex(fxarray): ... -class _convert2ma: - def __init__(self, /, funcname: str, np_ret: str, np_ma_ret: str, params: dict[str, Any] | None = None) -> None: ... 
- def __call__(self, /, *args: object, **params: object) -> Any: ... - def getdoc(self, /, np_ret: str, np_ma_ret: str) -> str | None: ... - -arange: _convert2ma = ... -clip: _convert2ma = ... -empty: _convert2ma = ... -empty_like: _convert2ma = ... -frombuffer: _convert2ma = ... -fromfunction: _convert2ma = ... -identity: _convert2ma = ... -indices: _convert2ma = ... -ones: _convert2ma = ... -ones_like: _convert2ma = ... -squeeze: _convert2ma = ... -zeros: _convert2ma = ... -zeros_like: _convert2ma = ... - def append(a, b, axis=None): ... def dot(a, b, strict=False, out=None): ... + +# internal wrapper functions for the functions below +def _convert2ma( + funcname: str, + np_ret: str, + np_ma_ret: str, + params: dict[str, Any] | None = None, +) -> Callable[..., Any]: ... + +# keep roughly in sync with `_core.multiarray.arange` +@overload # int, dtype=None (default) +def arange( + start: int, + stop: int = ..., + step: int = ..., + /, + dtype: None = None, + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int], np.dtype[np.int_]]: ... +@overload # float, dtype=None (default) +def arange( + start: float, + stop: float = ..., + step: float = ..., + /, + dtype: None = None, + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int], np.dtype[np.float64 | Any]]: ... +@overload # integer | floating | datetime64 | timedelta64, dtype=None (default) +def arange( + start: _ArangeScalarT, + stop: _ArangeScalarT = ..., + step: _ArangeScalarT = ..., + /, + dtype: None = None, + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int], np.dtype[_ArangeScalarT]]: ... +@overload # dtype: known dtype-like (positional) +def arange( + start: _ArangeScalar, + stop: _ArangeScalar, + step: _ArangeScalar, + /, + dtype: _DTypeLike[_ScalarT], + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... +@overload # dtype: known dtype-like (keyword) +def arange( + start: _ArangeScalar, + stop: _ArangeScalar = ..., + step: _ArangeScalar = ..., + /, + *, + dtype: _DTypeLike[_ScalarT], + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... +@overload # dtype: unknown dtype +def arange( + start: _ArangeScalar, + stop: _ArangeScalar = ..., + step: _ArangeScalar = ..., + /, + dtype: DTypeLike | None = None, + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int]]: ... + +# based on `_core.fromnumeric.clip` +@overload +def clip( + a: _ScalarT, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> _ScalarT: ... 
+@overload +def clip( + a: NDArray[_ScalarT], + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> _MaskedArray[_ScalarT]: ... +@overload +def clip( + a: ArrayLike, + a_min: ArrayLike | None, + a_max: ArrayLike | None, + out: _MArrayT, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> _MArrayT: ... +@overload +def clip( + a: ArrayLike, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + *, + out: _MArrayT, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> _MArrayT: ... +@overload +def clip( + a: ArrayLike, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> Incomplete: ... + +# keep in sync with `_core.multiarray.ones` +@overload +def empty( + shape: SupportsIndex, + dtype: None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int], np.dtype[np.float64]]: ... +@overload +def empty( + shape: SupportsIndex, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int], _DTypeT]: ... +@overload +def empty( + shape: SupportsIndex, + dtype: type[_ScalarT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... +@overload +def empty( + shape: SupportsIndex, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int]]: ... +@overload # known shape +def empty( + shape: _AnyShapeT, + dtype: None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[_AnyShapeT, np.dtype[np.float64]]: ... +@overload +def empty( + shape: _AnyShapeT, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[_AnyShapeT, _DTypeT]: ... 
+@overload +def empty( + shape: _AnyShapeT, + dtype: type[_ScalarT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[_AnyShapeT, np.dtype[_ScalarT]]: ... +@overload +def empty( + shape: _AnyShapeT, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[_AnyShapeT]: ... +@overload # unknown shape +def empty( + shape: _ShapeLike, + dtype: None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[np.float64]: ... +@overload +def empty( + shape: _ShapeLike, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[_AnyShape, _DTypeT]: ... +@overload +def empty( + shape: _ShapeLike, + dtype: type[_ScalarT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[_ScalarT]: ... +@overload +def empty( + shape: _ShapeLike, + dtype: DTypeLike | None = None, + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray: ... + +# keep in sync with `_core.multiarray.empty_like` +@overload +def empty_like( + a: _MArrayT, + dtype: None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: Literal["cpu"] | None = None, +) -> _MArrayT: ... +@overload +def empty_like( + a: _ArrayLike[_ScalarT], + dtype: None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: Literal["cpu"] | None = None, +) -> _MaskedArray[_ScalarT]: ... +@overload +def empty_like( + a: object, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: Literal["cpu"] | None = None, +) -> _MaskedArray[_ScalarT]: ... +@overload +def empty_like( + a: object, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: Literal["cpu"] | None = None, +) -> _MaskedArray[Incomplete]: ... + +# This is a bit of a hack to avoid having to duplicate all those `empty` overloads for +# `ones` and `zeros`, that relies on the fact that empty/zeros/ones have identical +# type signatures, but may cause some type-checkers to report incorrect names in case +# of user errors. Mypy and Pyright seem to handle this just fine. +ones = empty +ones_like = empty_like +zeros = empty +zeros_like = empty_like + +# keep in sync with `_core.multiarray.frombuffer` +@overload +def frombuffer( + buffer: Buffer, + *, + count: SupportsIndex = -1, + offset: SupportsIndex = 0, + like: _SupportsArrayFunc | None = None, +) -> _MaskedArray[np.float64]: ... 
+@overload +def frombuffer( + buffer: Buffer, + dtype: _DTypeLike[_ScalarT], + count: SupportsIndex = -1, + offset: SupportsIndex = 0, + *, + like: _SupportsArrayFunc | None = None, +) -> _MaskedArray[_ScalarT]: ... +@overload +def frombuffer( + buffer: Buffer, + dtype: DTypeLike | None = float, + count: SupportsIndex = -1, + offset: SupportsIndex = 0, + *, + like: _SupportsArrayFunc | None = None, +) -> _MaskedArray[Incomplete]: ... + +# keep roughly in sync with `_core.numeric.fromfunction` +def fromfunction( + function: Callable[..., np.ndarray[_ShapeT, _DTypeT]], + shape: Sequence[int], + *, + dtype: DTypeLike | None = float, + like: _SupportsArrayFunc | None = None, + **kwargs: object, +) -> MaskedArray[_ShapeT, _DTypeT]: ... + +# keep roughly in sync with `_core.numeric.identity` +@overload +def identity( + n: int, + dtype: None = None, + *, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int, int], np.dtype[np.float64]]: ... +@overload +def identity( + n: int, + dtype: _DTypeLike[_ScalarT], + *, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int, int], np.dtype[_ScalarT]]: ... +@overload +def identity( + n: int, + dtype: DTypeLike | None = None, + *, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int, int], np.dtype[Incomplete]]: ... + +# keep roughly in sync with `_core.numeric.indices` +@overload +def indices( + dimensions: Sequence[int], + dtype: type[int] = int, + sparse: Literal[False] = False, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[np.intp]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: type[int], + sparse: Literal[True], + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[np.intp], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: type[int] = int, + *, + sparse: Literal[True], + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[np.intp], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: _DTypeLike[_ScalarT], + sparse: Literal[False] = False, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[_ScalarT]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: _DTypeLike[_ScalarT], + sparse: Literal[True], + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[_ScalarT], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike | None = int, + sparse: Literal[False] = False, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[Incomplete]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike | None, + sparse: Literal[True], + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[Incomplete], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike | None = int, + *, + sparse: Literal[True], + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[Incomplete], ...]: ... 
+ +# keep roughly in sync with `_core.fromnumeric.squeeze` +@overload +def squeeze( + a: _ArrayLike[_ScalarT], + axis: _ShapeLike | None = None, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[_ScalarT]: ... +@overload +def squeeze( + a: ArrayLike, + axis: _ShapeLike | None = None, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[Incomplete]: ... diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index bf7b94d720ce..0e4d42da0a5b 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -5977,3 +5977,20 @@ def test_uint_fill_value_and_filled(): ) def test_frommethod_signature(fn, signature): assert str(inspect.signature(fn)) == signature + + +@pytest.mark.parametrize( + ('fn', 'signature'), + [ + (np.ma.empty, "(*args, fill_value=None, hardmask=False, **kwargs)"), + (np.ma.empty_like, "(*args, **kwargs)"), + (np.ma.squeeze, "(a, axis=None, *, fill_value=None, hardmask=False)"), + ( + np.ma.identity, + "(n, dtype=None, *, like=None, fill_value=None, hardmask=False)", + ), + ] +) +def test_convert2ma_signature(fn, signature): + assert str(inspect.signature(fn)) == signature + assert fn.__module__ == 'numpy.ma.core' diff --git a/numpy/typing/tests/data/pass/recfunctions.py b/numpy/typing/tests/data/pass/recfunctions.py index cca0c3988708..586f0502b366 100644 --- a/numpy/typing/tests/data/pass/recfunctions.py +++ b/numpy/typing/tests/data/pass/recfunctions.py @@ -150,12 +150,15 @@ def test_stack_arrays() -> None: def test_find_duplicates() -> None: ndtype = np.dtype([("a", int)]) - a = np.ma.ones(7, mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) - assert_type(rfn.find_duplicates(a), np.ma.MaskedArray[Any, np.dtype[np.void]]) + a = np.ma.ones(7).view(ndtype) + assert_type( + rfn.find_duplicates(a), + np.ma.MaskedArray[tuple[int], np.dtype[np.void]], + ) assert_type( rfn.find_duplicates(a, ignoremask=True, return_index=True), tuple[ - np.ma.MaskedArray[Any, np.dtype[np.void]], - np.ndarray[Any, np.dtype[np.int_]], + np.ma.MaskedArray[tuple[int], np.dtype[np.void]], + np.ndarray[tuple[int], np.dtype[np.int_]], ], ) From 779929c42dd5dfa32f07f1311233abea9f026310 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Fri, 31 Oct 2025 17:18:40 +0100 Subject: [PATCH 0779/1018] TYP: shape-type-aware ``swapaxes`` (#30108) --- numpy/__init__.pyi | 8 +--- numpy/_core/fromnumeric.pyi | 16 ++----- numpy/ma/core.pyi | 10 +--- numpy/typing/tests/data/reveal/ma.pyi | 2 +- .../reveal/ndarray_shape_manipulation.pyi | 48 +++++++++++-------- 5 files changed, 38 insertions(+), 46 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 6f4163626c3c..fbd344ad3b43 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2229,16 +2229,12 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): axis: SupportsIndex | tuple[SupportsIndex, ...] | None = ..., ) -> ndarray[_AnyShape, _DTypeT_co]: ... - def swapaxes( - self, - axis1: SupportsIndex, - axis2: SupportsIndex, - ) -> ndarray[_AnyShape, _DTypeT_co]: ... + def swapaxes(self, axis1: SupportsIndex, axis2: SupportsIndex, /) -> Self: ... @overload def transpose(self, axes: _ShapeLike | None, /) -> Self: ... @overload - def transpose(self, *axes: SupportsIndex) -> Self: ... + def transpose(self, /, *axes: SupportsIndex) -> Self: ... 
@overload def all( diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index a2c4f47ecb2f..29431efef532 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -303,19 +303,13 @@ def put( mode: _ModeKind = "raise", ) -> None: ... -# keep in sync with `ma.core.swapaxes` +# keep in sync with `ndarray.swapaxes` and `ma.core.swapaxes` @overload -def swapaxes( - a: _ArrayLike[_ScalarT], - axis1: SupportsIndex, - axis2: SupportsIndex, -) -> NDArray[_ScalarT]: ... +def swapaxes(a: _ArrayT, axis1: SupportsIndex, axis2: SupportsIndex) -> _ArrayT: ... @overload -def swapaxes( - a: ArrayLike, - axis1: SupportsIndex, - axis2: SupportsIndex, -) -> NDArray[Any]: ... +def swapaxes(a: _ArrayLike[_ScalarT], axis1: SupportsIndex, axis2: SupportsIndex) -> NDArray[_ScalarT]: ... +@overload +def swapaxes(a: ArrayLike, axis1: SupportsIndex, axis2: SupportsIndex) -> NDArray[Any]: ... @overload def transpose( diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 1656db47fe0b..84e9b0660daf 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -2331,14 +2331,6 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): squeeze: Any - # keep in sync with `ndarray.swapaxes` - def swapaxes( - self, - axis1: SupportsIndex, - axis2: SupportsIndex, - /, - ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... - # def toflex(self) -> Incomplete: ... def torecords(self) -> Incomplete: ... @@ -2638,6 +2630,8 @@ def repeat(a: ArrayLike, repeats: _ArrayLikeInt_co, axis: SupportsIndex) -> _Mas # keep in sync with `_core.fromnumeric.swapaxes` @overload +def swapaxes(a: _MArrayT, axis1: SupportsIndex, axis2: SupportsIndex) -> _MArrayT: ... +@overload def swapaxes(a: _ArrayLike[_ScalarT], axis1: SupportsIndex, axis2: SupportsIndex) -> _MaskedArray[_ScalarT]: ... @overload def swapaxes(a: ArrayLike, axis1: SupportsIndex, axis2: SupportsIndex) -> _MaskedArray[Incomplete]: ... 
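For context on the ``swapaxes`` stubs above: swapping two axes never changes an
array's dimensionality, so annotating the return type as ``Self`` lets static
type checkers carry a concrete shape type through the call instead of widening
it to an arbitrary shape. A minimal sketch of the effect, assuming a checker
that understands the updated stubs (the variable name ``a2d`` and its
annotation are illustrative, not part of the patch):

    from typing import assert_type

    import numpy as np

    # an array whose static shape type is the concrete tuple[int, int]
    a2d: np.ndarray[tuple[int, int], np.dtype[np.int8]] = np.zeros(
        (2, 3), dtype=np.int8
    )

    # with `swapaxes` returning `Self`, the 2-D shape type survives the call
    assert_type(a2d.swapaxes(0, 1), np.ndarray[tuple[int, int], np.dtype[np.int8]])

    # at runtime the result is the usual axis-swapped view
    print(a2d.swapaxes(0, 1).shape)  # -> (3, 2)

The reveal tests below exercise exactly this: a ``tuple[int, int]``-shaped
input keeps its 2-D type and a ``tuple[int, int, int]``-shaped input keeps its
3-D type.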
diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index e47e690ff0e7..8eef32ddd593 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -374,7 +374,7 @@ assert_type(MAR_c16.imag, MaskedArray[np.float64]) assert_type(MAR_2d_f4.baseclass, type[NDArray[Any]]) assert_type(MAR_b.swapaxes(0, 1), MaskedArray[np.bool]) -assert_type(MAR_2d_f4.swapaxes(1, 0), MaskedArray[np.float32]) +assert_type(MAR_2d_f4.swapaxes(1, 0), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) assert_type(MAR_2d_f4[AR_i8], MaskedArray[np.float32]) assert_type(MAR_2d_f4[[1, 2, 3]], MaskedArray[np.float32]) diff --git a/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi b/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi index 4447bb13d2ad..0ce599a40310 100644 --- a/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi @@ -1,39 +1,47 @@ -from typing import assert_type +from typing import TypeAlias, assert_type import numpy as np import numpy.typing as npt -nd: npt.NDArray[np.int64] +_ArrayND: TypeAlias = npt.NDArray[np.int64] +_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[np.int8]] +_Array3D: TypeAlias = np.ndarray[tuple[int, int, int], np.dtype[np.bool]] + +_nd: _ArrayND +_2d: _Array2D +_3d: _Array3D # reshape -assert_type(nd.reshape(None), npt.NDArray[np.int64]) -assert_type(nd.reshape(4), np.ndarray[tuple[int], np.dtype[np.int64]]) -assert_type(nd.reshape((4,)), np.ndarray[tuple[int], np.dtype[np.int64]]) -assert_type(nd.reshape(2, 2), np.ndarray[tuple[int, int], np.dtype[np.int64]]) -assert_type(nd.reshape((2, 2)), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(_nd.reshape(None), npt.NDArray[np.int64]) +assert_type(_nd.reshape(4), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.reshape((4,)), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.reshape(2, 2), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(_nd.reshape((2, 2)), np.ndarray[tuple[int, int], np.dtype[np.int64]]) -assert_type(nd.reshape((2, 2), order="C"), np.ndarray[tuple[int, int], np.dtype[np.int64]]) -assert_type(nd.reshape(4, order="C"), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.reshape((2, 2), order="C"), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(_nd.reshape(4, order="C"), np.ndarray[tuple[int], np.dtype[np.int64]]) # resize does not return a value # transpose -assert_type(nd.transpose(), npt.NDArray[np.int64]) -assert_type(nd.transpose(1, 0), npt.NDArray[np.int64]) -assert_type(nd.transpose((1, 0)), npt.NDArray[np.int64]) +assert_type(_nd.transpose(), npt.NDArray[np.int64]) +assert_type(_nd.transpose(1, 0), npt.NDArray[np.int64]) +assert_type(_nd.transpose((1, 0)), npt.NDArray[np.int64]) # swapaxes -assert_type(nd.swapaxes(0, 1), npt.NDArray[np.int64]) +assert_type(_nd.swapaxes(0, 1), _ArrayND) +assert_type(_2d.swapaxes(0, 1), _Array2D) +assert_type(_3d.swapaxes(0, 1), _Array3D) # flatten -assert_type(nd.flatten(), np.ndarray[tuple[int], np.dtype[np.int64]]) -assert_type(nd.flatten("C"), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.flatten(), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.flatten("C"), np.ndarray[tuple[int], np.dtype[np.int64]]) # ravel -assert_type(nd.ravel(), np.ndarray[tuple[int], np.dtype[np.int64]]) -assert_type(nd.ravel("C"), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.ravel(), 
np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.ravel("C"), np.ndarray[tuple[int], np.dtype[np.int64]]) # squeeze -assert_type(nd.squeeze(), npt.NDArray[np.int64]) -assert_type(nd.squeeze(0), npt.NDArray[np.int64]) -assert_type(nd.squeeze((0, 2)), npt.NDArray[np.int64]) +assert_type(_nd.squeeze(), npt.NDArray[np.int64]) +assert_type(_nd.squeeze(0), npt.NDArray[np.int64]) +assert_type(_nd.squeeze((0, 2)), npt.NDArray[np.int64]) From d2c06ce177ae941c1b19c00767aaa1f99fad3255 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 31 Oct 2025 04:27:22 +0100 Subject: [PATCH 0780/1018] BUG: ``ndarray`` method signatures --- numpy/_core/_add_newdocs.py | 209 ++++++++++++++++++++++++++- numpy/_core/tests/test_multiarray.py | 30 ++++ 2 files changed, 232 insertions(+), 7 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 98bdec4e3c74..d2cc95c7f869 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -2982,6 +2982,7 @@ [5, 7]]]) """)) + ############################################################################## # # ndarray methods @@ -2991,6 +2992,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('__array__', """ + __array__($self, dtype=None, /, *, copy=None) + -- + a.__array__([dtype], *, copy=None) For ``dtype`` parameter it returns a new reference to self if @@ -3010,6 +3014,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_finalize__', """ + __array_finalize__($self, obj, /) + -- + a.__array_finalize__(obj, /) Present so subclasses can call super. Does nothing. @@ -3019,7 +3026,10 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_wrap__', """ - a.__array_wrap__(array[, context], /) + __array_wrap__($self, array, context=None, return_scalar=True, /) + -- + + a.__array_wrap__(array[, context[, return_scalar]], /) Returns a view of `array` with the same type as self. @@ -3028,6 +3038,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('__copy__', """ + __copy__($self, /) + -- + a.__copy__() Used if :func:`copy.copy` is called on an array. Returns a copy of the array. @@ -3039,6 +3052,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('__class_getitem__', """ + __class_getitem__($cls, item, /) + -- + a.__class_getitem__(item, /) Return a parametrized wrapper around the `~numpy.ndarray` type. @@ -3069,6 +3085,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('__deepcopy__', """ + __deepcopy__($self, memo, /) + -- + a.__deepcopy__(memo, /) Used if :func:`copy.deepcopy` is called on an array. @@ -3078,6 +3097,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('__reduce__', """ + __reduce__($self, /) + -- + a.__reduce__() For pickling. @@ -3087,6 +3109,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('__setstate__', """ + __setstate__($self, state, /) + -- + a.__setstate__(state, /) For unpickling. @@ -3109,6 +3134,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('all', """ + all($self, /, axis=None, out=None, keepdims=False, *, where=True) + -- + a.all(axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue) Returns True if all elements evaluate to True. @@ -3124,6 +3152,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('any', """ + any($self, /, axis=None, out=None, keepdims=False, *, where=True) + -- + a.any(axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue) Returns True if any of the elements of `a` evaluate to True. 
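The ``name($self, ...)`` line followed by ``--`` is CPython's ``__text_signature__`` convention: when a C-level callable's docstring begins with such a block, ``inspect`` parses it into a real ``Signature`` object and reports only the remainder as ``__doc__``. A quick sanity check, assuming the patched docstrings above are installed:

    >>> import inspect
    >>> import numpy as np
    >>> inspect.signature(np.ndarray.all)
    <Signature (self, /, axis=None, out=None, keepdims=False, *, where=True)>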
@@ -3139,6 +3170,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('argmax', """ + argmax($self, /, axis=None, out=None, *, keepdims=False) + -- + a.argmax(axis=None, out=None, *, keepdims=False) Return indices of the maximum values along the given axis. @@ -3154,6 +3188,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('argmin', """ + argmin($self, /, axis=None, out=None, *, keepdims=False) + -- + a.argmin(axis=None, out=None, *, keepdims=False) Return indices of the minimum values along the given axis. @@ -3169,7 +3206,10 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('argsort', """ - a.argsort(axis=-1, kind=None, order=None) + argsort($self, /, axis=-1, kind=None, order=None, *, stable=None) + -- + + a.argsort(axis=-1, kind=None, order=None, *, stable=None) Returns the indices that would sort this array. @@ -3184,6 +3224,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('argpartition', """ + argpartition($self, kth, /, axis=-1, kind='introselect', order=None) + -- + a.argpartition(kth, axis=-1, kind='introselect', order=None) Returns the indices that would partition this array. @@ -3199,6 +3242,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('astype', """ + astype($self, /, dtype, order='K', casting='unsafe', subok=True, copy=True) + -- + a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True) Copy of the array, cast to a specified type. @@ -3278,6 +3324,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('byteswap', """ + byteswap($self, /, inplace=False) + -- + a.byteswap(inplace=False) Swap the bytes of the array elements @@ -3333,6 +3382,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('choose', """ + choose($self, /, choices, out=None, mode='raise') + -- + a.choose(choices, out=None, mode='raise') Use an index array to construct a new array from a set of choices. @@ -3348,6 +3400,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('clip', """ + clip($self, /, min=None, max=None, out=None, **kwargs) + -- + a.clip(min=np._NoValue, max=np._NoValue, out=None, **kwargs) Return an array whose values are limited to ``[min, max]``. @@ -3364,6 +3419,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('compress', """ + compress($self, /, condition, axis=None, out=None) + -- + a.compress(condition, axis=None, out=None) Return selected slices of this array along given axis. @@ -3379,6 +3437,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('conj', """ + conj($self, /) + -- + a.conj() Complex-conjugate all elements. @@ -3394,6 +3455,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('conjugate', """ + conjugate($self, /) + -- + a.conjugate() Return the complex conjugate, element-wise. @@ -3409,6 +3473,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('copy', """ + copy($self, /, order='C') + -- + a.copy(order='C') Return a copy of the array. @@ -3482,6 +3549,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('cumprod', """ + cumprod($self, /, axis=None, dtype=None, out=None) + -- + a.cumprod(axis=None, dtype=None, out=None) Return the cumulative product of the elements along the given axis. @@ -3497,6 +3567,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('cumsum', """ + cumsum($self, /, axis=None, dtype=None, out=None) + -- + a.cumsum(axis=None, dtype=None, out=None) Return the cumulative sum of the elements along the given axis. 
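Like ``sort`` further below, ``argsort`` now advertises the ``stable`` keyword directly in its signature line. A small doctest-style illustration (values that compare equal keep their input order):

    >>> a = np.array([1, 0, 1, 0])
    >>> a.argsort(stable=True)
    array([1, 3, 0, 2])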
@@ -3512,6 +3585,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('diagonal', """ + diagonal($self, /, offset=0, axis1=0, axis2=1) + -- + a.diagonal(offset=0, axis1=0, axis2=1) Return specified diagonals. In NumPy 1.9 the returned array is a @@ -3527,11 +3603,23 @@ """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('dot')) +add_newdoc('numpy._core.multiarray', 'ndarray', ('dot', + """ + dot($self, other, /, out=None) + -- + + a.dot(other, /, out=None) + + Refer to :func:`numpy.dot` for full documentation. + + """)) add_newdoc('numpy._core.multiarray', 'ndarray', ('dump', """ + dump($self, /, file) + -- + a.dump(file) Dump a pickle of the array to the specified file. @@ -3547,6 +3635,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('dumps', """ + dumps($self, /) + -- + a.dumps() Returns the pickle of the array as a string. @@ -3561,6 +3652,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('fill', """ + fill($self, /, value) + -- + a.fill(value) Fill the array with a scalar value. @@ -3605,6 +3699,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('flatten', """ + flatten($self, /, order='C') + -- + a.flatten(order='C') Return a copy of the array collapsed into one dimension. @@ -3644,6 +3741,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('getfield', """ + getfield($self, /, dtype, offset=0) + -- + a.getfield(dtype, offset=0) Returns a field of the given array as a certain type. @@ -3687,6 +3787,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('item', """ + item($self, /, *args) + -- + a.item(*args) Copy an element of an array to a standard Python scalar and return it. @@ -3753,6 +3856,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('max', """ + max($self, /, axis=None, out=None, **kwargs) + -- + a.max(axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, where=np._NoValue) Return the maximum along a given axis. @@ -3768,6 +3874,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('mean', """ + mean($self, /, axis=None, dtype=None, out=None, **kwargs) + -- + a.mean(axis=None, dtype=None, out=None, keepdims=np._NoValue, *, where=np._NoValue) Returns the average of the array elements along given axis. @@ -3783,6 +3892,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('min', """ + min($self, /, axis=None, out=None, **kwargs) + -- + a.min(axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, where=np._NoValue) Return the minimum along a given axis. @@ -3798,6 +3910,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('nonzero', """ + nonzero($self, /) + -- + a.nonzero() Return the indices of the elements that are non-zero. @@ -3813,6 +3928,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('prod', """ + prod($self, /, axis=None, dtype=None, out=None, **kwargs) + -- + a.prod(axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._NoValue, where=np._NoValue) @@ -3829,6 +3947,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('put', """ + put($self, indices, values, /, mode='raise') + -- + a.put(indices, values, mode='raise') Set ``a.flat[n] = values[n]`` for all `n` in indices. @@ -3844,6 +3965,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('ravel', """ + ravel($self, /, order='C') + -- + a.ravel([order]) Return a flattened array. @@ -3861,6 +3985,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('repeat', """ + repeat($self, repeats, /, axis=None) + -- + a.repeat(repeats, axis=None) Repeat elements of an array. 
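Note that ``dot`` previously had a bare ``('dot')`` entry with no docstring at all; with the text above it gains both documentation and an introspectable signature. The method's behaviour itself is unchanged, e.g.:

    >>> a = np.eye(2, dtype=int)
    >>> a.dot(np.array([[4, 1], [2, 2]]))
    array([[4, 1],
           [2, 2]])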
@@ -3876,6 +4003,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('reshape', """ + reshape($self, /, *shape, order='C', copy=None) + -- + a.reshape(shape, /, *, order='C', copy=None) Returns an array containing the same data with a new shape. @@ -3898,6 +4028,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('resize', """ + resize($self, /, *new_shape, refcheck=True) + -- + a.resize(new_shape, refcheck=True) Change shape and size of array in-place. @@ -3992,6 +4125,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('round', """ + round($self, /, decimals=0, out=None) + -- + a.round(decimals=0, out=None) Return `a` with each element rounded to the given number of decimals. @@ -4007,6 +4143,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('searchsorted', """ + searchsorted($self, v, /, side='left', sorter=None) + -- + a.searchsorted(v, side='left', sorter=None) Find indices where elements of v should be inserted in a to maintain order. @@ -4022,6 +4161,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('setfield', """ + setfield($self, val, /, dtype, offset=0) + -- + a.setfield(val, dtype, offset=0) Put a value into a specified place in a field defined by a data-type. @@ -4074,6 +4216,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('setflags', """ + setflags($self, /, *, write=None, align=None, uic=None) + -- + a.setflags(write=None, align=None, uic=None) Set array flags WRITEABLE, ALIGNED, WRITEBACKIFCOPY, @@ -4151,7 +4296,10 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('sort', """ - a.sort(axis=-1, kind=None, order=None) + sort($self, /, axis=-1, kind=None, order=None, *, stable=None) + -- + + a.sort(axis=-1, kind=None, order=None, *, stable=None) Sort an array in-place. Refer to `numpy.sort` for full documentation. @@ -4171,6 +4319,13 @@ be specified as a string, and not all fields need be specified, but unspecified fields will still be used, in the order in which they come up in the dtype, to break ties. + stable : bool, optional + Sort stability. If ``True``, the returned array will maintain + the relative order of ``a`` values which compare as equal. + If ``False`` or ``None``, this is not guaranteed. Internally, + this option selects ``kind='stable'``. Default: ``None``. + + .. versionadded:: 2.0.0 See Also -------- @@ -4211,6 +4366,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('partition', """ + partition($self, kth, /, axis=-1, kind='introselect', order=None) + -- + a.partition(kth, axis=-1, kind='introselect', order=None) Partially sorts the elements in the array in such a way that the value of @@ -4270,6 +4428,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('squeeze', """ + squeeze($self, /, axis=None) + -- + a.squeeze(axis=None) Remove axes of length one from `a`. @@ -4285,6 +4446,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('std', """ + std($self, /, axis=None, dtype=None, out=None, ddof=0, **kwargs) + -- + a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, where=np._NoValue, mean=np._NoValue) Returns the standard deviation of the array elements along given axis. @@ -4300,6 +4464,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('sum', """ + sum($self, /, axis=None, dtype=None, out=None, **kwargs) + -- + a.sum(axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._NoValue, where=np._NoValue) Return the sum of the array elements over the given axis. 
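A short illustration of the ``stable`` option documented above for ``sort``; it is shorthand for ``kind='stable'`` and matters when records with equal keys should keep their original order (the array here is only an example):

    >>> a = np.array([(1, 'b'), (1, 'a'), (0, 'c')], dtype=[('key', 'i4'), ('tag', 'U1')])
    >>> a.sort(order='key', stable=True)
    >>> a['tag']
    array(['c', 'b', 'a'], dtype='<U1')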
@@ -4315,7 +4482,10 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('swapaxes', """ - a.swapaxes(axis1, axis2) + swapaxes($self, axis1, axis2, /) + -- + + a.swapaxes(axis1, axis2, /) Return a view of the array with `axis1` and `axis2` interchanged. @@ -4330,6 +4500,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('take', """ + take($self, indices, /, axis=None, out=None, mode='raise') + -- + a.take(indices, axis=None, out=None, mode='raise') Return an array formed from the elements of `a` at the given indices. @@ -4345,7 +4518,10 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('tofile', """ - a.tofile(fid, sep="", format="%s") + tofile($self, fid, /, sep='', format='%s') + -- + + a.tofile(fid, sep='', format='%s') Write array to a file as text or binary (default). @@ -4385,6 +4561,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('tolist', """ + tolist($self, /) + -- + a.tolist() Return the array as an ``a.ndim``-levels deep nested list of Python scalars. @@ -4448,7 +4627,11 @@ """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('tobytes', """ +add_newdoc('numpy._core.multiarray', 'ndarray', ('tobytes', + """ + tobytes($self, /, order='C') + -- + a.tobytes(order='C') Construct Python bytes containing the raw data bytes in the array. @@ -4491,6 +4674,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('trace', """ + trace($self, /, offset=0, axis1=0, axis2=1, dtype=None, out=None) + -- + a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None) Return the sum along diagonals of the array. @@ -4506,6 +4692,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('transpose', """ + transpose($self, /, *axes) + -- + a.transpose(*axes) Returns a view of the array with axes transposed. @@ -4563,6 +4752,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('var', """ + var($self, /, axis=None, dtype=None, out=None, ddof=0, **kwargs) + -- + a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, where=np._NoValue, mean=np._NoValue) Returns the variance of the array elements, along given axis. @@ -4578,6 +4770,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('view', """ + view($self, /, *args, **kwargs) + -- + a.view([dtype][, type]) New view of array with the same data. 
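For positional-only methods such as ``swapaxes`` above, the ``--`` block is what ends up in ``__text_signature__`` at runtime; a sketch of the expected behaviour once the docstrings are patched in:

    >>> np.ndarray.swapaxes.__text_signature__
    '($self, axis1, axis2, /)'
    >>> np.arange(6).reshape(2, 3).swapaxes(0, 1).shape
    (3, 2)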
diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index c62fb5b4e905..38e710179067 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -4,6 +4,7 @@ import functools import gc import importlib +import inspect import io import itertools import mmap @@ -10872,3 +10873,32 @@ def test_array_dunder_array_preserves_dtype_on_none(dtype): a = np.array([1], dtype=dtype) b = a.__array__(None) assert_array_equal(a, b, strict=True) + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +@pytest.mark.parametrize( + "methodname", + [ + "__array__", "__array_finalize__", "__array_wrap__", + "__copy__", "__deepcopy__", "__reduce__", "__setstate__", + "all", "any", "argmax", "argmin", "argsort", "argpartition", "astype", + "byteswap", "choose", "clip", "compress", "conj", "conjugate", "copy", + "cumprod", "cumsum", "diagonal", "dot", "dump", "dumps", "fill", "flatten", + "getfield", "item", "max", "mean", "min", "nonzero", "prod", "put", "ravel", + "repeat", "reshape", "resize", "round", "searchsorted", "setfield", "setflags", + "sort", "partition", "squeeze", "std", "sum", "swapaxes", "take", "tofile", + "tolist", "tobytes", "trace", "transpose", "var", "view", + ], +) +def test_array_method_signatures(methodname: str): + method = getattr(np.ndarray, methodname) + assert callable(method) + + try: + sig = inspect.signature(method) + except ValueError as e: + pytest.fail(f"Could not get signature for np.ndarray.{methodname}: {e}") + + assert "self" in sig.parameters + assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY From 3911b2fb5473ae02edf356ea68083ee502c91411 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 31 Oct 2025 04:52:05 +0100 Subject: [PATCH 0781/1018] TYP: update the ``ndarray`` method stubs to match their new signatures --- numpy/__init__.pyi | 244 +++++++++--------- .../tests/data/pass/ndarray_conversion.py | 6 - 2 files changed, 129 insertions(+), 121 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index fbd344ad3b43..c44b34cef794 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1696,18 +1696,23 @@ class _ArrayOrScalarCommon: def dump(self, file: StrOrBytesPath | SupportsWrite[bytes]) -> None: ... def dumps(self) -> bytes: ... def tobytes(self, order: _OrderKACF = ...) -> bytes: ... - def tofile(self, fid: StrOrBytesPath | _SupportsFileMethods, sep: str = ..., format: str = ...) -> None: ... + def tofile(self, fid: StrOrBytesPath | _SupportsFileMethods, /, sep: str = "", format: str = "%s") -> None: ... # generics and 0d arrays return builtin scalars def tolist(self) -> Any: ... def to_device(self, device: L["cpu"], /, *, stream: int | Any | None = ...) -> Self: ... # NOTE: for `generic`, these two methods don't do anything - def fill(self, value: Incomplete, /) -> None: ... - def put(self, /, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... + def fill(self, /, value: Incomplete) -> None: ... + def put(self, indices: _ArrayLikeInt_co, values: ArrayLike, /, mode: _ModeKind = "raise") -> None: ... # NOTE: even on `generic` this seems to work def setflags( - self, /, write: builtins.bool | None = None, align: builtins.bool | None = None, uic: builtins.bool | None = None + self, + /, + *, + write: builtins.bool | None = None, + align: builtins.bool | None = None, + uic: builtins.bool | None = None, ) -> None: ... 
@property @@ -1813,9 +1818,10 @@ class _ArrayOrScalarCommon: /, axis: _ShapeLike | None = None, out: None = None, - keepdims: builtins.bool = False, - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = True, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload def max( @@ -1823,9 +1829,10 @@ class _ArrayOrScalarCommon: /, axis: _ShapeLike | None, out: _ArrayT, - keepdims: builtins.bool = False, - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = True, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload def max( @@ -1834,9 +1841,9 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None = None, *, out: _ArrayT, - keepdims: builtins.bool = False, - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = True, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload @@ -1845,9 +1852,10 @@ class _ArrayOrScalarCommon: /, axis: _ShapeLike | None = None, out: None = None, - keepdims: builtins.bool = False, - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = True, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload def min( @@ -1855,9 +1863,10 @@ class _ArrayOrScalarCommon: /, axis: _ShapeLike | None, out: _ArrayT, - keepdims: builtins.bool = False, - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = True, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload def min( @@ -1866,9 +1875,9 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None = None, *, out: _ArrayT, - keepdims: builtins.bool = False, - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = True, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload @@ -1878,9 +1887,10 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, out: None = None, - keepdims: builtins.bool = False, - initial: _NumberLike_co = 0, - where: _ArrayLikeBool_co = True, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload def sum( @@ -1889,9 +1899,10 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, - keepdims: builtins.bool = False, - initial: _NumberLike_co = 0, - where: _ArrayLikeBool_co = True, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload def sum( @@ -1901,9 +1912,9 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, *, out: _ArrayT, - keepdims: builtins.bool = False, - initial: _NumberLike_co = 0, - where: _ArrayLikeBool_co = True, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... 
@overload @@ -1913,9 +1924,10 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, out: None = None, - keepdims: builtins.bool = False, - initial: _NumberLike_co = 1, - where: _ArrayLikeBool_co = True, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload def prod( @@ -1924,9 +1936,10 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, - keepdims: builtins.bool = False, - initial: _NumberLike_co = 1, - where: _ArrayLikeBool_co = True, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload def prod( @@ -1936,9 +1949,9 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, *, out: _ArrayT, - keepdims: builtins.bool = False, - initial: _NumberLike_co = 1, - where: _ArrayLikeBool_co = True, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload @@ -1947,9 +1960,9 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, out: None = None, - keepdims: builtins.bool = False, *, - where: _ArrayLikeBool_co = True, + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload def mean( @@ -1958,9 +1971,9 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, - keepdims: builtins.bool = False, *, - where: _ArrayLikeBool_co = True, + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload def mean( @@ -1970,8 +1983,8 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, *, out: _ArrayT, - keepdims: builtins.bool = False, - where: _ArrayLikeBool_co = True, + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload @@ -1981,11 +1994,11 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, out: None = None, ddof: float = 0, - keepdims: builtins.bool = False, *, - where: _ArrayLikeBool_co = True, - mean: _ArrayLikeNumber_co = ..., - correction: float = ..., + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> Any: ... @overload def std( @@ -1994,11 +2007,11 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0, - keepdims: builtins.bool = False, *, - where: _ArrayLikeBool_co = True, - mean: _ArrayLikeNumber_co = ..., - correction: float = ..., + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> _ArrayT: ... @overload def std( @@ -2008,10 +2021,10 @@ class _ArrayOrScalarCommon: *, out: _ArrayT, ddof: float = 0, - keepdims: builtins.bool = False, - where: _ArrayLikeBool_co = True, - mean: _ArrayLikeNumber_co = ..., - correction: float = ..., + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> _ArrayT: ... 
@overload @@ -2021,11 +2034,11 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, out: None = None, ddof: float = 0, - keepdims: builtins.bool = False, *, - where: _ArrayLikeBool_co = True, - mean: _ArrayLikeNumber_co = ..., - correction: float = ..., + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> Any: ... @overload def var( @@ -2034,11 +2047,11 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0, - keepdims: builtins.bool = False, *, - where: _ArrayLikeBool_co = True, - mean: _ArrayLikeNumber_co = ..., - correction: float = ..., + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> _ArrayT: ... @overload def var( @@ -2048,10 +2061,10 @@ class _ArrayOrScalarCommon: *, out: _ArrayT, ddof: float = 0, - keepdims: builtins.bool = False, - where: _ArrayLikeBool_co = True, - mean: _ArrayLikeNumber_co = ..., - correction: float = ..., + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> _ArrayT: ... class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @@ -2218,14 +2231,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def tolist(self, /) -> Any: ... @overload - def resize(self, new_shape: _ShapeLike, /, *, refcheck: builtins.bool = ...) -> None: ... + def resize(self, new_shape: _ShapeLike, /, *, refcheck: builtins.bool = True) -> None: ... @overload - def resize(self, /, *new_shape: SupportsIndex, refcheck: builtins.bool = ...) -> None: ... - - def setflags(self, write: builtins.bool = ..., align: builtins.bool = ..., uic: builtins.bool = ...) -> None: ... # type: ignore[override] + def resize(self, /, *new_shape: SupportsIndex, refcheck: builtins.bool = True) -> None: ... def squeeze( self, + /, axis: SupportsIndex | tuple[SupportsIndex, ...] | None = ..., ) -> ndarray[_AnyShape, _DTypeT_co]: ... 
@@ -2314,8 +2326,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def partition( self, - /, kth: _ArrayLikeInt, + /, axis: SupportsIndex = -1, kind: _PartitionKind = "introselect", order: None = None, @@ -2323,8 +2335,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def partition( self: NDArray[void], - /, kth: _ArrayLikeInt, + /, axis: SupportsIndex = -1, kind: _PartitionKind = "introselect", order: str | Sequence[str] | None = None, @@ -2334,8 +2346,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def argpartition( self, - /, kth: _ArrayLikeInt, + /, axis: SupportsIndex | None = -1, kind: _PartitionKind = "introselect", order: None = None, @@ -2343,8 +2355,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def argpartition( self: NDArray[void], - /, kth: _ArrayLikeInt, + /, axis: SupportsIndex | None = -1, kind: _PartitionKind = "introselect", order: str | Sequence[str] | None = None, @@ -2353,19 +2365,19 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # def diagonal( self, - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, ) -> ndarray[_AnyShape, _DTypeT_co]: ... # 1D + 1D returns a scalar; # all other with at least 1 non-0D array return an ndarray. @overload - def dot(self, b: _ScalarLike_co, out: None = None) -> NDArray[Any]: ... + def dot(self, b: _ScalarLike_co, /, out: None = None) -> NDArray[Any]: ... @overload - def dot(self, b: ArrayLike, out: None = None) -> Any: ... + def dot(self, b: ArrayLike, /, out: None = None) -> Any: ... @overload - def dot(self, b: ArrayLike, out: _ArrayT) -> _ArrayT: ... + def dot(self, b: ArrayLike, /, out: _ArrayT) -> _ArrayT: ... # `nonzero()` raises for 0d arrays/generics def nonzero(self) -> tuple[ndarray[tuple[int], np.dtype[intp]], ...]: ... @@ -2374,49 +2386,55 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def searchsorted( self, # >= 1D array v: _ScalarLike_co, # 0D array-like - side: _SortSide = ..., - sorter: _ArrayLikeInt_co | None = ..., + /, + side: _SortSide = "left", + sorter: _ArrayLikeInt_co | None = None, ) -> intp: ... @overload def searchsorted( self, # >= 1D array v: ArrayLike, - side: _SortSide = ..., - sorter: _ArrayLikeInt_co | None = ..., + /, + side: _SortSide = "left", + sorter: _ArrayLikeInt_co | None = None, ) -> NDArray[intp]: ... def sort( self, - axis: SupportsIndex = ..., - kind: _SortKind | None = ..., - order: str | Sequence[str] | None = ..., + /, + axis: SupportsIndex = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, *, - stable: builtins.bool | None = ..., + stable: builtins.bool | None = None, ) -> None: ... # Keep in sync with `MaskedArray.trace` @overload def trace( self, # >= 2D array - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., - dtype: DTypeLike | None = ..., + /, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, out: None = None, ) -> Any: ... 
@overload def trace( self, # >= 2D array - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., - dtype: DTypeLike | None = ..., + /, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, *, out: _ArrayT, ) -> _ArrayT: ... @overload def trace( self, # >= 2D array + /, offset: SupportsIndex, axis1: SupportsIndex, axis2: SupportsIndex, @@ -2428,6 +2446,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def take( self: NDArray[_ScalarT], indices: _IntLike_co, + /, axis: SupportsIndex | None = ..., out: None = None, mode: _ModeKind = ..., @@ -2436,6 +2455,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def take( self, indices: _ArrayLikeInt_co, + /, axis: SupportsIndex | None = ..., out: None = None, mode: _ModeKind = ..., @@ -2444,6 +2464,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def take( self, indices: _ArrayLikeInt_co, + /, axis: SupportsIndex | None = ..., *, out: _ArrayT, @@ -2453,23 +2474,16 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def take( self, indices: _ArrayLikeInt_co, + /, axis: SupportsIndex | None, out: _ArrayT, mode: _ModeKind = ..., ) -> _ArrayT: ... @overload - def repeat( - self, - repeats: _ArrayLikeInt_co, - axis: None = None, - ) -> ndarray[tuple[int], _DTypeT_co]: ... + def repeat(self, repeats: _ArrayLikeInt_co, /, axis: None = None) -> ndarray[tuple[int], _DTypeT_co]: ... @overload - def repeat( - self, - repeats: _ArrayLikeInt_co, - axis: SupportsIndex, - ) -> ndarray[_AnyShape, _DTypeT_co]: ... + def repeat(self, repeats: _ArrayLikeInt_co, /, axis: SupportsIndex) -> ndarray[_AnyShape, _DTypeT_co]: ... def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ... def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ... @@ -2592,11 +2606,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload # (dtype: ?, type: T) def view(self, /, dtype: DTypeLike, type: type[_ArrayT]) -> _ArrayT: ... - def setfield(self, /, val: ArrayLike, dtype: DTypeLike, offset: SupportsIndex = 0) -> None: ... + def setfield(self, val: ArrayLike, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> None: ... @overload - def getfield(self, dtype: _DTypeLike[_ScalarT], offset: SupportsIndex = 0) -> NDArray[_ScalarT]: ... + def getfield(self, /, dtype: _DTypeLike[_ScalarT], offset: SupportsIndex = 0) -> NDArray[_ScalarT]: ... @overload - def getfield(self, dtype: DTypeLike, offset: SupportsIndex = 0) -> NDArray[Any]: ... + def getfield(self, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> NDArray[Any]: ... def __index__(self: NDArray[integer], /) -> int: ... def __complex__(self: NDArray[number | np.bool | object_], /) -> complex: ... @@ -3659,7 +3673,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): out: Never = ..., ) -> Never: ... def diagonal(self: Never, /, offset: Never = ..., axis1: Never = ..., axis2: Never = ...) -> Never: ... # type: ignore[misc] - def swapaxes(self: Never, /, axis1: Never, axis2: Never) -> Never: ... # type: ignore[misc] + def swapaxes(self: Never, axis1: Never, axis2: Never, /) -> Never: ... # type: ignore[misc] def sort(self: Never, /, axis: Never = ..., kind: Never = ..., order: Never = ...) -> Never: ... # type: ignore[misc] def nonzero(self: Never, /) -> Never: ... # type: ignore[misc] def setfield(self: Never, /, val: Never, dtype: Never, offset: Never = ...) 
-> None: ... # type: ignore[misc] @@ -3755,7 +3769,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): mode: _ModeKind = ..., ) -> _ArrayT: ... - def repeat(self, repeats: _ArrayLikeInt_co, axis: SupportsIndex | None = None) -> ndarray[tuple[int], dtype[Self]]: ... + def repeat(self, repeats: _ArrayLikeInt_co, /, axis: SupportsIndex | None = None) -> ndarray[tuple[int], dtype[Self]]: ... def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... diff --git a/numpy/typing/tests/data/pass/ndarray_conversion.py b/numpy/typing/tests/data/pass/ndarray_conversion.py index 76da1dadd327..a4e0bcf34bdd 100644 --- a/numpy/typing/tests/data/pass/ndarray_conversion.py +++ b/numpy/typing/tests/data/pass/ndarray_conversion.py @@ -74,14 +74,8 @@ # setflags nd.setflags() - -nd.setflags(True) nd.setflags(write=True) - -nd.setflags(True, True) nd.setflags(write=True, align=True) - -nd.setflags(True, True, False) nd.setflags(write=True, align=True, uic=False) # fill is pretty simple From d4469b5e9db3f17a1ffbda1fba8c7947246219df Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 31 Oct 2025 20:26:01 +0100 Subject: [PATCH 0782/1018] BUG: scalar constructor signatures --- numpy/__init__.pyi | 8 +- numpy/_core/_add_newdocs_scalars.py | 455 +++++++++++------------ numpy/_core/tests/test_scalar_methods.py | 54 ++- 3 files changed, 276 insertions(+), 241 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index fbd344ad3b43..c83c9fe8ed6d 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -5648,18 +5648,18 @@ class character(flexible[_CharacterItemT_co], Generic[_CharacterItemT_co]): # t class bytes_(character[bytes], bytes): # type: ignore[misc] @overload - def __new__(cls, o: object = ..., /) -> Self: ... + def __new__(cls, value: object = b"", /) -> Self: ... @overload - def __new__(cls, s: str, /, encoding: str, errors: str = ...) -> Self: ... + def __new__(cls, value: str, /, encoding: str, errors: str = "strict") -> Self: ... # def __bytes__(self, /) -> bytes: ... class str_(character[str], str): # type: ignore[misc] @overload - def __new__(cls, value: object = ..., /) -> Self: ... + def __new__(cls, value: object = "", /) -> Self: ... @overload - def __new__(cls, value: bytes, /, encoding: str = ..., errors: str = ...) -> Self: ... + def __new__(cls, value: bytes, /, encoding: str, errors: str = "strict") -> Self: ... 
# See `numpy._typing._ufunc` for more concrete nin-/nout-specific stubs @final diff --git a/numpy/_core/_add_newdocs_scalars.py b/numpy/_core/_add_newdocs_scalars.py index 9d4cb48825e0..8c7911e182ce 100644 --- a/numpy/_core/_add_newdocs_scalars.py +++ b/numpy/_core/_add_newdocs_scalars.py @@ -48,7 +48,7 @@ def type_aliases_gen(): ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'), ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'), ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'), - ]) +]) def _get_platform_and_machine(): @@ -67,258 +67,232 @@ def _get_platform_and_machine(): _system, _machine = _get_platform_and_machine() _doc_alias_string = f":Alias on this platform ({_system} {_machine}):" +# docstring prefix that cpython uses to populate `__text_signature__` +_ARGUMENT_CLINIC_TEMPLATE = """{name}{signature} +-- -def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): +{docstring}""" + +def add_newdoc_for_scalar_type(name: str, text_signature: str, doc: str) -> None: # note: `:field: value` is rST syntax which renders as field lists. - o = getattr(_numerictypes, obj) - - character_code = dtype(o).char - canonical_name_doc = "" if obj == o.__name__ else \ - f":Canonical name: `numpy.{obj}`\n " - if fixed_aliases: - alias_doc = ''.join(f":Alias: `numpy.{alias}`\n " - for alias in fixed_aliases) - else: - alias_doc = '' - alias_doc += ''.join(f"{_doc_alias_string} `numpy.{alias}`: {doc}.\n " - for (alias_type, alias, doc) in possible_aliases if alias_type is o) - - docstring = f""" - {doc.strip()} - - :Character code: ``'{character_code}'`` - {canonical_name_doc}{alias_doc} - """ + cls = getattr(_numerictypes, name) + module = cls.__module__ - add_newdoc('numpy._core.numerictypes', obj, docstring) + lines_extra = [ + "", # blank line after main doc + f":Character code: ``{dtype(cls).char!r}``", + ] + if name != cls.__name__: + lines_extra.append(f":Canonical name: `{module}.{name}`") -_bool_docstring = ( - """ - Boolean type (True or False), stored as a byte. + lines_extra.extend( + f"{_doc_alias_string} `{module}.{alias}`: {doc}." + for alias_type, alias, doc in possible_aliases + if alias_type is cls + ) - .. warning:: + docstring = _ARGUMENT_CLINIC_TEMPLATE.format( + name=cls.__name__, # must match the class name + signature=text_signature, + docstring="\n".join([doc.strip(), *lines_extra]), + ) + add_newdoc('numpy._core.numerictypes', name, docstring) - The :class:`bool` type is not a subclass of the :class:`int_` type - (the :class:`bool` is not even a number type). This is different - than Python's default implementation of :class:`bool` as a - sub-class of :class:`int`. - """ -) -add_newdoc_for_scalar_type('bool', [], _bool_docstring) +for bool_name in ('bool', 'bool_'): + add_newdoc_for_scalar_type(bool_name, '(value=False, /)', """ +Boolean type (True or False), stored as a byte. -add_newdoc_for_scalar_type('bool_', [], _bool_docstring) +.. warning:: -add_newdoc_for_scalar_type('byte', [], - """ - Signed integer type, compatible with C ``char``. - """) + The :class:`bool` type is not a subclass of the :class:`int_` type + (the :class:`bool` is not even a number type). This is different + than Python's default implementation of :class:`bool` as a + sub-class of :class:`int`. +""") -add_newdoc_for_scalar_type('short', [], - """ - Signed integer type, compatible with C ``short``. 
- """) +add_newdoc_for_scalar_type('byte', '(value=0, /)', """ +Signed integer type, compatible with C ``char``. +""") -add_newdoc_for_scalar_type('intc', [], - """ - Signed integer type, compatible with C ``int``. - """) +add_newdoc_for_scalar_type('short', '(value=0, /)', """ +Signed integer type, compatible with C ``short``. +""") + +add_newdoc_for_scalar_type('intc', '(value=0, /)', """ +Signed integer type, compatible with C ``int``. +""") # TODO: These docs probably need an if to highlight the default rather than # the C-types (and be correct). -add_newdoc_for_scalar_type('int_', [], - """ - Default signed integer type, 64bit on 64bit systems and 32bit on 32bit - systems. - """) - -add_newdoc_for_scalar_type('longlong', [], - """ - Signed integer type, compatible with C ``long long``. - """) - -add_newdoc_for_scalar_type('ubyte', [], - """ - Unsigned integer type, compatible with C ``unsigned char``. - """) - -add_newdoc_for_scalar_type('ushort', [], - """ - Unsigned integer type, compatible with C ``unsigned short``. - """) - -add_newdoc_for_scalar_type('uintc', [], - """ - Unsigned integer type, compatible with C ``unsigned int``. - """) - -add_newdoc_for_scalar_type('uint', [], - """ - Unsigned signed integer type, 64bit on 64bit systems and 32bit on 32bit - systems. - """) - -add_newdoc_for_scalar_type('ulonglong', [], - """ - Signed integer type, compatible with C ``unsigned long long``. - """) - -add_newdoc_for_scalar_type('half', [], - """ - Half-precision floating-point number type. - """) - -add_newdoc_for_scalar_type('single', [], - """ - Single-precision floating-point number type, compatible with C ``float``. - """) - -add_newdoc_for_scalar_type('double', [], - """ - Double-precision floating-point number type, compatible with Python - :class:`float` and C ``double``. - """) - -add_newdoc_for_scalar_type('longdouble', [], - """ - Extended-precision floating-point number type, compatible with C - ``long double`` but not necessarily with IEEE 754 quadruple-precision. - """) - -add_newdoc_for_scalar_type('csingle', [], - """ - Complex number type composed of two single-precision floating-point - numbers. - """) - -add_newdoc_for_scalar_type('cdouble', [], - """ - Complex number type composed of two double-precision floating-point - numbers, compatible with Python :class:`complex`. - """) - -add_newdoc_for_scalar_type('clongdouble', [], - """ - Complex number type composed of two extended-precision floating-point - numbers. - """) - -add_newdoc_for_scalar_type('object_', [], - """ - Any Python object. - """) - -add_newdoc_for_scalar_type('str_', [], - r""" - A unicode string. - - This type strips trailing null codepoints. - - >>> s = np.str_("abc\x00") - >>> s - 'abc' - - Unlike the builtin :class:`str`, this supports the - :ref:`python:bufferobjects`, exposing its contents as UCS4: - - >>> m = memoryview(np.str_("abc")) - >>> m.format - '3w' - >>> m.tobytes() - b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00' - """) - -add_newdoc_for_scalar_type('bytes_', [], - r""" - A byte string. - - When used in arrays, this type strips trailing null bytes. - """) - -add_newdoc_for_scalar_type('void', [], - r""" - np.void(length_or_data, /, dtype=None) - - Create a new structured or unstructured void scalar. - - Parameters - ---------- - length_or_data : int, array-like, bytes-like, object - One of multiple meanings (see notes). The length or - bytes data of an unstructured void. Or alternatively, - the data to be stored in the new scalar when `dtype` - is provided. 
-        This can be an array-like, in which case an array may
-        be returned.
-    dtype : dtype, optional
-        If provided the dtype of the new scalar. This dtype must
-        be "void" dtype (i.e. a structured or unstructured void,
-        see also :ref:`defining-structured-types`).
-
-        .. versionadded:: 1.24
-
-    Notes
-    -----
-    For historical reasons and because void scalars can represent both
-    arbitrary byte data and structured dtypes, the void constructor
-    has three calling conventions:
-
-    1. ``np.void(5)`` creates a ``dtype="V5"`` scalar filled with five
-       ``\0`` bytes. The 5 can be a Python or NumPy integer.
-    2. ``np.void(b"bytes-like")`` creates a void scalar from the byte string.
-       The dtype itemsize will match the byte string length, here ``"V10"``.
-    3. When a ``dtype=`` is passed the call is roughly the same as an
-       array creation. However, a void scalar rather than array is returned.
-
-    Please see the examples which show all three different conventions.
-
-    Examples
-    --------
-    >>> np.void(5)
-    np.void(b'\x00\x00\x00\x00\x00')
-    >>> np.void(b'abcd')
-    np.void(b'\x61\x62\x63\x64')
-    >>> np.void((3.2, b'eggs'), dtype="d,S5")
-    np.void((3.2, b'eggs'), dtype=[('f0', '<f8'), ('f1', 'S5')])
-    >>> np.void(3, dtype=[('x', np.int8), ('y', np.int8)])
-    np.void((3, 3), dtype=[('x', 'i1'), ('y', 'i1')])
-
-    """)
-
-add_newdoc_for_scalar_type('datetime64', [],
-    """
-    If created from a 64-bit integer, it represents an offset from
-    ``1970-01-01T00:00:00``.
-    If created from string, the string can be in ISO 8601 date
-    or datetime format.
-
-    When parsing a string to create a datetime object, if the string contains
-    a trailing timezone (A 'Z' or a timezone offset), the timezone will be
-    dropped and a User Warning is given.
-
-    Datetime64 objects should be considered to be UTC and therefore have an
-    offset of +0000.
-
-    >>> np.datetime64(10, 'Y')
-    np.datetime64('1980')
-    >>> np.datetime64('1980', 'Y')
-    np.datetime64('1980')
-    >>> np.datetime64(10, 'D')
-    np.datetime64('1970-01-11')
-
-    See :ref:`arrays.datetime` for more information.
-    """)
-
-add_newdoc_for_scalar_type('timedelta64', [],
-    """
-    A timedelta stored as a 64-bit integer.
-
-    See :ref:`arrays.datetime` for more information.
-    """)
+add_newdoc_for_scalar_type('int_', '(value=0, /)', """
+Default signed integer type, 64bit on 64bit systems and 32bit on 32bit systems.
+""")
+
+add_newdoc_for_scalar_type('longlong', '(value=0, /)', """
+Signed integer type, compatible with C ``long long``.
+""")
+
+add_newdoc_for_scalar_type('ubyte', '(value=0, /)', """
+Unsigned integer type, compatible with C ``unsigned char``.
+""")
+
+add_newdoc_for_scalar_type('ushort', '(value=0, /)', """
+Unsigned integer type, compatible with C ``unsigned short``.
+""")
+
+add_newdoc_for_scalar_type('uintc', '(value=0, /)', """
+Unsigned integer type, compatible with C ``unsigned int``.
+""")
+
+add_newdoc_for_scalar_type('uint', '(value=0, /)', """
+Unsigned integer type, 64bit on 64bit systems and 32bit on 32bit systems.
+""")
+
+add_newdoc_for_scalar_type('ulonglong', '(value=0, /)', """
+Signed integer type, compatible with C ``unsigned long long``.
+""")
+
+add_newdoc_for_scalar_type('half', '(value=0, /)', """
+Half-precision floating-point number type.
+""")
+
+add_newdoc_for_scalar_type('single', '(value=0, /)', """
+Single-precision floating-point number type, compatible with C ``float``.
+""")
+
+add_newdoc_for_scalar_type('double', '(value=0, /)', """
+Double-precision floating-point number type, compatible with Python :class:`float` and C ``double``.
+""") + +add_newdoc_for_scalar_type('longdouble', '(value=0, /)', """ +Extended-precision floating-point number type, compatible with C ``long double`` +but not necessarily with IEEE 754 quadruple-precision. +""") + +add_newdoc_for_scalar_type('csingle', '(real=0, imag=0, /)', """ +Complex number type composed of two single-precision floating-point numbers. +""") + +add_newdoc_for_scalar_type('cdouble', '(real=0, imag=0, /)', """ +Complex number type composed of two double-precision floating-point numbers, +compatible with Python :class:`complex`. +""") + +add_newdoc_for_scalar_type('clongdouble', '(real=0, imag=0, /)', """ +Complex number type composed of two extended-precision floating-point numbers. +""") + +add_newdoc_for_scalar_type('object_', '(value=None, /)', """ +Any Python object. +""") + +add_newdoc_for_scalar_type('str_', '(value="", /, *args, **kwargs)', r""" +A unicode string. + +This type strips trailing null codepoints. + +>>> s = np.str_("abc\x00") +>>> s +'abc' + +Unlike the builtin :class:`str`, this supports the +:ref:`python:bufferobjects`, exposing its contents as UCS4: + +>>> m = memoryview(np.str_("abc")) +>>> m.format +'3w' +>>> m.tobytes() +b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00' +""") + +add_newdoc_for_scalar_type('bytes_', '(value="", /, *args, **kwargs)', r""" +A byte string. + +When used in arrays, this type strips trailing null bytes. +""") + +add_newdoc_for_scalar_type('void', '(length_or_data, /, dtype=None)', r""" +np.void(length_or_data, /, dtype=None) + +Create a new structured or unstructured void scalar. + +Parameters +---------- +length_or_data : int, array-like, bytes-like, object + One of multiple meanings (see notes). The length or + bytes data of an unstructured void. Or alternatively, + the data to be stored in the new scalar when `dtype` + is provided. + This can be an array-like, in which case an array may + be returned. +dtype : dtype, optional + If provided the dtype of the new scalar. This dtype must + be "void" dtype (i.e. a structured or unstructured void, + see also :ref:`defining-structured-types`). + + .. versionadded:: 1.24 + +Notes +----- +For historical reasons and because void scalars can represent both +arbitrary byte data and structured dtypes, the void constructor +has three calling conventions: + +1. ``np.void(5)`` creates a ``dtype="V5"`` scalar filled with five + ``\0`` bytes. The 5 can be a Python or NumPy integer. +2. ``np.void(b"bytes-like")`` creates a void scalar from the byte string. + The dtype itemsize will match the byte string length, here ``"V10"``. +3. When a ``dtype=`` is passed the call is roughly the same as an + array creation. However, a void scalar rather than array is returned. + +Please see the examples which show all three different conventions. + +Examples +-------- +>>> np.void(5) +np.void(b'\x00\x00\x00\x00\x00') +>>> np.void(b'abcd') +np.void(b'\x61\x62\x63\x64') +>>> np.void((3.2, b'eggs'), dtype="d,S5") +np.void((3.2, b'eggs'), dtype=[('f0', '>> np.void(3, dtype=[('x', np.int8), ('y', np.int8)]) +np.void((3, 3), dtype=[('x', 'i1'), ('y', 'i1')]) +""") + +add_newdoc_for_scalar_type('datetime64', '(value=None, /, *args)', """ +If created from a 64-bit integer, it represents an offset from ``1970-01-01T00:00:00``. +If created from string, the string can be in ISO 8601 date or datetime format. + +When parsing a string to create a datetime object, if the string contains +a trailing timezone (A 'Z' or a timezone offset), the timezone will be +dropped and a User Warning is given. 
+ +Datetime64 objects should be considered to be UTC and therefore have an +offset of +0000. + +>>> np.datetime64(10, 'Y') +np.datetime64('1980') +>>> np.datetime64('1980', 'Y') +np.datetime64('1980') +>>> np.datetime64(10, 'D') +np.datetime64('1970-01-11') + +See :ref:`arrays.datetime` for more information. +""") + +add_newdoc_for_scalar_type('timedelta64', '(value=0, /, *args)', """ +A timedelta stored as a 64-bit integer. + +See :ref:`arrays.datetime` for more information. +""") add_newdoc('numpy._core.numerictypes', "integer", ('is_integer', """ + is_integer($self, /) + -- + integer.is_integer() -> bool Return ``True`` if the number is finite with integral value. @@ -338,6 +312,9 @@ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): for float_name in ('half', 'single', 'double', 'longdouble'): add_newdoc('numpy._core.numerictypes', float_name, ('as_integer_ratio', f""" + as_integer_ratio($self, /) + -- + {float_name}.as_integer_ratio() -> (int, int) Return a pair of integers, whose ratio is exactly equal to the original @@ -354,6 +331,9 @@ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): add_newdoc('numpy._core.numerictypes', float_name, ('is_integer', f""" + is_integer($self, /) + -- + {float_name}.is_integer() -> bool Return ``True`` if the floating point number is finite with integral @@ -370,10 +350,13 @@ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): """)) for int_name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', - 'int64', 'uint64', 'int64', 'uint64', 'int64', 'uint64'): + 'int64', 'uint64', 'int64', 'uint64', 'int64', 'uint64'): # Add negative examples for signed cases by checking typecode add_newdoc('numpy._core.numerictypes', int_name, ('bit_count', f""" + bit_count($self, /) + -- + {int_name}.bit_count() -> int Computes the number of 1-bits in the absolute value of the input. 
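With the signature lines in place, the scalar types and their methods become introspectable as well; a doctest-style check, with results assuming the patched docstrings above:

    >>> import inspect
    >>> import numpy as np
    >>> inspect.signature(np.float64)
    <Signature (value=0, /)>
    >>> np.uint8(0b11001000).bit_count()
    3
    >>> np.float32(2.5).as_integer_ratio()
    (5, 2)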
diff --git a/numpy/_core/tests/test_scalar_methods.py b/numpy/_core/tests/test_scalar_methods.py index 6fd4846006d0..4ce921d67541 100644 --- a/numpy/_core/tests/test_scalar_methods.py +++ b/numpy/_core/tests/test_scalar_methods.py @@ -2,7 +2,9 @@ Test the scalar constructors, which also do type-coercion """ import fractions +import inspect import platform +import sys import types from typing import Any, Literal @@ -10,7 +12,7 @@ import numpy as np from numpy._core import sctypes -from numpy.testing import assert_equal, assert_raises +from numpy.testing import IS_PYPY, assert_equal, assert_raises class TestAsIntegerRatio: @@ -252,3 +254,53 @@ def test_array_wrap(scalar): arr1d = np.array([3], dtype=np.int8) assert scalar.__array_wrap__(arr1d) is arr1d assert scalar.__array_wrap__(arr1d, None, True) is arr1d + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +@pytest.mark.parametrize( + "sctype", + [ + *sctypes["int" ], + *sctypes["uint"], + *sctypes["float"], + *sctypes["complex"], + *sctypes["others"], + np.datetime64, + np.timedelta64, + ], +) +def test_constructor_signatures(sctype: type[np.generic]) -> None: + try: + sig = inspect.signature(sctype) + except ValueError: + pytest.fail(f"missing signature: {sctype}") + + assert sig.parameters + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +@pytest.mark.parametrize( + "sctype", + [np.integer, *sctypes["int" ], *sctypes["uint"], *sctypes["float"]], +) +def test_method_signatures_is_integer(sctype: type[np.integer | np.floating]) -> None: + try: + sig = inspect.signature(sctype.is_integer) + except ValueError: + pytest.fail(f"missing signature: {sctype.__name__}.is_integer") + + assert len(sig.parameters) == 1 + assert sig.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +@pytest.mark.parametrize("sctype", sctypes["float"]) +def test_method_signatures_as_integer_ratio(sctype: type[np.floating]) -> None: + try: + sig = inspect.signature(sctype.as_integer_ratio) + except ValueError: + pytest.fail(f"missing signature: {sctype.__name__}.as_integer_ratio") + + assert len(sig.parameters) == 1 + assert sig.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY From 40ead852e432aa9432acfbefce0e3b0c44ec3f10 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 31 Oct 2025 20:41:52 +0100 Subject: [PATCH 0783/1018] TYP: update `_core._add_newdocs.scalars` stubs --- numpy/_core/_add_newdocs_scalars.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/_add_newdocs_scalars.pyi b/numpy/_core/_add_newdocs_scalars.pyi index 4a06c9b07d74..241f4a00bd45 100644 --- a/numpy/_core/_add_newdocs_scalars.pyi +++ b/numpy/_core/_add_newdocs_scalars.pyi @@ -1,4 +1,3 @@ -from collections.abc import Iterable from typing import Final import numpy as np @@ -8,9 +7,10 @@ _system: Final[str] = ... _machine: Final[str] = ... _doc_alias_string: Final[str] = ... _bool_docstring: Final[str] = ... +bool_name: str = ... int_name: str = ... float_name: str = ... def numeric_type_aliases(aliases: list[tuple[str, str]]) -> list[tuple[type[np.number], str, str]]: ... -def add_newdoc_for_scalar_type(obj: str, fixed_aliases: Iterable[str], doc: str) -> None: ... 
+def add_newdoc_for_scalar_type(name: str, text_signature: str, doc: str) -> None: ... def _get_platform_and_machine() -> tuple[str, str]: ... From 1a1b67e058e960cf8ea33d3abb65fe53f88860db Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 31 Oct 2025 20:42:18 +0100 Subject: [PATCH 0784/1018] TYP: update sctype constructor stubs --- numpy/__init__.pyi | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index c83c9fe8ed6d..7cd682e79f3a 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -4264,13 +4264,13 @@ bool_ = bool @final class object_(_RealMixin, generic): @overload - def __new__(cls, nothing_to_see_here: None = None, /) -> None: ... # type: ignore[misc] + def __new__(cls, value: None = None, /) -> None: ... # type: ignore[misc] @overload - def __new__(cls, stringy: _AnyStr, /) -> _AnyStr: ... # type: ignore[misc] + def __new__(cls, value: _AnyStr, /) -> _AnyStr: ... # type: ignore[misc] @overload - def __new__(cls, array: ndarray[_ShapeT, Any], /) -> ndarray[_ShapeT, dtype[Self]]: ... # type: ignore[misc] + def __new__(cls, value: ndarray[_ShapeT, Any], /) -> ndarray[_ShapeT, dtype[Self]]: ... # type: ignore[misc] @overload - def __new__(cls, sequence: SupportsLenAndGetItem[object], /) -> NDArray[Self]: ... # type: ignore[misc] + def __new__(cls, value: SupportsLenAndGetItem[object], /) -> NDArray[Self]: ... # type: ignore[misc] @overload def __new__(cls, value: _T, /) -> _T: ... # type: ignore[misc] @overload # catch-all @@ -5628,9 +5628,9 @@ class flexible(_RealMixin, generic[_FlexibleItemT_co], Generic[_FlexibleItemT_co class void(flexible[bytes | tuple[Any, ...]]): # type: ignore[misc] @overload - def __new__(cls, value: _IntLike_co | bytes, /, dtype: None = None) -> Self: ... + def __new__(cls, length_or_data: _IntLike_co | bytes, /, dtype: None = None) -> Self: ... @overload - def __new__(cls, value: Any, /, dtype: _DTypeLikeVoid) -> Self: ... + def __new__(cls, length_or_data: object, /, dtype: _DTypeLikeVoid) -> Self: ... @overload def __getitem__(self, key: str | SupportsIndex, /) -> Any: ... 
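For reference, the ``length_or_data`` rename in the ``void`` overloads above
mirrors the two call shapes the constructor accepts at runtime; a rough
illustration, outside this patch and assuming NumPy >= 1.24 for the ``dtype``
argument::

    import numpy as np

    # An integer argument is a length: five zeroed bytes.
    assert np.void(5).tobytes() == b"\x00" * 5

    # A bytes argument is data, stored as-is.
    assert np.void(b"\x00\x01").tobytes() == b"\x00\x01"

    # With an explicit dtype, the value is packed as a structured scalar.
    v = np.void((1, 2.0), dtype=[("x", "i4"), ("y", "f8")])
    assert (v["x"], v["y"]) == (1, 2.0)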
From e9b9047e4a8d765e84b962783011121b8c996b0d Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 31 Oct 2025 20:43:01 +0100 Subject: [PATCH 0785/1018] STY: appease `ruff` --- numpy/_core/tests/test_scalar_methods.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/tests/test_scalar_methods.py b/numpy/_core/tests/test_scalar_methods.py index 4ce921d67541..7f1d7d6d7bdb 100644 --- a/numpy/_core/tests/test_scalar_methods.py +++ b/numpy/_core/tests/test_scalar_methods.py @@ -261,7 +261,7 @@ def test_array_wrap(scalar): @pytest.mark.parametrize( "sctype", [ - *sctypes["int" ], + *sctypes["int"], *sctypes["uint"], *sctypes["float"], *sctypes["complex"], @@ -282,7 +282,7 @@ def test_constructor_signatures(sctype: type[np.generic]) -> None: @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") @pytest.mark.parametrize( "sctype", - [np.integer, *sctypes["int" ], *sctypes["uint"], *sctypes["float"]], + [np.integer, *sctypes["int"], *sctypes["uint"], *sctypes["float"]], ) def test_method_signatures_is_integer(sctype: type[np.integer | np.floating]) -> None: try: From 6ba89bd87b77b4ddae4b93cd8c46036ce9857d36 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 31 Oct 2025 21:39:10 +0100 Subject: [PATCH 0786/1018] BUG: `long` and `ulong` constructor signatures --- numpy/_core/_add_newdocs_scalars.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/numpy/_core/_add_newdocs_scalars.py b/numpy/_core/_add_newdocs_scalars.py index 8c7911e182ce..7305f232510c 100644 --- a/numpy/_core/_add_newdocs_scalars.py +++ b/numpy/_core/_add_newdocs_scalars.py @@ -124,6 +124,10 @@ def add_newdoc_for_scalar_type(name: str, text_signature: str, doc: str) -> None Signed integer type, compatible with C ``int``. """) +add_newdoc_for_scalar_type('long', '(value=0, /)', """ +Signed integer type, compatible with C ``long``. +""") + # TODO: These docs probably need an if to highlight the default rather than # the C-types (and be correct). add_newdoc_for_scalar_type('int_', '(value=0, /)', """ @@ -150,8 +154,12 @@ def add_newdoc_for_scalar_type(name: str, text_signature: str, doc: str) -> None Unsigned signed integer type, 64bit on 64bit systems and 32bit on 32bit systems. """) +add_newdoc_for_scalar_type('ulong', '(value=0, /)', """ +Unsigned integer type, compatible with C ``unsigned long``. +""") + add_newdoc_for_scalar_type('ulonglong', '(value=0, /)', """ -Signed integer type, compatible with C ``unsigned long long``. +Unsigned integer type, compatible with C ``unsigned long long``. """) add_newdoc_for_scalar_type('half', '(value=0, /)', """ From 2b2dac41564b31d826f55e9b00b718c8a1af583b Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 31 Oct 2025 22:10:58 +0100 Subject: [PATCH 0787/1018] BUG: ``flatiter`` method signatures --- numpy/_core/_add_newdocs.py | 18 ++++++++++++------ numpy/_core/tests/test_indexing.py | 18 ++++++++++++++++++ 2 files changed, 30 insertions(+), 6 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 98bdec4e3c74..12553ea0215e 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -80,7 +80,6 @@ """)) - add_newdoc('numpy._core', 'flatiter', ('coords', """ An N-dimensional tuple of current coordinates. @@ -99,7 +98,6 @@ """)) - add_newdoc('numpy._core', 'flatiter', ('index', """ Current flat index into the array. 
@@ -118,17 +116,25 @@ """)) -# flatiter functions +# flatiter methods add_newdoc('numpy._core', 'flatiter', ('__array__', - """__array__(type=None) Get array from iterator + """ + __array__($self, dtype=None, /, *, copy=None) + -- - """)) + flat.__array__([dtype], *, copy=None) + Get array from iterator + + """)) add_newdoc('numpy._core', 'flatiter', ('copy', """ - copy() + copy($self, /) + -- + + flat.copy() Get a copy of the iterator as a 1-D array. diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index b4bb53fa71e1..65d42d6c9370 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -1,4 +1,5 @@ import functools +import inspect import operator import sys import warnings @@ -11,6 +12,7 @@ from numpy.exceptions import ComplexWarning, VisibleDeprecationWarning from numpy.testing import ( HAS_REFCOUNT, + IS_PYPY, assert_, assert_array_equal, assert_equal, @@ -1672,3 +1674,19 @@ def test_nonempty_string_flat_index_on_flatiter(self): match=r"only integers, slices \(`:`\), ellipsis \(`\.\.\.`\) " r"and integer or boolean arrays are valid indices"): a.flat[b.flat] + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +@pytest.mark.parametrize("methodname", ["__array__", "copy"]) +def test_flatiter_method_signatures(methodname: str): + method = getattr(np.flatiter, methodname) + assert callable(method) + + try: + sig = inspect.signature(method) + except ValueError as e: + pytest.fail(f"Could not get signature for np.flatiter.{methodname}: {e}") + + assert "self" in sig.parameters + assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY From 247cf63f07a3a0a5d057850e94c6c427baf54f11 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 1 Nov 2025 03:12:47 +0100 Subject: [PATCH 0788/1018] TYP: fix and improve the ``flatiter`` stubs --- numpy/__init__.pyi | 84 +++++++------ numpy/typing/tests/data/fail/flatiter.pyi | 40 ++++-- numpy/typing/tests/data/pass/flatiter.py | 10 +- numpy/typing/tests/data/reveal/flatiter.pyi | 129 +++++++++++++------- 4 files changed, 171 insertions(+), 92 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index fbd344ad3b43..d945a3dc197c 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -782,7 +782,6 @@ _NumericArrayT = TypeVar("_NumericArrayT", bound=NDArray[number | timedelta64 | _ShapeT = TypeVar("_ShapeT", bound=_Shape) _Shape1T = TypeVar("_Shape1T", bound=tuple[int, *tuple[int, ...]]) _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) -_1DShapeT = TypeVar("_1DShapeT", bound=_1D) _2DShapeT_co = TypeVar("_2DShapeT_co", bound=_2D, default=_2D, covariant=True) _1NShapeT = TypeVar("_1NShapeT", bound=tuple[L[1], *tuple[L[1], ...]]) # (1,) | (1, 1) | (1, 1, 1) | ... @@ -1621,44 +1620,61 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @final class flatiter(Generic[_ArrayT_co]): - __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] + __hash__: ClassVar[None] = None # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] + @property - def base(self) -> _ArrayT_co: ... + def base(self, /) -> _ArrayT_co: ... @property - def coords(self) -> _Shape: ... + def coords(self: flatiter[ndarray[_ShapeT]], /) -> _ShapeT: ... @property - def index(self) -> int: ... - def copy(self) -> _ArrayT_co: ... - def __iter__(self) -> Self: ... 
- def __next__(self: flatiter[NDArray[_ScalarT]]) -> _ScalarT: ... - def __len__(self) -> int: ... - @overload + def index(self, /) -> int: ... + + # iteration + def __len__(self, /) -> int: ... + def __iter__(self, /) -> Self: ... + def __next__(self: flatiter[NDArray[_ScalarT]], /) -> _ScalarT: ... + + # indexing + @overload # nd: _[()] + def __getitem__(self, key: tuple[()], /) -> _ArrayT_co: ... + @overload # 0d; _[] + def __getitem__(self: flatiter[NDArray[_ScalarT]], key: int | integer, /) -> _ScalarT: ... + @overload # 1d; _[[*]], _[:], _[...] def __getitem__( - self: flatiter[NDArray[_ScalarT]], - key: int | integer | tuple[int | integer], - ) -> _ScalarT: ... - @overload + self: flatiter[ndarray[Any, _DTypeT]], + key: list[int] | slice | EllipsisType | flatiter[NDArray[integer]], + /, + ) -> ndarray[tuple[int], _DTypeT]: ... + @overload # 2d; _[[*[*]]] def __getitem__( - self, - key: _ArrayLikeInt | slice | EllipsisType | tuple[_ArrayLikeInt | slice | EllipsisType], - ) -> _ArrayT_co: ... - # TODO: `__setitem__` operates via `unsafe` casting rules, and can - # thus accept any type accepted by the relevant underlying `np.generic` - # constructor. - # This means that `value` must in reality be a supertype of `npt.ArrayLike`. - def __setitem__( - self, - key: _ArrayLikeInt | slice | EllipsisType | tuple[_ArrayLikeInt | slice | EllipsisType], - value: Any, - ) -> None: ... - @overload - def __array__(self: flatiter[ndarray[_1DShapeT, _DTypeT]], dtype: None = None, /) -> ndarray[_1DShapeT, _DTypeT]: ... - @overload - def __array__(self: flatiter[ndarray[_1DShapeT, Any]], dtype: _DTypeT, /) -> ndarray[_1DShapeT, _DTypeT]: ... - @overload - def __array__(self: flatiter[ndarray[Any, _DTypeT]], dtype: None = None, /) -> ndarray[_AnyShape, _DTypeT]: ... - @overload - def __array__(self, dtype: _DTypeT, /) -> ndarray[_AnyShape, _DTypeT]: ... + self: flatiter[ndarray[Any, _DTypeT]], + key: list[list[int]], + /, + ) -> ndarray[tuple[int, int], _DTypeT]: ... + @overload # ?d + def __getitem__( + self: flatiter[ndarray[Any, _DTypeT]], + key: NDArray[integer] | _NestedSequence[int], + /, + ) -> ndarray[_AnyShape, _DTypeT]: ... + + # NOTE: `__setitem__` operates via `unsafe` casting rules, and can thus accept any + # type accepted by the relevant underlying `np.generic` constructor, which isn't + # known statically. So we cannot meaningfully annotate the value parameter. + def __setitem__(self, key: slice | EllipsisType | _ArrayLikeInt, val: object, /) -> None: ... + + # NOTE: `dtype` and `copy` are no-ops at runtime, so we don't support them here to + # avoid confusion + def __array__( + self: flatiter[ndarray[Any, _DTypeT]], + dtype: None = None, + /, + *, + copy: None = None, + ) -> ndarray[tuple[int], _DTypeT]: ... + + # This returns a flat copy of the underlying array, not of the iterator itself + def copy(self: flatiter[ndarray[Any, _DTypeT]], /) -> ndarray[tuple[int], _DTypeT]: ... @type_check_only class _ArrayOrScalarCommon: diff --git a/numpy/typing/tests/data/fail/flatiter.pyi b/numpy/typing/tests/data/fail/flatiter.pyi index be63a082535d..2c6e912bd318 100644 --- a/numpy/typing/tests/data/fail/flatiter.pyi +++ b/numpy/typing/tests/data/fail/flatiter.pyi @@ -1,22 +1,38 @@ +from typing import Any + import numpy as np -import numpy._typing as npt +import numpy.typing as npt -class Index: +class _Index: def __index__(self) -> int: ... 
-a: np.flatiter[npt.NDArray[np.float64]] -supports_array: npt._SupportsArray[np.dtype[np.float64]] +class _MyArray: + def __array__(self) -> np.ndarray[tuple[int], np.dtypes.Float64DType]: ... + +_index: _Index +_my_array: _MyArray +_something: Any +_dtype: np.dtype[np.int8] + +_a_nd: np.flatiter[npt.NDArray[np.float64]] + +### + +_a_nd.base = _something # type: ignore[misc] +_a_nd.coords = _something # type: ignore[misc] +_a_nd.index = _something # type: ignore[misc] -a.base = object() # type: ignore[assignment, misc] -a.coords = object() # type: ignore[assignment, misc] -a.index = object() # type: ignore[assignment, misc] -a.copy(order="C") # type: ignore[call-arg] +_a_nd.copy("C") # type: ignore[call-arg] +_a_nd.copy(order="C") # type: ignore[call-arg] # NOTE: Contrary to `ndarray.__getitem__` its counterpart in `flatiter` # does not accept objects with the `__array__` or `__index__` protocols; # boolean indexing is just plain broken (gh-17175) -a[np.bool()] # type: ignore[index] -a[Index()] # type: ignore[call-overload] -a[supports_array] # type: ignore[index] +_a_nd[np.True_] # type: ignore[call-overload] +_a_nd[_index] # type: ignore[call-overload] +_a_nd[_my_array] # type: ignore[call-overload] -a[[0, 1, 2]] +# `dtype` and `copy` are no-ops in `flatiter.__array__` +_a_nd.__array__(_dtype) # type: ignore[arg-type] +_a_nd.__array__(dtype=_dtype) # type: ignore[call-arg] +_a_nd.__array__(copy=True) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/pass/flatiter.py b/numpy/typing/tests/data/pass/flatiter.py index cc7c6069a89a..70de3a67917d 100644 --- a/numpy/typing/tests/data/pass/flatiter.py +++ b/numpy/typing/tests/data/pass/flatiter.py @@ -12,7 +12,15 @@ a[...] a[:] a.__array__() -a.__array__(np.dtype(np.float64)) b = np.array([1]).flat a[b] + +a[0] = "1" +a[:] = "2" +a[...] 
= "3" +a[[]] = "4" +a[[0]] = "5" +a[[[0]]] = "6" +a[[[[[0]]]]] = "7" +a[b] = "8" diff --git a/numpy/typing/tests/data/reveal/flatiter.pyi b/numpy/typing/tests/data/reveal/flatiter.pyi index e188d30fe79f..98d61a6d3428 100644 --- a/numpy/typing/tests/data/reveal/flatiter.pyi +++ b/numpy/typing/tests/data/reveal/flatiter.pyi @@ -1,47 +1,86 @@ -from typing import Literal, TypeAlias, assert_type +from typing import Any, TypeAlias, assert_type import numpy as np -import numpy.typing as npt - -a: np.flatiter[npt.NDArray[np.str_]] -a_1d: np.flatiter[np.ndarray[tuple[int], np.dtype[np.bytes_]]] - -Size: TypeAlias = Literal[42] -a_1d_fixed: np.flatiter[np.ndarray[tuple[Size], np.dtype[np.object_]]] - -assert_type(a.base, npt.NDArray[np.str_]) -assert_type(a.copy(), npt.NDArray[np.str_]) -assert_type(a.coords, tuple[int, ...]) -assert_type(a.index, int) -assert_type(iter(a), np.flatiter[npt.NDArray[np.str_]]) -assert_type(next(a), np.str_) -assert_type(a[0], np.str_) -assert_type(a[[0, 1, 2]], npt.NDArray[np.str_]) -assert_type(a[...], npt.NDArray[np.str_]) -assert_type(a[:], npt.NDArray[np.str_]) -assert_type(a[(...,)], npt.NDArray[np.str_]) -assert_type(a[(0,)], np.str_) - -assert_type(a.__array__(), npt.NDArray[np.str_]) -assert_type(a.__array__(np.dtype(np.float64)), npt.NDArray[np.float64]) -assert_type( - a_1d.__array__(), - np.ndarray[tuple[int], np.dtype[np.bytes_]], -) -assert_type( - a_1d.__array__(np.dtype(np.float64)), - np.ndarray[tuple[int], np.dtype[np.float64]], -) -assert_type( - a_1d_fixed.__array__(), - np.ndarray[tuple[Size], np.dtype[np.object_]], -) -assert_type( - a_1d_fixed.__array__(np.dtype(np.float64)), - np.ndarray[tuple[Size], np.dtype[np.float64]], -) - -a[0] = "a" -a[:5] = "a" -a[...] = "a" -a[(...,)] = "a" + +_ArrayND: TypeAlias = np.ndarray[tuple[Any, ...], np.dtypes.StrDType] +_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtypes.BytesDType] +_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtypes.Int8DType] + +_a_nd: np.flatiter[_ArrayND] +_a_1d: np.flatiter[_Array1D] +_a_2d: np.flatiter[_Array2D] + +### + +# .base +assert_type(_a_nd.base, _ArrayND) +assert_type(_a_1d.base, _Array1D) +assert_type(_a_2d.base, _Array2D) + +# .coords +assert_type(_a_nd.coords, tuple[Any, ...]) +assert_type(_a_1d.coords, tuple[int]) +assert_type(_a_2d.coords, tuple[int, int]) + +# .index +assert_type(_a_nd.index, int) +assert_type(_a_1d.index, int) +assert_type(_a_2d.index, int) + +# .__len__() +assert_type(len(_a_nd), int) +assert_type(len(_a_1d), int) +assert_type(len(_a_2d), int) + +# .__iter__() +assert_type(iter(_a_nd), np.flatiter[_ArrayND]) +assert_type(iter(_a_1d), np.flatiter[_Array1D]) +assert_type(iter(_a_2d), np.flatiter[_Array2D]) + +# .__next__() +assert_type(next(_a_nd), np.str_) +assert_type(next(_a_1d), np.bytes_) +assert_type(next(_a_2d), np.int8) + +# .__getitem__(()) +assert_type(_a_nd[()], _ArrayND) +assert_type(_a_1d[()], _Array1D) +assert_type(_a_2d[()], _Array2D) +# .__getitem__(int) +assert_type(_a_nd[0], np.str_) +assert_type(_a_1d[0], np.bytes_) +assert_type(_a_2d[0], np.int8) +# .__getitem__(slice) +assert_type(_a_nd[::], np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d[::], np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d[::], np.ndarray[tuple[int], np.dtypes.Int8DType]) +# .__getitem__(EllipsisType) +assert_type(_a_nd[...], np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d[...], np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d[...], np.ndarray[tuple[int], np.dtypes.Int8DType]) +# 
.__getitem__(list[!]) +assert_type(_a_nd[[]], np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d[[]], np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d[[]], np.ndarray[tuple[int], np.dtypes.Int8DType]) +# .__getitem__(list[int]) +assert_type(_a_nd[[0]], np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d[[0]], np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d[[0]], np.ndarray[tuple[int], np.dtypes.Int8DType]) +# .__getitem__(list[list[int]]) +assert_type(_a_nd[[[0]]], np.ndarray[tuple[int, int], np.dtypes.StrDType]) +assert_type(_a_1d[[[0]]], np.ndarray[tuple[int, int], np.dtypes.BytesDType]) +assert_type(_a_2d[[[0]]], np.ndarray[tuple[int, int], np.dtypes.Int8DType]) +# .__getitem__(list[list[list[list[int]]]]) +assert_type(_a_nd[[[[[0]]]]], np.ndarray[tuple[Any, ...], np.dtypes.StrDType]) +assert_type(_a_1d[[[[[0]]]]], np.ndarray[tuple[Any, ...], np.dtypes.BytesDType]) +assert_type(_a_2d[[[[[0]]]]], np.ndarray[tuple[Any, ...], np.dtypes.Int8DType]) + +# __array__() +assert_type(_a_nd.__array__(), np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d.__array__(), np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d.__array__(), np.ndarray[tuple[int], np.dtypes.Int8DType]) + +# .copy() +assert_type(_a_nd.copy(), np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d.copy(), np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d.copy(), np.ndarray[tuple[int], np.dtypes.Int8DType]) From 57794dfb6efc2a0c8498e4bc91cbedfb08c2a78b Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 1 Nov 2025 05:06:57 +0100 Subject: [PATCH 0789/1018] BUG: ``nditer`` runtime signatures --- numpy/_core/_add_newdocs.py | 200 ++++++++++++++++++------------- numpy/_core/tests/test_nditer.py | 26 ++++ 2 files changed, 144 insertions(+), 82 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 98bdec4e3c74..fea383390881 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -154,6 +154,19 @@ add_newdoc('numpy._core', 'nditer', """ + nditer( + op, + flags=None, + op_flags=None, + op_dtypes=None, + order='K', + casting='safe', + op_axes=None, + itershape=None, + buffersize=0, + ) + -- + nditer(op, flags=None, op_flags=None, op_dtypes=None, order='K', casting='safe', op_axes=None, itershape=None, buffersize=0) @@ -165,63 +178,62 @@ ---------- op : ndarray or sequence of array_like The array(s) to iterate over. - flags : sequence of str, optional - Flags to control the behavior of the iterator. - - * ``buffered`` enables buffering when required. - * ``c_index`` causes a C-order index to be tracked. - * ``f_index`` causes a Fortran-order index to be tracked. - * ``multi_index`` causes a multi-index, or a tuple of indices - with one per iteration dimension, to be tracked. - * ``common_dtype`` causes all the operands to be converted to - a common data type, with copying or buffering as necessary. - * ``copy_if_overlap`` causes the iterator to determine if read - operands have overlap with write operands, and make temporary - copies as necessary to avoid overlap. False positives (needless - copying) are possible in some cases. - * ``delay_bufalloc`` delays allocation of the buffers until - a reset() call is made. Allows ``allocate`` operands to - be initialized before their values are copied into the buffers. - * ``external_loop`` causes the ``values`` given to be - one-dimensional arrays with multiple values instead of - zero-dimensional arrays. 
- * ``grow_inner`` allows the ``value`` array sizes to be made - larger than the buffer size when both ``buffered`` and - ``external_loop`` is used. - * ``ranged`` allows the iterator to be restricted to a sub-range - of the iterindex values. - * ``refs_ok`` enables iteration of reference types, such as - object arrays. - * ``reduce_ok`` enables iteration of ``readwrite`` operands - which are broadcasted, also known as reduction operands. - * ``zerosize_ok`` allows `itersize` to be zero. + Flags to control the behavior of the iterator. + + * ``buffered`` enables buffering when required. + * ``c_index`` causes a C-order index to be tracked. + * ``f_index`` causes a Fortran-order index to be tracked. + * ``multi_index`` causes a multi-index, or a tuple of indices + with one per iteration dimension, to be tracked. + * ``common_dtype`` causes all the operands to be converted to + a common data type, with copying or buffering as necessary. + * ``copy_if_overlap`` causes the iterator to determine if read + operands have overlap with write operands, and make temporary + copies as necessary to avoid overlap. False positives (needless + copying) are possible in some cases. + * ``delay_bufalloc`` delays allocation of the buffers until + a reset() call is made. Allows ``allocate`` operands to + be initialized before their values are copied into the buffers. + * ``external_loop`` causes the ``values`` given to be + one-dimensional arrays with multiple values instead of + zero-dimensional arrays. + * ``grow_inner`` allows the ``value`` array sizes to be made + larger than the buffer size when both ``buffered`` and + ``external_loop`` is used. + * ``ranged`` allows the iterator to be restricted to a sub-range + of the iterindex values. + * ``refs_ok`` enables iteration of reference types, such as + object arrays. + * ``reduce_ok`` enables iteration of ``readwrite`` operands + which are broadcasted, also known as reduction operands. + * ``zerosize_ok`` allows `itersize` to be zero. op_flags : list of list of str, optional - This is a list of flags for each operand. At minimum, one of - ``readonly``, ``readwrite``, or ``writeonly`` must be specified. - - * ``readonly`` indicates the operand will only be read from. - * ``readwrite`` indicates the operand will be read from and written to. - * ``writeonly`` indicates the operand will only be written to. - * ``no_broadcast`` prevents the operand from being broadcasted. - * ``contig`` forces the operand data to be contiguous. - * ``aligned`` forces the operand data to be aligned. - * ``nbo`` forces the operand data to be in native byte order. - * ``copy`` allows a temporary read-only copy if required. - * ``updateifcopy`` allows a temporary read-write copy if required. - * ``allocate`` causes the array to be allocated if it is None - in the ``op`` parameter. - * ``no_subtype`` prevents an ``allocate`` operand from using a subtype. - * ``arraymask`` indicates that this operand is the mask to use - for selecting elements when writing to operands with the - 'writemasked' flag set. The iterator does not enforce this, - but when writing from a buffer back to the array, it only - copies those elements indicated by this mask. - * ``writemasked`` indicates that only elements where the chosen - ``arraymask`` operand is True will be written to. - * ``overlap_assume_elementwise`` can be used to mark operands that are - accessed only in the iterator order, to allow less conservative - copying when ``copy_if_overlap`` is present. + This is a list of flags for each operand. 
At minimum, one of + ``readonly``, ``readwrite``, or ``writeonly`` must be specified. + + * ``readonly`` indicates the operand will only be read from. + * ``readwrite`` indicates the operand will be read from and written to. + * ``writeonly`` indicates the operand will only be written to. + * ``no_broadcast`` prevents the operand from being broadcasted. + * ``contig`` forces the operand data to be contiguous. + * ``aligned`` forces the operand data to be aligned. + * ``nbo`` forces the operand data to be in native byte order. + * ``copy`` allows a temporary read-only copy if required. + * ``updateifcopy`` allows a temporary read-write copy if required. + * ``allocate`` causes the array to be allocated if it is None + in the ``op`` parameter. + * ``no_subtype`` prevents an ``allocate`` operand from using a subtype. + * ``arraymask`` indicates that this operand is the mask to use + for selecting elements when writing to operands with the + 'writemasked' flag set. The iterator does not enforce this, + but when writing from a buffer back to the array, it only + copies those elements indicated by this mask. + * ``writemasked`` indicates that only elements where the chosen + ``arraymask`` operand is True will be written to. + * ``overlap_assume_elementwise`` can be used to mark operands that are + accessed only in the iterator order, to allow less conservative + copying when ``copy_if_overlap`` is present. op_dtypes : dtype or tuple of dtype(s), optional The required data type(s) of the operands. If copying or buffering is enabled, the data will be converted to/from their original types. @@ -422,10 +434,22 @@ """) +# nditer attributes + +add_newdoc('numpy._core', 'nditer', ('operands', + """ + operands[`Slice`] + + The array(s) to be iterated over. Valid only before the iterator is closed. + """)) + # nditer methods add_newdoc('numpy._core', 'nditer', ('copy', """ + copy($self, /) + -- + copy() Get a copy of the iterator in its current state. @@ -444,15 +468,11 @@ """)) -add_newdoc('numpy._core', 'nditer', ('operands', - """ - operands[`Slice`] - - The array(s) to be iterated over. Valid only before the iterator is closed. - """)) - add_newdoc('numpy._core', 'nditer', ('debug_print', """ + debug_print($self, /) + -- + debug_print() Print the current state of the `nditer` instance and debug info to stdout. @@ -461,6 +481,9 @@ add_newdoc('numpy._core', 'nditer', ('enable_external_loop', """ + enable_external_loop($self, /) + -- + enable_external_loop() When the "external_loop" was not used during construction, but @@ -471,6 +494,9 @@ add_newdoc('numpy._core', 'nditer', ('iternext', """ + iternext($self, /) + -- + iternext() Check whether iterations are left, and perform a single internal iteration @@ -486,6 +512,9 @@ add_newdoc('numpy._core', 'nditer', ('remove_axis', """ + remove_axis($self, i, /) + -- + remove_axis(i, /) Removes axis `i` from the iterator. Requires that the flag "multi_index" @@ -495,6 +524,9 @@ add_newdoc('numpy._core', 'nditer', ('remove_multi_index', """ + remove_multi_index($self, /) + -- + remove_multi_index() When the "multi_index" flag was specified, this removes it, allowing @@ -504,32 +536,50 @@ add_newdoc('numpy._core', 'nditer', ('reset', """ + reset($self, /) + -- + reset() Reset the iterator to its initial state. """)) +add_newdoc('numpy._core', 'nditer', ('close', + """ + close($self, /) + -- + + close() + + Resolve all writeback semantics in writeable operands. 
+ + See Also + -------- + :ref:`nditer-context-manager` + + """)) + +# nested_iters + add_newdoc('numpy._core', 'nested_iters', """ - nested_iters(op, axes, flags=None, op_flags=None, op_dtypes=None, \ - order="K", casting="safe", buffersize=0) + nested_iters(op, axes, flags=None, op_flags=None, op_dtypes=None, + order='K', casting='safe', buffersize=0) Create nditers for use in nested loops Create a tuple of `nditer` objects which iterate in nested loops over different axes of the op argument. The first iterator is used in the - outermost loop, the last in the innermost loop. Advancing one will change - the subsequent iterators to point at its new element. + outermost loop, the last in the innermost loop. Advancing one will + change the subsequent iterators to point at its new element. Parameters ---------- op : ndarray or sequence of array_like The array(s) to iterate over. - axes : list of list of int Each item is used as an "op_axes" argument to an nditer - flags, op_flags, op_dtypes, order, casting, buffersize (optional) See `nditer` parameters of the same name @@ -574,20 +624,6 @@ """) -add_newdoc('numpy._core', 'nditer', ('close', - """ - close() - - Resolve all writeback semantics in writeable operands. - - See Also - -------- - - :ref:`nditer-context-manager` - - """)) - - ############################################################################### # # broadcast diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index acaaa0548fd5..84a88d54548e 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -1,3 +1,4 @@ +import inspect import subprocess import sys import textwrap @@ -11,6 +12,7 @@ from numpy import all, arange, array, nditer from numpy.testing import ( HAS_REFCOUNT, + IS_PYPY, IS_WASM, assert_, assert_array_equal, @@ -3496,3 +3498,27 @@ def test_debug_print(capfd): # The actual output may have additional pointers listed that are # stripped from the example output: assert res_line.startswith(expected_line.strip()) + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +def test_signature_constructor(): + sig = inspect.signature(np.nditer) + + assert sig.parameters + assert "self" not in sig.parameters + assert "args" not in sig.parameters + assert "kwargs" not in sig.parameters + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +@pytest.mark.parametrize( + "method", + [fn for name, fn in vars(np.nditer).items() if callable(fn) and name[0] != "_"], +) +def test_signature_methods(method): + sig = inspect.signature(method) + + assert "self" in sig.parameters + assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY From 7e52b73098dcda9de9f7f45c60cf1c9f86ceb0d0 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sat, 1 Nov 2025 22:38:10 +0100 Subject: [PATCH 0790/1018] DOC: remove outdated notes on how to build against numpy in conda-forge (#30127) Closes https://github.com/conda-forge/conda-forge.github.io/issues/2605 [skip actions] [skip azp] [skip cirrus] --- doc/source/dev/depending_on_numpy.rst | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/doc/source/dev/depending_on_numpy.rst b/doc/source/dev/depending_on_numpy.rst index e3c03b0fea65..de809462654b 100644 --- a/doc/source/dev/depending_on_numpy.rst +++ b/doc/source/dev/depending_on_numpy.rst @@ -128,14 +128,9 @@ compatible with a 
new major release of NumPy and may not be compatible with very old versions. For conda-forge packages, please see -`here `__. - -as of now, it is usually as easy as including:: - - host: - - numpy - run: - - {{ pin_compatible('numpy') }} +`here `__ +for instructions on how to declare a dependency on ``numpy`` when using the C +API. Runtime dependency & version ranges From a8546b7812aaf4734fe82e42c69b068bd4db0718 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Sun, 2 Nov 2025 01:13:31 -0700 Subject: [PATCH 0791/1018] CI: disable flaky ubuntu UBsan CI job, and tweak macOS config (#30118) --- .github/workflows/compiler_sanitizers.yml | 47 ++--------------------- 1 file changed, 4 insertions(+), 43 deletions(-) diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index a86ff5ae8b53..290ddfa75ffe 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -49,16 +49,16 @@ jobs: pyenv --version - name: Set up LLVM run: | - brew install llvm@19 - LLVM_PREFIX=$(brew --prefix llvm@19) + brew install llvm@20 + LLVM_PREFIX=$(brew --prefix llvm@20) echo CC="$LLVM_PREFIX/bin/clang" >> $GITHUB_ENV echo CXX="$LLVM_PREFIX/bin/clang++" >> $GITHUB_ENV echo LDFLAGS="-L$LLVM_PREFIX/lib" >> $GITHUB_ENV echo CPPFLAGS="-I$LLVM_PREFIX/include" >> $GITHUB_ENV - name: Build Python with address sanitizer run: | - CONFIGURE_OPTS="--with-address-sanitizer" pyenv install 3.14t - pyenv global 3.14t + CONFIGURE_OPTS="--with-address-sanitizer" pyenv install 3.14 + pyenv global 3.14 - name: Install dependencies run: | pip install -r requirements/build_requirements.txt @@ -107,42 +107,3 @@ jobs: python -m spin test \ `find numpy -name "test*.py" | xargs grep -l "import threading" | tr '\n' ' '` \ -- -v -s --timeout=600 --durations=10 - - ubuntu_UBSAN: - # To enable this workflow on a fork, comment out: - if: github.repository == 'numpy/numpy' - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - with: - submodules: recursive - fetch-tags: true - persist-credentials: false - - name: Set up pyenv - run: | - git clone https://github.com/pyenv/pyenv.git "$HOME/.pyenv" - PYENV_ROOT="$HOME/.pyenv" - PYENV_BIN="$PYENV_ROOT/bin" - PYENV_SHIMS="$PYENV_ROOT/shims" - echo "$PYENV_BIN" >> $GITHUB_PATH - echo "$PYENV_SHIMS" >> $GITHUB_PATH - echo "PYENV_ROOT=$PYENV_ROOT" >> $GITHUB_ENV - - name: Check pyenv is working - run: pyenv --version - - name: Build python with address sanitizer - run: | - CONFIGURE_OPTS="--with-address-sanitizer" pyenv install 3.14t - pyenv global 3.14t - - name: Install dependencies - run: | - pip install -r requirements/build_requirements.txt - pip install -r requirements/ci_requirements.txt - pip install -r requirements/test_requirements.txt - - name: Build numpy with UndefinedBehaviorSanitizer - run: python -m spin build -- -Db_sanitize=address,undefined -Db_lundef=false - - name: Test - run: | - # pass -s to pytest to see UBSAN errors and warnings, otherwise pytest captures them - ASAN_OPTIONS=detect_leaks=0:symbolize=1:strict_init_order=true:allocator_may_return_null=1 \ - UBSAN_OPTIONS=halt_on_error=1:suppressions=${GITHUB_WORKSPACE}/tools/ci/ubsan_suppressions_x86_64.txt \ - spin test -- -v -s --timeout=600 --durations=10 From 1e424dae42a2d560520b6e053e8e60ac4205bfc7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafael=20Laboissi=C3=A8re?= Date: Sun, 2 Nov 2025 18:46:22 +0100 Subject: [PATCH 0792/1018] BUG: Avoid compilation error of wrapper file generated 
with SWIG >= 4.4 (#30128)

The `import_array` macro, which is defined in the file
`numpy/_core/code_generators/generate_numpy_api.py`, is intended for use
inside an internal SWIG function that is called in the generated C wrapper
file. This macro contains a return statement whose argument must match the
function definition.

Until version 4.3 of SWIG, the aforementioned function returned a `void*`
value. However, in version 4.4, the return value was changed to `int`. This
causes compilation of code using import_array() to fail with the following
error message: `returning 'void *' from a function with return type 'int'
makes integer from pointer without a cast [-Wint-conversion].`

This commit resolves the issue by returning either `NULL` or `0`, depending
on the SWIG version being used (< 4.4 or >= 4.4, respectively). This change
has been successfully tested against SWIG versions 4.3 and 4.4.

Closes: numpy/numpy#30122
---
 numpy/_core/code_generators/generate_numpy_api.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/numpy/_core/code_generators/generate_numpy_api.py b/numpy/_core/code_generators/generate_numpy_api.py
index dc11bcd2c272..23d678872ca4 100644
--- a/numpy/_core/code_generators/generate_numpy_api.py
+++ b/numpy/_core/code_generators/generate_numpy_api.py
@@ -157,6 +157,12 @@
     return 0;
 }
 
+#if (SWIG_VERSION < 0x040400)
+#define _RETURN_VALUE NULL
+#else
+#define _RETURN_VALUE 0
+#endif
+
 #define import_array() { \
     if (_import_array() < 0) { \
         PyErr_Print(); \
@@ -164,7 +170,7 @@
             PyExc_ImportError, \
             "numpy._core.multiarray failed to import" \
         ); \
-        return NULL; \
+        return _RETURN_VALUE; \
     } \
 }
 

From b5474d49065cf866eb8890bb9b9a20cbc45c41d2 Mon Sep 17 00:00:00 2001
From: Marten Henric van Kerkwijk
Date: Mon, 3 Nov 2025 19:34:40 -0500
Subject: [PATCH 0793/1018] MAINT,BUG: make later arguments in array2string
 keyword only.

---
 doc/release/upcoming_changes/30068.expired.rst | 3 ++-
 numpy/_core/arrayprint.py                      | 8 ++++----
 numpy/_core/arrayprint.pyi                     | 2 +-
 3 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/doc/release/upcoming_changes/30068.expired.rst b/doc/release/upcoming_changes/30068.expired.rst
index 56678465e4b9..5d41c98b3260 100644
--- a/doc/release/upcoming_changes/30068.expired.rst
+++ b/doc/release/upcoming_changes/30068.expired.rst
@@ -4,7 +4,8 @@
 The following long-deprecated APIs have been removed or converted to errors:
 
 * The ``style`` parameter has been removed from ``numpy.array2string``.
-  This argument had no effect since Numpy 1.14.0.
+  This argument had no effect since Numpy 1.14.0. Any arguments following
+  it, such as ``formatter``, have now been made keyword-only.
 
 * Calling ``np.sum(generator)`` directly on a generator object now raises
   a `TypeError`. This behavior was deprecated in NumPy 1.15.0.
Use ``np.sum(np.fromiter(generator))`` diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index ec17d3063c09..8d576d9e1d56 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -619,18 +619,18 @@ def _array2string(a, options, separator=' ', prefix=""): def _array2string_dispatcher( a, max_line_width=None, precision=None, suppress_small=None, separator=None, prefix=None, - formatter=None, threshold=None, + *, formatter=None, threshold=None, edgeitems=None, sign=None, floatmode=None, suffix=None, - *, legacy=None): + legacy=None): return (a,) @array_function_dispatch(_array2string_dispatcher, module='numpy') def array2string(a, max_line_width=None, precision=None, suppress_small=None, separator=' ', prefix="", - formatter=None, threshold=None, + *, formatter=None, threshold=None, edgeitems=None, sign=None, floatmode=None, suffix="", - *, legacy=None): + legacy=None): """ Return a string representation of an array. diff --git a/numpy/_core/arrayprint.pyi b/numpy/_core/arrayprint.pyi index 6834565da97f..307f844634ca 100644 --- a/numpy/_core/arrayprint.pyi +++ b/numpy/_core/arrayprint.pyi @@ -98,13 +98,13 @@ def array2string( suppress_small: bool | None = None, separator: str = " ", prefix: str = "", + *, formatter: _FormatDict | None = None, threshold: int | None = None, edgeitems: int | None = None, sign: _Sign | None = None, floatmode: _FloatMode | None = None, suffix: str = "", - *, legacy: _Legacy | None = None, ) -> str: ... From b928580d9f596d1e003479dcdbecd368ce8459b9 Mon Sep 17 00:00:00 2001 From: Trey Cole Date: Mon, 3 Nov 2025 22:08:21 -0500 Subject: [PATCH 0794/1018] MAINT: fix math markup (\times -> \\times) in numpy.linalg.multidot docstring Notes section --- numpy/linalg/_linalg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index c3cbf3d0ef98..cfcaef13daf2 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -2938,7 +2938,7 @@ def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1] Assume we have three matrices - :math:`A_{10 \times 100}, B_{100 \times 5}, C_{5 \times 50}`. + :math:`A_{10 \\times 100}, B_{100 \\times 5}, C_{5 \\times 50}`. 
    The costs for the two different parenthesizations are as follows::

From 0fdb1979cdfcdd31b61f423b66a384fabd1d7fd7 Mon Sep 17 00:00:00 2001
From: Matti Picus
Date: Tue, 4 Nov 2025 12:24:28 +0200
Subject: [PATCH 0795/1018] BLD: use scipy-openblas 0.3.30.7 (#30132)

---
 requirements/ci32_requirements.txt | 2 +-
 requirements/ci_requirements.txt   | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt
index 19e6b119b73f..824934787e10 100644
--- a/requirements/ci32_requirements.txt
+++ b/requirements/ci32_requirements.txt
@@ -1,3 +1,3 @@
 spin==0.15
 # Keep this in sync with ci_requirements.txt
-scipy-openblas32==0.3.30.0.6
+scipy-openblas32==0.3.30.0.7
diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt
index 605132ee720a..37e685fef0cc 100644
--- a/requirements/ci_requirements.txt
+++ b/requirements/ci_requirements.txt
@@ -1,4 +1,4 @@
 spin==0.15
 # Keep this in sync with ci32_requirements.txt
-scipy-openblas32==0.3.30.0.6
-scipy-openblas64==0.3.30.0.6
+scipy-openblas32==0.3.30.0.7
+scipy-openblas64==0.3.30.0.7

From 3b44dc061343cdfb1f15bf7d8280a844aa994b76 Mon Sep 17 00:00:00 2001
From: Joren Hammudoglu
Date: Tue, 4 Nov 2025 17:40:35 +0100
Subject: [PATCH 0796/1018] BUG, DOC, TYP: ``empty`` and ``zeros`` runtime
 signatures, and missing ``device`` parameter docs (#30140)

* BUG: ``empty`` and ``zeros`` runtime signatures

* TYP: `empty`, `zeros`, and `ones` parameter defaults and minor shape-type fix

* TST: Update `_convert2ma` test signature for `np.ma.empty`
---
 numpy/_core/_add_newdocs.py                   |  26 ++--
 numpy/_core/multiarray.pyi                    | 112 ++++++++++--------
 numpy/_core/tests/test_numeric.py             |  31 +++++
 numpy/ma/tests/test_core.py                   |   8 +-
 .../tests/data/reveal/array_constructors.pyi  |   5 +-
 5 files changed, 121 insertions(+), 61 deletions(-)

diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py
index f02ab63c146c..bdb84d78bb27 100644
--- a/numpy/_core/_add_newdocs.py
+++ b/numpy/_core/_add_newdocs.py
@@ -1293,7 +1293,10 @@
 
 add_newdoc('numpy._core.multiarray', 'empty',
     """
-    empty(shape, dtype=float, order='C', *, device=None, like=None)
+    empty(shape, dtype=None, order='C', *, device=None, like=None)
+    --
+
+    empty(shape, dtype=None, order='C', *, device=None, like=None)
 
     Return a new array of given shape and type, without initializing
    entries.
@@ -1306,8 +1309,7 @@
         `numpy.float64`.
     order : {'C', 'F'}, optional, default: 'C'
         Whether to store multi-dimensional data in row-major
-        (C-style) or column-major (Fortran-style) order in
-        memory.
+        (C-style) or column-major (Fortran-style) order in memory.
     device : str, optional
         The device on which to place the created array. Default: ``None``.
         For Array-API interoperability only, so must be ``"cpu"`` if passed.
@@ -1363,11 +1365,14 @@
     string. If `obj` is not given, it will be interpreted as None for object
     type and as zeros for all other types.
 
     """)
 
 add_newdoc('numpy._core.multiarray', 'zeros',
     """
-    zeros(shape, dtype=float, order='C', *, like=None)
+    zeros(shape, dtype=None, order='C', *, device=None, like=None)
+    --
+
+    zeros(shape, dtype=None, order='C', *, device=None, like=None)
 
     Return a new array of given shape and type, filled with zeros.
 
@@ -1380,8 +1385,12 @@
         `numpy.float64`.
     order : {'C', 'F'}, optional, default: 'C'
         Whether to store multi-dimensional data in row-major
-        (C-style) or column-major (Fortran-style) order in
-        memory.
+ (C-style) or column-major (Fortran-style) order in memory. + device : str, optional + The device on which to place the created array. Default: ``None``. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 ${ARRAY_FUNCTION_LIKE} .. versionadded:: 1.20.0 @@ -1423,7 +1432,8 @@ """) add_newdoc('numpy._core.multiarray', 'set_typeDict', - """set_typeDict(dict) + """ + set_typeDict(dict) Set the internal dictionary that can look up an array type using a registered code. diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index dd47e8e872d7..ec56f3429892 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -1,6 +1,6 @@ # TODO: Sort out any and all missing functions in this namespace import datetime as dt -from _typeshed import StrOrBytesPath, SupportsLenAndGetItem +from _typeshed import Incomplete, StrOrBytesPath, SupportsLenAndGetItem from collections.abc import Callable, Iterable, Sequence from typing import ( Any, @@ -10,9 +10,7 @@ from typing import ( Protocol, SupportsIndex, TypeAlias, - TypedDict, TypeVar, - Unpack, final, overload, type_check_only, @@ -32,7 +30,6 @@ from numpy import ( # type: ignore[attr-defined] _SupportsBuffer, _SupportsFileMethods, broadcast, - # Re-exports busdaycalendar, complexfloating, correlate, @@ -54,7 +51,6 @@ from numpy import ( # type: ignore[attr-defined] signedinteger, str_, timedelta64, - # The rest ufunc, uint8, unsignedinteger, @@ -62,10 +58,9 @@ from numpy import ( # type: ignore[attr-defined] ) from numpy._typing import ( ArrayLike, - # DTypes DTypeLike, - # Arrays NDArray, + _AnyShape, _ArrayLike, _ArrayLikeBool_co, _ArrayLikeBytes_co, @@ -82,7 +77,6 @@ from numpy._typing import ( _IntLike_co, _NestedSequence, _ScalarLike_co, - # Shapes _Shape, _ShapeLike, _SupportsArrayFunc, @@ -192,18 +186,15 @@ __all__ = [ _ScalarT = TypeVar("_ScalarT", bound=generic) _DTypeT = TypeVar("_DTypeT", bound=np.dtype) -_ArrayT = TypeVar("_ArrayT", bound=ndarray[Any, Any]) -_ArrayT_co = TypeVar( - "_ArrayT_co", - bound=ndarray[Any, Any], - covariant=True, -) +_ArrayT = TypeVar("_ArrayT", bound=ndarray) +_ArrayT_co = TypeVar("_ArrayT_co", bound=ndarray, covariant=True) +_ShapeT = TypeVar("_ShapeT", bound=_Shape) +# TODO: fix the names of these typevars _ReturnType = TypeVar("_ReturnType") _IDType = TypeVar("_IDType") _Nin = TypeVar("_Nin", bound=int) _Nout = TypeVar("_Nout", bound=int) -_ShapeT = TypeVar("_ShapeT", bound=_Shape) _Array: TypeAlias = ndarray[_ShapeT, dtype[_ScalarT]] _Array1D: TypeAlias = ndarray[tuple[int], dtype[_ScalarT]] @@ -236,11 +227,6 @@ _RollKind: TypeAlias = L[ # `raise` is deliberately excluded class _SupportsArray(Protocol[_ArrayT_co]): def __array__(self, /) -> _ArrayT_co: ... -@type_check_only -class _KwargsEmpty(TypedDict, total=False): - device: L["cpu"] | None - like: _SupportsArrayFunc | None - @type_check_only class _ConstructorEmpty(Protocol): # 1-D shape @@ -250,8 +236,10 @@ class _ConstructorEmpty(Protocol): /, shape: SupportsIndex, dtype: None = None, - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> _Array1D[float64]: ... 
@overload def __call__( @@ -259,8 +247,10 @@ class _ConstructorEmpty(Protocol): /, shape: SupportsIndex, dtype: _DTypeT | _SupportsDType[_DTypeT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> ndarray[tuple[int], _DTypeT]: ... @overload def __call__( @@ -268,18 +258,22 @@ class _ConstructorEmpty(Protocol): /, shape: SupportsIndex, dtype: type[_ScalarT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> _Array1D[_ScalarT]: ... @overload def __call__( self, /, shape: SupportsIndex, - dtype: DTypeLike | None = ..., - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], - ) -> _Array1D[Any]: ... + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> _Array1D[Incomplete]: ... # known shape @overload @@ -288,8 +282,10 @@ class _ConstructorEmpty(Protocol): /, shape: _AnyShapeT, dtype: None = None, - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> _Array[_AnyShapeT, float64]: ... @overload def __call__( @@ -297,8 +293,10 @@ class _ConstructorEmpty(Protocol): /, shape: _AnyShapeT, dtype: _DTypeT | _SupportsDType[_DTypeT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> ndarray[_AnyShapeT, _DTypeT]: ... @overload def __call__( @@ -306,18 +304,22 @@ class _ConstructorEmpty(Protocol): /, shape: _AnyShapeT, dtype: type[_ScalarT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> _Array[_AnyShapeT, _ScalarT]: ... @overload def __call__( self, /, shape: _AnyShapeT, - dtype: DTypeLike | None = ..., - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], - ) -> _Array[_AnyShapeT, Any]: ... + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> _Array[_AnyShapeT, Incomplete]: ... # unknown shape @overload @@ -325,34 +327,42 @@ class _ConstructorEmpty(Protocol): self, /, shape: _ShapeLike, dtype: None = None, - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[float64]: ... @overload def __call__( self, /, shape: _ShapeLike, dtype: _DTypeT | _SupportsDType[_DTypeT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], - ) -> ndarray[Any, _DTypeT]: ... + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> ndarray[_AnyShape, _DTypeT]: ... @overload def __call__( self, /, shape: _ShapeLike, dtype: type[_ScalarT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[_ScalarT]: ... @overload def __call__( self, /, shape: _ShapeLike, - dtype: DTypeLike | None = ..., - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], - ) -> NDArray[Any]: ... 
+ dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> NDArray[Incomplete]: ... # using `Final` or `TypeAlias` will break stubtest error = Exception diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index fb5a37c33126..9af89c6ff391 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -1,3 +1,4 @@ +import inspect import itertools import math import platform @@ -18,6 +19,7 @@ from numpy.random import rand, randint, randn from numpy.testing import ( HAS_REFCOUNT, + IS_PYPY, IS_WASM, assert_, assert_almost_equal, @@ -3371,6 +3373,35 @@ def test_for_reference_leak(self): np.full([dim] * 10, 0) assert_(sys.getrefcount(dim) == beg) + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") + @pytest.mark.parametrize("func", [np.empty, np.zeros, np.ones, np.full]) + def test_signatures(self, func): + sig = inspect.signature(func) + params = sig.parameters + + assert len(params) in {5, 6} + + assert 'shape' in params + assert params["shape"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert params["shape"].default is inspect.Parameter.empty + + assert 'dtype' in params + assert params["dtype"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert params["dtype"].default is None + + assert 'order' in params + assert params["order"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert params["order"].default == "C" + + assert 'device' in params + assert params["device"].kind is inspect.Parameter.KEYWORD_ONLY + assert params["device"].default is None + + assert 'like' in params + assert params["like"].kind is inspect.Parameter.KEYWORD_ONLY + assert params["like"].default is None + class TestLikeFuncs: '''Test ones_like, zeros_like, empty_like and full_like''' diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 0e4d42da0a5b..b978b25f5827 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -5982,7 +5982,13 @@ def test_frommethod_signature(fn, signature): @pytest.mark.parametrize( ('fn', 'signature'), [ - (np.ma.empty, "(*args, fill_value=None, hardmask=False, **kwargs)"), + ( + np.ma.empty, + ( + "(shape, dtype=None, order='C', *, device=None, like=None, " + "fill_value=None, hardmask=False)" + ), + ), (np.ma.empty_like, "(*args, **kwargs)"), (np.ma.squeeze, "(a, axis=None, *, fill_value=None, hardmask=False)"), ( diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 2d8b5fa3fd17..1d970ef68de5 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -189,7 +189,10 @@ assert_type(np.ones(_shape_2d), np.ndarray[tuple[int, int], np.dtype[np.float64] assert_type(np.ones(_shape_nd), np.ndarray[tuple[int, ...], np.dtype[np.float64]]) assert_type(np.ones(_shape_1d, dtype=np.int64), np.ndarray[tuple[int], np.dtype[np.int64]]) assert_type(np.ones(_shape_like), npt.NDArray[np.float64]) -assert_type(np.ones(_shape_like, dtype=np.dtypes.Int64DType()), np.ndarray[Any, np.dtypes.Int64DType]) +assert_type( + np.ones(_shape_like, dtype=np.dtypes.Int64DType()), + np.ndarray[tuple[Any, ...], np.dtypes.Int64DType], +) assert_type(np.ones(_shape_like, dtype=int), npt.NDArray[Any]) assert_type(np.ones(mixed_shape), npt.NDArray[np.float64]) From 
e431aaaa7d465e47ebc39646fe0cf88759c7b728 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 4 Nov 2025 07:34:09 +0100 Subject: [PATCH 0797/1018] BUG: ``_core.multiarray.*`` function runtime signatures --- numpy/_core/multiarray.py | 73 ++++++++++++++++------------ numpy/_core/overrides.py | 11 +++-- numpy/_core/tests/test_multiarray.py | 34 +++++++++++++ 3 files changed, 84 insertions(+), 34 deletions(-) diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index 5599494720b6..a5995bd829bf 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -114,11 +114,20 @@ def _override___module__(): @array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like) def empty_like( - prototype, dtype=None, order=None, subok=None, shape=None, *, device=None + prototype, dtype=None, order="K", subok=True, shape=None, *, device=None ): """ - empty_like(prototype, dtype=None, order='K', subok=True, shape=None, *, - device=None) + empty_like( + prototype, + /, + dtype=None, + order='K', + subok=True, + shape=None, + *, + device=None, + ) + -- Return a new array with the same shape and type as a given array. @@ -186,15 +195,18 @@ def empty_like( @array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate) -def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None): +def concatenate(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"): """ concatenate( - (a1, a2, ...), + arrays, + /, axis=0, out=None, + *, dtype=None, - casting="same_kind" + casting="same_kind", ) + -- Join a sequence of arrays along an existing axis. @@ -295,7 +307,7 @@ def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.inner) -def inner(a, b): +def inner(a, b, /): """ inner(a, b, /) @@ -389,7 +401,7 @@ def inner(a, b): @array_function_from_c_func_and_dispatcher(_multiarray_umath.where) -def where(condition, x=None, y=None): +def where(condition, x=None, y=None, /): """ where(condition, [x, y], /) @@ -465,7 +477,7 @@ def where(condition, x=None, y=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort) -def lexsort(keys, axis=None): +def lexsort(keys, axis=-1): """ lexsort(keys, axis=-1) @@ -586,7 +598,7 @@ def lexsort(keys, axis=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast) -def can_cast(from_, to, casting=None): +def can_cast(from_, to, casting="safe"): """ can_cast(from_, to, casting='safe') @@ -648,7 +660,7 @@ def can_cast(from_, to, casting=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type) -def min_scalar_type(a): +def min_scalar_type(a, /): """ min_scalar_type(a, /) @@ -862,7 +874,7 @@ def dot(a, b, out=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot) -def vdot(a, b): +def vdot(a, b, /): r""" vdot(a, b, /) @@ -925,7 +937,7 @@ def vdot(a, b): @array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount) -def bincount(x, weights=None, minlength=None): +def bincount(x, /, weights=None, minlength=0): """ bincount(x, /, weights=None, minlength=0) @@ -1001,7 +1013,7 @@ def bincount(x, weights=None, minlength=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index) -def ravel_multi_index(multi_index, dims, mode=None, order=None): +def ravel_multi_index(multi_index, dims, mode="raise", order="C"): """ ravel_multi_index(multi_index, dims, mode='raise', order='C') @@ -1059,7 +1071,7 @@ def ravel_multi_index(multi_index, dims, mode=None, 
order=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index) -def unravel_index(indices, shape=None, order=None): +def unravel_index(indices, shape, order="C"): """ unravel_index(indices, shape, order='C') @@ -1104,7 +1116,7 @@ def unravel_index(indices, shape=None, order=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto) -def copyto(dst, src, casting=None, where=None): +def copyto(dst, src, casting="same_kind", where=True): """ copyto(dst, src, casting='same_kind', where=True) @@ -1156,7 +1168,7 @@ def copyto(dst, src, casting=None, where=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask) def putmask(a, /, mask, values): """ - putmask(a, mask, values) + putmask(a, /, mask, values) Changes elements of an array based on conditional and input values. @@ -1200,7 +1212,7 @@ def putmask(a, /, mask, values): @array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits) -def packbits(a, axis=None, bitorder='big'): +def packbits(a, /, axis=None, bitorder="big"): """ packbits(a, /, axis=None, bitorder='big') @@ -1257,7 +1269,7 @@ def packbits(a, axis=None, bitorder='big'): @array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits) -def unpackbits(a, axis=None, count=None, bitorder='big'): +def unpackbits(a, /, axis=None, count=None, bitorder="big"): """ unpackbits(a, /, axis=None, count=None, bitorder='big') @@ -1337,9 +1349,9 @@ def unpackbits(a, axis=None, count=None, bitorder='big'): @array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory) -def shares_memory(a, b, max_work=None): +def shares_memory(a, b, /, max_work=-1): """ - shares_memory(a, b, /, max_work=None) + shares_memory(a, b, /, max_work=-1) Determine if two arrays share memory. @@ -1416,9 +1428,9 @@ def shares_memory(a, b, max_work=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory) -def may_share_memory(a, b, max_work=None): +def may_share_memory(a, b, /, max_work=0): """ - may_share_memory(a, b, /, max_work=None) + may_share_memory(a, b, /, max_work=0) Determine if two arrays might share memory @@ -1458,14 +1470,14 @@ def may_share_memory(a, b, max_work=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday) -def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None): +def is_busday(dates, weekmask="1111100", holidays=None, busdaycal=None, out=None): """ is_busday( dates, weekmask='1111100', holidays=None, busdaycal=None, - out=None + out=None, ) Calculates which of the given dates are valid days, and which are not. 
@@ -1517,7 +1529,7 @@ def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset) -def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None, +def busday_offset(dates, offsets, roll="raise", weekmask="1111100", holidays=None, busdaycal=None, out=None): """ busday_offset( @@ -1527,7 +1539,7 @@ def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None, weekmask='1111100', holidays=None, busdaycal=None, - out=None + out=None, ) First adjusts the date to fall on a valid day according to @@ -1619,7 +1631,7 @@ def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None, @array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count) -def busday_count(begindates, enddates, weekmask=None, holidays=None, +def busday_count(begindates, enddates, weekmask="1111100", holidays=(), busdaycal=None, out=None): """ busday_count( @@ -1692,9 +1704,8 @@ def busday_count(begindates, enddates, weekmask=None, holidays=None, return (begindates, enddates, weekmask, holidays, out) -@array_function_from_c_func_and_dispatcher( - _multiarray_umath.datetime_as_string) -def datetime_as_string(arr, unit=None, timezone=None, casting=None): +@array_function_from_c_func_and_dispatcher(_multiarray_umath.datetime_as_string) +def datetime_as_string(arr, unit=None, timezone="naive", casting="same_kind"): """ datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind') diff --git a/numpy/_core/overrides.py b/numpy/_core/overrides.py index 6414710ae900..6d5e7750b09b 100644 --- a/numpy/_core/overrides.py +++ b/numpy/_core/overrides.py @@ -1,6 +1,7 @@ """Implementation of __array_function__ overrides from NEP-18.""" import collections import functools +import inspect from numpy._core._multiarray_umath import ( _ArrayFunctionDispatcher, @@ -156,11 +157,15 @@ def decorator(implementation): "argument and a keyword-only argument. 
" f"{implementation} does not seem to comply.") - if docs_from_dispatcher: - add_docstring(implementation, dispatcher.__doc__) + if docs_from_dispatcher and dispatcher.__doc__ is not None: + doc = inspect.cleandoc(dispatcher.__doc__) + add_docstring(implementation, doc) public_api = _ArrayFunctionDispatcher(dispatcher, implementation) - public_api = functools.wraps(implementation)(public_api) + functools.update_wrapper(public_api, implementation) + + if not verify and not getattr(implementation, "__text_signature__", None): + public_api.__signature__ = inspect.signature(dispatcher) if module is not None: public_api.__module__ = module diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 38e710179067..d62c2018292c 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -10902,3 +10902,37 @@ def test_array_method_signatures(methodname: str): assert "self" in sig.parameters assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +@pytest.mark.parametrize("func", [np.empty_like, np.concatenate]) +def test_c_func_dispatcher_text_signature(func): + text_sig = func.__wrapped__.__text_signature__ + assert text_sig.startswith("(") and text_sig.endswith(")") + + sig = inspect.signature(func) + assert sig == inspect.signature(func.__wrapped__) + assert not hasattr(func, "__signature__") + + with pytest.raises(ValueError): + inspect.signature(func, follow_wrapped=False) + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +@pytest.mark.parametrize( + "func", + [ + np.inner, np.where, np.lexsort, np.can_cast, np.min_scalar_type, np.result_type, + np.dot, np.vdot, np.bincount, np.ravel_multi_index, np.unravel_index, np.copyto, + np.putmask, np.packbits, np.unpackbits, np.shares_memory, np.may_share_memory, + np.is_busday, np.busday_offset, np.busday_count, np.datetime_as_string, + ], +) +def test_c_func_dispatcher_signature(func): + sig = inspect.signature(func) + + assert hasattr(func, "__signature__") + assert sig == func.__signature__ + assert sig.parameters From 0d7844eef4ad646ed42852ddcc8f29f45466cbc0 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 4 Nov 2025 08:39:04 +0100 Subject: [PATCH 0798/1018] TYP: ``_core.multiarray.*`` function signature updates --- numpy/_core/multiarray.pyi | 350 ++++++++---------- numpy/lib/_index_tricks_impl.pyi | 61 ++- numpy/ma/core.pyi | 8 +- numpy/typing/tests/data/fail/multiarray.pyi | 4 +- numpy/typing/tests/data/pass/multiarray.py | 5 +- numpy/typing/tests/data/reveal/multiarray.pyi | 15 +- 6 files changed, 239 insertions(+), 204 deletions(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index ec56f3429892..c1a5bb7567fe 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -18,7 +18,7 @@ from typing import ( from typing_extensions import CapsuleType import numpy as np -from numpy import ( # type: ignore[attr-defined] +from numpy import ( _AnyShapeT, _CastingKind, _CopyMode, @@ -413,43 +413,47 @@ empty: Final[_ConstructorEmpty] = ... 
@overload def empty_like( prototype: _ArrayT, + /, dtype: None = None, - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> _ArrayT: ... @overload def empty_like( prototype: _ArrayLike[_ScalarT], + /, dtype: None = None, - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... @overload def empty_like( - prototype: Any, + prototype: Incomplete, + /, dtype: _DTypeLike[_ScalarT], - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... @overload def empty_like( - prototype: Any, - dtype: DTypeLike | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + prototype: Incomplete, + /, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., -) -> NDArray[Any]: ... + device: L["cpu"] | None = None, +) -> NDArray[Incomplete]: ... @overload def array( @@ -536,44 +540,44 @@ def unravel_index(indices: _ArrayLikeInt_co, shape: _ShapeLike, order: _OrderCF # NOTE: Allow any sequence of array-like objects @overload -def concatenate( # type: ignore[misc] +def concatenate( arrays: _ArrayLike[_ScalarT], /, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = 0, out: None = None, *, dtype: None = None, - casting: _CastingKind | None = ... + casting: _CastingKind | None = "same_kind", ) -> NDArray[_ScalarT]: ... @overload -def concatenate( # type: ignore[misc] +def concatenate( arrays: SupportsLenAndGetItem[ArrayLike], /, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = 0, out: None = None, *, dtype: _DTypeLike[_ScalarT], - casting: _CastingKind | None = ... + casting: _CastingKind | None = "same_kind", ) -> NDArray[_ScalarT]: ... @overload -def concatenate( # type: ignore[misc] +def concatenate( arrays: SupportsLenAndGetItem[ArrayLike], /, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = 0, out: None = None, *, dtype: DTypeLike | None = None, - casting: _CastingKind | None = ... -) -> NDArray[Any]: ... + casting: _CastingKind | None = "same_kind", +) -> NDArray[Incomplete]: ... @overload def concatenate( arrays: SupportsLenAndGetItem[ArrayLike], /, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = 0, *, out: _ArrayT, - dtype: DTypeLike | None = ..., - casting: _CastingKind | None = ... + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", ) -> _ArrayT: ... @overload def concatenate( @@ -582,115 +586,80 @@ def concatenate( axis: SupportsIndex | None, out: _ArrayT, *, - dtype: DTypeLike | None = ..., - casting: _CastingKind | None = ... + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", ) -> _ArrayT: ... -def inner( - a: ArrayLike, - b: ArrayLike, - /, -) -> Any: ... +def inner(a: ArrayLike, b: ArrayLike, /) -> Incomplete: ... @overload -def where( - condition: ArrayLike, - /, -) -> tuple[NDArray[intp], ...]: ... 
+def where(condition: ArrayLike, x: None = None, y: None = None, /) -> tuple[NDArray[intp], ...]: ... @overload -def where( - condition: ArrayLike, - x: ArrayLike, - y: ArrayLike, - /, -) -> NDArray[Any]: ... +def where(condition: ArrayLike, x: ArrayLike, y: ArrayLike, /) -> NDArray[Incomplete]: ... -def lexsort( - keys: ArrayLike, - axis: SupportsIndex | None = ..., -) -> Any: ... +def lexsort(keys: ArrayLike, axis: SupportsIndex = -1) -> NDArray[intp]: ... -def can_cast( - from_: ArrayLike | DTypeLike, - to: DTypeLike, - casting: _CastingKind | None = ..., -) -> bool: ... +def can_cast(from_: ArrayLike | DTypeLike, to: DTypeLike, casting: _CastingKind = "safe") -> bool: ... def min_scalar_type(a: ArrayLike, /) -> dtype: ... - def result_type(*arrays_and_dtypes: ArrayLike | DTypeLike | None) -> dtype: ... @overload -def dot(a: ArrayLike, b: ArrayLike, out: None = None) -> Any: ... +def dot(a: ArrayLike, b: ArrayLike, out: None = None) -> Incomplete: ... @overload def dot(a: ArrayLike, b: ArrayLike, out: _ArrayT) -> _ArrayT: ... @overload -def vdot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /) -> np.bool: ... # type: ignore[misc] +def vdot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /) -> np.bool: ... @overload -def vdot(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, /) -> unsignedinteger: ... # type: ignore[misc] +def vdot(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, /) -> unsignedinteger: ... @overload -def vdot(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, /) -> signedinteger: ... # type: ignore[misc] +def vdot(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, /) -> signedinteger: ... @overload -def vdot(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, /) -> floating: ... # type: ignore[misc] +def vdot(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, /) -> floating: ... @overload -def vdot(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, /) -> complexfloating: ... # type: ignore[misc] +def vdot(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, /) -> complexfloating: ... @overload def vdot(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, /) -> timedelta64: ... @overload -def vdot(a: _ArrayLikeObject_co, b: Any, /) -> Any: ... +def vdot(a: _ArrayLikeObject_co, b: object, /) -> Any: ... @overload -def vdot(a: Any, b: _ArrayLikeObject_co, /) -> Any: ... +def vdot(a: object, b: _ArrayLikeObject_co, /) -> Any: ... -def bincount( - x: ArrayLike, - /, - weights: ArrayLike | None = ..., - minlength: SupportsIndex = ..., -) -> NDArray[intp]: ... +def bincount(x: ArrayLike, /, weights: ArrayLike | None = None, minlength: SupportsIndex = 0) -> NDArray[intp]: ... -def copyto( - dst: NDArray[Any], - src: ArrayLike, - casting: _CastingKind | None = ..., - where: _ArrayLikeBool_co | None = ..., -) -> None: ... +def copyto(dst: ndarray, src: ArrayLike, casting: _CastingKind = "same_kind", where: object = True) -> None: ... +def putmask(a: ndarray, /, mask: _ArrayLikeBool_co, values: ArrayLike) -> None: ... -def putmask( - a: NDArray[Any], - /, - mask: _ArrayLikeBool_co, - values: ArrayLike, -) -> None: ... +_BitOrder: TypeAlias = L["big", "little"] -def packbits( - a: _ArrayLikeInt_co, - /, - axis: SupportsIndex | None = ..., - bitorder: L["big", "little"] = ..., -) -> NDArray[uint8]: ... +@overload +def packbits(a: _ArrayLikeInt_co, /, axis: None = None, bitorder: _BitOrder = "big") -> ndarray[tuple[int], dtype[uint8]]: ... +@overload +def packbits(a: _ArrayLikeInt_co, /, axis: SupportsIndex, bitorder: _BitOrder = "big") -> NDArray[uint8]: ... 
+@overload def unpackbits( a: _ArrayLike[uint8], /, - axis: SupportsIndex | None = ..., - count: SupportsIndex | None = ..., - bitorder: L["big", "little"] = ..., + axis: None = None, + count: SupportsIndex | None = None, + bitorder: _BitOrder = "big", +) -> ndarray[tuple[int], dtype[uint8]]: ... +@overload +def unpackbits( + a: _ArrayLike[uint8], + /, + axis: SupportsIndex, + count: SupportsIndex | None = None, + bitorder: _BitOrder = "big", ) -> NDArray[uint8]: ... -def shares_memory( - a: object, - b: object, - /, - max_work: int | None = ..., -) -> bool: ... +_MaxWork: TypeAlias = L[-1, 0] -def may_share_memory( - a: object, - b: object, - /, - max_work: int | None = ..., -) -> bool: ... +# any two python objects will be accepted, not just `ndarray`s +def shares_memory(a: object, b: object, /, max_work: _MaxWork = -1) -> bool: ... +def may_share_memory(a: object, b: object, /, max_work: _MaxWork = 0) -> bool: ... @overload def asarray( @@ -1096,175 +1065,180 @@ def datetime_data( # The datetime functions perform unsafe casts to `datetime64[D]`, # so a lot of different argument types are allowed here +_ToDates: TypeAlias = dt.date | _NestedSequence[dt.date] +_ToDeltas: TypeAlias = dt.timedelta | _NestedSequence[dt.timedelta] + @overload -def busday_count( # type: ignore[misc] +def busday_count( begindates: _ScalarLike_co | dt.date, enddates: _ScalarLike_co | dt.date, - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates = (), + busdaycal: busdaycalendar | None = None, out: None = None, ) -> int_: ... @overload -def busday_count( # type: ignore[misc] - begindates: ArrayLike | dt.date | _NestedSequence[dt.date], - enddates: ArrayLike | dt.date | _NestedSequence[dt.date], - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., +def busday_count( + begindates: ArrayLike | _ToDates, + enddates: ArrayLike | _ToDates, + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates = (), + busdaycal: busdaycalendar | None = None, out: None = None, ) -> NDArray[int_]: ... @overload def busday_count( - begindates: ArrayLike | dt.date | _NestedSequence[dt.date], - enddates: ArrayLike | dt.date | _NestedSequence[dt.date], - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., + begindates: ArrayLike | _ToDates, + enddates: ArrayLike | _ToDates, + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates = (), + busdaycal: busdaycalendar | None = None, *, out: _ArrayT, ) -> _ArrayT: ... @overload def busday_count( - begindates: ArrayLike | dt.date | _NestedSequence[dt.date], - enddates: ArrayLike | dt.date | _NestedSequence[dt.date], + begindates: ArrayLike | _ToDates, + enddates: ArrayLike | _ToDates, weekmask: ArrayLike, - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None, + holidays: ArrayLike | _ToDates, busdaycal: busdaycalendar | None, out: _ArrayT, ) -> _ArrayT: ... # `roll="raise"` is (more or less?) 
equivalent to `casting="safe"` @overload -def busday_offset( # type: ignore[misc] +def busday_offset( dates: datetime64 | dt.date, offsets: _TD64Like_co | dt.timedelta, - roll: L["raise"] = ..., - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., + roll: L["raise"] = "raise", + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, out: None = None, ) -> datetime64: ... @overload -def busday_offset( # type: ignore[misc] - dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date], - offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta], - roll: L["raise"] = ..., - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., +def busday_offset( + dates: _ArrayLike[datetime64] | _NestedSequence[dt.date], + offsets: _ArrayLikeTD64_co | _ToDeltas, + roll: L["raise"] = "raise", + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, out: None = None, ) -> NDArray[datetime64]: ... @overload -def busday_offset( # type: ignore[misc] - dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date], - offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta], - roll: L["raise"] = ..., - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., +def busday_offset( + dates: _ArrayLike[datetime64] | _ToDates, + offsets: _ArrayLikeTD64_co | _ToDeltas, + roll: L["raise"] = "raise", + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, *, out: _ArrayT, ) -> _ArrayT: ... @overload -def busday_offset( # type: ignore[misc] - dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date], - offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta], +def busday_offset( + dates: _ArrayLike[datetime64] | _ToDates, + offsets: _ArrayLikeTD64_co | _ToDeltas, roll: L["raise"], weekmask: ArrayLike, - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None, + holidays: ArrayLike | _ToDates | None, busdaycal: busdaycalendar | None, out: _ArrayT, ) -> _ArrayT: ... @overload -def busday_offset( # type: ignore[misc] +def busday_offset( dates: _ScalarLike_co | dt.date, offsets: _ScalarLike_co | dt.timedelta, roll: _RollKind, - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, out: None = None, ) -> datetime64: ... @overload -def busday_offset( # type: ignore[misc] - dates: ArrayLike | dt.date | _NestedSequence[dt.date], - offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta], +def busday_offset( + dates: ArrayLike | _NestedSequence[dt.date], + offsets: ArrayLike | _ToDeltas, roll: _RollKind, - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, out: None = None, ) -> NDArray[datetime64]: ... 
@overload def busday_offset( - dates: ArrayLike | dt.date | _NestedSequence[dt.date], - offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta], + dates: ArrayLike | _ToDates, + offsets: ArrayLike | _ToDeltas, roll: _RollKind, - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, *, out: _ArrayT, ) -> _ArrayT: ... @overload def busday_offset( - dates: ArrayLike | dt.date | _NestedSequence[dt.date], - offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta], + dates: ArrayLike | _ToDates, + offsets: ArrayLike | _ToDeltas, roll: _RollKind, weekmask: ArrayLike, - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None, + holidays: ArrayLike | _ToDates | None, busdaycal: busdaycalendar | None, out: _ArrayT, ) -> _ArrayT: ... @overload -def is_busday( # type: ignore[misc] +def is_busday( dates: _ScalarLike_co | dt.date, - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, out: None = None, ) -> np.bool: ... @overload -def is_busday( # type: ignore[misc] +def is_busday( dates: ArrayLike | _NestedSequence[dt.date], - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, out: None = None, ) -> NDArray[np.bool]: ... @overload def is_busday( - dates: ArrayLike | _NestedSequence[dt.date], - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., + dates: ArrayLike | _ToDates, + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, *, out: _ArrayT, ) -> _ArrayT: ... @overload def is_busday( - dates: ArrayLike | _NestedSequence[dt.date], + dates: ArrayLike | _ToDates, weekmask: ArrayLike, - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None, + holidays: ArrayLike | _ToDates | None, busdaycal: busdaycalendar | None, out: _ArrayT, ) -> _ArrayT: ... +_TimezoneContext: TypeAlias = L["naive", "UTC", "local"] | dt.tzinfo + @overload -def datetime_as_string( # type: ignore[misc] +def datetime_as_string( arr: datetime64 | dt.date, - unit: L["auto"] | _UnitKind | None = ..., - timezone: L["naive", "UTC", "local"] | dt.tzinfo = ..., - casting: _CastingKind = ..., + unit: L["auto"] | _UnitKind | None = None, + timezone: _TimezoneContext = "naive", + casting: _CastingKind = "same_kind", ) -> str_: ... @overload def datetime_as_string( arr: _ArrayLikeDT64_co | _NestedSequence[dt.date], - unit: L["auto"] | _UnitKind | None = ..., - timezone: L["naive", "UTC", "local"] | dt.tzinfo = ..., - casting: _CastingKind = ..., + unit: L["auto"] | _UnitKind | None = None, + timezone: _TimezoneContext = "naive", + casting: _CastingKind = "same_kind", ) -> NDArray[str_]: ... 
@overload diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index 8f12e4a36d99..570688fe8d62 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -1,4 +1,4 @@ -from _typeshed import Incomplete +from _typeshed import Incomplete, SupportsLenAndGetItem from collections.abc import Sequence from typing import ( Any, @@ -14,11 +14,15 @@ from typing import ( from typing_extensions import TypeVar import numpy as np +from numpy import _CastingKind from numpy._core.multiarray import ravel_multi_index, unravel_index from numpy._typing import ( ArrayLike, + DTypeLike, NDArray, _AnyShape, + _ArrayLike, + _DTypeLike, _FiniteNestedSequence, _NestedSequence, _SupportsArray, @@ -149,13 +153,62 @@ class AxisConcatenator(Generic[_AxisT_co, _MatrixT_co, _NDMinT_co, _Trans1DT_co] def __getitem__(self, key: Incomplete, /) -> Incomplete: ... def __len__(self, /) -> L[0]: ... - # + # Keep in sync with _core.multiarray.concatenate + @staticmethod + @overload + def concatenate( + arrays: _ArrayLike[_ScalarT], + /, + axis: SupportsIndex | None = 0, + out: None = None, + *, + dtype: None = None, + casting: _CastingKind | None = "same_kind", + ) -> NDArray[_ScalarT]: ... + @staticmethod + @overload + def concatenate( + arrays: SupportsLenAndGetItem[ArrayLike], + /, + axis: SupportsIndex | None = 0, + out: None = None, + *, + dtype: _DTypeLike[_ScalarT], + casting: _CastingKind | None = "same_kind", + ) -> NDArray[_ScalarT]: ... + @staticmethod + @overload + def concatenate( + arrays: SupportsLenAndGetItem[ArrayLike], + /, + axis: SupportsIndex | None = 0, + out: None = None, + *, + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", + ) -> NDArray[Incomplete]: ... @staticmethod @overload - def concatenate(*a: ArrayLike, axis: SupportsIndex | None = 0, out: _ArrayT) -> _ArrayT: ... + def concatenate( + arrays: SupportsLenAndGetItem[ArrayLike], + /, + axis: SupportsIndex | None = 0, + *, + out: _ArrayT, + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", + ) -> _ArrayT: ... @staticmethod @overload - def concatenate(*a: ArrayLike, axis: SupportsIndex | None = 0, out: None = None) -> NDArray[Incomplete]: ... + def concatenate( + arrays: SupportsLenAndGetItem[ArrayLike], + /, + axis: SupportsIndex | None, + out: _ArrayT, + *, + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", + ) -> _ArrayT: ... @final class RClass(AxisConcatenator[L[0], L[False], L[1], L[-1]]): diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 84e9b0660daf..57959ded47bc 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3431,6 +3431,7 @@ def empty( @overload def empty_like( a: _MArrayT, + /, dtype: None = None, order: _OrderKACF = "K", subok: bool = True, @@ -3441,6 +3442,7 @@ def empty_like( @overload def empty_like( a: _ArrayLike[_ScalarT], + /, dtype: None = None, order: _OrderKACF = "K", subok: bool = True, @@ -3450,7 +3452,8 @@ def empty_like( ) -> _MaskedArray[_ScalarT]: ... @overload def empty_like( - a: object, + a: Incomplete, + /, dtype: _DTypeLike[_ScalarT], order: _OrderKACF = "K", subok: bool = True, @@ -3460,7 +3463,8 @@ def empty_like( ) -> _MaskedArray[_ScalarT]: ... 
@overload def empty_like( - a: object, + a: Incomplete, + /, dtype: DTypeLike | None = None, order: _OrderKACF = "K", subok: bool = True, diff --git a/numpy/typing/tests/data/fail/multiarray.pyi b/numpy/typing/tests/data/fail/multiarray.pyi index 1f9ef6894bad..51128dfbf6f7 100644 --- a/numpy/typing/tests/data/fail/multiarray.pyi +++ b/numpy/typing/tests/data/fail/multiarray.pyi @@ -26,10 +26,10 @@ np.copyto(AR_LIKE_f, AR_f8) # type: ignore[arg-type] np.putmask(AR_LIKE_f, [True, True, False], 1.5) # type: ignore[arg-type] np.packbits(AR_f8) # type: ignore[arg-type] -np.packbits(AR_u1, bitorder=">") # type: ignore[arg-type] +np.packbits(AR_u1, bitorder=">") # type: ignore[call-overload] np.unpackbits(AR_i8) # type: ignore[arg-type] -np.unpackbits(AR_u1, bitorder=">") # type: ignore[arg-type] +np.unpackbits(AR_u1, bitorder=">") # type: ignore[call-overload] np.shares_memory(1, 1, max_work=i8) # type: ignore[arg-type] np.may_share_memory(1, 1, max_work=i8) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/pass/multiarray.py b/numpy/typing/tests/data/pass/multiarray.py index 26cedfd77566..3a505590b5d3 100644 --- a/numpy/typing/tests/data/pass/multiarray.py +++ b/numpy/typing/tests/data/pass/multiarray.py @@ -70,7 +70,8 @@ np.unpackbits(AR_u1) np.shares_memory(1, 2) -np.shares_memory(AR_f8, AR_f8, max_work=1) +np.shares_memory(AR_f8, AR_f8, max_work=-1) np.may_share_memory(1, 2) -np.may_share_memory(AR_f8, AR_f8, max_work=1) +np.may_share_memory(AR_f8, AR_f8, max_work=0) +np.may_share_memory(AR_f8, AR_f8, max_work=-1) diff --git a/numpy/typing/tests/data/reveal/multiarray.pyi b/numpy/typing/tests/data/reveal/multiarray.pyi index 6ba3fcde632f..424f60df27e7 100644 --- a/numpy/typing/tests/data/reveal/multiarray.pyi +++ b/numpy/typing/tests/data/reveal/multiarray.pyi @@ -66,7 +66,7 @@ assert_type(np.inner(AR_f8, AR_i8), Any) assert_type(np.where([True, True, False]), tuple[npt.NDArray[np.intp], ...]) assert_type(np.where([True, True, False], 1, 0), npt.NDArray[Any]) -assert_type(np.lexsort([0, 1, 2]), Any) +assert_type(np.lexsort([0, 1, 2]), npt.NDArray[np.intp]) assert_type(np.can_cast(np.dtype("i8"), int), bool) assert_type(np.can_cast(AR_f8, "f8"), bool) @@ -94,16 +94,19 @@ assert_type(np.copyto(AR_f8, [1., 1.5, 1.6]), None) assert_type(np.putmask(AR_f8, [True, True, False], 1.5), None) -assert_type(np.packbits(AR_i8), npt.NDArray[np.uint8]) -assert_type(np.packbits(AR_u1), npt.NDArray[np.uint8]) +assert_type(np.packbits(AR_i8), np.ndarray[tuple[int], np.dtype[np.uint8]]) +assert_type(np.packbits(AR_u1), np.ndarray[tuple[int], np.dtype[np.uint8]]) +assert_type(np.packbits(AR_i8, axis=1), npt.NDArray[np.uint8]) +assert_type(np.packbits(AR_u1, axis=1), npt.NDArray[np.uint8]) -assert_type(np.unpackbits(AR_u1), npt.NDArray[np.uint8]) +assert_type(np.unpackbits(AR_u1), np.ndarray[tuple[int], np.dtype[np.uint8]]) +assert_type(np.unpackbits(AR_u1, axis=1), npt.NDArray[np.uint8]) assert_type(np.shares_memory(1, 2), bool) -assert_type(np.shares_memory(AR_f8, AR_f8, max_work=1), bool) +assert_type(np.shares_memory(AR_f8, AR_f8, max_work=-1), bool) assert_type(np.may_share_memory(1, 2), bool) -assert_type(np.may_share_memory(AR_f8, AR_f8, max_work=1), bool) +assert_type(np.may_share_memory(AR_f8, AR_f8, max_work=0), bool) assert_type(np.promote_types(np.int32, np.int64), np.dtype) assert_type(np.promote_types("f4", float), np.dtype) From 93864e12bff826b56c6d7d9a572ca07420f9b37d Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 4 Nov 2025 08:45:54 +0100 Subject: [PATCH 0799/1018] TST: Update 
`_convert2ma` test signature for `np.ma.empty_like` --- numpy/ma/tests/test_core.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index b978b25f5827..a082f8aa7450 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -5989,7 +5989,13 @@ def test_frommethod_signature(fn, signature): "fill_value=None, hardmask=False)" ), ), - (np.ma.empty_like, "(*args, **kwargs)"), + ( + np.ma.empty_like, + ( + "(prototype, /, dtype=None, order='K', subok=True, shape=None, *, " + "device=None)" + ), + ), (np.ma.squeeze, "(a, axis=None, *, fill_value=None, hardmask=False)"), ( np.ma.identity, From 91b4a3d326090da711a07ad4e8d403055a622a2b Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 4 Nov 2025 08:57:12 +0100 Subject: [PATCH 0800/1018] TYP: ignore a python-version-dependent mypy false-positive --- numpy/_core/multiarray.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index c1a5bb7567fe..ff22fc312a11 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -18,7 +18,7 @@ from typing import ( from typing_extensions import CapsuleType import numpy as np -from numpy import ( +from numpy import ( # type: ignore[attr-defined] # Python >=3.12 _AnyShapeT, _CastingKind, _CopyMode, From 7aa023f008969a43648aaf4c3e707349abc7a2f7 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 4 Nov 2025 23:57:32 +0100 Subject: [PATCH 0801/1018] BUG: ``broadcast`` runtime signatures (#30137) --- numpy/_core/_add_newdocs.py | 8 ++++++++ numpy/_core/tests/test_numeric.py | 13 +++++++++++++ 2 files changed, 21 insertions(+) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index bdb84d78bb27..afc5307eeacc 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -638,6 +638,9 @@ add_newdoc('numpy._core', 'broadcast', """ + broadcast(*arrays) + -- + Produce an object that mimics broadcasting. Parameters @@ -807,8 +810,13 @@ """)) +# methods + add_newdoc('numpy._core', 'broadcast', ('reset', """ + reset($self, /) + -- + reset() Reset the broadcasted result's iterator(s). 
diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 9af89c6ff391..485147500461 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -4205,6 +4205,19 @@ def test_shape_mismatch_error_message(self): r"arg 2 with shape \(2,\)"): np.broadcast([[1, 2, 3]], [[4], [5]], [6, 7]) + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") + def test_signatures(self): + sig_new = inspect.signature(np.broadcast) + assert len(sig_new.parameters) == 1 + assert "arrays" in sig_new.parameters + assert sig_new.parameters["arrays"].kind == inspect.Parameter.VAR_POSITIONAL + + sig_reset = inspect.signature(np.broadcast.reset) + assert len(sig_reset.parameters) == 1 + assert "self" in sig_reset.parameters + assert sig_reset.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY + class TestKeepdims: From 7388112cf643238dad828677455d4722a8382bec Mon Sep 17 00:00:00 2001 From: diego_atencia <53157128+alektebel@users.noreply.github.com> Date: Wed, 5 Nov 2025 10:14:19 +0100 Subject: [PATCH 0802/1018] DOC: Fix Returns section formatting in linalg.qr and linalg.svd (#30148) * DOC: Fix Returns section formatting in linalg.qr and linalg.svd Move initial return information from Returns section to Notes section to comply with numpydoc standards and fix rendering issues. Fixes #29779 [skip actions][skip azp][skip cirrus] Co-authored-by: Ross Barnowski Co-authored-by: Matti Picus --- numpy/linalg/_linalg.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index cfcaef13daf2..6884c9b7ef8d 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -995,9 +995,6 @@ def qr(a, mode='reduced'): Returns ------- - When mode is 'reduced' or 'complete', the result will be a namedtuple with - the attributes `Q` and `R`. - Q : ndarray of float or complex, optional A matrix with orthonormal columns. When mode = 'complete' the result is an orthogonal/unitary matrix depending on whether or not @@ -1026,6 +1023,9 @@ def qr(a, mode='reduced'): Notes ----- + When mode is 'reduced' or 'complete', the result will be a namedtuple with + the attributes ``Q`` and ``R``. + This is an interface to the LAPACK routines ``dgeqrf``, ``zgeqrf``, ``dorgqr``, and ``zungqr``. @@ -1696,9 +1696,6 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): Returns ------- - When `compute_uv` is True, the result is a namedtuple with the following - attribute names: - U : { (..., M, M), (..., M, K) } array Unitary array(s). The first ``a.ndim - 2`` dimensions have the same size as those of the input `a`. The size of the last two dimensions @@ -1726,6 +1723,9 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): Notes ----- + When `compute_uv` is True, the result is a namedtuple with the following + attribute names: `U`, `S`, and `Vh`. + The decomposition is performed using LAPACK routine ``_gesdd``. SVD is usually described for the factorization of a 2D matrix :math:`A`. 
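
As a quick sanity check of the namedtuple behaviour that the reworked
``qr`` and ``svd`` docstrings describe, a minimal doctest-style sketch
(illustrative only; it assumes a NumPy build in which both functions
return namedtuples, as the Notes sections above state):

    >>> import numpy as np
    >>> a = np.random.default_rng(0).standard_normal((3, 3))
    >>> res = np.linalg.qr(a, mode='reduced')  # namedtuple with Q and R
    >>> np.allclose(res.Q @ res.R, a)
    True
    >>> res = np.linalg.svd(a, compute_uv=True)  # namedtuple with U, S, Vh
    >>> np.allclose((res.U * res.S) @ res.Vh, a)
    True
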
From ee591622a397b361f7cf6a136fc32c03a4e2bea0 Mon Sep 17 00:00:00 2001
From: "T.Yamada"
Date: Wed, 5 Nov 2025 20:41:55 +0900
Subject: [PATCH 0803/1018] FIX: Do not show signature in git_version (#30149)

---
 numpy/_build_utils/gitversion.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/_build_utils/gitversion.py b/numpy/_build_utils/gitversion.py
index 7975dd9dba65..47dd71d1567b 100644
--- a/numpy/_build_utils/gitversion.py
+++ b/numpy/_build_utils/gitversion.py
@@ -28,7 +28,7 @@ def git_version(version):
     git_hash = ''
     try:
         p = subprocess.Popen(
-            ['git', 'log', '-1', '--format="%H %aI"'],
+            ['git', '-c', 'log.showSignature=false', 'log', '-1', '--format="%H %aI"'],
             stdout=subprocess.PIPE,
             stderr=subprocess.PIPE,
             cwd=os.path.dirname(__file__),

From 05eab252df4918ea06c06250cf7d6b054d89fb1c Mon Sep 17 00:00:00 2001
From: Michael Davidsaver
Date: Wed, 5 Nov 2025 11:26:19 -0800
Subject: [PATCH 0804/1018] BUG: decref on error in PyArray_NewFromDescr
 (#30152)

---
 numpy/_core/src/multiarray/ctors.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c
index 76aa2fca86e7..63434b6b5954 100644
--- a/numpy/_core/src/multiarray/ctors.c
+++ b/numpy/_core/src/multiarray/ctors.c
@@ -992,15 +992,16 @@ PyArray_NewFromDescr(
         int nd, npy_intp const *dims, npy_intp const *strides, void *data,
         int flags, PyObject *obj)
 {
-    if (subtype == NULL) {
+    if (descr == NULL) {
         PyErr_SetString(PyExc_ValueError,
-            "subtype is NULL in PyArray_NewFromDescr");
+            "descr is NULL in PyArray_NewFromDescr");
         return NULL;
     }
 
-    if (descr == NULL) {
+    if (subtype == NULL) {
         PyErr_SetString(PyExc_ValueError,
-            "descr is NULL in PyArray_NewFromDescr");
+            "subtype is NULL in PyArray_NewFromDescr");
+        Py_DECREF(descr);
         return NULL;
     }
 
From 83b118ea17a3e984853ef26eb4a3bfe73dcf977d Mon Sep 17 00:00:00 2001
From: Amelia Thurdekoos
Date: Wed, 5 Nov 2025 13:05:20 -0800
Subject: [PATCH 0805/1018] BUG: update requires to requirements in
 numpy.multiarray (see #30142)

---
 numpy/_core/src/multiarray/ctors.c | 14 +++++++-------
 numpy/_core/src/multiarray/ctors.h |  4 ++--
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c
index 76aa2fca86e7..407e02d48103 100644
--- a/numpy/_core/src/multiarray/ctors.c
+++ b/numpy/_core/src/multiarray/ctors.c
@@ -1793,7 +1793,7 @@ cleanup:;
  */
 NPY_NO_EXPORT PyObject *
 PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth,
-                     int max_depth, int requires, PyObject *context)
+                     int max_depth, int requirements, PyObject *context)
 {
     npy_dtype_info dt_info = {NULL, NULL};
 
@@ -1814,7 +1814,7 @@ PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth,
     }
 
     PyObject* ret = PyArray_CheckFromAny_int(
-        op, dt_info.descr, dt_info.dtype, min_depth, max_depth, requires,
+        op, dt_info.descr, dt_info.dtype, min_depth, max_depth, requirements,
         context);
 
     Py_XDECREF(dt_info.descr);
@@ -1829,11 +1829,11 @@ PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth,
 NPY_NO_EXPORT PyObject *
 PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr,
                          PyArray_DTypeMeta *in_DType, int min_depth,
-                         int max_depth, int requires, PyObject *context)
+                         int max_depth, int requirements, PyObject *context)
 {
     PyObject *obj;
     Py_XINCREF(in_descr);  /* take ownership as we may replace it */
-    if (requires & NPY_ARRAY_NOTSWAPPED) {
+    if (requirements & NPY_ARRAY_NOTSWAPPED) {
         if (!in_descr && PyArray_Check(op)) {
in_descr = PyArray_DESCR((PyArrayObject *)op); Py_INCREF(in_descr); @@ -1848,16 +1848,16 @@ PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr, int was_scalar; obj = PyArray_FromAny_int(op, in_descr, in_DType, min_depth, - max_depth, requires, context, &was_scalar); + max_depth, requirements, context, &was_scalar); Py_XDECREF(in_descr); if (obj == NULL) { return NULL; } - if ((requires & NPY_ARRAY_ELEMENTSTRIDES) + if ((requirements & NPY_ARRAY_ELEMENTSTRIDES) && !PyArray_ElementStrides(obj)) { PyObject *ret; - if (requires & NPY_ARRAY_ENSURENOCOPY) { + if (requirements & NPY_ARRAY_ENSURENOCOPY) { PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg); return NULL; } diff --git a/numpy/_core/src/multiarray/ctors.h b/numpy/_core/src/multiarray/ctors.h index 094589968b66..b7a60e0065e0 100644 --- a/numpy/_core/src/multiarray/ctors.h +++ b/numpy/_core/src/multiarray/ctors.h @@ -68,11 +68,11 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, NPY_NO_EXPORT PyObject * PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr, PyArray_DTypeMeta *in_DType, int min_depth, - int max_depth, int requires, PyObject *context); + int max_depth, int requirements, PyObject *context); NPY_NO_EXPORT PyObject * PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, - int max_depth, int requires, PyObject *context); + int max_depth, int requirements, PyObject *context); NPY_NO_EXPORT PyObject * PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags); From 7497f61ce72267a02e4de50026826af025f06426 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 5 Nov 2025 23:57:29 +0100 Subject: [PATCH 0806/1018] BUG, DOC: ``ndarray`` dunder method runtime signatures and missing docs --- numpy/_core/_add_newdocs.py | 145 +++++++++++++++++++++------ numpy/_core/src/multiarray/methods.c | 9 +- numpy/_core/tests/test_multiarray.py | 6 +- 3 files changed, 125 insertions(+), 35 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index afc5307eeacc..bfb3e09d195b 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -2315,8 +2315,10 @@ add_newdoc('numpy._core.multiarray', 'ndarray', """ - ndarray(shape, dtype=float, buffer=None, offset=0, - strides=None, order=None) + ndarray(shape, dtype=None, buffer=None, offset=0, strides=None, order=None) + -- + + ndarray(shape, dtype=float, buffer=None, offset=0, strides=None, order=None) An array object represents a multidimensional, homogeneous array of fixed-size items. An associated data-type object describes the @@ -2339,6 +2341,7 @@ Shape of created array. dtype : data-type, optional Any object that can be interpreted as a numpy data type. + Default is `numpy.float64`. buffer : object exposing buffer interface, optional Used to fill the array with data. offset : int, optional @@ -2456,21 +2459,6 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_struct__', """Array protocol: C-struct side.""")) -add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack__', - """ - a.__dlpack__(*, stream=None, max_version=None, dl_device=None, copy=None) - - DLPack Protocol: Part of the Array API. - - """)) - -add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack_device__', - """ - a.__dlpack_device__() - - DLPack Protocol: Part of the Array API. 
- - """)) add_newdoc('numpy._core.multiarray', 'ndarray', ('base', """ @@ -3084,28 +3072,50 @@ """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_wrap__', +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_function__', """ - __array_wrap__($self, array, context=None, return_scalar=True, /) + __array_function__($self, /, func, types, args, kwargs) -- - a.__array_wrap__(array[, context[, return_scalar]], /) + a.__array_function__(func, types, args, kwargs) - Returns a view of `array` with the same type as self. + See :ref:`NEP 18 ` and :ref:`NEP 35 ` for details. """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('__copy__', +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_namespace__', """ - __copy__($self, /) + __array_namespace__($self, /, *, api_version=None) -- - a.__copy__() + a.__array_namespace__(*, api_version=None) - Used if :func:`copy.copy` is called on an array. Returns a copy of the array. + For Array API compatibility. + + """)) - Equivalent to ``a.copy(order='K')``. + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_ufunc__', + """ + __array_ufunc__($self, ufunc, method, /, *inputs, **kwargs) + -- + + a.__array_ufunc__(ufunc, method, /, *inputs, **kwargs) + + See :ref:`NEP 13 ` for details. + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_wrap__', + """ + __array_wrap__($self, array, context=None, return_scalar=True, /) + -- + + a.__array_wrap__(array[, context[, return_scalar]], /) + + Returns a view of `array` with the same type as self. """)) @@ -3115,7 +3125,7 @@ __class_getitem__($cls, item, /) -- - a.__class_getitem__(item, /) + ndarray[shape, dtype] Return a parametrized wrapper around the `~numpy.ndarray` type. @@ -3128,11 +3138,10 @@ Examples -------- - >>> from typing import Any >>> import numpy as np - >>> np.ndarray[Any, np.dtype[np.uint8]] - numpy.ndarray[typing.Any, numpy.dtype[numpy.uint8]] + >>> np.ndarray[tuple[int], np.dtype[np.uint8]] + numpy.ndarray[tuple[int], numpy.dtype[numpy.uint8]] See Also -------- @@ -3143,6 +3152,20 @@ """)) +add_newdoc('numpy._core.multiarray', 'ndarray', ('__copy__', + """ + __copy__($self, /) + -- + + a.__copy__() + + Used if :func:`copy.copy` is called on an array. Returns a copy of the array. + + Equivalent to ``a.copy(order='K')``. + + """)) + + add_newdoc('numpy._core.multiarray', 'ndarray', ('__deepcopy__', """ __deepcopy__($self, memo, /) @@ -3155,6 +3178,31 @@ """)) +add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack__', + """ + __dlpack__($self, /, *, stream=None, max_version=None, dl_device=None, copy=None) + -- + + a.__dlpack__(*, stream=None, max_version=None, dl_device=None, copy=None) + + Exports the array for consumption by ``from_dlpack()`` as a DLPack capsule. + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack_device__', + """ + __dlpack_device__($self, /) + -- + + a.__dlpack_device__() + + Returns device type (``1``) and device ID (``0``) in DLPack format. + Meant for use within ``from_dlpack()``. + + """)) + + add_newdoc('numpy._core.multiarray', 'ndarray', ('__reduce__', """ __reduce__($self, /) @@ -3167,6 +3215,18 @@ """)) +add_newdoc('numpy._core.multiarray', 'ndarray', ('__reduce_ex__', + """ + __reduce_ex__($self, protocol, /) + -- + + a.__reduce_ex__(protocol, /) + + For pickling. 
+ + """)) + + add_newdoc('numpy._core.multiarray', 'ndarray', ('__setstate__', """ __setstate__($self, state, /) @@ -4576,6 +4636,31 @@ """)) +add_newdoc('numpy._core.multiarray', 'ndarray', ('to_device', + """ + to_device($self, device, /, *, stream=None) + -- + + a.to_device(device, /, *, stream=None) + + For Array API compatibility. Since NumPy only supports CPU arrays, this + method is a no-op that returns the same array. + + Parameters + ---------- + device : "cpu" + Must be ``"cpu"``. + stream : None, optional + Currently unsupported. + + Returns + ------- + out : Self + Returns the same array. + + """)) + + add_newdoc('numpy._core.multiarray', 'ndarray', ('tofile', """ tofile($self, fid, /, sep='', format='%s') diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index c856fc52742d..26b5a6e179e0 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -2899,7 +2899,8 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { /* for the sys module */ {"__sizeof__", (PyCFunction) array_sizeof, - METH_NOARGS, NULL}, + METH_NOARGS, + "__sizeof__($self, /)\n--\n\nSize in memory."}, /* for the copy module */ {"__copy__", @@ -2928,11 +2929,13 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { {"__complex__", (PyCFunction) array_complex, - METH_VARARGS, NULL}, + METH_VARARGS, + "__complex__($self, /)\n--\n\ncomplex(self)"}, {"__format__", (PyCFunction) array_format, - METH_VARARGS, NULL}, + METH_VARARGS, + "__format__($self, spec, /)\n--\n\nformat(self[, spec])"}, {"__class_getitem__", (PyCFunction)array_class_getitem, diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index d62c2018292c..1b221356512c 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -10880,8 +10880,9 @@ def test_array_dunder_array_preserves_dtype_on_none(dtype): @pytest.mark.parametrize( "methodname", [ - "__array__", "__array_finalize__", "__array_wrap__", - "__copy__", "__deepcopy__", "__reduce__", "__setstate__", + "__array__", "__array_finalize__", "__array_function__", "__array_ufunc__", + "__array_wrap__", "__complex__", "__copy__", "__deepcopy__", + "__reduce__", "__reduce_ex__", "__setstate__", "all", "any", "argmax", "argmin", "argsort", "argpartition", "astype", "byteswap", "choose", "clip", "compress", "conj", "conjugate", "copy", "cumprod", "cumsum", "diagonal", "dot", "dump", "dumps", "fill", "flatten", @@ -10889,6 +10890,7 @@ def test_array_dunder_array_preserves_dtype_on_none(dtype): "repeat", "reshape", "resize", "round", "searchsorted", "setfield", "setflags", "sort", "partition", "squeeze", "std", "sum", "swapaxes", "take", "tofile", "tolist", "tobytes", "trace", "transpose", "var", "view", + "__array_namespace__", "__dlpack__", "__dlpack_device__", "to_device", ], ) def test_array_method_signatures(methodname: str): From 641001177b485bac38cf048615d1802caaf07ba3 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 6 Nov 2025 00:21:20 +0100 Subject: [PATCH 0807/1018] DOC: Remove nonexistent ``order`` parameter docs of ``ma.asanyarray`` (#30057) --- numpy/ma/core.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 95bec501dc73..f60b42663e00 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -8629,9 +8629,6 @@ def asanyarray(a, dtype=None): Input data, in any form that can be converted to an array. dtype : dtype, optional By default, the data-type is inferred from the input data. 
- order : {'C', 'F'}, optional - Whether to use row-major ('C') or column-major ('FORTRAN') memory - representation. Default is 'C'. Returns ------- From 3a35b7c56d0d7c5ef5b7806c6bde336a58857bbe Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 6 Nov 2025 00:28:28 +0100 Subject: [PATCH 0808/1018] BUG: array construction function runtime signatures (#30138) --- numpy/_core/_add_newdocs.py | 45 ++++++++++++++++++++-------- numpy/_core/tests/test_multiarray.py | 26 ++++++++++++++++ 2 files changed, 58 insertions(+), 13 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index afc5307eeacc..4fb3c645cd85 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -855,6 +855,19 @@ add_newdoc('numpy._core.multiarray', 'array', """ + array( + object, + dtype=None, + *, + copy=True, + order='K', + subok=False, + ndmin=0, + ndmax=None, + like=None, + ) + -- + array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0, ndmax=None, like=None) @@ -933,7 +946,7 @@ ones : Return a new array setting values to one. zeros : Return a new array setting values to zero. full : Return a new array of given shape filled with value. - copy: Return an array copy of the given object. + copy : Return an array copy of the given object. Notes @@ -1004,6 +1017,9 @@ add_newdoc('numpy._core.multiarray', 'asarray', """ + asarray(a, dtype=None, order=None, *, device=None, copy=None, like=None) + -- + asarray(a, dtype=None, order=None, *, device=None, copy=None, like=None) Convert the input to an array. @@ -1053,12 +1069,10 @@ -------- asanyarray : Similar function which passes through subclasses. ascontiguousarray : Convert input to a contiguous array. - asfortranarray : Convert input to an ndarray with column-major - memory order. + asfortranarray : Convert input to an ndarray with column-major memory order. asarray_chkfinite : Similar function which checks input for NaNs and Infs. fromiter : Create an array from an iterator. - fromfunction : Construct an array by executing a function on grid - positions. + fromfunction : Construct an array by executing a function on grid positions. Examples -------- @@ -1097,6 +1111,9 @@ add_newdoc('numpy._core.multiarray', 'asanyarray', """ + asanyarray(a, dtype=None, order=None, *, device=None, copy=None, like=None) + -- + asanyarray(a, dtype=None, order=None, *, device=None, copy=None, like=None) Convert the input to an ndarray, but pass ndarray subclasses through. @@ -1148,13 +1165,10 @@ -------- asarray : Similar function which always returns ndarrays. ascontiguousarray : Convert input to a contiguous array. - asfortranarray : Convert input to an ndarray with column-major - memory order. - asarray_chkfinite : Similar function which checks input for NaNs and - Infs. + asfortranarray : Convert input to an ndarray with column-major memory order. + asarray_chkfinite : Similar function which checks input for NaNs and Infs. fromiter : Create an array from an iterator. - fromfunction : Construct an array by executing a function on grid - positions. + fromfunction : Construct an array by executing a function on grid positions. Examples -------- @@ -1175,6 +1189,9 @@ add_newdoc('numpy._core.multiarray', 'ascontiguousarray', """ + ascontiguousarray(a, dtype=None, *, like=None) + -- + ascontiguousarray(a, dtype=None, *, like=None) Return a contiguous array (ndim >= 1) in memory (C order). @@ -1197,8 +1214,7 @@ See Also -------- - asfortranarray : Convert input to an ndarray with column-major - memory order. 
+ asfortranarray : Convert input to an ndarray with column-major memory order. require : Return an ndarray that satisfies requirements. ndarray.flags : Information about the memory layout of the array. @@ -1238,6 +1254,9 @@ add_newdoc('numpy._core.multiarray', 'asfortranarray', """ + asfortranarray(a, dtype=None, *, like=None) + -- + asfortranarray(a, dtype=None, *, like=None) Return an array (ndim >= 1) laid out in Fortran order in memory. diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index d62c2018292c..87c2a598437e 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -591,6 +591,32 @@ def test_array_as_keyword(self, func): else: func(a=3) + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") + @pytest.mark.parametrize("func", + [np.array, + np.asarray, + np.asanyarray, + np.ascontiguousarray, + np.asfortranarray]) + def test_array_signature(self, func): + sig = inspect.signature(func) + + assert len(sig.parameters) >= 3 + + arg0 = "object" if func is np.array else "a" + assert arg0 in sig.parameters + assert sig.parameters[arg0].default is inspect.Parameter.empty + assert sig.parameters[arg0].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + + assert "dtype" in sig.parameters + assert sig.parameters["dtype"].default is None + assert sig.parameters["dtype"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + + assert "like" in sig.parameters + assert sig.parameters["like"].default is None + assert sig.parameters["like"].kind is inspect.Parameter.KEYWORD_ONLY + class TestAssignment: def test_assignment_broadcasting(self): From a90a3423c221f2b0899fc66e352d82bc4385c5af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 6 Nov 2025 07:44:10 +0100 Subject: [PATCH 0809/1018] TYP: fix an invalid default value for `array`'s `ndmax` parameter --- numpy/_core/_add_newdocs.py | 9 +++++---- numpy/_core/multiarray.pyi | 10 +++++----- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index e5939be54bcc..99700c61ed65 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -863,13 +863,13 @@ order='K', subok=False, ndmin=0, - ndmax=None, + ndmax=0, like=None, ) -- array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0, - ndmax=None, like=None) + ndmax=0, like=None) Create an array. @@ -920,8 +920,9 @@ needed to meet this requirement. ndmax : int, optional Specifies the maximum number of dimensions to create when inferring - shape from nested sequences. By default, NumPy recurses through all - nesting levels (up to the compile-time constant ``NPY_MAXDIMS``). + shape from nested sequences. By default (ndmax=0), NumPy recurses + through all nesting levels (up to the compile-time constant + ``NPY_MAXDIMS``). Setting ``ndmax`` stops recursion at the specified depth, preserving deeper nested structures as objects instead of promoting them to higher-dimensional arrays. In this case, ``dtype=object`` is required. diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index ff22fc312a11..35acc4dad5dd 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -464,7 +464,7 @@ def array( order: _OrderKACF = "K", subok: L[True], ndmin: int = 0, - ndmax: int | None = None, + ndmax: int = 0, like: _SupportsArrayFunc | None = None, ) -> _ArrayT: ... 
@overload @@ -476,7 +476,7 @@ def array( order: _OrderKACF = "K", subok: L[True], ndmin: L[0] = 0, - ndmax: int | None = None, + ndmax: int = 0, like: _SupportsArrayFunc | None = None, ) -> _ArrayT: ... @overload @@ -488,7 +488,7 @@ def array( order: _OrderKACF = "K", subok: bool = False, ndmin: int = 0, - ndmax: int | None = None, + ndmax: int = 0, like: _SupportsArrayFunc | None = None, ) -> NDArray[_ScalarT]: ... @overload @@ -500,7 +500,7 @@ def array( order: _OrderKACF = "K", subok: bool = False, ndmin: int = 0, - ndmax: int | None = None, + ndmax: int = 0, like: _SupportsArrayFunc | None = None, ) -> NDArray[_ScalarT]: ... @overload @@ -512,7 +512,7 @@ def array( order: _OrderKACF = "K", subok: bool = False, ndmin: int = 0, - ndmax: int | None = None, + ndmax: int = 0, like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... From 32817ccf7812662e99508e82ce168c8ffc55ac2c Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 4 Nov 2025 21:07:56 +0100 Subject: [PATCH 0810/1018] BUG: ``_core.multiarray`` function runtime signatures --- numpy/_core/_add_newdocs.py | 61 +++++++++++-- numpy/_core/tests/test_multiarray.py | 127 ++++++++++++++++----------- 2 files changed, 127 insertions(+), 61 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index e5939be54bcc..46450e14be40 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -570,6 +570,18 @@ add_newdoc('numpy._core', 'nested_iters', """ + nested_iters( + op, + axes, + flags=None, + op_flags=None, + op_dtypes=None, + order='K', + casting='safe', + buffersize=0, + ) + -- + nested_iters(op, axes, flags=None, op_flags=None, op_dtypes=None, order='K', casting='safe', buffersize=0) @@ -1469,6 +1481,9 @@ add_newdoc('numpy._core.multiarray', 'fromstring', """ + fromstring(string, dtype=None, count=-1, *, sep, like=None) + -- + fromstring(string, dtype=float, count=-1, *, sep, like=None) A new 1-D array initialized from text data in a string. @@ -1478,7 +1493,7 @@ string : str A string containing the data. dtype : data-type, optional - The data type of the array; default: float. For binary input data, + The data type of the array; default: `numpy.float64`. For binary input data, the data must be in exactly this format. Most builtin numeric types are supported and extension types may be supported. count : int, optional @@ -1529,6 +1544,9 @@ add_newdoc('numpy._core.multiarray', 'compare_chararrays', """ + compare_chararrays(a1, a2, cmp, rstrip) + -- + compare_chararrays(a1, a2, cmp, rstrip) Performs element-wise comparison of two string arrays using the @@ -1540,20 +1558,20 @@ Arrays to be compared. cmp : {"<", "<=", "==", ">=", ">", "!="} Type of comparison. - rstrip : Boolean - If True, the spaces at the end of Strings are removed before the comparison. + rstrip : bool + If True, the spaces at the end of strings are removed before the comparison. Returns ------- out : ndarray - The output array of type Boolean with the same shape as a and b. + The output array of type `numpy.bool` with the same shape as `a1` and `a2`. Raises ------ ValueError If `cmp` is not valid. TypeError - If at least one of `a` or `b` is a non-string array + If at least one of `a1` or `a2` is a non-string array Examples -------- @@ -1567,6 +1585,9 @@ add_newdoc('numpy._core.multiarray', 'fromiter', """ + fromiter(iter, dtype, count=-1, *, like=None) + -- + fromiter(iter, dtype, count=-1, *, like=None) Create a new 1-dimensional array from an iterable object. 
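The bare ``--`` line inserted after each signature follows CPython's convention for C-level callables: when a function's ``tp_doc`` begins with ``name(...)`` followed by a line containing only ``--``, the interpreter splits it into ``__text_signature__`` and ``__doc__``, which is what lets ``inspect.signature`` work on these builtins. A quick way to observe the effect (output shown is illustrative):

    import inspect
    import numpy as np

    # The part before the "--" line becomes the text signature ...
    print(np.fromstring.__text_signature__)
    # (string, dtype=None, count=-1, *, sep, like=None)

    # ... which inspect.signature() parses at runtime.
    print(inspect.signature(np.fromstring))
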
@@ -1622,6 +1643,9 @@ add_newdoc('numpy._core.multiarray', 'fromfile', """ + fromfile(file, dtype=None, count=-1, sep='', offset=0, *, like=None) + -- + fromfile(file, dtype=float, count=-1, sep='', offset=0, *, like=None) Construct an array from data in a text or binary file. @@ -1707,6 +1731,9 @@ add_newdoc('numpy._core.multiarray', 'frombuffer', """ + frombuffer(buffer, dtype=None, count=-1, offset=0, *, like=None) + -- + frombuffer(buffer, dtype=float, count=-1, offset=0, *, like=None) Interpret a buffer as a 1-dimensional array. @@ -1716,7 +1743,7 @@ buffer : buffer_like An object that exposes the buffer interface. dtype : data-type, optional - Data-type of the returned array; default: float. + Data-type of the returned array. Default is `numpy.float64`. count : int, optional Number of items to read. ``-1`` means all data in the buffer. offset : int, optional @@ -1767,6 +1794,9 @@ add_newdoc('numpy._core.multiarray', 'from_dlpack', """ + from_dlpack(x, /, *, device=None, copy=None) + -- + from_dlpack(x, /, *, device=None, copy=None) Create a NumPy array from an object implementing the ``__dlpack__`` @@ -1817,6 +1847,9 @@ add_newdoc('numpy._core.multiarray', 'arange', """ + arange(*args, device=None, like=None, **kwargs) + -- + arange([start,] stop[, step,], dtype=None, *, device=None, like=None) Return evenly spaced values within a given interval. @@ -1941,13 +1974,16 @@ add_newdoc('numpy._core.multiarray', '_reconstruct', """_reconstruct(subtype, shape, dtype) - Construct an empty array. Used by Pickles. + Construct an empty array. Used by Pickle. """) add_newdoc('numpy._core.multiarray', 'promote_types', """ - promote_types(type1, type2) + promote_types(type1, type2, /) + -- + + promote_types(type1, type2, /) Returns the data type with the smallest size and smallest scalar kind to which both ``type1`` and ``type2`` may be safely cast. @@ -5064,6 +5100,9 @@ add_newdoc('numpy._core.umath', 'frompyfunc', """ + frompyfunc(func, /, nin, nout, **kwargs) + -- + frompyfunc(func, /, nin, nout, *[, identity]) Takes an arbitrary Python function and returns a NumPy ufunc. @@ -6874,6 +6913,9 @@ add_newdoc('numpy._core.multiarray', 'busdaycalendar', """ + busdaycalendar(weekmask='1111100', holidays=None) + -- + busdaycalendar(weekmask='1111100', holidays=None) A business day calendar object that efficiently stores information @@ -6997,6 +7039,9 @@ add_newdoc('numpy._core.multiarray', 'datetime_data', """ + datetime_data(dtype, /) + -- + datetime_data(dtype, /) Get information about the step size of a date or time type. 
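With the docstrings above in place, the runtime signatures can be introspected without any help from the stub files, mirroring the assertions in the test diff that follows (a minimal check):

    import inspect
    import numpy as np

    # promote_types now exposes "promote_types(type1, type2, /)":
    sig = inspect.signature(np.promote_types)
    assert tuple(sig.parameters) == ("type1", "type2")
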
diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index c54a4e60ba9a..ace409e2083c 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -10902,65 +10902,86 @@ def test_array_dunder_array_preserves_dtype_on_none(dtype): @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") -@pytest.mark.parametrize( - "methodname", - [ - "__array__", "__array_finalize__", "__array_function__", "__array_ufunc__", - "__array_wrap__", "__complex__", "__copy__", "__deepcopy__", - "__reduce__", "__reduce_ex__", "__setstate__", - "all", "any", "argmax", "argmin", "argsort", "argpartition", "astype", - "byteswap", "choose", "clip", "compress", "conj", "conjugate", "copy", - "cumprod", "cumsum", "diagonal", "dot", "dump", "dumps", "fill", "flatten", - "getfield", "item", "max", "mean", "min", "nonzero", "prod", "put", "ravel", - "repeat", "reshape", "resize", "round", "searchsorted", "setfield", "setflags", - "sort", "partition", "squeeze", "std", "sum", "swapaxes", "take", "tofile", - "tolist", "tobytes", "trace", "transpose", "var", "view", - "__array_namespace__", "__dlpack__", "__dlpack_device__", "to_device", +@pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") +class TestTextSignatures: + @pytest.mark.parametrize( + "methodname", + [ + "__array__", "__array_finalize__", "__array_function__", "__array_ufunc__", + "__array_wrap__", "__complex__", "__copy__", "__deepcopy__", + "__reduce__", "__reduce_ex__", "__setstate__", + "all", "any", "argmax", "argmin", "argsort", "argpartition", "astype", + "byteswap", "choose", "clip", "compress", "conj", "conjugate", "copy", + "cumprod", "cumsum", "diagonal", "dot", "dump", "dumps", "fill", "flatten", + "getfield", "item", "max", "mean", "min", "nonzero", "prod", "put", "ravel", + "repeat", "reshape", "resize", "round", "searchsorted", "setfield", + "setflags", "sort", "partition", "squeeze", "std", "sum", "swapaxes", + "take", "tofile", "tolist", "tobytes", "trace", "transpose", "var", "view", + "__array_namespace__", "__dlpack__", "__dlpack_device__", "to_device", ], -) -def test_array_method_signatures(methodname: str): - method = getattr(np.ndarray, methodname) - assert callable(method) - - try: - sig = inspect.signature(method) - except ValueError as e: - pytest.fail(f"Could not get signature for np.ndarray.{methodname}: {e}") + ) + def test_array_method_signatures(self, methodname: str): + method = getattr(np.ndarray, methodname) + assert callable(method) - assert "self" in sig.parameters - assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY + try: + sig = inspect.signature(method) + except ValueError as e: + pytest.fail(f"Could not get signature for np.ndarray.{methodname}: {e}") + assert "self" in sig.parameters + assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY -@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") -@pytest.mark.parametrize("func", [np.empty_like, np.concatenate]) -def test_c_func_dispatcher_text_signature(func): - text_sig = func.__wrapped__.__text_signature__ - assert text_sig.startswith("(") and text_sig.endswith(")") + @pytest.mark.parametrize("func", [np.empty_like, np.concatenate]) + def test_c_func_dispatcher_text_signature(self, func): + text_sig = func.__wrapped__.__text_signature__ + assert text_sig.startswith("(") and 
text_sig.endswith(")") - sig = inspect.signature(func) - assert sig == inspect.signature(func.__wrapped__) - assert not hasattr(func, "__signature__") + sig = inspect.signature(func) + assert sig == inspect.signature(func.__wrapped__) + assert not hasattr(func, "__signature__") - with pytest.raises(ValueError): - inspect.signature(func, follow_wrapped=False) + with pytest.raises(ValueError): + inspect.signature(func, follow_wrapped=False) + @pytest.mark.parametrize( + "func", + [ + np.inner, np.where, np.lexsort, np.can_cast, np.min_scalar_type, + np.result_type, np.dot, np.vdot, np.bincount, np.ravel_multi_index, + np.unravel_index, np.copyto, np.putmask, np.packbits, np.unpackbits, + np.shares_memory, np.may_share_memory, np.is_busday, np.busday_offset, + np.busday_count, np.datetime_as_string, + ], + ) + def test_c_func_dispatcher_signature(self, func): + sig = inspect.signature(func) -@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") -@pytest.mark.parametrize( - "func", - [ - np.inner, np.where, np.lexsort, np.can_cast, np.min_scalar_type, np.result_type, - np.dot, np.vdot, np.bincount, np.ravel_multi_index, np.unravel_index, np.copyto, - np.putmask, np.packbits, np.unpackbits, np.shares_memory, np.may_share_memory, - np.is_busday, np.busday_offset, np.busday_count, np.datetime_as_string, - ], -) -def test_c_func_dispatcher_signature(func): - sig = inspect.signature(func) + assert hasattr(func, "__signature__") + assert sig == func.__signature__ + assert sig.parameters + + @pytest.mark.parametrize(("func", "parameter_names"), [ + (np.arange, ("args", "device", "like", "kwargs")), + (np.busdaycalendar, ("weekmask", "holidays")), + (np.char.compare_chararrays, ("a1", "a2", "cmp", "rstrip")), + (np.datetime_data, ("dtype",)), + (np.from_dlpack, ("x", "device", "copy")), + (np.frombuffer, ("buffer", "dtype", "count", "offset", "like")), + (np.fromfile, ("file", "dtype", "count", "sep", "offset", "like")), + (np.fromiter, ("iter", "dtype", "count", "like")), + (np.frompyfunc, ("func", "nin", "nout", "kwargs")), + (np.fromstring, ("string", "dtype", "count", "sep", "like")), + (np.nested_iters, ( + "op", "axes", "flags", "op_flags", "op_dtypes", "order", "casting", + "buffersize", + )), + (np.promote_types, ("type1", "type2")), + ]) + def test_add_newdoc_function_signature(self, func, parameter_names): + assert not hasattr(func, "__signature__") + assert getattr(func, "__text_signature__", None) - assert hasattr(func, "__signature__") - assert sig == func.__signature__ - assert sig.parameters + sig = inspect.signature(func) + assert sig.parameters + assert tuple(sig.parameters) == parameter_names From 49de145702ea29703259266355d39ea02315da6c Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 5 Nov 2025 00:14:07 +0100 Subject: [PATCH 0811/1018] BUG: refine ``arange`` runtime signature for array-api compatibility --- numpy/_core/_add_newdocs.py | 2 +- numpy/_core/tests/test_multiarray.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 46450e14be40..d997998b28c2 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -1847,7 +1847,7 @@ add_newdoc('numpy._core.multiarray', 'arange', """ - arange(*args, device=None, like=None, **kwargs) + arange(start_or_stop, /, stop=None, step=1, *, dtype=None, device=None, like=None) -- arange([start,] stop[, step,], dtype=None, *, device=None, like=None) diff --git 
a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index ace409e2083c..2193887cb43f 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -10962,7 +10962,7 @@ def test_c_func_dispatcher_signature(self, func): assert sig.parameters @pytest.mark.parametrize(("func", "parameter_names"), [ - (np.arange, ("args", "device", "like", "kwargs")), + (np.arange, ("start_or_stop", "stop", "step", "dtype", "device", "like")), (np.busdaycalendar, ("weekmask", "holidays")), (np.char.compare_chararrays, ("a1", "a2", "cmp", "rstrip")), (np.datetime_data, ("dtype",)), From 25d74021d3b0f737c85aae9b512bbcbfc2ab6e79 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 5 Nov 2025 00:22:15 +0100 Subject: [PATCH 0812/1018] TYP: update and improve the ``arange`` stubs --- numpy/_core/multiarray.pyi | 174 +++++++++--------- numpy/ma/core.pyi | 103 +++++++---- numpy/typing/tests/data/pass/numeric.py | 15 +- .../tests/data/reveal/array_constructors.pyi | 24 ++- 4 files changed, 168 insertions(+), 148 deletions(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index ff22fc312a11..8c3d5dcec1af 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -957,110 +957,102 @@ def frombuffer( like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... -@overload -def arange( # type: ignore[misc] - stop: _IntLike_co, - /, *, - dtype: None = None, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[signedinteger]: ... -@overload -def arange( # type: ignore[misc] - start: _IntLike_co, - stop: _IntLike_co, - step: _IntLike_co = ..., - dtype: None = None, - *, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[signedinteger]: ... -@overload -def arange( # type: ignore[misc] - stop: _FloatLike_co, - /, *, - dtype: None = None, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[floating]: ... -@overload -def arange( # type: ignore[misc] - start: _FloatLike_co, - stop: _FloatLike_co, - step: _FloatLike_co = ..., - dtype: None = None, +_ArangeScalar: TypeAlias = np.integer | np.floating | np.datetime64 | np.timedelta64 +_ArangeScalarT = TypeVar("_ArangeScalarT", bound=_ArangeScalar) + +# keep in sync with ma.core.arange +# NOTE: The `float64 | Any` return types needed to avoid incompatible overlapping overloads +@overload # dtype= +def arange( + start_or_stop: _ArangeScalar | float, + /, + stop: _ArangeScalar | float | None = None, + step: _ArangeScalar | float = 1, *, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[floating]: ... -@overload + dtype: _DTypeLike[_ArangeScalarT], + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[_ArangeScalarT]: ... +@overload # (int-like, int-like?, int-like?) def arange( - stop: _TD64Like_co, - /, *, - dtype: None = None, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[timedelta64]: ... -@overload + start_or_stop: _IntLike_co, + /, + stop: _IntLike_co | None = None, + step: _IntLike_co = 1, + *, + dtype: type[int] | _DTypeLike[np.int_] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.int_]: ... +@overload # (float, float-like?, float-like?) 
def arange( - start: _TD64Like_co, - stop: _TD64Like_co, - step: _TD64Like_co = ..., - dtype: None = None, + start_or_stop: float | floating, + /, + stop: _FloatLike_co | None = None, + step: _FloatLike_co = 1, *, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[timedelta64]: ... -@overload -def arange( # both start and stop must always be specified for datetime64 - start: datetime64, - stop: datetime64, - step: datetime64 = ..., - dtype: None = None, + dtype: type[float] | _DTypeLike[np.float64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.float64 | Any]: ... +@overload # (float-like, float, float-like?) +def arange( + start_or_stop: _FloatLike_co, + /, + stop: float | floating, + step: _FloatLike_co = 1, *, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[datetime64]: ... -@overload + dtype: type[float] | _DTypeLike[np.float64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.float64 | Any]: ... +@overload # (timedelta, timedelta-like?, timedelta-like?) def arange( - stop: Any, - /, *, - dtype: _DTypeLike[_ScalarT], - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[_ScalarT]: ... -@overload + start_or_stop: np.timedelta64, + /, + stop: _TD64Like_co | None = None, + step: _TD64Like_co = 1, + *, + dtype: _DTypeLike[np.timedelta64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.timedelta64[Incomplete]]: ... +@overload # (timedelta-like, timedelta, timedelta-like?) def arange( - start: Any, - stop: Any, - step: Any = ..., - dtype: _DTypeLike[_ScalarT] = ..., + start_or_stop: _TD64Like_co, + /, + stop: np.timedelta64, + step: _TD64Like_co = 1, *, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[_ScalarT]: ... -@overload + dtype: _DTypeLike[np.timedelta64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.timedelta64[Incomplete]]: ... +@overload # (datetime, datetime, timedelta-like) (requires both start and stop) def arange( - stop: Any, /, + start_or_stop: np.datetime64, + /, + stop: np.datetime64, + step: _TD64Like_co = 1, *, - dtype: DTypeLike | None = ..., - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[Any]: ... -@overload + dtype: _DTypeLike[np.datetime64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.datetime64[Incomplete]]: ... +@overload # dtype= def arange( - start: Any, - stop: Any, - step: Any = ..., - dtype: DTypeLike | None = ..., + start_or_stop: _ArangeScalar | float, + /, + stop: _ArangeScalar | float | None = None, + step: _ArangeScalar | float = 1, *, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[Any]: ... + dtype: DTypeLike | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[Incomplete]: ... -def datetime_data( - dtype: str | _DTypeLike[datetime64] | _DTypeLike[timedelta64], /, -) -> tuple[str, int]: ... +# +def datetime_data(dtype: str | _DTypeLike[datetime64 | timedelta64], /) -> tuple[str, int]: ... 
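In practice the overloads above encode the usual dtype-inference rules of ``arange``; the ``assert_type`` cases later in this patch pin down exactly these combinations (runtime sketch, inferred dtypes as annotated):

    import numpy as np

    np.arange(10)        # 1-D array of np.int_
    np.arange(10.0)      # 1-D array of np.float64
    np.arange(np.timedelta64(0), np.timedelta64(10))  # timedelta64
    np.arange(np.datetime64("2025-01-01"),            # datetime64 requires
              np.datetime64("2025-01-10"))            # both start and stop
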
# The datetime functions perform unsafe casts to `datetime64[D]`, # so a lot of different argument types are allowed here diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 57959ded47bc..16cdc6b0854b 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -98,6 +98,7 @@ from numpy._typing import ( _DTypeLike, _DTypeLikeBool, _DTypeLikeVoid, + _FloatLike_co, _IntLike_co, _NestedSequence, _ScalarLike_co, @@ -105,6 +106,7 @@ from numpy._typing import ( _ShapeLike, _SupportsArrayFunc, _SupportsDType, + _TD64Like_co, ) from numpy._typing._dtype_like import _VoidDTypeLike @@ -315,6 +317,7 @@ _Ignored: TypeAlias = object # A subset of `MaskedArray` that can be parametrized w.r.t. `np.generic` _MaskedArray: TypeAlias = MaskedArray[_AnyShape, dtype[_ScalarT]] +_Masked1D: TypeAlias = MaskedArray[tuple[int], dtype[_ScalarT]] _MaskedArrayUInt_co: TypeAlias = _MaskedArray[unsignedinteger | np.bool] _MaskedArrayInt_co: TypeAlias = _MaskedArray[integer | np.bool] @@ -3142,85 +3145,111 @@ def _convert2ma( params: dict[str, Any] | None = None, ) -> Callable[..., Any]: ... -# keep roughly in sync with `_core.multiarray.arange` -@overload # int, dtype=None (default) +# keep in sync with `_core.multiarray.arange` +@overload # dtype= def arange( - start: int, - stop: int = ..., - step: int = ..., + start_or_stop: _ArangeScalar | float, /, - dtype: None = None, + stop: _ArangeScalar | float | None = None, + step: _ArangeScalar | float = 1, *, + dtype: _DTypeLike[_ArangeScalarT], device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[tuple[int], np.dtype[np.int_]]: ... -@overload # float, dtype=None (default) +) -> _Masked1D[_ArangeScalarT]: ... +@overload # (int-like, int-like?, int-like?) def arange( - start: float, - stop: float = ..., - step: float = ..., + start_or_stop: _IntLike_co, /, - dtype: None = None, + stop: _IntLike_co | None = None, + step: _IntLike_co = 1, *, + dtype: type[int] | _DTypeLike[np.int_] | None = None, device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[tuple[int], np.dtype[np.float64 | Any]]: ... -@overload # integer | floating | datetime64 | timedelta64, dtype=None (default) +) -> _Masked1D[np.int_]: ... +@overload # (float, float-like?, float-like?) def arange( - start: _ArangeScalarT, - stop: _ArangeScalarT = ..., - step: _ArangeScalarT = ..., + start_or_stop: float | floating, /, - dtype: None = None, + stop: _FloatLike_co | None = None, + step: _FloatLike_co = 1, *, + dtype: type[float] | _DTypeLike[np.float64] | None = None, device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[tuple[int], np.dtype[_ArangeScalarT]]: ... -@overload # dtype: known dtype-like (positional) +) -> _Masked1D[np.float64 | Any]: ... +@overload # (float-like, float, float-like?) def arange( - start: _ArangeScalar, - stop: _ArangeScalar, - step: _ArangeScalar, + start_or_stop: _FloatLike_co, /, - dtype: _DTypeLike[_ScalarT], + stop: float | floating, + step: _FloatLike_co = 1, *, + dtype: type[float] | _DTypeLike[np.float64] | None = None, device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... 
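``np.ma.arange`` mirrors these overloads but returns a ``MaskedArray`` and, per the stubs, additionally accepts the ``fill_value`` and ``hardmask`` keywords. Minimal usage (sketch):

    import numpy as np

    m = np.ma.arange(5, dtype=np.float64)  # masked array [0., 1., 2., 3., 4.]
    m[2] = np.ma.masked                    # mask a single element
    print(m)                               # [0.0 1.0 -- 3.0 4.0]
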
-@overload # dtype: known dtype-like (keyword) +) -> _Masked1D[np.float64 | Any]: ... +@overload # (timedelta, timedelta-like?, timedelta-like?) def arange( - start: _ArangeScalar, - stop: _ArangeScalar = ..., - step: _ArangeScalar = ..., + start_or_stop: np.timedelta64, /, + stop: _TD64Like_co | None = None, + step: _TD64Like_co = 1, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[np.timedelta64] | None = None, device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... -@overload # dtype: unknown dtype +) -> _Masked1D[np.timedelta64[Incomplete]]: ... +@overload # (timedelta-like, timedelta, timedelta-like?) def arange( - start: _ArangeScalar, - stop: _ArangeScalar = ..., - step: _ArangeScalar = ..., + start_or_stop: _TD64Like_co, /, - dtype: DTypeLike | None = None, + stop: np.timedelta64, + step: _TD64Like_co = 1, *, + dtype: _DTypeLike[np.timedelta64] | None = None, device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[tuple[int]]: ... +) -> _Masked1D[np.timedelta64[Incomplete]]: ... +@overload # (datetime, datetime, timedelta-like) (requires both start and stop) +def arange( + start_or_stop: np.datetime64, + /, + stop: np.datetime64, + step: _TD64Like_co = 1, + *, + dtype: _DTypeLike[np.datetime64] | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.datetime64[Incomplete]]: ... +@overload # dtype= +def arange( + start_or_stop: _ArangeScalar | float, + /, + stop: _ArangeScalar | float | None = None, + step: _ArangeScalar | float = 1, + *, + dtype: DTypeLike | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[Incomplete]: ... # based on `_core.fromnumeric.clip` @overload diff --git a/numpy/typing/tests/data/pass/numeric.py b/numpy/typing/tests/data/pass/numeric.py index 4c1ffa0b2776..825a6dd74f34 100644 --- a/numpy/typing/tests/data/pass/numeric.py +++ b/numpy/typing/tests/data/pass/numeric.py @@ -2,27 +2,20 @@ Tests for :mod:`numpy._core.numeric`. Does not include tests which fall under ``array_constructors``. - """ -from __future__ import annotations - -from typing import cast +from typing import Any import numpy as np -import numpy.typing as npt -class SubClass(npt.NDArray[np.float64]): ... +class SubClass(np.ndarray[tuple[Any, ...], np.dtype[np.float64]]): ... 
i8 = np.int64(1) -A = cast( - np.ndarray[tuple[int, int, int], np.dtype[np.intp]], - np.arange(27).reshape(3, 3, 3), -) -B: list[list[list[int]]] = A.tolist() +A = np.arange(27).reshape(3, 3, 3) +B = A.tolist() C = np.empty((27, 27)).view(SubClass) np.count_nonzero(i8) diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 1d970ef68de5..b0ba67d6a087 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -114,17 +114,23 @@ assert_type(np.frombuffer(A), npt.NDArray[np.float64]) assert_type(np.frombuffer(A, dtype=np.int64), npt.NDArray[np.int64]) assert_type(np.frombuffer(A, dtype="c16"), npt.NDArray[Any]) -assert_type(np.arange(False, True), np.ndarray[tuple[int], np.dtype[np.signedinteger]]) -assert_type(np.arange(10), np.ndarray[tuple[int], np.dtype[np.signedinteger]]) -assert_type(np.arange(0, 10, step=2), np.ndarray[tuple[int], np.dtype[np.signedinteger]]) -assert_type(np.arange(10.0), np.ndarray[tuple[int], np.dtype[np.floating]]) -assert_type(np.arange(start=0, stop=10.0), np.ndarray[tuple[int], np.dtype[np.floating]]) -assert_type(np.arange(np.timedelta64(0)), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) -assert_type(np.arange(0, np.timedelta64(10)), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) -assert_type(np.arange(np.datetime64("0"), np.datetime64("10")), np.ndarray[tuple[int], np.dtype[np.datetime64]]) +_x_bool: bool +_x_int: int +_x_float: float +_x_timedelta: np.timedelta64 +_x_datetime: np.datetime64 + +assert_type(np.arange(False, True), np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.arange(10), np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.arange(0, 10, step=2), np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.arange(10.0), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.arange(0, stop=10.0), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.arange(_x_timedelta), np.ndarray[tuple[int], np.dtype[np.timedelta64[Any]]]) +assert_type(np.arange(0, _x_timedelta), np.ndarray[tuple[int], np.dtype[np.timedelta64[Any]]]) +assert_type(np.arange(_x_datetime, _x_datetime), np.ndarray[tuple[int], np.dtype[np.datetime64[Any]]]) assert_type(np.arange(10, dtype=np.float64), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.arange(0, 10, step=2, dtype=np.int16), np.ndarray[tuple[int], np.dtype[np.int16]]) -assert_type(np.arange(10, dtype=int), np.ndarray[tuple[int], np.dtype]) +assert_type(np.arange(10, dtype=int), np.ndarray[tuple[int], np.dtype[np.int_]]) assert_type(np.arange(0, 10, dtype="f8"), np.ndarray[tuple[int], np.dtype]) assert_type(np.require(A), npt.NDArray[np.float64]) From 3b259283bcefc890147bc9eae1e62e57d5edb094 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 5 Nov 2025 00:59:51 +0100 Subject: [PATCH 0813/1018] TYP: Allow passing `step=None` to `arange` --- numpy/_core/multiarray.pyi | 16 ++++++++-------- numpy/ma/core.pyi | 16 ++++++++-------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 8c3d5dcec1af..fa4850094646 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -967,7 +967,7 @@ def arange( start_or_stop: _ArangeScalar | float, /, stop: _ArangeScalar | float | None = None, - step: _ArangeScalar | float = 1, + step: _ArangeScalar | float | None = 1, *, dtype: _DTypeLike[_ArangeScalarT], device: L["cpu"] | None = None, @@ -978,7 
+978,7 @@ def arange( start_or_stop: _IntLike_co, /, stop: _IntLike_co | None = None, - step: _IntLike_co = 1, + step: _IntLike_co | None = 1, *, dtype: type[int] | _DTypeLike[np.int_] | None = None, device: L["cpu"] | None = None, @@ -989,7 +989,7 @@ def arange( start_or_stop: float | floating, /, stop: _FloatLike_co | None = None, - step: _FloatLike_co = 1, + step: _FloatLike_co | None = 1, *, dtype: type[float] | _DTypeLike[np.float64] | None = None, device: L["cpu"] | None = None, @@ -1000,7 +1000,7 @@ def arange( start_or_stop: _FloatLike_co, /, stop: float | floating, - step: _FloatLike_co = 1, + step: _FloatLike_co | None = 1, *, dtype: type[float] | _DTypeLike[np.float64] | None = None, device: L["cpu"] | None = None, @@ -1011,7 +1011,7 @@ def arange( start_or_stop: np.timedelta64, /, stop: _TD64Like_co | None = None, - step: _TD64Like_co = 1, + step: _TD64Like_co | None = 1, *, dtype: _DTypeLike[np.timedelta64] | None = None, device: L["cpu"] | None = None, @@ -1022,7 +1022,7 @@ def arange( start_or_stop: _TD64Like_co, /, stop: np.timedelta64, - step: _TD64Like_co = 1, + step: _TD64Like_co | None = 1, *, dtype: _DTypeLike[np.timedelta64] | None = None, device: L["cpu"] | None = None, @@ -1033,7 +1033,7 @@ def arange( start_or_stop: np.datetime64, /, stop: np.datetime64, - step: _TD64Like_co = 1, + step: _TD64Like_co | None = 1, *, dtype: _DTypeLike[np.datetime64] | None = None, device: L["cpu"] | None = None, @@ -1044,7 +1044,7 @@ def arange( start_or_stop: _ArangeScalar | float, /, stop: _ArangeScalar | float | None = None, - step: _ArangeScalar | float = 1, + step: _ArangeScalar | float | None = 1, *, dtype: DTypeLike | None = None, device: L["cpu"] | None = None, diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 16cdc6b0854b..e30a60ef2384 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3151,7 +3151,7 @@ def arange( start_or_stop: _ArangeScalar | float, /, stop: _ArangeScalar | float | None = None, - step: _ArangeScalar | float = 1, + step: _ArangeScalar | float | None = 1, *, dtype: _DTypeLike[_ArangeScalarT], device: Literal["cpu"] | None = None, @@ -3164,7 +3164,7 @@ def arange( start_or_stop: _IntLike_co, /, stop: _IntLike_co | None = None, - step: _IntLike_co = 1, + step: _IntLike_co | None = 1, *, dtype: type[int] | _DTypeLike[np.int_] | None = None, device: Literal["cpu"] | None = None, @@ -3177,7 +3177,7 @@ def arange( start_or_stop: float | floating, /, stop: _FloatLike_co | None = None, - step: _FloatLike_co = 1, + step: _FloatLike_co | None = 1, *, dtype: type[float] | _DTypeLike[np.float64] | None = None, device: Literal["cpu"] | None = None, @@ -3190,7 +3190,7 @@ def arange( start_or_stop: _FloatLike_co, /, stop: float | floating, - step: _FloatLike_co = 1, + step: _FloatLike_co | None = 1, *, dtype: type[float] | _DTypeLike[np.float64] | None = None, device: Literal["cpu"] | None = None, @@ -3203,7 +3203,7 @@ def arange( start_or_stop: np.timedelta64, /, stop: _TD64Like_co | None = None, - step: _TD64Like_co = 1, + step: _TD64Like_co | None = 1, *, dtype: _DTypeLike[np.timedelta64] | None = None, device: Literal["cpu"] | None = None, @@ -3216,7 +3216,7 @@ def arange( start_or_stop: _TD64Like_co, /, stop: np.timedelta64, - step: _TD64Like_co = 1, + step: _TD64Like_co | None = 1, *, dtype: _DTypeLike[np.timedelta64] | None = None, device: Literal["cpu"] | None = None, @@ -3229,7 +3229,7 @@ def arange( start_or_stop: np.datetime64, /, stop: np.datetime64, - step: _TD64Like_co = 1, + step: _TD64Like_co | None = 1, *, dtype: _DTypeLike[np.datetime64] | 
None = None, device: Literal["cpu"] | None = None, @@ -3242,7 +3242,7 @@ def arange( start_or_stop: _ArangeScalar | float, /, stop: _ArangeScalar | float | None = None, - step: _ArangeScalar | float = 1, + step: _ArangeScalar | float | None = 1, *, dtype: DTypeLike | None = None, device: Literal["cpu"] | None = None, From fa476b5ca9b9f6103b216e6fc096c55a422d3efb Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 6 Nov 2025 11:37:06 +0100 Subject: [PATCH 0814/1018] DOC: add release note for #30147 --- doc/release/upcoming_changes/30147.compatibility.rst | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 doc/release/upcoming_changes/30147.compatibility.rst diff --git a/doc/release/upcoming_changes/30147.compatibility.rst b/doc/release/upcoming_changes/30147.compatibility.rst new file mode 100644 index 000000000000..c5d13323fe6e --- /dev/null +++ b/doc/release/upcoming_changes/30147.compatibility.rst @@ -0,0 +1,4 @@ +* Type-checkers will no longer accept calls to `numpy.arange` with + ``start`` as a keyword argument. This was done for compatibility with + the Array API standard. At runtime it is still possible to use + `numpy.arange` with ``start`` as a keyword argument. From 2740d5f6a93bff84e14ef9a4eed1475cbd15f3fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafael=20Laboissi=C3=A8re?= Date: Thu, 6 Nov 2025 16:49:46 +0100 Subject: [PATCH 0815/1018] ENH: Run SWIG unit tests in CI action (#30161) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Replace deprecated unittest.makeSuite() by unittest.TestLoader().loadTestsFromTestCase() * Do not use negative values in argument of Tensor.Max() The unit tests involving `TestCase.testMaxWrongDim` do not work for `` being one of `uchar`, `ushort`, `uint`, `ulong`, and `ulongLong` (which translate to `uint8`, `uint16`, `uint32`, `uint64`, and `uint64`, respectively). Instead of raising the expected `TypeError`, the unit tests raise `OverflowError`, with the error message “Python integer -1 out of bounds for ”. This is fixed by replacing the negative values in the argument of the `max` function by positive ones. * Run unit tests for the SWIG binding at the end of the sdist job The unit tests will be run in the Linux workflow using the Makefile distributed in `tools/swig/test` directory. They are run after installation of NumPy in the system, ensuring that the C header files are correctly included (see GitHub issue [#30131](https://github.com/numpy/numpy/issues/30131)). 
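``unittest.makeSuite`` was deprecated in Python 3.11 and removed in 3.13, so the replacement used throughout this patch is required on current interpreters. The two spellings are equivalent (self-contained sketch):

    import unittest

    class MyCase(unittest.TestCase):
        def test_ok(self):
            self.assertTrue(True)

    suite = unittest.TestSuite()
    # Old spelling, removed in Python 3.13:
    #   suite.addTest(unittest.makeSuite(MyCase))
    # Current replacement:
    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(MyCase))
    unittest.TextTestRunner().run(suite)
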
--- .github/workflows/linux.yml | 5 +++++ tools/swig/test/testArray.py | 6 +++--- tools/swig/test/testFarray.py | 2 +- tools/swig/test/testFlat.py | 24 ++++++++++++------------ tools/swig/test/testFortran.py | 24 ++++++++++++------------ tools/swig/test/testMatrix.py | 24 ++++++++++++------------ tools/swig/test/testSuperTensor.py | 24 ++++++++++++------------ tools/swig/test/testTensor.py | 26 +++++++++++++------------- tools/swig/test/testVector.py | 24 ++++++++++++------------ 9 files changed, 82 insertions(+), 77 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index b749c60f3cc3..74edb50d9273 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -280,6 +280,11 @@ jobs: run: | cd tools pytest --pyargs numpy -m "not slow" + - name: Test SWIG binding + run: | + sudo apt update + sudo apt install make swig + make -C tools/swig/test test array_api_tests: needs: [smoke_test] diff --git a/tools/swig/test/testArray.py b/tools/swig/test/testArray.py index a8528207c167..b9b4af3319ae 100755 --- a/tools/swig/test/testArray.py +++ b/tools/swig/test/testArray.py @@ -378,9 +378,9 @@ def testView(self): # Build the test suite suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite(Array1TestCase)) - suite.addTest(unittest.makeSuite(Array2TestCase)) - suite.addTest(unittest.makeSuite(ArrayZTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(Array1TestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(Array2TestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ArrayZTestCase)) # Execute the test suite print("Testing Classes of Module Array") diff --git a/tools/swig/test/testFarray.py b/tools/swig/test/testFarray.py index a9310e20a897..75bf99c054cb 100755 --- a/tools/swig/test/testFarray.py +++ b/tools/swig/test/testFarray.py @@ -149,7 +149,7 @@ def testView(self): # Build the test suite suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite(FarrayTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(FarrayTestCase)) # Execute the test suite print("Testing Classes of Module Farray") diff --git a/tools/swig/test/testFlat.py b/tools/swig/test/testFlat.py index 543ee0c41d9f..43ed84bcfa06 100755 --- a/tools/swig/test/testFlat.py +++ b/tools/swig/test/testFlat.py @@ -176,18 +176,18 @@ def __init__(self, methodName="runTest"): # Build the test suite suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( shortTestCase)) - suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) - suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - suite.addTest(unittest.makeSuite( doubleTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( scharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ucharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( shortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ushortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( intTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( uintTestCase)) + 
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ulongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ulongLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( floatTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( doubleTestCase)) # Execute the test suite print("Testing 1D Functions of Module Flat") diff --git a/tools/swig/test/testFortran.py b/tools/swig/test/testFortran.py index 498732f3118f..8b23af610481 100644 --- a/tools/swig/test/testFortran.py +++ b/tools/swig/test/testFortran.py @@ -140,18 +140,18 @@ def __init__(self, methodName="runTest"): # Build the test suite suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( shortTestCase)) - suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) - suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - suite.addTest(unittest.makeSuite( doubleTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( scharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ucharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( shortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ushortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( intTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( uintTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ulongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ulongLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( floatTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( doubleTestCase)) # Execute the test suite print("Testing 2D Functions of Module Matrix") diff --git a/tools/swig/test/testMatrix.py b/tools/swig/test/testMatrix.py index d20312ecc2a0..d3151a0fb857 100755 --- a/tools/swig/test/testMatrix.py +++ b/tools/swig/test/testMatrix.py @@ -339,18 +339,18 @@ def __init__(self, methodName="runTest"): # Build the test suite suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( shortTestCase)) - suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) - suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - suite.addTest(unittest.makeSuite( doubleTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( scharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( 
ucharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( shortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ushortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( intTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( uintTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ulongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ulongLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( floatTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( doubleTestCase)) # Execute the test suite print("Testing 2D Functions of Module Matrix") diff --git a/tools/swig/test/testSuperTensor.py b/tools/swig/test/testSuperTensor.py index e0027428e647..f49a0aa07a90 100644 --- a/tools/swig/test/testSuperTensor.py +++ b/tools/swig/test/testSuperTensor.py @@ -374,18 +374,18 @@ def __init__(self, methodName="runTest"): # Build the test suite suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( shortTestCase)) - suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) - suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - suite.addTest(unittest.makeSuite( doubleTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( scharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ucharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( shortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ushortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( intTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( uintTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ulongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ulongLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( floatTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( doubleTestCase)) # Execute the test suite print("Testing 4D Functions of Module SuperTensor") diff --git a/tools/swig/test/testTensor.py b/tools/swig/test/testTensor.py index aa962b0cbcda..536d848e6135 100755 --- a/tools/swig/test/testTensor.py +++ b/tools/swig/test/testTensor.py @@ -99,7 +99,7 @@ def testMaxWrongDim(self): "Test max function with wrong dimensions" print(self.typeStr, "... 
", end=' ', file=sys.stderr) max = Tensor.__dict__[self.typeStr + "Max"] - self.assertRaises(TypeError, max, [0, -1, 2, -3]) + self.assertRaises(TypeError, max, [0, 1, 2, 3]) # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap def testMin(self): @@ -379,18 +379,18 @@ def __init__(self, methodName="runTest"): # Build the test suite suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( shortTestCase)) - suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) - suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - suite.addTest(unittest.makeSuite( doubleTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( scharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ucharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( shortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ushortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( intTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( uintTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ulongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ulongLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( floatTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( doubleTestCase)) # Execute the test suite print("Testing 3D Functions of Module Tensor") diff --git a/tools/swig/test/testVector.py b/tools/swig/test/testVector.py index f0b51715d1d5..15ad96da4503 100755 --- a/tools/swig/test/testVector.py +++ b/tools/swig/test/testVector.py @@ -358,18 +358,18 @@ def __init__(self, methodName="runTest"): # Build the test suite suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( shortTestCase)) - suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) - suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - suite.addTest(unittest.makeSuite( doubleTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( scharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ucharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( shortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ushortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( intTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( uintTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ulongTestCase)) + 
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ulongLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( floatTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( doubleTestCase)) # Execute the test suite print("Testing 1D Functions of Module Vector") From 666719b3742d5c7a887356de1320b0e959ee5ab5 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 6 Nov 2025 18:38:18 +0100 Subject: [PATCH 0816/1018] ENH: Add ``order`` parameter to ``np.ma.asanyarray`` (#30163) * ENH: Add ``order`` parameter to ``np.ma.asanyarray`` * TYP: Update the ``np.ma.asanyarray`` stubs --- numpy/ma/core.py | 23 ++++++++++++++++++++--- numpy/ma/core.pyi | 8 ++++---- 2 files changed, 24 insertions(+), 7 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index f60b42663e00..e56b0cfa573b 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -8616,7 +8616,7 @@ def asarray(a, dtype=None, order=None): subok=False, order=order) -def asanyarray(a, dtype=None): +def asanyarray(a, dtype=None, order='A'): """ Convert the input to a masked array, conserving subclasses. @@ -8629,6 +8629,14 @@ def asanyarray(a, dtype=None): Input data, in any form that can be converted to an array. dtype : dtype, optional By default, the data-type is inferred from the input data. + order : {'C', 'F', 'A'}, optional + Specify the order of the array. If order is 'C', then the array + will be in C-contiguous order (last-index varies the fastest). + If order is 'F', then the returned array will be in + Fortran-contiguous order (first-index varies the fastest). + If order is 'A' (default), then the returned array may be + in any order (either C-, Fortran-contiguous, or even discontiguous), + unless a copy is required, in which case it will be C-contiguous. Returns ------- @@ -8658,9 +8666,18 @@ def asanyarray(a, dtype=None): """ # workaround for #8666, to preserve identity. Ideally the bottom line # would handle this for us. - if isinstance(a, MaskedArray) and (dtype is None or dtype == a.dtype): + if ( + isinstance(a, MaskedArray) + and (dtype is None or dtype == a.dtype) + and ( + order == 'A' + or order == 'C' and a.flags.carray + or order == 'F' and a.flags.f_contiguous + ) + ): return a - return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True) + return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True, + order=order) ############################################################################## diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index e30a60ef2384..bbcd617e9699 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -2453,13 +2453,13 @@ def asarray(a: object, dtype: DTypeLike | None = None, order: _OrderKACF | None # keep in sync with `asarray` (but note the additional first overload) @overload -def asanyarray(a: _MArrayT, dtype: None = None) -> _MArrayT: ... +def asanyarray(a: _MArrayT, dtype: None = None, order: _OrderACF = "A") -> _MArrayT: ... @overload -def asanyarray(a: _ArrayLike[_ScalarT], dtype: None = None) -> _MaskedArray[_ScalarT]: ... +def asanyarray(a: _ArrayLike[_ScalarT], dtype: None = None, order: _OrderACF = "A") -> _MaskedArray[_ScalarT]: ... @overload -def asanyarray(a: object, dtype: _DTypeLike[_ScalarT]) -> _MaskedArray[_ScalarT]: ... +def asanyarray(a: object, dtype: _DTypeLike[_ScalarT], order: _OrderACF = "A") -> _MaskedArray[_ScalarT]: ... 
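A short usage sketch of the new ``order`` parameter, following the behaviour documented above (identity is preserved when the layout already matches; ``order='F'`` may force a copy):

    import numpy as np

    a = np.ma.masked_array([[1, 2], [3, 4]], mask=[[0, 1], [0, 0]])
    b = np.ma.asanyarray(a)             # passthrough: b is a
    f = np.ma.asanyarray(a, order='F')  # Fortran-contiguous result
    assert b is a and f.flags.f_contiguous
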
@overload -def asanyarray(a: object, dtype: DTypeLike | None = None) -> _MaskedArray[_ScalarT]: ... +def asanyarray(a: object, dtype: DTypeLike | None = None, order: _OrderACF = "A") -> _MaskedArray[_ScalarT]: ... # def is_masked(x: object) -> bool: ... From 125b5ce97176325a6ee6a1dbb22b0db995087598 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 6 Nov 2025 20:13:06 +0100 Subject: [PATCH 0817/1018] MAINT: some ``numpy.polynomial.*`` namespace pollution cleanup (#30165) --- numpy/polynomial/chebyshev.py | 8 +++----- numpy/polynomial/hermite.py | 10 ++++------ numpy/polynomial/hermite_e.py | 10 ++++------ numpy/polynomial/laguerre.py | 10 ++++------ numpy/polynomial/legendre.py | 10 ++++------ numpy/polynomial/polynomial.py | 14 ++++++-------- numpy/polynomial/polyutils.py | 6 +++--- numpy/polynomial/tests/test_polynomial.py | 3 +-- 8 files changed, 29 insertions(+), 42 deletions(-) diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py index 58fce6046287..55b48b905848 100644 --- a/numpy/polynomial/chebyshev.py +++ b/numpy/polynomial/chebyshev.py @@ -108,8 +108,6 @@ """ # noqa: E501 import numpy as np -import numpy.linalg as la -from numpy.lib.array_utils import normalize_axis_index from . import polyutils as pu from ._polybase import ABCPolyBase @@ -936,7 +934,7 @@ def chebder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -1059,7 +1057,7 @@ def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -1721,7 +1719,7 @@ def chebroots(c): # rotated companion matrix reduces error m = chebcompanion(c)[::-1, ::-1] - r = la.eigvals(m) + r = np.linalg.eigvals(m) r.sort() return r diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index 47e1dfc05b4b..fc2ea9e4194a 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -76,8 +76,6 @@ """ import numpy as np -import numpy.linalg as la -from numpy.lib.array_utils import normalize_axis_index from . import polyutils as pu from ._polybase import ABCPolyBase @@ -655,7 +653,7 @@ def hermder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -772,7 +770,7 @@ def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -1543,7 +1541,7 @@ def hermroots(c): # rotated companion matrix reduces error m = hermcompanion(c)[::-1, ::-1] - r = la.eigvals(m) + r = np.linalg.eigvals(m) r.sort() return r @@ -1636,7 +1634,7 @@ def hermgauss(deg): # matrix is symmetric in this case in order to obtain better zeros. 
c = np.array([0] * deg + [1], dtype=np.float64) m = hermcompanion(c) - x = la.eigvalsh(m) + x = np.linalg.eigvalsh(m) # improve roots by one application of Newton dy = _normed_hermite_n(x, ideg) diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index d30fc1b5aa14..6225b9dd6b75 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -76,8 +76,6 @@ """ import numpy as np -import numpy.linalg as la -from numpy.lib.array_utils import normalize_axis_index from . import polyutils as pu from ._polybase import ABCPolyBase @@ -653,7 +651,7 @@ def hermeder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -770,7 +768,7 @@ def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -1461,7 +1459,7 @@ def hermeroots(c): # rotated companion matrix reduces error m = hermecompanion(c)[::-1, ::-1] - r = la.eigvals(m) + r = np.linalg.eigvals(m) r.sort() return r @@ -1548,7 +1546,7 @@ def hermegauss(deg): # matrix is symmetric in this case in order to obtain better zeros. c = np.array([0] * deg + [1]) m = hermecompanion(c) - x = la.eigvalsh(m) + x = np.linalg.eigvalsh(m) # improve roots by one application of Newton dy = _normed_hermite_e_n(x, ideg) diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py index 8d5d5ae67632..b1d87bf6d035 100644 --- a/numpy/polynomial/laguerre.py +++ b/numpy/polynomial/laguerre.py @@ -76,8 +76,6 @@ """ import numpy as np -import numpy.linalg as la -from numpy.lib.array_utils import normalize_axis_index from . import polyutils as pu from ._polybase import ABCPolyBase @@ -650,7 +648,7 @@ def lagder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -770,7 +768,7 @@ def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -1525,7 +1523,7 @@ def lagroots(c): # rotated companion matrix reduces error m = lagcompanion(c)[::-1, ::-1] - r = la.eigvals(m) + r = np.linalg.eigvals(m) r.sort() return r @@ -1577,7 +1575,7 @@ def laggauss(deg): # matrix is symmetric in this case in order to obtain better zeros. c = np.array([0] * deg + [1]) m = lagcompanion(c) - x = la.eigvalsh(m) + x = np.linalg.eigvalsh(m) # improve roots by one application of Newton dy = lagval(x, c) diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py index 5fdc5245b9d3..237e340cbf45 100644 --- a/numpy/polynomial/legendre.py +++ b/numpy/polynomial/legendre.py @@ -80,8 +80,6 @@ """ import numpy as np -import numpy.linalg as la -from numpy.lib.array_utils import normalize_axis_index from . 
import polyutils as pu from ._polybase import ABCPolyBase @@ -676,7 +674,7 @@ def legder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -799,7 +797,7 @@ def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -1464,7 +1462,7 @@ def legroots(c): # rotated companion matrix reduces error m = legcompanion(c)[::-1, ::-1] - r = la.eigvals(m) + r = np.linalg.eigvals(m) r.sort() return r @@ -1510,7 +1508,7 @@ def leggauss(deg): # matrix is symmetric in this case in order to obtain better zeros. c = np.array([0] * deg + [1]) m = legcompanion(c) - x = la.eigvalsh(m) + x = np.linalg.eigvalsh(m) # improve roots by one application of Newton dy = legval(x, c) diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index 220306693cf9..e3823c89cd98 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -81,9 +81,7 @@ 'polycompanion'] import numpy as np -import numpy.linalg as la -from numpy._core.overrides import array_function_dispatch -from numpy.lib.array_utils import normalize_axis_index +from numpy._core.overrides import array_function_dispatch as _array_function_dispatch from . import polyutils as pu from ._polybase import ABCPolyBase @@ -523,7 +521,7 @@ def polyder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -637,7 +635,7 @@ def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -852,7 +850,7 @@ def _polyval2d_dispatcher(x, y, c): def _polygrid2d_dispatcher(x, y, c): return (x, y, c) -@array_function_dispatch(_polyval2d_dispatcher) +@_array_function_dispatch(_polyval2d_dispatcher) def polyval2d(x, y, c): """ Evaluate a 2-D polynomial at points (x, y). @@ -904,7 +902,7 @@ def polyval2d(x, y, c): """ return pu._valnd(polyval, c, x, y) -@array_function_dispatch(_polygrid2d_dispatcher) +@_array_function_dispatch(_polygrid2d_dispatcher) def polygrid2d(x, y, c): """ Evaluate a 2-D polynomial on the Cartesian product of x and y. 
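# Illustrative sketch, not part of the patch: the observable effect of this
# cleanup is that helper imports stop leaking into the public polynomial
# modules (the checks below assume a build with this patch applied).
import numpy.polynomial.polynomial as P

assert not hasattr(P, "la")                    # no more numpy.linalg alias
assert not hasattr(P, "normalize_axis_index")  # now np.lib.array_utils.*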
@@ -1547,7 +1545,7 @@ def polyroots(c): return np.array([-c[0] / c[1]]) m = polycompanion(c) - r = la.eigvals(m) + r = np.linalg.eigvals(m) r.sort() return r diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py index 18dc0a8d1d24..5e0e1af973ae 100644 --- a/numpy/polynomial/polyutils.py +++ b/numpy/polynomial/polyutils.py @@ -23,8 +23,6 @@ import warnings import numpy as np -from numpy._core.multiarray import dragon4_positional, dragon4_scientific -from numpy.exceptions import RankWarning __all__ = [ 'as_series', 'trimseq', 'trimcoef', 'getdomain', 'mapdomain', 'mapparms', @@ -661,7 +659,7 @@ def _fit(vander_f, x, y, deg, rcond=None, full=False, w=None): # warn on rank reduction if rank != order and not full: msg = "The fit may be poorly conditioned" - warnings.warn(msg, RankWarning, stacklevel=2) + warnings.warn(msg, np.exceptions.RankWarning, stacklevel=2) if full: return c, [resids, rank, s, rcond] @@ -725,6 +723,8 @@ def _as_int(x, desc): def format_float(x, parens=False): + from numpy._core.multiarray import dragon4_positional, dragon4_scientific + if not np.issubdtype(type(x), np.floating): return str(x) diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index 5c8e85c7f860..4c924a758b06 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -10,7 +10,6 @@ import numpy as np import numpy.polynomial.polynomial as poly -import numpy.polynomial.polyutils as pu from numpy.testing import ( assert_, assert_almost_equal, @@ -657,7 +656,7 @@ def test_fit_degenerate_domain(self): assert_equal(p.coef, [2.]) p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=0) assert_almost_equal(p.coef, [2.05]) - with pytest.warns(pu.RankWarning): + with pytest.warns(np.exceptions.RankWarning): p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=1) def test_result_type(self): From 2b216b1a6072d28e4aa7b02a7e13e69713a20c5d Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 6 Nov 2025 20:49:07 +0100 Subject: [PATCH 0818/1018] BUG: ``numpy.random.*`` class runtime signatures (#30164) This fixes `inspect.signature` for - `np.random.BitGenerator` - `np.random.Generator` - `np.random.MT19937` - `np.random.PCG64` - `np.random.PCG64DXSM` - `np.random.Philox` - `np.random.RandomState` - `np.random.SFC64` - `np.random.SeedSequence` - `np.random.bit_generator.SeedlessSeedSequence` This also fixes a typo in `bit_generator.pxd` that accidentally defined an empty unused class `np.random.bit_generator.SeedlessSequence`. Related to #30104, #30114, #30121, #30124, #30126, #30137, #30138, #30140, #30143, #30146, #30147, and #30155 --- numpy/random/_generator.pyx | 118 +++++++++++++------------- numpy/random/_mt19937.pyx | 4 +- numpy/random/_pcg64.pyx | 8 +- numpy/random/_philox.pyx | 6 +- numpy/random/_sfc64.pyx | 4 +- numpy/random/bit_generator.pxd | 2 +- numpy/random/bit_generator.pyx | 18 ++-- numpy/random/mtrand.pyx | 40 ++++----- numpy/random/tests/test_regression.py | 29 ++++++- 9 files changed, 129 insertions(+), 100 deletions(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index d9c2730de8ca..bace56b59422 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -140,8 +140,8 @@ cdef bint _check_bit_generator(object bitgen): cdef class Generator: - """ - Generator(bit_generator) + # the first line is used to populate `__text_signature__` + """Generator(bit_generator)\n-- Container for the BitGenerators. 
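# Illustrative sketch, not part of the patch: CPython exposes the first
# docstring line of a C/Cython type as ``__text_signature__`` when it ends
# with "\n--", which is what lets ``inspect.signature`` work on these
# classes; the exact rendering below is an assumption.
import inspect
import numpy as np

print(inspect.signature(np.random.Generator))  # expected: (bit_generator)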
@@ -396,8 +396,8 @@ cdef class Generator: Drawn samples from the parameterized beta distribution. Examples - -------- - The beta distribution has mean a/(a+b). If ``a == b`` and both + -------- + The beta distribution has mean a/(a+b). If ``a == b`` and both are > 1, the distribution is symmetric with mean 0.5. >>> rng = np.random.default_rng() @@ -405,11 +405,11 @@ cdef class Generator: >>> sample = rng.beta(a=a, b=b, size=size) >>> np.mean(sample) 0.5047328775385895 # may vary - + Otherwise the distribution is skewed left or right according to whether ``a`` or ``b`` is greater. The distribution is mirror symmetric. See for example: - + >>> a, b, size = 2, 7, 10000 >>> sample_left = rng.beta(a=a, b=b, size=size) >>> sample_right = rng.beta(a=b, b=a, size=size) @@ -422,12 +422,12 @@ cdef class Generator: -0.0003163943736596009 # may vary Display the histogram of the two samples: - + >>> import matplotlib.pyplot as plt - >>> plt.hist([sample_left, sample_right], + >>> plt.hist([sample_left, sample_right], ... 50, density=True, histtype='bar') >>> plt.show() - + References ---------- .. [1] Wikipedia, "Beta distribution", @@ -477,17 +477,17 @@ cdef class Generator: Examples -------- - Assume a company has 10000 customer support agents and the time - between customer calls is exponentially distributed and that the + Assume a company has 10000 customer support agents and the time + between customer calls is exponentially distributed and that the average time between customer calls is 4 minutes. >>> scale, size = 4, 10000 >>> rng = np.random.default_rng() >>> time_between_calls = rng.exponential(scale=scale, size=size) - What is the probability that a customer will call in the next - 4 to 5 minutes? - + What is the probability that a customer will call in the next + 4 to 5 minutes? + >>> x = ((time_between_calls < 5).sum())/size >>> y = ((time_between_calls < 4).sum())/size >>> x - y @@ -718,10 +718,10 @@ cdef class Generator: Notes ----- - This function generates random bytes from a discrete uniform - distribution. The generated bytes are independent from the CPU's + This function generates random bytes from a discrete uniform + distribution. The generated bytes are independent from the CPU's native endianness. - + Examples -------- >>> rng = np.random.default_rng() @@ -1024,9 +1024,9 @@ cdef class Generator: greater than or equal to low. The default value is 0. high : float or array_like of floats Upper boundary of the output interval. All values generated will be - less than high. The high limit may be included in the returned array of - floats due to floating-point rounding in the equation - ``low + (high-low) * random_sample()``. high - low must be + less than high. The high limit may be included in the returned array of + floats due to floating-point rounding in the equation + ``low + (high-low) * random_sample()``. high - low must be non-negative. The default value is 1.0. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then @@ -1541,10 +1541,10 @@ cdef class Generator: So there is about a 1% chance that the F statistic will exceed 7.62, the measured value is 36, so the null hypothesis is rejected at the 1% level. 
- - The corresponding probability density function for ``n = 20`` + + The corresponding probability density function for ``n = 20`` and ``m = 20`` is: - + >>> import matplotlib.pyplot as plt >>> from scipy import stats >>> dfnum, dfden, size = 20, 20, 10000 @@ -1554,7 +1554,7 @@ cdef class Generator: >>> plt.plot(x, stats.f.pdf(x, dfnum, dfden)) >>> plt.xlim([0, 5]) >>> plt.show() - + """ return cont(&random_f, &self._bitgen, size, self.lock, 2, dfnum, 'dfnum', CONS_POSITIVE, @@ -1701,7 +1701,7 @@ cdef class Generator: The distribution of a chi-square random variable with 20 degrees of freedom looks as follows: - + >>> import matplotlib.pyplot as plt >>> import scipy.stats as stats >>> s = rng.chisquare(20, 10000) @@ -1921,14 +1921,14 @@ cdef class Generator: Does their energy intake deviate systematically from the recommended value of 7725 kJ? Our null hypothesis will be the absence of deviation, and the alternate hypothesis will be the presence of an effect that could be - either positive or negative, hence making our test 2-tailed. + either positive or negative, hence making our test 2-tailed. Because we are estimating the mean and we have N=11 values in our sample, - we have N-1=10 degrees of freedom. We set our significance level to 95% and - compute the t statistic using the empirical mean and empirical standard - deviation of our intake. We use a ddof of 1 to base the computation of our + we have N-1=10 degrees of freedom. We set our significance level to 95% and + compute the t statistic using the empirical mean and empirical standard + deviation of our intake. We use a ddof of 1 to base the computation of our empirical standard deviation on an unbiased estimate of the variance (note: - the final estimate is not unbiased due to the concave nature of the square + the final estimate is not unbiased due to the concave nature of the square root). >>> np.mean(intake) @@ -1947,18 +1947,18 @@ cdef class Generator: >>> s = rng.standard_t(10, size=1000000) >>> h = plt.hist(s, bins=100, density=True) - Does our t statistic land in one of the two critical regions found at + Does our t statistic land in one of the two critical regions found at both tails of the distribution? >>> np.sum(np.abs(t) < np.abs(s)) / float(len(s)) 0.018318 #random < 0.05, statistic is in critical region - The probability value for this 2-tailed test is about 1.83%, which is - lower than the 5% pre-determined significance threshold. + The probability value for this 2-tailed test is about 1.83%, which is + lower than the 5% pre-determined significance threshold. Therefore, the probability of observing values as extreme as our intake - conditionally on the null hypothesis being true is too low, and we reject - the null hypothesis of no deviation. + conditionally on the null hypothesis being true is too low, and we reject + the null hypothesis of no deviation. """ return cont(&random_standard_t, &self._bitgen, size, self.lock, 1, @@ -3034,21 +3034,21 @@ cdef class Generator: Draw samples from the distribution: >>> rng = np.random.default_rng() - >>> n, p, size = 10, .5, 10000 + >>> n, p, size = 10, .5, 10000 >>> s = rng.binomial(n, p, 10000) Assume a company drills 9 wild-cat oil exploration wells, each with - an estimated probability of success of ``p=0.1``. All nine wells fail. + an estimated probability of success of ``p=0.1``. All nine wells fail. What is the probability of that happening? 
- Over ``size = 20,000`` trials the probability of this happening + Over ``size = 20,000`` trials the probability of this happening is on average: >>> n, p, size = 9, 0.1, 20000 >>> np.sum(rng.binomial(n=n, p=p, size=size) == 0)/size 0.39015 # may vary - The following can be used to visualize a sample with ``n=100``, + The following can be used to visualize a sample with ``n=100``, ``p=0.4`` and the corresponding probability density function: >>> import matplotlib.pyplot as plt @@ -3167,10 +3167,10 @@ cdef class Generator: appear before the third "1" is a negative binomial distribution. Because this method internally calls ``Generator.poisson`` with an - intermediate random value, a ValueError is raised when the choice of + intermediate random value, a ValueError is raised when the choice of :math:`n` and :math:`p` would result in the mean + 10 sigma of the sampled - intermediate distribution exceeding the max acceptable value of the - ``Generator.poisson`` method. This happens when :math:`p` is too low + intermediate distribution exceeding the max acceptable value of the + ``Generator.poisson`` method. This happens when :math:`p` is too low (a lot of failures happen for every success) and :math:`n` is too big ( a lot of successes are allowed). Therefore, the :math:`n` and :math:`p` values must satisfy the constraint: @@ -3302,7 +3302,7 @@ cdef class Generator: >>> s = rng.poisson(lam=lam, size=size) Verify the mean and variance, which should be approximately ``lam``: - + >>> s.mean(), s.var() (4.9917 5.1088311) # may vary @@ -3456,7 +3456,7 @@ cdef class Generator: Examples -------- - Draw 10,000 values from the geometric distribution, with the + Draw 10,000 values from the geometric distribution, with the probability of an individual success equal to ``p = 0.35``: >>> p, size = 0.35, 10000 @@ -3475,7 +3475,7 @@ cdef class Generator: >>> plt.plot(bins, (1-p)**(bins-1)*p) >>> plt.xlim([0, 25]) >>> plt.show() - + """ return disc(&random_geometric, &self._bitgen, size, self.lock, 1, 0, p, 'p', CONS_BOUNDED_GT_0_1, @@ -4646,11 +4646,11 @@ cdef class Generator: -------- shuffle permutation - + Notes ----- - An important distinction between methods ``shuffle`` and ``permuted`` is - how they both treat the ``axis`` parameter which can be found at + An important distinction between methods ``shuffle`` and ``permuted`` is + how they both treat the ``axis`` parameter which can be found at :ref:`generator-handling-axis-parameter`. Examples @@ -4722,7 +4722,7 @@ cdef class Generator: if axis is None: if x.ndim > 1: if not (np.PyArray_FLAGS(out) & (np.NPY_ARRAY_C_CONTIGUOUS | - np.NPY_ARRAY_F_CONTIGUOUS)): + np.NPY_ARRAY_F_CONTIGUOUS)): flags = (np.NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_WRITEBACKIFCOPY) to_shuffle = PyArray_FromArray(out, @@ -4802,8 +4802,8 @@ cdef class Generator: Notes ----- - An important distinction between methods ``shuffle`` and ``permuted`` is - how they both treat the ``axis`` parameter which can be found at + An important distinction between methods ``shuffle`` and ``permuted`` is + how they both treat the ``axis`` parameter which can be found at :ref:`generator-handling-axis-parameter`. Examples @@ -5011,11 +5011,11 @@ def default_rng(seed=None): Examples -------- `default_rng` is the recommended constructor for the random number class - `Generator`. Here are several ways we can construct a random - number generator using `default_rng` and the `Generator` class. + `Generator`. 
Here are several ways we can construct a random + number generator using `default_rng` and the `Generator` class. Here we use `default_rng` to generate a random float: - + >>> import numpy as np >>> rng = np.random.default_rng(12345) >>> print(rng) @@ -5025,10 +5025,10 @@ def default_rng(seed=None): 0.22733602246716966 >>> type(rfloat) - - Here we use `default_rng` to generate 3 random integers between 0 + + Here we use `default_rng` to generate 3 random integers between 0 (inclusive) and 10 (exclusive): - + >>> import numpy as np >>> rng = np.random.default_rng(12345) >>> rints = rng.integers(low=0, high=10, size=3) @@ -5036,9 +5036,9 @@ def default_rng(seed=None): array([6, 2, 7]) >>> type(rints[0]) - + Here we specify a seed so that we have reproducible results: - + >>> import numpy as np >>> rng = np.random.default_rng(seed=42) >>> print(rng) diff --git a/numpy/random/_mt19937.pyx b/numpy/random/_mt19937.pyx index ed69c2aa6c58..c74498356dda 100644 --- a/numpy/random/_mt19937.pyx +++ b/numpy/random/_mt19937.pyx @@ -43,8 +43,8 @@ cdef uint64_t mt19937_raw(void *st) noexcept nogil: return mt19937_next32( st) cdef class MT19937(BitGenerator): - """ - MT19937(seed=None) + # the first line is used to populate `__text_signature__` + """MT19937(seed=None)\n-- Container for the Mersenne Twister pseudo-random number generator. diff --git a/numpy/random/_pcg64.pyx b/numpy/random/_pcg64.pyx index e6e9b8e0ac3c..fd2dd40db565 100644 --- a/numpy/random/_pcg64.pyx +++ b/numpy/random/_pcg64.pyx @@ -51,8 +51,8 @@ cdef double pcg64_cm_double(void* st) noexcept nogil: return uint64_to_double(pcg64_cm_next64(st)) cdef class PCG64(BitGenerator): - """ - PCG64(seed=None) + # the first line is used to populate `__text_signature__` + """PCG64(seed=None)\n-- BitGenerator for the PCG-64 pseudo-random number generator. @@ -284,8 +284,8 @@ cdef class PCG64(BitGenerator): cdef class PCG64DXSM(BitGenerator): - """ - PCG64DXSM(seed=None) + # the first line is used to populate `__text_signature__` + """PCG64DXSM(seed=None)\n-- BitGenerator for the PCG-64 DXSM pseudo-random number generator. diff --git a/numpy/random/_philox.pyx b/numpy/random/_philox.pyx index 3f33c7078f83..75fc4081ee62 100644 --- a/numpy/random/_philox.pyx +++ b/numpy/random/_philox.pyx @@ -52,8 +52,8 @@ cdef double philox_double(void*st) noexcept nogil: return uint64_to_double(philox_next64( st)) cdef class Philox(BitGenerator): - """ - Philox(seed=None, counter=None, key=None) + # the first line is used to populate `__text_signature__` + """Philox(seed=None, counter=None, key=None)\n-- Container for the Philox (4x64) pseudo-random number generator. @@ -194,7 +194,7 @@ cdef class Philox(BitGenerator): cdef _reset_state_variables(self): cdef philox_state *rng_state = &self.rng_state - + rng_state[0].has_uint32 = 0 rng_state[0].uinteger = 0 rng_state[0].buffer_pos = PHILOX_BUFFER_SIZE diff --git a/numpy/random/_sfc64.pyx b/numpy/random/_sfc64.pyx index 86136f0b42fb..81a5fc3d21e5 100644 --- a/numpy/random/_sfc64.pyx +++ b/numpy/random/_sfc64.pyx @@ -34,8 +34,8 @@ cdef double sfc64_double(void* st) noexcept nogil: cdef class SFC64(BitGenerator): - """ - SFC64(seed=None) + # the first line is used to populate `__text_signature__` + """SFC64(seed=None)\n-- BitGenerator for Chris Doty-Humphrey's Small Fast Chaotic PRNG. 
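# Illustrative sketch mirroring the regression test added below: with the
# one-line text signatures in place, every bit generator constructor can
# be introspected without ``inspect.signature`` raising ValueError.
import inspect
from numpy import random

for cls in (random.MT19937, random.PCG64, random.PCG64DXSM,
            random.Philox, random.SFC64):
    inspect.signature(cls)  # raised ValueError before this patch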
diff --git a/numpy/random/bit_generator.pxd b/numpy/random/bit_generator.pxd index dfa7d0a71c08..dbaab4721fec 100644 --- a/numpy/random/bit_generator.pxd +++ b/numpy/random/bit_generator.pxd @@ -31,5 +31,5 @@ cdef class SeedSequence(): np.ndarray[np.npy_uint32, ndim=1] entropy_array) cdef get_assembled_entropy(self) -cdef class SeedlessSequence(): +cdef class SeedlessSeedSequence: pass diff --git a/numpy/random/bit_generator.pyx b/numpy/random/bit_generator.pyx index d9a733c7a618..0bb9552a86ce 100644 --- a/numpy/random/bit_generator.pyx +++ b/numpy/random/bit_generator.pyx @@ -226,8 +226,10 @@ class ISpawnableSeedSequence(ISeedSequence): """ -cdef class SeedlessSeedSequence(): - """ +cdef class SeedlessSeedSequence: + # the first line is used to populate `__text_signature__` + """SeedlessSeedSequence()\n-- + A seed sequence for BitGenerators with no need for seed state. See Also @@ -247,9 +249,9 @@ cdef class SeedlessSeedSequence(): ISpawnableSeedSequence.register(SeedlessSeedSequence) -cdef class SeedSequence(): - """ - SeedSequence(entropy=None, *, spawn_key=(), pool_size=4) +cdef class SeedSequence: + # the first line is used to populate `__text_signature__` + """SeedSequence(entropy=None, *, spawn_key=(), pool_size=4, n_children_spawned=0)\n-- SeedSequence mixes sources of entropy in a reproducible way to set the initial state for independent and very probably non-overlapping @@ -489,9 +491,9 @@ cdef class SeedSequence(): ISpawnableSeedSequence.register(SeedSequence) -cdef class BitGenerator(): - """ - BitGenerator(seed=None) +cdef class BitGenerator: + # the first line is used to populate `__text_signature__` + """BitGenerator(seed=None)\n-- Base Class for generic BitGenerators, which provide a stream of random bits based on different algorithms. Must be overridden. diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index 2bf6441bb368..8e7f437641ca 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -119,8 +119,8 @@ cdef object int64_to_long(object x): cdef class RandomState: - """ - RandomState(seed=None) + # the first line is used to populate `__text_signature__` + """RandomState(seed=None)\n-- Container for the slow Mersenne Twister pseudo-random number generator. Consider using a different BitGenerator with the Generator container @@ -542,16 +542,16 @@ cdef class RandomState: Examples -------- - A real world example: Assume a company has 10000 customer support + A real world example: Assume a company has 10000 customer support agents and the average time between customer calls is 4 minutes. >>> n = 10000 >>> time_between_calls = np.random.default_rng().exponential(scale=4, size=n) - What is the probability that a customer will call in the next - 4 to 5 minutes? - - >>> x = ((time_between_calls < 5).sum())/n + What is the probability that a customer will call in the next + 4 to 5 minutes? + + >>> x = ((time_between_calls < 5).sum())/n >>> y = ((time_between_calls < 4).sum())/n >>> x-y 0.08 # may vary @@ -1087,9 +1087,9 @@ cdef class RandomState: greater than or equal to low. The default value is 0. high : float or array_like of floats Upper boundary of the output interval. All values generated will be - less than or equal to high. The high limit may be included in the - returned array of floats due to floating-point rounding in the - equation ``low + (high-low) * random_sample()``. The default value + less than or equal to high. 
The high limit may be included in the + returned array of floats due to floating-point rounding in the + equation ``low + (high-low) * random_sample()``. The default value is 1.0. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then @@ -2226,14 +2226,14 @@ cdef class RandomState: Does their energy intake deviate systematically from the recommended value of 7725 kJ? Our null hypothesis will be the absence of deviation, and the alternate hypothesis will be the presence of an effect that could be - either positive or negative, hence making our test 2-tailed. + either positive or negative, hence making our test 2-tailed. Because we are estimating the mean and we have N=11 values in our sample, - we have N-1=10 degrees of freedom. We set our significance level to 95% and - compute the t statistic using the empirical mean and empirical standard - deviation of our intake. We use a ddof of 1 to base the computation of our + we have N-1=10 degrees of freedom. We set our significance level to 95% and + compute the t statistic using the empirical mean and empirical standard + deviation of our intake. We use a ddof of 1 to base the computation of our empirical standard deviation on an unbiased estimate of the variance (note: - the final estimate is not unbiased due to the concave nature of the square + the final estimate is not unbiased due to the concave nature of the square root). >>> np.mean(intake) @@ -2251,18 +2251,18 @@ cdef class RandomState: >>> s = np.random.standard_t(10, size=1000000) >>> h = plt.hist(s, bins=100, density=True) - Does our t statistic land in one of the two critical regions found at + Does our t statistic land in one of the two critical regions found at both tails of the distribution? >>> np.sum(np.abs(t) < np.abs(s)) / float(len(s)) 0.018318 #random < 0.05, statistic is in critical region - The probability value for this 2-tailed test is about 1.83%, which is - lower than the 5% pre-determined significance threshold. + The probability value for this 2-tailed test is about 1.83%, which is + lower than the 5% pre-determined significance threshold. Therefore, the probability of observing values as extreme as our intake - conditionally on the null hypothesis being true is too low, and we reject - the null hypothesis of no deviation. + conditionally on the null hypothesis being true is too low, and we reject + the null hypothesis of no deviation. 
""" return cont(&legacy_standard_t, &self._aug_state, size, self.lock, 1, diff --git a/numpy/random/tests/test_regression.py b/numpy/random/tests/test_regression.py index b29bd526266e..eeaf6d2b4bd3 100644 --- a/numpy/random/tests/test_regression.py +++ b/numpy/random/tests/test_regression.py @@ -1,8 +1,11 @@ +import inspect import sys +import pytest + import numpy as np from numpy import random -from numpy.testing import assert_, assert_array_equal, assert_raises +from numpy.testing import IS_PYPY, assert_, assert_array_equal, assert_raises class TestRegression: @@ -146,3 +149,27 @@ def __array__(self, dtype=None, copy=None): perm = rng.permutation(m) assert_array_equal(perm, np.array([2, 1, 4, 0, 3])) assert_array_equal(m.__array__(), np.arange(5)) + + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + @pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") + @pytest.mark.parametrize( + "cls", + [ + random.Generator, + random.MT19937, + random.PCG64, + random.PCG64DXSM, + random.Philox, + random.RandomState, + random.SFC64, + random.BitGenerator, + random.SeedSequence, + random.bit_generator.SeedlessSeedSequence, + ], + ) + def test_inspect_signature(self, cls: type) -> None: + assert hasattr(cls, "__text_signature__") + try: + inspect.signature(cls) + except ValueError: + pytest.fail(f"invalid signature: {cls.__module__}.{cls.__qualname__}") From 3d2e7a6a2e48eace87361941defbef3dc54b2b2a Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 4 Nov 2025 14:17:15 +0100 Subject: [PATCH 0819/1018] BUG: ``dtype`` and ``dtype.newbyteorder`` runtime signatures --- numpy/_core/_add_newdocs.py | 6 +++++ numpy/_core/tests/test_dtype.py | 41 +++++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 78c660f423f7..995aff7e5874 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -6192,6 +6192,9 @@ add_newdoc('numpy._core.multiarray', 'dtype', """ + dtype(dtype, align=False, copy=False, metadata={}) + -- + dtype(dtype, align=False, copy=False, [metadata]) Create a data type object. @@ -6758,6 +6761,9 @@ add_newdoc('numpy._core.multiarray', 'dtype', ('newbyteorder', """ + newbyteorder($self, new_order='S', /) + -- + newbyteorder(new_order='S', /) Return a new dtype with a different byte order. diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 611d5857fb15..c5c6ae653197 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -1,5 +1,6 @@ import ctypes import gc +import inspect import operator import pickle import sys @@ -17,6 +18,7 @@ from numpy._core._rational_tests import rational from numpy.testing import ( HAS_REFCOUNT, + IS_PYPY, IS_PYSTON, IS_WASM, assert_, @@ -1994,3 +1996,42 @@ def test_creating_dtype_with_dtype_class_errors(): # Regression test for #25031, calling `np.dtype` with itself segfaulted. 
with pytest.raises(TypeError, match="Cannot convert np.dtype into a"): np.array(np.ones(10), dtype=np.dtype) + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +def test_dtype_signature(): + sig = inspect.signature(np.dtype) + + assert len(sig.parameters) == 4 + + assert "dtype" in sig.parameters + assert sig.parameters["dtype"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert sig.parameters["dtype"].default is inspect.Parameter.empty + + assert "align" in sig.parameters + assert sig.parameters["align"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert sig.parameters["align"].default is False + + assert "copy" in sig.parameters + assert sig.parameters["copy"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert sig.parameters["copy"].default is False + + assert "metadata" in sig.parameters + assert sig.parameters["metadata"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert sig.parameters["metadata"].default == {} + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +def test_dtype_newbyteorder_signature(): + sig = inspect.signature(np.dtype.newbyteorder) + + assert len(sig.parameters) == 2 + + assert "self" in sig.parameters + assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY + assert sig.parameters["self"].default is inspect.Parameter.empty + + assert "new_order" in sig.parameters + assert sig.parameters["new_order"].kind is inspect.Parameter.POSITIONAL_ONLY + assert sig.parameters["new_order"].default == "S" From d3d2db1ac1d1302a075622ada2ef70aad9ae9bd8 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 7 Nov 2025 02:20:31 +0100 Subject: [PATCH 0820/1018] BUG: ``dtypes.*DType`` runtime signatures --- numpy/_core/_add_newdocs.py | 50 ++++++++++++++ numpy/_core/src/multiarray/dtypemeta.c | 7 +- numpy/_core/tests/test_dtype.py | 94 ++++++++++++++++---------- 3 files changed, 109 insertions(+), 42 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 995aff7e5874..3e15e624739c 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -7391,8 +7391,58 @@ def refer_to_array_attribute(attr, method=True): """) +############################################################################## +# +# Documentation for `dtypes.*` classes +# +############################################################################## + +for _dtype_name, _signature, _sctype_name in ( + ("BoolDType", "()", "bool"), + ("Int8DType", "()", "int8"), + ("UInt8DType", "()", "uint8"), + ("Int16DType", "()", "int16"), + ("UInt16DType", "()", "uint16"), + ("Int32DType", "()", "int32"), + ("IntDType", "()", "intc"), + ("UInt32DType", "()", "uint32"), + ("UIntDType", "()", "uintc"), + ("Int64DType", "()", "int64"), + ("UInt64DType", "()", "uint64"), + ("LongLongDType", "()", "longlong"), + ("ULongLongDType", "()", "ulonglong"), + ("Float16DType", "()", "float16"), + ("Float32DType", "()", "float32"), + ("Float64DType", "()", "float64"), + ("LongDoubleDType", "()", "longdouble"), + ("Complex64DType", "()", "complex64"), + ("Complex128DType", "()", "complex128"), + ("CLongDoubleDType", "()", "clongdouble"), + ("ObjectDType", "()", "object"), + ("BytesDType", "(size, /)", "bytes_"), + ("StrDType", "(size, /)", "str_"), + ("VoidDType", "(length, /)", "void"), + ("DateTime64DType", "(unit, /)", "datetime64"), + ("TimeDelta64DType", "(unit, /)", 
"timedelta64"), +): + add_newdoc('numpy.dtypes', _dtype_name, + f""" + {_dtype_name}{_signature} + -- + + DType class corresponding to the `numpy.{_sctype_name}` scalar type. + + Please see `numpy.dtype` for the typical way to create dtype instances and + :ref:`arrays.dtypes` for additional information. + """) + del _dtype_name, _signature, _sctype_name # avoid namespace pollution + + add_newdoc('numpy._core.multiarray', 'StringDType', """ + StringDType(*, coerce=True, **kwargs) + -- + StringDType(*, na_object=np._NoValue, coerce=True) Create a StringDType instance. diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 1c208aa54a84..bada1addd9cc 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -1154,12 +1154,7 @@ dtypemeta_wrap_legacy_descriptor( .tp_flags = Py_TPFLAGS_DEFAULT, .tp_base = NULL, /* set below */ .tp_new = (newfunc)legacy_dtype_default_new, - .tp_doc = ( - "DType class corresponding to the scalar type and dtype of " - "the same name.\n\n" - "Please see `numpy.dtype` for the typical way to create\n" - "dtype instances and :ref:`arrays.dtypes` for additional\n" - "information."), + .tp_doc = NULL, /* set in python */ },}, .flags = NPY_DT_LEGACY, /* Further fields are not common between DTypes */ diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index c5c6ae653197..6623a7668af2 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -1999,39 +1999,61 @@ def test_creating_dtype_with_dtype_class_errors(): @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") -def test_dtype_signature(): - sig = inspect.signature(np.dtype) - - assert len(sig.parameters) == 4 - - assert "dtype" in sig.parameters - assert sig.parameters["dtype"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD - assert sig.parameters["dtype"].default is inspect.Parameter.empty - - assert "align" in sig.parameters - assert sig.parameters["align"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD - assert sig.parameters["align"].default is False - - assert "copy" in sig.parameters - assert sig.parameters["copy"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD - assert sig.parameters["copy"].default is False - - assert "metadata" in sig.parameters - assert sig.parameters["metadata"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD - assert sig.parameters["metadata"].default == {} - -@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") -def test_dtype_newbyteorder_signature(): - sig = inspect.signature(np.dtype.newbyteorder) - - assert len(sig.parameters) == 2 - - assert "self" in sig.parameters - assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY - assert sig.parameters["self"].default is inspect.Parameter.empty - - assert "new_order" in sig.parameters - assert sig.parameters["new_order"].kind is inspect.Parameter.POSITIONAL_ONLY - assert sig.parameters["new_order"].default == "S" +@pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") +class TestDTypeSignatures: + def test_signature_dtype(self): + sig = inspect.signature(np.dtype) + + assert len(sig.parameters) == 4 + + assert "dtype" in sig.parameters + assert sig.parameters["dtype"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert sig.parameters["dtype"].default is inspect.Parameter.empty + + assert "align" 
in sig.parameters + assert sig.parameters["align"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert sig.parameters["align"].default is False + + assert "copy" in sig.parameters + assert sig.parameters["copy"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert sig.parameters["copy"].default is False + + assert "metadata" in sig.parameters + assert sig.parameters["metadata"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert sig.parameters["metadata"].default == {} + + def test_signature_dtype_newbyteorder(self): + sig = inspect.signature(np.dtype.newbyteorder) + + assert len(sig.parameters) == 2 + + assert "self" in sig.parameters + assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY + assert sig.parameters["self"].default is inspect.Parameter.empty + + assert "new_order" in sig.parameters + assert sig.parameters["new_order"].kind is inspect.Parameter.POSITIONAL_ONLY + assert sig.parameters["new_order"].default == "S" + + @pytest.mark.parametrize("typename", np.dtypes.__all__) + def test_signature_dtypes_classes(self, typename: str): + dtype_type = getattr(np.dtypes, typename) + sig = inspect.signature(dtype_type) + + match typename.lower().removesuffix("dtype"): + case "bytes" | "str": + params_expect = {"size"} + case "void": + params_expect = {"length"} + case "datetime64" | "timedelta64": + params_expect = {"unit"} + case "string": + # `na_object` cannot be used in the text signature because of its + # `np._NoValue` default, which isn't supported by `inspect.signature`, + # so `**kwargs` is used instead. + params_expect = {"coerce", "kwargs"} + case _: + params_expect = set() + + params_actual = set(sig.parameters) + assert params_actual == params_expect From ead381527798f3d74bd2668b830138b7b82fe340 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 7 Nov 2025 03:50:17 +0100 Subject: [PATCH 0821/1018] DOC: Warn that `{Void,DateTime64,TimeDelta64}DType` cannot be instantiated --- numpy/_core/_add_newdocs.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 3e15e624739c..61c646f5aae4 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -7425,17 +7425,26 @@ def refer_to_array_attribute(attr, method=True): ("DateTime64DType", "(unit, /)", "datetime64"), ("TimeDelta64DType", "(unit, /)", "timedelta64"), ): + _extra_docs = "" + if _dtype_name in {"VoidDType", "DateTime64DType", "TimeDelta64DType"}: + _extra_docs = f""" + .. warning:: + ``np.dtypes.{_dtype_name}`` cannot be instantiated directly. + Use ``np.dtype("{_sctype_name}[{{unit}}]")`` instead. + """ + add_newdoc('numpy.dtypes', _dtype_name, f""" {_dtype_name}{_signature} -- DType class corresponding to the `numpy.{_sctype_name}` scalar type. - - Please see `numpy.dtype` for the typical way to create dtype instances and - :ref:`arrays.dtypes` for additional information. + {_extra_docs} + See `numpy.dtype` for the typical way to create dtype instances + and :ref:`arrays.dtypes` for additional information. 
""") - del _dtype_name, _signature, _sctype_name # avoid namespace pollution + + del _dtype_name, _signature, _sctype_name, _extra_docs # avoid namespace pollution add_newdoc('numpy._core.multiarray', 'StringDType', From 9ac8d69e141350cebbec8c9b247c8a459ef212cb Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 7 Nov 2025 04:01:07 +0100 Subject: [PATCH 0822/1018] DOC, TYP: Workaround sphinx rendering issue with (slightly incorrect) `metadata={}` --- numpy/__init__.pyi | 202 +++++++++++++++++++------------- numpy/_core/_add_newdocs.py | 2 +- numpy/_core/tests/test_dtype.py | 7 +- 3 files changed, 126 insertions(+), 85 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index b03f67b1e281..6fd6fbd3898a 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1177,8 +1177,9 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: type[float64 | ct.c_double] | _Float64Codes | _DoubleCodes | None, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ... ) -> dtype[float64]: ... @@ -1188,8 +1189,9 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: _DTypeLike[_ScalarT], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[_ScalarT]: ... @@ -1207,48 +1209,54 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: type[builtins.bool | np.bool | ct.c_bool] | _BoolCodes, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[str, Any] = ..., ) -> dtype[np.bool]: ... @overload def __new__( cls, dtype: type[int], # also accepts `type[builtins.bool]` - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[str, Any] = ..., ) -> dtype[int_ | np.bool]: ... @overload def __new__( cls, dtype: type[float], # also accepts `type[int | bool]` - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[str, Any] = ..., ) -> dtype[float64 | int_ | np.bool]: ... @overload def __new__( cls, dtype: type[complex], # also accepts `type[float | int | bool]` - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[str, Any] = ..., ) -> dtype[complex128 | float64 | int_ | np.bool]: ... @overload def __new__( cls, dtype: type[bytes | ct.c_char] | _BytesCodes, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[str, Any] = ..., ) -> dtype[bytes_]: ... @overload def __new__( cls, dtype: type[str] | _StrCodes, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[str, Any] = ..., ) -> dtype[str_]: ... 
# NOTE: These `memoryview` overloads assume PEP 688, which requires mypy to @@ -1261,8 +1269,9 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: type[void | memoryview] | _VoidDTypeLike | _VoidCodes, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[str, Any] = ..., ) -> dtype[void]: ... # NOTE: `_: type[object]` would also accept e.g. `type[object | complex]`, @@ -1271,8 +1280,9 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: type[object_ | _BuiltinObjectLike | ct.py_object[Any]] | _ObjectCodes, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[str, Any] = ..., ) -> dtype[object_]: ... @@ -1281,48 +1291,54 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: _UInt8Codes | _UByteCodes | type[ct.c_uint8 | ct.c_ubyte], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[uint8]: ... @overload def __new__( cls, dtype: _UInt16Codes | _UShortCodes | type[ct.c_uint16 | ct.c_ushort], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[uint16]: ... @overload def __new__( cls, dtype: _UInt32Codes | _UIntCCodes | type[ct.c_uint32 | ct.c_uint], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[uint32]: ... @overload def __new__( cls, dtype: _UInt64Codes | _ULongLongCodes | type[ct.c_uint64 | ct.c_ulonglong], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[uint64]: ... @overload def __new__( cls, dtype: _UIntPCodes | type[ct.c_void_p | ct.c_size_t], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[uintp]: ... @overload def __new__( cls, dtype: _ULongCodes | type[ct.c_ulong], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[ulong]: ... @@ -1331,48 +1347,54 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: _Int8Codes | _ByteCodes | type[ct.c_int8 | ct.c_byte], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[int8]: ... @overload def __new__( cls, dtype: _Int16Codes | _ShortCodes | type[ct.c_int16 | ct.c_short], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[int16]: ... @overload def __new__( cls, dtype: _Int32Codes | _IntCCodes | type[ct.c_int32 | ct.c_int], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[int32]: ... 
@overload def __new__( cls, dtype: _Int64Codes | _LongLongCodes | type[ct.c_int64 | ct.c_longlong], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[int64]: ... @overload def __new__( cls, dtype: _IntPCodes | type[intp | ct.c_ssize_t], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[intp]: ... @overload def __new__( cls, dtype: _LongCodes | type[ct.c_long], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[long]: ... @@ -1381,16 +1403,18 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: _Float16Codes | _HalfCodes, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[float16]: ... @overload def __new__( cls, dtype: _Float32Codes | _SingleCodes, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[float32]: ... # float64 codes are covered by overload 1 @@ -1398,8 +1422,9 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: _LongDoubleCodes | type[ct.c_longdouble], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[longdouble]: ... @@ -1408,24 +1433,27 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: _Complex64Codes | _CSingleCodes, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[complex64]: ... @overload def __new__( cls, dtype: _Complex128Codes | _CDoubleCodes, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[complex128]: ... @overload def __new__( cls, dtype: _CLongDoubleCodes, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[clongdouble]: ... @@ -1434,16 +1462,18 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: _TD64Codes, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[timedelta64]: ... @overload def __new__( cls, dtype: _DT64Codes, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[datetime64]: ... @@ -1452,9 +1482,10 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: dtypes.StringDType | _StringCodes, - align: builtins.bool = ..., - copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ... 
+ align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., ) -> dtypes.StringDType: ... # Combined char-codes and ctypes, analogous to the scalar-type hierarchy @@ -1462,56 +1493,63 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: _UnsignedIntegerCodes | _UnsignedIntegerCType, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[unsignedinteger]: ... @overload def __new__( cls, dtype: _SignedIntegerCodes | _SignedIntegerCType, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[signedinteger]: ... @overload def __new__( cls, dtype: _IntegerCodes | _IntegerCType, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[integer]: ... @overload def __new__( cls, dtype: _FloatingCodes | _FloatingCType, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[floating]: ... @overload def __new__( cls, dtype: _ComplexFloatingCodes, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[complexfloating]: ... @overload def __new__( cls, dtype: _InexactCodes | _FloatingCType, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[inexact]: ... @overload def __new__( cls, dtype: _CharacterCodes | type[bytes | builtins.str | ct.c_char], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[str, Any] = ..., ) -> dtype[character]: ... @@ -1520,8 +1558,9 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: builtins.str, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype: ... @@ -1535,8 +1574,9 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: type[object], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[object_ | Any]: ... 
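# Illustrative sketch, not part of the patch: ``metadata`` is optional but
# has no default representable in a text signature, so the docstring change
# below falls back to ``**kwargs``, matching what the updated
# ``test_signature_dtype`` asserts.
import inspect
import numpy as np

params = inspect.signature(np.dtype).parameters
assert list(params) == ["dtype", "align", "copy", "kwargs"]
assert params["kwargs"].kind is inspect.Parameter.VAR_KEYWORD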
diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 61c646f5aae4..219c6e07a176 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -6192,7 +6192,7 @@ add_newdoc('numpy._core.multiarray', 'dtype', """ - dtype(dtype, align=False, copy=False, metadata={}) + dtype(dtype, align=False, copy=False, **kwargs) -- dtype(dtype, align=False, copy=False, [metadata]) diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 6623a7668af2..4ac88d7d0250 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -2018,9 +2018,10 @@ def test_signature_dtype(self): assert sig.parameters["copy"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD assert sig.parameters["copy"].default is False - assert "metadata" in sig.parameters - assert sig.parameters["metadata"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD - assert sig.parameters["metadata"].default == {} + # the optional `metadata` parameter has no default, so `**kwargs` must be used + assert "kwargs" in sig.parameters + assert sig.parameters["kwargs"].kind is inspect.Parameter.VAR_KEYWORD + assert sig.parameters["kwargs"].default is inspect.Parameter.empty def test_signature_dtype_newbyteorder(self): sig = inspect.signature(np.dtype.newbyteorder) From a0ce2fb88f8d631045bc568923a0a4c0a1b1414e Mon Sep 17 00:00:00 2001 From: Amelia Thurdekoos Date: Fri, 7 Nov 2025 00:31:42 -0800 Subject: [PATCH 0823/1018] MAINT: Migrate einsum.c.src to C++ (einsum.cpp) (#30142) Addresses one item from #29528 No C++ templates were needed as einsum.c.src was not templated but was still a c.src file for historical reason. This is a good starting point for future c++ migration work. What this PR does: - Renamed `einsum.c.src` to `einsum.cpp` - Updated references in genapi.py and meson.build - In `einsum.cpp`: - Added `extern "C"` C linkage where necessary - Added 4 explicit casts to maintain functionality --- numpy/_core/code_generators/genapi.py | 2 +- numpy/_core/meson.build | 2 +- .../src/multiarray/{einsum.c.src => einsum.cpp} | 14 +++++++------- 3 files changed, 9 insertions(+), 9 deletions(-) rename numpy/_core/src/multiarray/{einsum.c.src => einsum.cpp} (99%) diff --git a/numpy/_core/code_generators/genapi.py b/numpy/_core/code_generators/genapi.py index 6a370a7dc3cd..e97177e46153 100644 --- a/numpy/_core/code_generators/genapi.py +++ b/numpy/_core/code_generators/genapi.py @@ -62,7 +62,7 @@ def get_processor(): join('multiarray', 'descriptor.c'), join('multiarray', 'dlpack.c'), join('multiarray', 'dtypemeta.c'), - join('multiarray', 'einsum.c.src'), + join('multiarray', 'einsum.cpp'), join('multiarray', 'public_dtype_api.c'), join('multiarray', 'flagsobject.c'), join('multiarray', 'getset.c'), diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index dc07586bcf8e..2ce11153b581 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1161,7 +1161,7 @@ src_multiarray = multiarray_gen_headers + [ 'src/multiarray/dragon4.c', 'src/multiarray/dtype_transfer.c', 'src/multiarray/dtype_traversal.c', - src_file.process('src/multiarray/einsum.c.src'), + 'src/multiarray/einsum.cpp', src_file.process('src/multiarray/einsum_sumprod.c.src'), 'src/multiarray/public_dtype_api.c', 'src/multiarray/flagsobject.c', diff --git a/numpy/_core/src/multiarray/einsum.c.src b/numpy/_core/src/multiarray/einsum.cpp similarity index 99% rename from numpy/_core/src/multiarray/einsum.c.src rename to numpy/_core/src/multiarray/einsum.cpp index 3733c436cb1b..f12cec7824d7 100644 
--- a/numpy/_core/src/multiarray/einsum.c.src +++ b/numpy/_core/src/multiarray/einsum.cpp @@ -20,14 +20,14 @@ #include //PyArray_AssignRawScalar #include - +extern "C" { #include "convert.h" #include "common.h" #include "ctors.h" #include "einsum_sumprod.h" #include "einsum_debug.h" - +} /* * Parses the subscripts for one operand into an output of 'ndim' @@ -40,7 +40,6 @@ * - subscripts="abbcbc", ndim=6 -> op_labels=[97, 98, -1, 99, -3, -2] * - subscripts="ab...bc", ndim=6 -> op_labels=[97, 98, 0, 0, -3, 99] */ - static int parse_operand_subscripts(char *subscripts, int length, int ndim, int iop, char *op_labels, @@ -131,13 +130,13 @@ parse_operand_subscripts(char *subscripts, int length, /* If it is a proper label, find any duplicates of it. */ if (label > 0) { /* Search for the next matching label. */ - char *next = memchr(op_labels + idim + 1, label, ndim - idim - 1); + char *next = (char*)memchr(op_labels + idim + 1, label, ndim - idim - 1); while (next != NULL) { /* The offset from next to op_labels[idim] (negative). */ *next = (char)((op_labels + idim) - next); /* Search for the next matching label. */ - next = memchr(next + 1, label, op_labels + ndim - 1 - next); + next = (char*)memchr(next + 1, label, op_labels + ndim - 1 - next); } } } @@ -322,7 +321,7 @@ get_single_op_view(PyArrayObject *op, char *labels, Py_TYPE(op), PyArray_DESCR(op), ndim_output, new_dims, new_strides, PyArray_DATA(op), PyArray_ISWRITEABLE(op) ? NPY_ARRAY_WRITEABLE : 0, - (PyObject *)op, (PyObject *)op, 0); + (PyObject *)op, (PyObject *)op, (_NPY_CREATION_FLAGS)0); if (*ret == NULL) { return -1; @@ -472,7 +471,7 @@ prepare_op_axes(int ndim, int iop, char *labels, int *axes, } /* It's a labeled dimension, find the matching one */ else { - char *match = memchr(labels, label, ndim); + char *match = (char*)memchr(labels, label, ndim); /* If the op doesn't have the label, broadcast it */ if (match == NULL) { axes[i] = -1; @@ -1112,6 +1111,7 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop, * the strides that are fixed for the whole loop. */ stride = NpyIter_GetInnerStrideArray(iter); + sop = get_sum_of_products_function(nop, NpyIter_GetDescrArray(iter)[0]->type_num, NpyIter_GetDescrArray(iter)[0]->elsize, From 2ec5d92ea05cc5daea8974dddb98c85738611c53 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Fri, 7 Nov 2025 10:45:37 -0500 Subject: [PATCH 0824/1018] BUG: Fix handling by ``unique`` of signed zero in complex types. 
(#30125) --- benchmarks/benchmarks/bench_lib.py | 2 +- numpy/_core/src/multiarray/unique.cpp | 41 ++++++++++++++++++++------- numpy/lib/tests/test_arraysetops.py | 9 ++++++ 3 files changed, 40 insertions(+), 12 deletions(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index 3f7988a2ab7f..33e2871fc727 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -125,7 +125,7 @@ class Unique(Benchmark): # sizes of the 1D arrays [200, int(2e5)], # percent of np.nan in arrays - [10., 90.], + [0.0, 10., 90.], # percent of unique values in arrays [0.2, 20.], # dtypes of the arrays diff --git a/numpy/_core/src/multiarray/unique.cpp b/numpy/_core/src/multiarray/unique.cpp index 910c5f7c731e..77083c04c519 100644 --- a/numpy/_core/src/multiarray/unique.cpp +++ b/numpy/_core/src/multiarray/unique.cpp @@ -5,6 +5,7 @@ #include #include +#include #include #include #include @@ -42,31 +43,46 @@ size_t hash_integer(const T *value, npy_bool equal_nan) { template size_t hash_complex(const T *value, npy_bool equal_nan) { - S value_real = real(*value); - S value_imag = imag(*value); - int hasnan = npy_isnan(value_real) || npy_isnan(value_imag); + std::complex z = *reinterpret_cast *>(value); + int hasnan = npy_isnan(z.real()) || npy_isnan(z.imag()); if (equal_nan && hasnan) { return 0; } // Now, equal_nan is false or neither of the values is not NaN. // So we don't need to worry about NaN here. - const unsigned char* value_bytes = reinterpret_cast(value); - size_t hash = npy_fnv1a(value_bytes, sizeof(T)); + // Convert -0.0 to 0.0. + if (z.real() == 0.0) { + z.real(NPY_PZERO); + } + if (z.imag() == 0.0) { + z.imag(NPY_PZERO); + } + + size_t hash = npy_fnv1a(reinterpret_cast(&z), sizeof(z)); return hash; } size_t hash_complex_clongdouble(const npy_clongdouble *value, npy_bool equal_nan) { - npy_longdouble value_real = npy_creall(*value); - npy_longdouble value_imag = npy_cimagl(*value); - int hasnan = npy_isnan(value_real) || npy_isnan(value_imag); + std::complex z = + *reinterpret_cast *>(value); + int hasnan = npy_isnan(z.real()) || npy_isnan(z.imag()); if (equal_nan && hasnan) { return 0; } // Now, equal_nan is false or neither of the values is not NaN. // So we don't need to worry about NaN here. + + // Convert -0.0 to 0.0. + if (z.real() == 0.0) { + z.real(NPY_PZEROL); + } + if (z.imag() == 0.0) { + z.imag(NPY_PZEROL); + } + // Some floating-point complex dtypes (e.g., npy_complex256) include undefined or // unused bits in their binary representation // (see: https://github.com/numpy/numpy/blob/main/numpy/_core/src/npymath/npy_math_private.h#L254-L261). 
@@ -76,13 +92,14 @@ size_t hash_complex_clongdouble(const npy_clongdouble *value, npy_bool equal_nan #if defined(HAVE_LDOUBLE_INTEL_EXTENDED_12_BYTES_LE) || \ defined(HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE) || \ defined(HAVE_LDOUBLE_MOTOROLA_EXTENDED_12_BYTES_BE) + constexpr size_t SIZEOF_LDOUBLE_MAN = sizeof(ldouble_man_t); constexpr size_t SIZEOF_LDOUBLE_EXP = sizeof(ldouble_exp_t); constexpr size_t SIZEOF_LDOUBLE_SIGN = sizeof(ldouble_sign_t); constexpr size_t SIZEOF_BUFFER = 2 * (SIZEOF_LDOUBLE_MAN + SIZEOF_LDOUBLE_MAN + SIZEOF_LDOUBLE_EXP + SIZEOF_LDOUBLE_SIGN); unsigned char buffer[SIZEOF_BUFFER]; - union IEEEl2bitsrep bits_real{value_real}, bits_imag{value_imag}; + union IEEEl2bitsrep bits_real{z.real()}, bits_imag{z.imag()}; size_t offset = 0; for (const IEEEl2bitsrep &bits: {bits_real, bits_imag}) { @@ -101,8 +118,10 @@ size_t hash_complex_clongdouble(const npy_clongdouble *value, npy_bool equal_nan offset += SIZEOF_LDOUBLE_SIGN; } #else - constexpr size_t SIZEOF_BUFFER = NPY_SIZEOF_CLONGDOUBLE; - const unsigned char* buffer = reinterpret_cast(value); + + const unsigned char* buffer = reinterpret_cast(&z); + constexpr size_t SIZEOF_BUFFER = sizeof(z); + #endif size_t hash = npy_fnv1a(buffer, SIZEOF_BUFFER); diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index 9d5a4d693fa5..4e8d503427de 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -1291,3 +1291,12 @@ def test_unique_axis_float_raises_typeerror(self): arr1d = np.array([np.nan, 0, 0, np.nan]) with pytest.raises(TypeError, match="integer argument expected"): np.unique(arr1d, axis=0.0, equal_nan=False) + + @pytest.mark.parametrize('dt', [np.dtype('F'), np.dtype('D')]) + @pytest.mark.parametrize('values', [[complex(0.0, -1), complex(-0.0, -1), 0], + [-200, complex(-200, -0.0), -1], + [-25, 3, -5j, complex(-25, -0.0), 3j]]) + def test_unique_complex_signed_zeros(self, dt, values): + z = np.array(values, dtype=dt) + u = np.unique(z) + assert len(u) == len(values) - 1 From 15063ae71f5608529dbedd261279408903be5434 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Fri, 7 Nov 2025 14:41:06 -0500 Subject: [PATCH 0825/1018] BUG: Fix check of PyMem_Calloc return value. (#30176) --- numpy/_core/src/umath/wrapping_array_method.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/umath/wrapping_array_method.c b/numpy/_core/src/umath/wrapping_array_method.c index 9b3970561f3f..b340e72bdbfd 100644 --- a/numpy/_core/src/umath/wrapping_array_method.c +++ b/numpy/_core/src/umath/wrapping_array_method.c @@ -114,7 +114,7 @@ get_wrapping_auxdata(void) } else { res = PyMem_Calloc(1, sizeof(wrapping_auxdata)); - if (res < 0) { + if (res == NULL) { PyErr_NoMemory(); return NULL; } From f39abd4ffbca44f01eec353115ffd2d7ad296fb2 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Fri, 7 Nov 2025 14:50:59 -0500 Subject: [PATCH 0826/1018] ENH: Updates for the `spin bench` command. (#30175) * MAINT: spin: Use sys.executable to run a Python script in the bench function. 
* ENH: spin: Expose --factor as an option for 'bench' --- .spin/cmds.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/.spin/cmds.py b/.spin/cmds.py index 453faa77d932..ea62717e4f78 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -392,6 +392,11 @@ def lint(ctx, fix): '--quick', '-q', is_flag=True, default=False, help="Run each benchmark only once (timings won't be accurate)" ) +@click.option( + '--factor', '-f', default=1.05, + help="The factor above or below which a benchmark result is " + "considered reportable. This is passed on to the asv command." +) @click.argument( 'commits', metavar='', required=False, @@ -399,7 +404,7 @@ def lint(ctx, fix): ) @meson.build_dir_option @click.pass_context -def bench(ctx, tests, compare, verbose, quick, commits, build_dir): +def bench(ctx, tests, compare, verbose, quick, factor, commits, build_dir): """🏋 Run benchmarks. \b @@ -454,7 +459,7 @@ def bench(ctx, tests, compare, verbose, quick, commits, build_dir): meson._set_pythonpath(build_dir) p = spin.util.run( - ['python', '-c', 'import numpy as np; print(np.__version__)'], + [sys.executable, '-c', 'import numpy as np; print(np.__version__)'], cwd='benchmarks', echo=False, output=False @@ -482,7 +487,7 @@ def bench(ctx, tests, compare, verbose, quick, commits, build_dir): ) cmd_compare = [ - 'asv', 'continuous', '--factor', '1.05', + 'asv', 'continuous', '--factor', str(factor), ] + bench_args + [commit_a, commit_b] _run_asv(cmd_compare) From 884aec9750f0e42d563792678b2454ea86218f69 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Sun, 9 Nov 2025 14:12:49 +0100 Subject: [PATCH 0827/1018] BUG: Allow np.percentile to operate on float16 data (#29105) * BUG: Allow np.percentile to operate on float16 data * add an extra regression test * add an extra regression test * remove unused default value * add release note * review comments: part1 * review comments: part 2 * review comments: part 3 --- doc/release/upcoming_changes/29105.change.rst | 1 + numpy/lib/_function_base_impl.py | 25 +++---- numpy/lib/tests/test_function_base.py | 66 ++++++++++++++++--- 3 files changed, 72 insertions(+), 20 deletions(-) create mode 100644 doc/release/upcoming_changes/29105.change.rst diff --git a/doc/release/upcoming_changes/29105.change.rst b/doc/release/upcoming_changes/29105.change.rst new file mode 100644 index 000000000000..b5d4a9838f30 --- /dev/null +++ b/doc/release/upcoming_changes/29105.change.rst @@ -0,0 +1 @@ +* The accuracy of ``np.quantile`` and ``np.percentile`` for 16- and 32-bit floating point input data has been improved. \ No newline at end of file diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 44db48f7a00c..58ce2f63dfa2 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -4219,9 +4219,7 @@ def percentile(a, if a.dtype.kind == "c": raise TypeError("a must be an array of real numbers") - # Use dtype of array if possible (e.g., if q is a python int or float) - # by making the divisor have the dtype of the data array. - q = np.true_divide(q, a.dtype.type(100) if a.dtype.kind == "f" else 100, out=...) + q = np.true_divide(q, 100, out=...) if not _quantile_is_valid(q): raise ValueError("Percentiles must be in the range [0, 100]") @@ -4469,11 +4467,7 @@ def quantile(a, if a.dtype.kind == "c": raise TypeError("a must be an array of real numbers") - # Use dtype of array if possible (e.g., if q is a python int or float). 
- if isinstance(q, (int, float)) and a.dtype.kind == "f": - q = np.asanyarray(q, dtype=a.dtype) - else: - q = np.asanyarray(q) + q = np.asanyarray(q) if not _quantile_is_valid(q): raise ValueError("Quantiles must be in the range [0, 1]") @@ -4549,7 +4543,7 @@ def _compute_virtual_index(n, quantiles, alpha: float, beta: float): ) - 1 -def _get_gamma(virtual_indexes, previous_indexes, method): +def _get_gamma(virtual_indexes, previous_indexes, method, dtype): """ Compute gamma (a.k.a 'm' or 'weight') for the linear interpolation of quantiles. @@ -4570,7 +4564,7 @@ def _get_gamma(virtual_indexes, previous_indexes, method): gamma = method["fix_gamma"](gamma, virtual_indexes) # Ensure both that we have an array, and that we keep the dtype # (which may have been matched to the input array). - return np.asanyarray(gamma, dtype=virtual_indexes.dtype) + return np.asanyarray(gamma, dtype=dtype) def _lerp(a, b, t, out=None): @@ -4788,7 +4782,16 @@ def _quantile( previous = arr[previous_indexes] next = arr[next_indexes] # --- Linear interpolation - gamma = _get_gamma(virtual_indexes, previous_indexes, method_props) + if arr.dtype.kind in "iu": + gtype = None + elif arr.dtype.kind == "f": + # make sure the return value matches the input array type + gtype = arr.dtype + else: + gtype = virtual_indexes.dtype + + gamma = _get_gamma(virtual_indexes, previous_indexes, + method_props, gtype) result_shape = virtual_indexes.shape + (1,) * (arr.ndim - 1) gamma = gamma.reshape(result_shape) result = _lerp(previous, diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 57228e363e71..1aaca3bae1d6 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -3277,6 +3277,16 @@ def test_period(self): assert_almost_equal(np.interp(x, xp, fp, period=360), y) +quantile_methods = [ + 'inverted_cdf', 'averaged_inverted_cdf', 'closest_observation', + 'interpolated_inverted_cdf', 'hazen', 'weibull', 'linear', + 'median_unbiased', 'normal_unbiased', 'nearest', 'lower', 'higher', + 'midpoint'] + + +methods_supporting_weights = ["inverted_cdf"] + + class TestPercentile: def test_basic(self): @@ -3870,15 +3880,38 @@ def test_nat_basic(self, dtype, pos): res = np.percentile(a, 30, axis=0) assert_array_equal(np.isnat(res), [False, True, False]) - -quantile_methods = [ - 'inverted_cdf', 'averaged_inverted_cdf', 'closest_observation', - 'interpolated_inverted_cdf', 'hazen', 'weibull', 'linear', - 'median_unbiased', 'normal_unbiased', 'nearest', 'lower', 'higher', - 'midpoint'] - - -methods_supporting_weights = ["inverted_cdf"] + @pytest.mark.parametrize("qtype", [np.float16, np.float32]) + @pytest.mark.parametrize("method", quantile_methods) + def test_percentile_gh_29003(self, qtype, method): + # test that with float16 or float32 input we do not get overflow + zero = qtype(0) + one = qtype(1) + a = np.zeros(65521, qtype) + a[:20_000] = one + z = np.percentile(a, 50, method=method) + assert z == zero + assert z.dtype == a.dtype + z = np.percentile(a, 99, method=method) + assert z == one + assert z.dtype == a.dtype + + def test_percentile_gh_29003_Fraction(self): + zero = Fraction(0) + one = Fraction(1) + a = np.array([zero] * 65521) + a[:20_000] = one + z = np.percentile(a, 50) + assert z == zero + z = np.percentile(a, Fraction(50)) + assert z == zero + assert np.array(z).dtype == a.dtype + + z = np.percentile(a, 99) + assert z == one + # test that with only Fraction input the return type is a Fraction + z = np.percentile(a, Fraction(99)) + assert z == 
one + assert np.array(z).dtype == a.dtype class TestQuantile: @@ -4244,6 +4277,21 @@ def test_closest_observation(self): assert_equal(4, np.quantile(arr[0:9], q, method=m)) assert_equal(5, np.quantile(arr, q, method=m)) + def test_quantile_gh_29003_Fraction(self): + r = np.quantile([1, 2], q=Fraction(1)) + assert r == Fraction(2) + assert isinstance(r, Fraction) + + r = np.quantile([1, 2], q=Fraction(.5)) + assert r == Fraction(3, 2) + assert isinstance(r, Fraction) + + def test_float16_gh_29003(self): + a = np.arange(50_001, dtype=np.float16) + q = .999 + value = np.quantile(a, q) + assert value == q * 50_000 + assert value.dtype == np.float16 class TestLerp: @hypothesis.given(t0=st.floats(allow_nan=False, allow_infinity=False, From b2bde8bc660463e0d7caa9276d4c49739ce615ab Mon Sep 17 00:00:00 2001 From: star1327p Date: Sun, 9 Nov 2025 12:19:12 -0800 Subject: [PATCH 0828/1018] DOC: Corrected grammatical issues in code comments --- doc/source/dev/depending_on_numpy.rst | 2 +- numpy/_core/src/multiarray/array_converter.c | 2 +- numpy/_core/src/multiarray/array_method.h | 2 +- numpy/_core/src/multiarray/compiled_base.c | 2 +- numpy/_core/src/multiarray/nditer_pywrap.c | 2 +- numpy/_core/src/multiarray/textreading/rows.c | 2 +- numpy/_core/tests/test_dtype.py | 2 +- numpy/distutils/pathccompiler.py | 2 +- numpy/lib/_function_base_impl.py | 2 +- numpy/lib/mixins.py | 2 +- numpy/lib/tests/test_histograms.py | 2 +- numpy/polynomial/hermite.py | 6 +++--- numpy/polynomial/hermite_e.py | 6 +++--- numpy/random/_pcg64.pyx | 4 ++-- numpy/random/_philox.pyx | 2 +- 15 files changed, 20 insertions(+), 20 deletions(-) diff --git a/doc/source/dev/depending_on_numpy.rst b/doc/source/dev/depending_on_numpy.rst index de809462654b..7583dc9af84a 100644 --- a/doc/source/dev/depending_on_numpy.rst +++ b/doc/source/dev/depending_on_numpy.rst @@ -92,7 +92,7 @@ If a package either uses the NumPy C-API directly or it uses some other tool that depends on it like Cython or Pythran, NumPy is a *build-time* dependency of the package. -By default, NumPy exposes a API that is backward compatible with the earliest +By default, NumPy exposes an API that is backward compatible with the earliest NumPy version that supports the oldest Python version currently supported by NumPy. For example, NumPy 1.25.0 supports Python 3.9 and above; and the earliest NumPy version to support Python 3.9 was 1.19. Therefore we guarantee diff --git a/numpy/_core/src/multiarray/array_converter.c b/numpy/_core/src/multiarray/array_converter.c index 10dc83ac657f..578e7b1554f4 100644 --- a/numpy/_core/src/multiarray/array_converter.c +++ b/numpy/_core/src/multiarray/array_converter.c @@ -97,7 +97,7 @@ array_converter_new( Py_INCREF(item->DType); /* - * Check whether we were passed a an int/float/complex Python scalar. + * Check whether we were passed an int/float/complex Python scalar. * If not, set `descr` and clear pyscalar/scalar flags as needed. */ if (item->scalar_input && npy_mark_tmp_array_if_pyscalar( diff --git a/numpy/_core/src/multiarray/array_method.h b/numpy/_core/src/multiarray/array_method.h index bcf270899f13..303425e38274 100644 --- a/numpy/_core/src/multiarray/array_method.h +++ b/numpy/_core/src/multiarray/array_method.h @@ -69,7 +69,7 @@ typedef struct PyArrayMethodObject_tag { /* - * We will sometimes have to create a ArrayMethod and allow passing it around, + * We will sometimes have to create an ArrayMethod and allow passing it around, * similar to `instance.method` returning a bound method, e.g. 
a function like * `ufunc.resolve()` can return a bound object. * The current main purpose of the BoundArrayMethod is that it holds on to the diff --git a/numpy/_core/src/multiarray/compiled_base.c b/numpy/_core/src/multiarray/compiled_base.c index 179c8e404322..e6a45554555f 100644 --- a/numpy/_core/src/multiarray/compiled_base.c +++ b/numpy/_core/src/multiarray/compiled_base.c @@ -450,7 +450,7 @@ _linear_search(const npy_double key, const npy_double *arr, const npy_intp len, /** @brief find index of a sorted array such that arr[i] <= key < arr[i + 1]. * - * If an starting index guess is in-range, the array values around this + * If a starting index guess is in-range, the array values around this * index are first checked. This allows for repeated calls for well-ordered * keys (a very common case) to use the previous index as a very good guess. * diff --git a/numpy/_core/src/multiarray/nditer_pywrap.c b/numpy/_core/src/multiarray/nditer_pywrap.c index b68f7ad9708d..a0e1b09a584c 100644 --- a/numpy/_core/src/multiarray/nditer_pywrap.c +++ b/numpy/_core/src/multiarray/nditer_pywrap.c @@ -916,7 +916,7 @@ NpyIter_NestedIters(PyObject *NPY_UNUSED(self), } if (!PyTuple_Check(item) && !PyList_Check(item)) { PyErr_SetString(PyExc_ValueError, - "Each item in axes must be a an integer tuple"); + "Each item in axes must be an integer tuple"); Py_DECREF(item); return NULL; } diff --git a/numpy/_core/src/multiarray/textreading/rows.c b/numpy/_core/src/multiarray/textreading/rows.c index e5a294ecb01d..2fa1fb948553 100644 --- a/numpy/_core/src/multiarray/textreading/rows.c +++ b/numpy/_core/src/multiarray/textreading/rows.c @@ -156,7 +156,7 @@ create_conv_funcs( * returned array can differ for strings. * @param homogeneous Whether the datatype of the array is not homogeneous, * i.e. not structured. In this case the number of columns has to be - * discovered an the returned array will be 2-dimensional rather than + * discovered and the returned array will be 2-dimensional rather than * 1-dimensional. * * @returns Returns the result as an array object or NULL on error. The result diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 611d5857fb15..98403826f562 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -209,7 +209,7 @@ def test_field_order_equality(self): 'formats': ['i4', 'f4'], 'offsets': [4, 0]}) assert_equal(x == y, False) - # This is an safe cast (not equiv) due to the different names: + # This is a safe cast (not equiv) due to the different names: assert np.can_cast(x, y, casting="safe") @pytest.mark.parametrize( diff --git a/numpy/distutils/pathccompiler.py b/numpy/distutils/pathccompiler.py index 48051810ee21..1f879edf4d21 100644 --- a/numpy/distutils/pathccompiler.py +++ b/numpy/distutils/pathccompiler.py @@ -3,7 +3,7 @@ class PathScaleCCompiler(UnixCCompiler): """ - PathScale compiler compatible with an gcc built Python. + PathScale compiler compatible with a gcc built Python. """ compiler_type = 'pathcc' diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 58ce2f63dfa2..f37f69bcb5dd 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -4731,7 +4731,7 @@ def _quantile( if weights is None: # --- Computation of indexes # Index where to find the value in the sorted array. - # Virtual because it is a floating point value, not an valid index. + # Virtual because it is a floating point value, not a valid index. 
# The nearest neighbours are used for interpolation try: method_props = _QuantileMethods[method] diff --git a/numpy/lib/mixins.py b/numpy/lib/mixins.py index 9a531c6b9022..cd02bf7f4a50 100644 --- a/numpy/lib/mixins.py +++ b/numpy/lib/mixins.py @@ -70,7 +70,7 @@ class NDArrayOperatorsMixin: but that should support arithmetic and numpy universal functions like arrays as described in :external+neps:doc:`nep-0013-ufunc-overrides`. - As an trivial example, consider this implementation of an ``ArrayLike`` + As a trivial example, consider this implementation of an ``ArrayLike`` class that simply wraps a NumPy array and ensures that the result of any arithmetic operation is also an ``ArrayLike`` object: diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py index fec8828d242e..cae11cfdcd65 100644 --- a/numpy/lib/tests/test_histograms.py +++ b/numpy/lib/tests/test_histograms.py @@ -136,7 +136,7 @@ def test_bool_conversion(self): a = np.array([1, 1, 0], dtype=np.uint8) int_hist, int_edges = np.histogram(a) - # Should raise an warning on booleans + # Should raise a warning on booleans # Ensure that the histograms are equivalent, need to suppress # the warnings to get the actual outputs with pytest.warns(RuntimeWarning, match='Converting input from .*'): diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index fc2ea9e4194a..c6007d19df7f 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -796,7 +796,7 @@ def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): def hermval(x, c, tensor=True): """ - Evaluate an Hermite series at points x. + Evaluate a Hermite series at points x. If `c` is of length ``n + 1``, this function returns the value: @@ -1439,7 +1439,7 @@ def hermcompanion(c): """Return the scaled companion matrix of c. The basis polynomials are scaled so that the companion matrix is - symmetric when `c` is an Hermite basis polynomial. This provides + symmetric when `c` is a Hermite basis polynomial. This provides better eigenvalue estimates than the unscaled case and for basis polynomials the eigenvalues are guaranteed to be real if `numpy.linalg.eigvalsh` is used to obtain them. @@ -1693,7 +1693,7 @@ def hermweight(x): # class Hermite(ABCPolyBase): - """An Hermite series class. + """A Hermite series class. The Hermite class provides the standard Python numerical methods '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index 6225b9dd6b75..f5d82aa543b9 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -794,7 +794,7 @@ def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): def hermeval(x, c, tensor=True): """ - Evaluate an HermiteE series at points x. + Evaluate a HermiteE series at points x. If `c` is of length ``n + 1``, this function returns the value: @@ -1365,7 +1365,7 @@ def hermecompanion(c): Return the scaled companion matrix of c. The basis polynomials are scaled so that the companion matrix is - symmetric when `c` is an HermiteE basis polynomial. This provides + symmetric when `c` is a HermiteE basis polynomial. This provides better eigenvalue estimates than the unscaled case and for basis polynomials the eigenvalues are guaranteed to be real if `numpy.linalg.eigvalsh` is used to obtain them. @@ -1595,7 +1595,7 @@ def hermeweight(x): # class HermiteE(ABCPolyBase): - """An HermiteE series class. + """A HermiteE series class. 
The HermiteE class provides the standard Python numerical methods '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the diff --git a/numpy/random/_pcg64.pyx b/numpy/random/_pcg64.pyx index fd2dd40db565..30a00a11aa1d 100644 --- a/numpy/random/_pcg64.pyx +++ b/numpy/random/_pcg64.pyx @@ -264,7 +264,7 @@ cdef class PCG64(BitGenerator): * The random values are simulated using a rejection-based method and so, on average, more than one value from the underlying - RNG is required to generate an single draw. + RNG is required to generate a single draw. * The number of bits required to generate a simulated value differs from the number of bits generated by the underlying RNG. For example, two 16-bit integer values can be simulated @@ -498,7 +498,7 @@ cdef class PCG64DXSM(BitGenerator): * The random values are simulated using a rejection-based method and so, on average, more than one value from the underlying - RNG is required to generate an single draw. + RNG is required to generate a single draw. * The number of bits required to generate a simulated value differs from the number of bits generated by the underlying RNG. For example, two 16-bit integer values can be simulated diff --git a/numpy/random/_philox.pyx b/numpy/random/_philox.pyx index 75fc4081ee62..da47ad21e2de 100644 --- a/numpy/random/_philox.pyx +++ b/numpy/random/_philox.pyx @@ -315,7 +315,7 @@ cdef class Philox(BitGenerator): * The random values are simulated using a rejection-based method and so, on average, more than one value from the underlying - RNG is required to generate an single draw. + RNG is required to generate a single draw. * The number of bits required to generate a simulated value differs from the number of bits generated by the underlying RNG. For example, two 16-bit integer values can be simulated From 933fd548a2ba872b732be4682f9fa6962996071b Mon Sep 17 00:00:00 2001 From: Aniket <148300120+Aniketsy@users.noreply.github.com> Date: Mon, 10 Nov 2025 19:55:08 +0530 Subject: [PATCH 0829/1018] DOC: Add concrete Meson build example for NumPy C ufunc extension (#30008) Co-authored-by: Marten Henric van Kerkwijk Co-authored-by: Ralf Gommers --- doc/source/user/c-info.ufunc-tutorial.rst | 420 +++++++++------------- 1 file changed, 170 insertions(+), 250 deletions(-) diff --git a/doc/source/user/c-info.ufunc-tutorial.rst b/doc/source/user/c-info.ufunc-tutorial.rst index 8d8b267a6bd1..09daa95b7875 100644 --- a/doc/source/user/c-info.ufunc-tutorial.rst +++ b/doc/source/user/c-info.ufunc-tutorial.rst @@ -74,9 +74,9 @@ For comparison and general edification of the reader we provide a simple implementation of a C extension of ``logit`` that uses no numpy. -To do this we need two files. The first is the C file which contains -the actual code, and the second is the ``setup.py`` file used to create -the module. +To do this we need three files. The first is the C file which contains +the actual code, and the others are two project files that describe +how to create the module. .. code-block:: c @@ -157,65 +157,91 @@ the module. return m; } -To use the ``setup.py`` file, place ``setup.py`` and ``spammodule.c`` -in the same folder. Then ``python setup.py build`` will build the module to -import, or ``python setup.py install`` will install the module to your -site-packages directory. +To create the module, one proceeds as one would for a Python package, creating +a ``pyproject.toml`` file, which defines a build back-end, and then another +file for that backend which describes how to compile the code. 
For the backend, +we recommend ``meson-python``, as we use it for numpy itself, but below we +also show how to use the older ``setuptools``. - .. code-block:: python +.. tab-set:: - ''' - setup.py file for spammodule.c + .. tab-item:: meson - Calling - $python setup.py build_ext --inplace - will build the extension library in the current file. + Sample ``pyproject.toml`` and ``meson.build``. - Calling - $python setup.py build - will build a file that looks like ./build/lib*, where - lib* is a file that begins with lib. The library will - be in this file and end with a C library extension, - such as .so + .. code-block:: toml - Calling - $python setup.py install - will install the module in your site-packages file. + [project] + name = "spam" + version = "0.1" - See the setuptools section 'Building Extension Modules' - at setuptools.pypa.io for more information. - ''' + [build-system] + requires = ["meson-python"] + build-backend = "mesonpy" - from setuptools import setup, Extension - import numpy as np + .. code-block:: meson - module1 = Extension('spam', sources=['spammodule.c']) + project('spam', 'c') - setup(name='spam', version='1.0', ext_modules=[module1]) + py = import('python').find_installation() + sources = files('spammodule.c') -Once the spam module is imported into python, you can call logit + extension_module = py.extension_module( + 'spam', + sources, + install: true, + ) + + .. tab-item:: setuptools + + Sample ``pyproject.toml`` and ``setup.py``. + + .. code-block:: toml + + [project] + name = "spam" + version = "0.1" + + [build-system] + requires = ["setuptools"] + build-backend = "setuptools.build_meta" + + .. code-block:: python + + from setuptools import setup, Extension + + spammodule = Extension('spam', sources=['spammodule.c']) + + setup(name='spam', version='1.0', + ext_modules=[spammodule]) + +With either of the above, one can build and install the ``spam`` package with, + +.. code-block:: bash + + pip install . + +Once the ``spam`` module is imported into python, you can call logit via ``spam.logit``. Note that the function used above cannot be applied as-is to numpy arrays. To do so we must call :py:func:`numpy.vectorize` -on it. For example, if a python interpreter is opened in the file containing -the spam library or spam has been installed, one can perform the -following commands: - ->>> import numpy as np ->>> import spam ->>> spam.logit(0) --inf ->>> spam.logit(1) -inf ->>> spam.logit(0.5) -0.0 ->>> x = np.linspace(0,1,10) ->>> spam.logit(x) -TypeError: only length-1 arrays can be converted to Python scalars ->>> f = np.vectorize(spam.logit) ->>> f(x) -array([ -inf, -2.07944154, -1.25276297, -0.69314718, -0.22314355, - 0.22314355, 0.69314718, 1.25276297, 2.07944154, inf]) +on it. For example:: + + >>> import numpy as np + >>> import spam + >>> spam.logit(0) + -inf + >>> spam.logit(1) + inf + >>> spam.logit(0.5) + 0.0 + >>> x = np.linspace(0,1,10) + >>> spam.logit(x) + TypeError: only length-1 arrays can be converted to Python scalars + >>> f = np.vectorize(spam.logit) + >>> f(x) + array([ -inf, -2.07944154, -1.25276297, -0.69314718, -0.22314355, + 0.22314355, 0.69314718, 1.25276297, 2.07944154, inf]) THE RESULTING LOGIT FUNCTION IS NOT FAST! ``numpy.vectorize`` simply loops over ``spam.logit``. The loop is done at the C level, but the numpy @@ -236,8 +262,7 @@ Example NumPy ufunc for one dtype For simplicity we give a ufunc for a single dtype, the ``'f8'`` ``double``. 
As in the previous section, we first give the ``.c`` file -and then the ``setup.py`` file used to create the module containing the -ufunc. +and then the files used to create a ``npufunc`` module containing the ufunc. The place in the code corresponding to the actual computations for the ufunc are marked with ``/* BEGIN main ufunc computation */`` and @@ -339,59 +364,77 @@ the primary thing that must be changed to create your own ufunc. return m; } -This is a ``setup.py`` file for the above code. As before, the module -can be build via calling ``python setup.py build`` at the command prompt, -or installed to site-packages via ``python setup.py install``. The module -can also be placed into a local folder e.g. ``npufunc_directory`` below -using ``python setup.py build_ext --inplace``. +For the files needed to create the module, the main difference from our +previous example is that we now need to declare dependencies on numpy. - .. code-block:: python +.. tab-set:: - ''' - setup.py file for single_type_logit.c - Note that since this is a numpy extension - we add an include_dirs=[get_include()] so that the - extension is built with numpy's C/C++ header files. + .. tab-item:: meson - Calling - $python setup.py build_ext --inplace - will build the extension library in the npufunc_directory. + Sample ``pyproject.toml`` and ``meson.build``. - Calling - $python setup.py build - will build a file that looks like ./build/lib*, where - lib* is a file that begins with lib. The library will - be in this file and end with a C library extension, - such as .so + .. code-block:: toml - Calling - $python setup.py install - will install the module in your site-packages file. + [project] + name = "npufunc" + dependencies = ["numpy"] + version = "0.1" - See the setuptools section 'Building Extension Modules' - at setuptools.pypa.io for more information. - ''' + [build-system] + requires = ["meson-python", "numpy"] + build-backend = "mesonpy" - from setuptools import setup, Extension - from numpy import get_include + .. code-block:: meson - npufunc = Extension('npufunc', - sources=['single_type_logit.c'], - include_dirs=[get_include()]) + project('npufunc', 'c') - setup(name='npufunc', version='1.0', ext_modules=[npufunc]) + py = import('python').find_installation() + np_dep = dependency('numpy') + sources = files('single_type_logit.c') -After the above has been installed, it can be imported and used as follows. + extension_module = py.extension_module( + 'npufunc', + sources, + dependencies: [np_dep], + install: true, + ) ->>> import numpy as np ->>> import npufunc ->>> npufunc.logit(0.5) -np.float64(0.0) ->>> a = np.linspace(0,1,5) ->>> npufunc.logit(a) -array([ -inf, -1.09861229, 0. , 1.09861229, inf]) + .. tab-item:: setuptools + + Sample ``pyproject.toml`` and ``setup.py``. + + .. code-block:: toml + + [project] + name = "npufunc" + dependencies = ["numpy"] + version = "0.1" + + [build-system] + requires = ["setuptools", "numpy"] + build-backend = "setuptools.build_meta" + + .. code-block:: python + + from setuptools import setup, Extension + from numpy import get_include + + npufunc = Extension('npufunc', + sources=['single_type_logit.c'], + include_dirs=[get_include()]) + setup(name='npufunc', version='1.0', ext_modules=[npufunc]) + +After the above has been installed, it can be imported and used as follows:: + + >>> import numpy as np + >>> import npufunc + >>> npufunc.logit(0.5) + np.float64(0.0) + >>> a = np.linspace(0, 1, 5) + >>> npufunc.logit(a) + array([ -inf, -1.09861229, 0. 
, 1.09861229, inf]) .. _`sec:NumPy-many-loop`: @@ -402,10 +445,10 @@ Example NumPy ufunc with multiple dtypes .. index:: pair: ufunc; adding new -We finally give an example of a full ufunc, with inner loops for -half-floats, floats, doubles, and long doubles. As in the previous -sections we first give the ``.c`` file and then the corresponding -``setup.py`` file. +We now extend the above to a full ``logit`` ufunc, with inner loops for +floats, doubles, and long doubles. Here, we can use the same build files +as above, except we need to change the source file from ``single_type_logit.c`` +to ``multi_type_logit.c``. The places in the code corresponding to the actual computations for the ufunc are marked with ``/* BEGIN main ufunc computation */`` and @@ -419,7 +462,6 @@ is the primary thing that must be changed to create your own ufunc. #include #include "numpy/ndarraytypes.h" #include "numpy/ufuncobject.h" - #include "numpy/halffloat.h" #include /* @@ -514,39 +556,13 @@ is the primary thing that must be changed to create your own ufunc. } - static void half_float_logit(char **args, const npy_intp *dimensions, - const npy_intp *steps, void *data) - { - npy_intp i; - npy_intp n = dimensions[0]; - char *in = args[0], *out = args[1]; - npy_intp in_step = steps[0], out_step = steps[1]; - - float tmp; - - for (i = 0; i < n; i++) { - - /* BEGIN main ufunc computation */ - tmp = npy_half_to_float(*(npy_half *)in); - tmp /= 1 - tmp; - tmp = logf(tmp); - *((npy_half *)out) = npy_float_to_half(tmp); - /* END main ufunc computation */ - - in += in_step; - out += out_step; - } - } - /*This gives pointers to the above functions*/ - PyUFuncGenericFunction funcs[4] = {&half_float_logit, - &float_logit, + PyUFuncGenericFunction funcs[3] = {&float_logit, &double_logit, &long_double_logit}; - static const char types[8] = {NPY_HALF, NPY_HALF, - NPY_FLOAT, NPY_FLOAT, + static const char types[6] = {NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_LONGDOUBLE, NPY_LONGDOUBLE}; @@ -586,92 +602,40 @@ is the primary thing that must be changed to create your own ufunc. return m; } -This is a ``setup.py`` file for the above code. As before, the module -can be build via calling ``python setup.py build`` at the command prompt, -or installed to site-packages via ``python setup.py install``. - - .. code-block:: python - - ''' - setup.py file for multi_type_logit.c - Note that since this is a numpy extension - we add an include_dirs=[get_include()] so that the - extension is built with numpy's C/C++ header files. - Furthermore, we also have to include the npymath - lib for half-float d-type. - - Calling - $python setup.py build_ext --inplace - will build the extension library in the current file. - - Calling - $python setup.py build - will build a file that looks like ./build/lib*, where - lib* is a file that begins with lib. The library will - be in this file and end with a C library extension, - such as .so - - Calling - $python setup.py install - will install the module in your site-packages file. - - See the setuptools section 'Building Extension Modules' - at setuptools.pypa.io for more information. - ''' - - from setuptools import setup, Extension - from numpy import get_include - from os import path - - path_to_npymath = path.join(get_include(), '..', 'lib') - npufunc = Extension('npufunc', - sources=['multi_type_logit.c'], - include_dirs=[get_include()], - # Necessary for the half-float d-type. 
- library_dirs=[path_to_npymath], - libraries=["npymath"]) - - setup(name='npufunc', version='1.0', ext_modules=[npufunc]) - - After the above has been installed, it can be imported and used as follows. >>> import numpy as np >>> import npufunc >>> npufunc.logit(0.5) np.float64(0.0) ->>> a = np.linspace(0,1,5) +>>> a = np.linspace(0, 1, 5, dtype="f4") >>> npufunc.logit(a) -array([ -inf, -1.09861229, 0. , 1.09861229, inf]) +:1: RuntimeWarning: divide by zero encountered in logit +array([ -inf, -1.0986123, 0. , 1.0986123, inf], + dtype=float32) +.. note:: + Supporting ``float16`` (half-precision) in custom ufuncs is more complex + due to its non-standard C representation and conversion requirements. The + above code can process ``float16`` input, but will do so by converting it + to ``float32``. The result will then be ``float32`` too, but one can + convert it back to ``float16`` by passing in a suitable output, as in + ``npufunc.logit(a, out=np.empty_like(a))``. For examples of actual + ``float16`` loops, see the numpy source code. .. _`sec:NumPy-many-arg`: Example NumPy ufunc with multiple arguments/return values ========================================================= -Our final example is a ufunc with multiple arguments. It is a modification -of the code for a logit ufunc for data with a single dtype. We -compute ``(A * B, logit(A * B))``. - -We only give the C code as the setup.py file is exactly the same as -the ``setup.py`` file in `Example NumPy ufunc for one dtype`_, except that -the line - - .. code-block:: python - - npufunc = Extension('npufunc', - sources=['single_type_logit.c'], - include_dirs=[get_include()]) +Creating a ufunc with multiple arguments is not difficult. Here, we make a +modification of the code for a logit ufunc, where we compute ``(A * B, +logit(A * B))``. For simplicity, we only create a loop for doubles. -is replaced with - - .. code-block:: python - - npufunc = Extension('npufunc', - sources=['multi_arg_logit.c'], - include_dirs=[get_include()]) +We again only give the C code as the files needed to create the module are the +same as before, but with the source file name replaced by +``multi_arg_logit.c``. The C file is given below. The ufunc generated takes two arguments ``A`` and ``B``. It returns a tuple whose first element is ``A * B`` and whose second @@ -684,7 +648,6 @@ as well as all other properties of a ufunc. #include #include "numpy/ndarraytypes.h" #include "numpy/ufuncobject.h" - #include "numpy/halffloat.h" #include /* @@ -786,29 +749,12 @@ Example NumPy ufunc with structured array dtype arguments This example shows how to create a ufunc for a structured array dtype. For the example we show a trivial ufunc for adding two arrays with dtype ``'u8,u8,u8'``. The process is a bit different from the other examples since -a call to :c:func:`PyUFunc_FromFuncAndData` doesn't fully register ufuncs for +a call to :c:func:`PyUFunc_FromFuncAndData` cannot register ufuncs for custom dtypes and structured array dtypes. We need to also call :c:func:`PyUFunc_RegisterLoopForDescr` to finish setting up the ufunc. -We only give the C code as the ``setup.py`` file is exactly the same as -the ``setup.py`` file in `Example NumPy ufunc for one dtype`_, except that -the line - - .. code-block:: python - - npufunc = Extension('npufunc', - sources=['single_type_logit.c'], - include_dirs=[get_include()]) - -is replaced with - - .. 
code-block:: python - - npufunc = Extension('npufunc', - sources=['add_triplet.c'], - include_dirs=[get_include()]) - -The C file is given below. +We only give the C code as the files needed to construct the module are again +exactly the same as before, except that the source file is now ``add_triplet.c``. .. code-block:: c @@ -867,7 +813,7 @@ The C file is given below. static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, - "struct_ufunc_test", + "npufunc", NULL, -1, StructUfuncTestMethods, @@ -907,7 +853,7 @@ The C file is given below. dtypes[2] = dtype; /* Register ufunc for structured dtype */ - PyUFunc_RegisterLoopForDescr(add_triplet, + PyUFunc_RegisterLoopForDescr((PyUFuncObject *)add_triplet, dtype, &add_uint64_triplet, dtypes, @@ -920,37 +866,11 @@ The C file is given below. return m; } -.. index:: - pair: ufunc; adding new - -The returned ufunc object is a callable Python object. It should be -placed in a (module) dictionary under the same name as was used in the -name argument to the ufunc-creation routine. The following example is -adapted from the umath module - - .. code-block:: c +Sample usage:: - static PyUFuncGenericFunction atan2_functions[] = { - PyUFunc_ff_f, PyUFunc_dd_d, - PyUFunc_gg_g, PyUFunc_OO_O_method}; - static void *atan2_data[] = { - (void *)atan2f, (void *)atan2, - (void *)atan2l, (void *)"arctan2"}; - static const char atan2_signatures[] = { - NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, - NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, - NPY_LONGDOUBLE, NPY_LONGDOUBLE, NPY_LONGDOUBLE - NPY_OBJECT, NPY_OBJECT, NPY_OBJECT}; - ... - /* in the module initialization code */ - PyObject *f, *dict, *module; - ... - dict = PyModule_GetDict(module); - ... - f = PyUFunc_FromFuncAndData(atan2_functions, - atan2_data, atan2_signatures, 4, 2, 1, - PyUFunc_None, "arctan2", - "a safe and correct arctan(x1/x2)", 0); - PyDict_SetItemString(dict, "arctan2", f); - Py_DECREF(f); - ... + >>> import npufunc + >>> import numpy as np + >>> a = np.array([(1, 2, 3), (4, 5, 6)], "u8,u8,u8") + >>> npufunc.add_triplet(a, a) + array([(2, 4, 6), (8, 10, 12)], + dtype=[('f0', ' Date: Mon, 10 Nov 2025 18:30:27 +0100 Subject: [PATCH 0830/1018] MAINT: ``ma.asanyarray``: use ``order=None`` as default --- numpy/ma/core.py | 19 +++++++++---------- numpy/ma/core.pyi | 8 ++++---- 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index e56b0cfa573b..a374410209f5 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -8616,7 +8616,7 @@ def asarray(a, dtype=None, order=None): subok=False, order=order) -def asanyarray(a, dtype=None, order='A'): +def asanyarray(a, dtype=None, order=None): """ Convert the input to a masked array, conserving subclasses. @@ -8629,14 +8629,13 @@ def asanyarray(a, dtype=None, order='A'): Input data, in any form that can be converted to an array. dtype : dtype, optional By default, the data-type is inferred from the input data. - order : {'C', 'F', 'A'}, optional - Specify the order of the array. If order is 'C', then the array - will be in C-contiguous order (last-index varies the fastest). - If order is 'F', then the returned array will be in - Fortran-contiguous order (first-index varies the fastest). - If order is 'A' (default), then the returned array may be - in any order (either C-, Fortran-contiguous, or even discontiguous), - unless a copy is required, in which case it will be C-contiguous. + order : {'C', 'F', 'A', 'K'}, optional + Memory layout. 'A' and 'K' depend on the order of input array ``a``. 
+ 'C' row-major (C-style), + 'F' column-major (Fortran-style) memory representation. + 'A' (any) means 'F' if ``a`` is Fortran contiguous, 'C' otherwise + 'K' (keep) preserve input order + Defaults to 'K'. Returns ------- @@ -8670,7 +8669,7 @@ def asanyarray(a, dtype=None, order='A'): isinstance(a, MaskedArray) and (dtype is None or dtype == a.dtype) and ( - order == 'A' + order in {None, 'A', 'K'} or order == 'C' and a.flags.carray or order == 'F' and a.flags.f_contiguous ) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index bbcd617e9699..15e6b58762b4 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -2453,13 +2453,13 @@ def asarray(a: object, dtype: DTypeLike | None = None, order: _OrderKACF | None # keep in sync with `asarray` (but note the additional first overload) @overload -def asanyarray(a: _MArrayT, dtype: None = None, order: _OrderACF = "A") -> _MArrayT: ... +def asanyarray(a: _MArrayT, dtype: None = None, order: _OrderKACF | None = None) -> _MArrayT: ... @overload -def asanyarray(a: _ArrayLike[_ScalarT], dtype: None = None, order: _OrderACF = "A") -> _MaskedArray[_ScalarT]: ... +def asanyarray(a: _ArrayLike[_ScalarT], dtype: None = None, order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... @overload -def asanyarray(a: object, dtype: _DTypeLike[_ScalarT], order: _OrderACF = "A") -> _MaskedArray[_ScalarT]: ... +def asanyarray(a: object, dtype: _DTypeLike[_ScalarT], order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... @overload -def asanyarray(a: object, dtype: DTypeLike | None = None, order: _OrderACF = "A") -> _MaskedArray[_ScalarT]: ... +def asanyarray(a: object, dtype: DTypeLike | None = None, order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... # def is_masked(x: object) -> bool: ... From c0f3eb9fe699951b1c63c523d6a39843e873d2e7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Nov 2025 17:53:01 +0000 Subject: [PATCH 0831/1018] MAINT: Bump int128/hide-comment-action from 1.46.0 to 1.47.0 Bumps [int128/hide-comment-action](https://github.com/int128/hide-comment-action) from 1.46.0 to 1.47.0. - [Release notes](https://github.com/int128/hide-comment-action/releases) - [Commits](https://github.com/int128/hide-comment-action/compare/bddc4e774ea6f0b45b9621c3a689db72b9a3cec5...3580fff2b9b7c0e16466686530622f0eed93132a) --- updated-dependencies: - dependency-name: int128/hide-comment-action dependency-version: 1.47.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/mypy_primer_comment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mypy_primer_comment.yml b/.github/workflows/mypy_primer_comment.yml index 95e487cd60b4..9f4cda234717 100644 --- a/.github/workflows/mypy_primer_comment.yml +++ b/.github/workflows/mypy_primer_comment.yml @@ -49,7 +49,7 @@ jobs: return parseInt(fs.readFileSync("pr_number.txt", { encoding: "utf8" })) - name: Hide old comments - uses: int128/hide-comment-action@bddc4e774ea6f0b45b9621c3a689db72b9a3cec5 # v1.46.0 + uses: int128/hide-comment-action@3580fff2b9b7c0e16466686530622f0eed93132a # v1.47.0 with: token: ${{ secrets.GITHUB_TOKEN }} issue-number: ${{ steps.get-pr-number.outputs.result }} From 81e28edc622637769010fd11bf17ca32a031543f Mon Sep 17 00:00:00 2001 From: Johnnie Gray Date: Tue, 16 Sep 2025 11:47:43 -0700 Subject: [PATCH 0832/1018] merge implementation --- numpy/_core/einsumfunc.py | 541 ++++++++++++++++++++++++-------------- 1 file changed, 344 insertions(+), 197 deletions(-) diff --git a/numpy/_core/einsumfunc.py b/numpy/_core/einsumfunc.py index 8e71e6d4b1eb..8a96f1444971 100644 --- a/numpy/_core/einsumfunc.py +++ b/numpy/_core/einsumfunc.py @@ -2,12 +2,14 @@ Implementation of optimized einsum. """ +import functools import itertools import operator -from numpy._core.multiarray import c_einsum -from numpy._core.numeric import asanyarray, tensordot +from numpy._core.multiarray import c_einsum, matmul +from numpy._core.numeric import asanyarray, reshape from numpy._core.overrides import array_function_dispatch +from numpy._core.umath import multiply __all__ = ['einsum', 'einsum_path'] @@ -440,116 +442,6 @@ def _greedy_path(input_sets, output_set, idx_dict, memory_limit): return path -def _can_dot(inputs, result, idx_removed): - """ - Checks if we can use BLAS (np.tensordot) call and its beneficial to do so. - - Parameters - ---------- - inputs : list of str - Specifies the subscripts for summation. - result : str - Resulting summation. - idx_removed : set - Indices that are removed in the summation - - - Returns - ------- - type : bool - Returns true if BLAS should and can be used, else False - - Notes - ----- - If the operations is BLAS level 1 or 2 and is not already aligned - we default back to einsum as the memory movement to copy is more - costly than the operation itself. - - - Examples - -------- - - # Standard GEMM operation - >>> _can_dot(['ij', 'jk'], 'ik', set('j')) - True - - # Can use the standard BLAS, but requires odd data movement - >>> _can_dot(['ijj', 'jk'], 'ik', set('j')) - False - - # DDOT where the memory is not aligned - >>> _can_dot(['ijk', 'ikj'], '', set('ijk')) - False - - """ - - # All `dot` calls remove indices - if len(idx_removed) == 0: - return False - - # BLAS can only handle two operands - if len(inputs) != 2: - return False - - input_left, input_right = inputs - - for c in set(input_left + input_right): - # can't deal with repeated indices on same input or more than 2 total - nl, nr = input_left.count(c), input_right.count(c) - if (nl > 1) or (nr > 1) or (nl + nr > 2): - return False - - # can't do implicit summation or dimension collapse e.g. 
- # "ab,bc->c" (implicitly sum over 'a') - # "ab,ca->ca" (take diagonal of 'a') - if nl + nr - 1 == int(c in result): - return False - - # Build a few temporaries - set_left = set(input_left) - set_right = set(input_right) - keep_left = set_left - idx_removed - keep_right = set_right - idx_removed - rs = len(idx_removed) - - # At this point we are a DOT, GEMV, or GEMM operation - - # Handle inner products - - # DDOT with aligned data - if input_left == input_right: - return True - - # DDOT without aligned data (better to use einsum) - if set_left == set_right: - return False - - # Handle the 4 possible (aligned) GEMV or GEMM cases - - # GEMM or GEMV no transpose - if input_left[-rs:] == input_right[:rs]: - return True - - # GEMM or GEMV transpose both - if input_left[:rs] == input_right[-rs:]: - return True - - # GEMM or GEMV transpose right - if input_left[-rs:] == input_right[-rs:]: - return True - - # GEMM or GEMV transpose left - if input_left[:rs] == input_right[:rs]: - return True - - # Einsum is faster than GEMV if we have to copy data - if not keep_left or not keep_right: - return False - - # We are a matrix-matrix product, but we need to copy data - return True - - def _parse_einsum_input(operands): """ A reproduction of einsum c side einsum parsing in python. @@ -887,13 +779,14 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): # Build a few useful list and sets input_list = input_subscripts.split(',') + num_inputs = len(input_list) input_sets = [set(x) for x in input_list] output_set = set(output_subscript) indices = set(input_subscripts.replace(',', '')) + num_indices = len(indices) # Get length of each unique dimension and ensure all dimensions are correct dimension_dict = {} - broadcast_indices = [[] for x in range(len(input_list))] for tnum, term in enumerate(input_list): sh = operands[tnum].shape if len(sh) != len(term): @@ -903,10 +796,6 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): for cnum, char in enumerate(term): dim = sh[cnum] - # Build out broadcast indices - if dim == 1: - broadcast_indices[tnum].append(char) - if char in dimension_dict.keys(): # For broadcasting cases we always want the largest dim size if dimension_dict[char] == 1: @@ -918,9 +807,6 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): else: dimension_dict[char] = dim - # Convert broadcast inds to sets - broadcast_indices = [set(x) for x in broadcast_indices] - # Compute size of each input array plus the output array size_list = [_compute_size_by_dict(term, dimension_dict) for term in input_list + [output_subscript]] @@ -931,23 +817,16 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): else: memory_arg = memory_limit - # Compute naive cost - # This isn't quite right, need to look into exactly how einsum does this - inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0 - naive_cost = _flop_count( - indices, inner_product, len(input_list), dimension_dict - ) - # Compute the path if explicit_einsum_path: path = path_type[1:] elif ( (path_type is False) - or (len(input_list) in [1, 2]) + or (num_inputs in [1, 2]) or (indices == output_set) ): # Nothing to be optimized, leave it to einsum - path = [tuple(range(len(input_list)))] + path = [tuple(range(num_inputs))] elif path_type == "greedy": path = _greedy_path( input_sets, output_set, dimension_dict, memory_arg @@ -969,26 +848,18 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): contract = _find_contraction(contract_inds, input_sets, output_set) 
out_inds, input_sets, idx_removed, idx_contract = contract - cost = _flop_count( - idx_contract, idx_removed, len(contract_inds), dimension_dict - ) - cost_list.append(cost) - scale_list.append(len(idx_contract)) - size_list.append(_compute_size_by_dict(out_inds, dimension_dict)) + if not einsum_call_arg: + # these are only needed for printing info + cost = _flop_count( + idx_contract, idx_removed, len(contract_inds), dimension_dict + ) + cost_list.append(cost) + scale_list.append(len(idx_contract)) + size_list.append(_compute_size_by_dict(out_inds, dimension_dict)) - bcast = set() tmp_inputs = [] for x in contract_inds: tmp_inputs.append(input_list.pop(x)) - bcast |= broadcast_indices.pop(x) - - new_bcast_inds = bcast - idx_removed - - # If we're broadcasting, nix blas - if not len(idx_removed & bcast): - do_blas = _can_dot(tmp_inputs, out_inds, idx_removed) - else: - do_blas = False # Last contraction if (cnum - len(path)) == -1: @@ -998,16 +869,11 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): idx_result = "".join([x[1] for x in sorted(sort_result)]) input_list.append(idx_result) - broadcast_indices.append(new_bcast_inds) einsum_str = ",".join(tmp_inputs) + "->" + idx_result - contraction = ( - contract_inds, idx_removed, einsum_str, input_list[:], do_blas - ) + contraction = (contract_inds, einsum_str, input_list[:]) contraction_list.append(contraction) - opt_cost = sum(cost_list) + 1 - if len(input_list) != 1: # Explicit "einsum_path" is usually trusted, but we detect this kind of # mistake in order to prevent from returning an intermediate value. @@ -1022,11 +888,21 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): overall_contraction = input_subscripts + "->" + output_subscript header = ("scaling", "current", "remaining") + # Compute naive cost + # This isn't quite right, need to look into exactly how einsum does this + inner_product = ( + sum(len(set(x)) for x in input_subscripts.split(',')) - num_indices + ) > 0 + naive_cost = _flop_count( + indices, inner_product, num_inputs, dimension_dict + ) + + opt_cost = sum(cost_list) + 1 speedup = naive_cost / opt_cost max_i = max(size_list) path_print = f" Complete contraction: {overall_contraction}\n" - path_print += f" Naive scaling: {len(indices)}\n" + path_print += f" Naive scaling: {num_indices}\n" path_print += " Optimized scaling: %d\n" % max(scale_list) path_print += f" Naive FLOP count: {naive_cost:.3e}\n" path_print += f" Optimized FLOP count: {opt_cost:.3e}\n" @@ -1037,7 +913,7 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): path_print += "-" * 74 for n, contraction in enumerate(contraction_list): - inds, idx_rm, einsum_str, remaining, blas = contraction + _, einsum_str, remaining = contraction remaining_str = ",".join(remaining) + "->" + output_subscript path_run = (scale_list[n], einsum_str, remaining_str) path_print += "\n%4d %24s %40s" % path_run @@ -1046,6 +922,312 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): return (path, path_print) +def _parse_eq_to_pure_multiplication(a_term, shape_a, b_term, shape_b, out): + """If there are no contracted indices, then we can directly transpose and + insert singleton dimensions into ``a`` and ``b`` such that (broadcast) + elementwise multiplication performs the einsum. + + No need to cache this as it is within the cached + ``_parse_eq_to_batch_matmul``. 
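+
+    For example, for ``"i,j->ij"`` there are no contracted indices, so
+    ``a`` is aligned to shape ``(n, 1)`` and ``b`` to ``(1, m)``; the
+    broadcast multiply then produces the outer product directly.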
+
+    """
+    desired_a = ""
+    desired_b = ""
+    new_shape_a = []
+    new_shape_b = []
+    for ix in out:
+        if ix in a_term:
+            desired_a += ix
+            new_shape_a.append(shape_a[a_term.index(ix)])
+        else:
+            new_shape_a.append(1)
+        if ix in b_term:
+            desired_b += ix
+            new_shape_b.append(shape_b[b_term.index(ix)])
+        else:
+            new_shape_b.append(1)
+
+    if desired_a != a_term:
+        eq_a = f"{a_term}->{desired_a}"
+    else:
+        eq_a = None
+    if desired_b != b_term:
+        eq_b = f"{b_term}->{desired_b}"
+    else:
+        eq_b = None
+
+    return (
+        eq_a,
+        eq_b,
+        new_shape_a,
+        new_shape_b,
+        None,  # new_shape_ab, not needed since not fusing
+        None,  # perm_ab, not needed as we transpose a and b first
+        True,  # pure_multiplication=True
+    )
+
+
+@functools.lru_cache(2**12)
+def _parse_eq_to_batch_matmul(eq, shape_a, shape_b):
+    """Cached parsing of a two-term einsum equation into the necessary
+    sequence of arguments for contraction via batched matrix multiplication.
+    The steps we need to specify are:
+
+    1. Remove repeated and trivial indices from the left and right terms,
+       and transpose them, done as a single einsum.
+    2. Fuse the remaining indices so we have two 3D tensors.
+    3. Perform the batched matrix multiplication.
+    4. Unfuse the output to get the desired final index order.
+
+    """
+    lhs, out = eq.split("->")
+    a_term, b_term = lhs.split(",")
+
+    if len(a_term) != len(shape_a):
+        raise ValueError(f"Term '{a_term}' does not match shape {shape_a}.")
+    if len(b_term) != len(shape_b):
+        raise ValueError(f"Term '{b_term}' does not match shape {shape_b}.")
+
+    bat_inds = []  # appears on A, B, O
+    con_inds = []  # appears on A, B, .
+    a_keep = []  # appears on A, ., O
+    b_keep = []  # appears on ., B, O
+    sizes = {}
+    singletons = set()
+
+    # parse left term
+    seen = set()
+    for ix, d in zip(a_term, shape_a):
+        if d == 1:
+            # everything (including broadcasting) works nicely if we simply
+            # ignore such dimensions, but we do need to track if they appear
+            # in output and thus should be reintroduced later
+            singletons.add(ix)
+            continue
+
+        # set or check size
+        if sizes.setdefault(ix, d) != d:
+            raise ValueError(
+                f"Index {ix} has mismatched sizes {sizes[ix]} and {d}."
+            )
+
+        if ix in seen:
+            continue
+        seen.add(ix)
+
+        if ix in b_term:
+            if ix in out:
+                bat_inds.append(ix)
+            else:
+                con_inds.append(ix)
+        elif ix in out:
+            a_keep.append(ix)
+
+    # parse right term
+    seen.clear()
+    for ix, d in zip(b_term, shape_b):
+        if d == 1:
+            singletons.add(ix)
+            continue
+        # broadcast indices don't appear as singletons in output
+        singletons.discard(ix)
+
+        # set or check size
+        if sizes.setdefault(ix, d) != d:
+            raise ValueError(
+                f"Index {ix} has mismatched sizes {sizes[ix]} and {d}."
+ ) + + if ix in seen: + continue + seen.add(ix) + + if ix not in a_term: + if ix in out: + b_keep.append(ix) + + if not con_inds: + # contraction is pure multiplication, prepare inputs differently + return _parse_eq_to_pure_multiplication( + a_term, shape_a, b_term, shape_b, out + ) + + # only need the size one indices that appear in the output + singletons = [ix for ix in out if ix in singletons] + + # take diagonal, remove any trivial axes and transpose left + desired_a = "".join((*bat_inds, *a_keep, *con_inds)) + if a_term != desired_a: + eq_a = f"{a_term}->{desired_a}" + else: + eq_a = None + + # take diagonal, remove any trivial axes and transpose right + desired_b = "".join((*bat_inds, *con_inds, *b_keep)) + if b_term != desired_b: + eq_b = f"{b_term}->{desired_b}" + else: + eq_b = None + + # then we want to reshape + if bat_inds: + lgroups = (bat_inds, a_keep, con_inds) + rgroups = (bat_inds, con_inds, b_keep) + ogroups = (bat_inds, a_keep, b_keep) + else: + # avoid size 1 batch dimension if no batch indices + lgroups = (a_keep, con_inds) + rgroups = (con_inds, b_keep) + ogroups = (a_keep, b_keep) + + if any(len(group) != 1 for group in lgroups): + # need to fuse 'kept' and contracted indices + # (though could allow batch indices to be broadcast) + new_shape_a = tuple( + functools.reduce(operator.mul, (sizes[ix] for ix in ix_group), 1) + for ix_group in lgroups + ) + else: + new_shape_a = None + + if any(len(group) != 1 for group in rgroups): + # need to fuse 'kept' and contracted indices + # (though could allow batch indices to be broadcast) + new_shape_b = tuple( + functools.reduce(operator.mul, (sizes[ix] for ix in ix_group), 1) + for ix_group in rgroups + ) + else: + new_shape_b = None + + if any(len(group) != 1 for group in ogroups) or singletons: + new_shape_ab = (1,) * len(singletons) + tuple( + sizes[ix] for ix_group in ogroups for ix in ix_group + ) + else: + new_shape_ab = None + + # then we want to permute the matmul produced output: + out_produced = "".join((*singletons, *bat_inds, *a_keep, *b_keep)) + perm_ab = tuple(out_produced.index(ix) for ix in out) + if perm_ab == tuple(range(len(perm_ab))): + perm_ab = None + + return ( + eq_a, + eq_b, + new_shape_a, + new_shape_b, + new_shape_ab, + perm_ab, + False, # pure_multiplication=False + ) + + +@functools.lru_cache(maxsize=64) +def _parse_output_order(order, a_is_fcontig, b_is_fcontig): + order = order.upper() + if order == "K": + return None + elif order in "CF": + return order + elif order == "A": + if a_is_fcontig and b_is_fcontig: + return "F" + else: + return "C" + else: + raise ValueError( + "ValueError: order must be one of " + f"'C', 'F', 'A', or 'K' (got '{order}')" + ) + + +def bmm_einsum(eq, a, b, out=None, **kwargs): + """Perform arbitrary pairwise einsums using only ``matmul``, or + ``multiply`` if no contracted indices are involved (plus maybe single term + ``einsum`` to prepare the terms individually). The logic for each is cached + based on the equation and array shape, and each step is only performed if + necessary. + + Parameters + ---------- + eq : str + The einsum equation. + a : array_like + The first array to contract. + b : array_like + The second array to contract. + + Returns + ------- + array_like + """ + ( + eq_a, + eq_b, + new_shape_a, + new_shape_b, + new_shape_ab, + perm_ab, + pure_multiplication, + ) = _parse_eq_to_batch_matmul(eq, a.shape, b.shape) + + # n.b. 
+    # one could special-case various inputs to call c_einsum directly here
+
+    # need to handle `order` a little manually, since we do transpose
+    # operations before and potentially after the ufunc calls
+    output_order = _parse_output_order(
+        kwargs.pop("order", "K"), a.flags.f_contiguous, b.flags.f_contiguous
+    )
+
+    # prepare left
+    if eq_a is not None:
+        # diagonals, sums, and transpose
+        a = c_einsum(eq_a, a)
+    if new_shape_a is not None:
+        a = reshape(a, new_shape_a)
+
+    # prepare right
+    if eq_b is not None:
+        # diagonals, sums, and transpose
+        b = c_einsum(eq_b, b)
+    if new_shape_b is not None:
+        b = reshape(b, new_shape_b)
+
+    if pure_multiplication:
+        # no contracted indices
+        if output_order is not None:
+            kwargs["order"] = output_order
+
+        # do the 'contraction' via multiplication!
+        return multiply(a, b, out=out, **kwargs)
+
+    # can only supply out here if no other reshaping / transposing
+    matmul_out_compatible = (new_shape_ab is None) and (perm_ab is None)
+    if matmul_out_compatible:
+        kwargs["out"] = out
+
+    # do the contraction!
+    ab = matmul(a, b, **kwargs)
+
+    # prepare the output
+    if new_shape_ab is not None:
+        ab = reshape(ab, new_shape_ab)
+    if perm_ab is not None:
+        ab = ab.transpose(perm_ab)
+
+    if (out is not None) and (not matmul_out_compatible):
+        # handle case where out is specified, but we also needed
+        # to reshape / transpose ``ab`` after the matmul
+        out[:] = ab
+        ab = out
+    elif output_order is not None:
+        ab = asanyarray(ab, order=output_order)
+
+    return ab
+
+
 def _einsum_dispatcher(*operands, out=None, optimize=None, **kwargs):
     # Arguably we dispatch on more arguments than we really should; see note in
     # _einsum_path_dispatcher for why.
@@ -1434,58 +1616,23 @@ def einsum(*operands, out=None, optimize=False, **kwargs):
     operands, contraction_list = einsum_path(*operands,
                                              optimize=optimize,
                                              einsum_call=True)
 
-    # Handle order kwarg for output array, c_einsum allows mixed case
-    output_order = kwargs.pop('order', 'K')
-    if output_order.upper() == 'A':
-        if all(arr.flags.f_contiguous for arr in operands):
-            output_order = 'F'
-        else:
-            output_order = 'C'
-
     # Start contraction loop
     for num, contraction in enumerate(contraction_list):
-        inds, idx_rm, einsum_str, remaining, blas = contraction
+        inds, einsum_str, _ = contraction
         tmp_operands = [operands.pop(x) for x in inds]
 
         # Do we need to deal with the output?
         handle_out = specified_out and ((num + 1) == len(contraction_list))
 
-        # Call tensordot if still possible
-        if blas:
-            # Checks have already been handled
-            input_str, results_index = einsum_str.split('->')
-            input_left, input_right = input_str.split(',')
-
-            tensor_result = input_left + input_right
-            for s in idx_rm:
-                tensor_result = tensor_result.replace(s, "")
-
-            # Find indices to contract over
-            left_pos, right_pos = [], []
-            for s in sorted(idx_rm):
-                left_pos.append(input_left.find(s))
-                right_pos.append(input_right.find(s))
-
-            # Contract!
- new_view = tensordot( - *tmp_operands, axes=(tuple(left_pos), tuple(right_pos)) - ) - - # Build a new view if needed - if (tensor_result != results_index) or handle_out: - if handle_out: - kwargs["out"] = out - new_view = c_einsum( - tensor_result + '->' + results_index, new_view, **kwargs - ) + # If out was specified + if handle_out: + kwargs["out"] = out - # Call einsum + if len(tmp_operands) == 2: + # Call (batched) matrix multiplication if possible + new_view = bmm_einsum(einsum_str, *tmp_operands, **kwargs) else: - # If out was specified - if handle_out: - kwargs["out"] = out - - # Do the contraction + # Call einsum new_view = c_einsum(einsum_str, *tmp_operands, **kwargs) # Append new items and dereference what we can @@ -1495,4 +1642,4 @@ def einsum(*operands, out=None, optimize=False, **kwargs): if specified_out: return out else: - return asanyarray(operands[0], order=output_order) + return operands[0] From d2c1ccfcda6218f3a35263316377ceb73e7fa6e4 Mon Sep 17 00:00:00 2001 From: Johnnie Gray Date: Wed, 5 Nov 2025 14:25:01 -0800 Subject: [PATCH 0833/1018] add note and tweak parsing --- numpy/_core/einsumfunc.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/numpy/_core/einsumfunc.py b/numpy/_core/einsumfunc.py index 8a96f1444971..dd5c65bc8307 100644 --- a/numpy/_core/einsumfunc.py +++ b/numpy/_core/einsumfunc.py @@ -1107,10 +1107,11 @@ def _parse_eq_to_batch_matmul(eq, shape_a, shape_b): else: new_shape_ab = None - # then we want to permute the matmul produced output: + # then we might need to permute the matmul produced output: out_produced = "".join((*singletons, *bat_inds, *a_keep, *b_keep)) - perm_ab = tuple(out_produced.index(ix) for ix in out) - if perm_ab == tuple(range(len(perm_ab))): + if out_produced != out: + perm_ab = tuple(out_produced.index(ix) for ix in out) + else: perm_ab = None return ( @@ -1162,6 +1163,11 @@ def bmm_einsum(eq, a, b, out=None, **kwargs): Returns ------- array_like + + Notes + ----- + A fuller description of this algorithm, and original source for this + implementation, can be found at https://github.com/jcmgray/einsum_bmm. 
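+
+    As a minimal usage sketch (shapes chosen arbitrarily for illustration),
+    a plain matrix product maps onto a single ``matmul`` call:
+
+    >>> import numpy as np
+    >>> a, b = np.ones((2, 3)), np.ones((3, 4))
+    >>> bmm_einsum("ij,jk->ik", a, b).shape
+    (2, 4)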
""" ( eq_a, From 5b67bc86871c37d172a49c41d4a1fee04d7fdb81 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 11 Nov 2025 01:10:12 +0100 Subject: [PATCH 0834/1018] BUG: ``[u]longlong.bit_count`` runtime signature --- numpy/_core/_add_newdocs_scalars.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/_core/_add_newdocs_scalars.py b/numpy/_core/_add_newdocs_scalars.py index 7305f232510c..3f9eca5e47f3 100644 --- a/numpy/_core/_add_newdocs_scalars.py +++ b/numpy/_core/_add_newdocs_scalars.py @@ -358,7 +358,8 @@ def add_newdoc_for_scalar_type(name: str, text_signature: str, doc: str) -> None """)) for int_name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', - 'int64', 'uint64', 'int64', 'uint64', 'int64', 'uint64'): + 'int64', 'uint64', 'int64', 'uint64', 'int64', 'uint64', + 'longlong', 'ulonglong'): # Add negative examples for signed cases by checking typecode add_newdoc('numpy._core.numerictypes', int_name, ('bit_count', f""" From db6bd44f1f8ec6f0322516d4f78865503b06e72c Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 11 Nov 2025 01:32:39 +0100 Subject: [PATCH 0835/1018] BUG: ``np.generic`` method runtime signatures --- numpy/_core/_add_newdocs.py | 992 ++++++++--------------- numpy/_core/tests/test_scalar_methods.py | 112 ++- 2 files changed, 388 insertions(+), 716 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 78c660f423f7..5869d645cdf2 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -9,6 +9,8 @@ """ +import textwrap + from numpy._core.function_base import add_newdoc from numpy._core.overrides import get_array_function_like_doc # noqa: F401 @@ -3140,18 +3142,6 @@ """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_namespace__', - """ - __array_namespace__($self, /, *, api_version=None) - -- - - a.__array_namespace__(*, api_version=None) - - For Array API compatibility. - - """)) - - add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_ufunc__', """ __array_ufunc__($self, ufunc, method, /, *inputs, **kwargs) @@ -3208,32 +3198,6 @@ """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('__copy__', - """ - __copy__($self, /) - -- - - a.__copy__() - - Used if :func:`copy.copy` is called on an array. Returns a copy of the array. - - Equivalent to ``a.copy(order='K')``. - - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('__deepcopy__', - """ - __deepcopy__($self, memo, /) - -- - - a.__deepcopy__(memo, /) - - Used if :func:`copy.deepcopy` is called on an array. - - """)) - - add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack__', """ __dlpack__($self, /, *, stream=None, max_version=None, dl_device=None, copy=None) @@ -3308,119 +3272,243 @@ """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('all', +add_newdoc('numpy._core.multiarray', 'ndarray', ('dot', """ - all($self, /, axis=None, out=None, keepdims=False, *, where=True) + dot($self, other, /, out=None) -- - a.all(axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue) - - Returns True if all elements evaluate to True. + a.dot(other, /, out=None) - Refer to `numpy.all` for full documentation. + Refer to :func:`numpy.dot` for full documentation. 
See Also -------- - numpy.all : equivalent function + numpy.dot : equivalent function """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('any', +add_newdoc('numpy._core.multiarray', 'ndarray', ('argpartition', """ - any($self, /, axis=None, out=None, keepdims=False, *, where=True) + argpartition($self, kth, /, axis=-1, kind='introselect', order=None) -- - a.any(axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue) + a.argpartition(kth, axis=-1, kind='introselect', order=None) - Returns True if any of the elements of `a` evaluate to True. + Returns the indices that would partition this array. - Refer to `numpy.any` for full documentation. + Refer to `numpy.argpartition` for full documentation. See Also -------- - numpy.any : equivalent function + numpy.argpartition : equivalent function """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('argmax', +add_newdoc('numpy._core.multiarray', 'ndarray', ('partition', """ - argmax($self, /, axis=None, out=None, *, keepdims=False) + partition($self, kth, /, axis=-1, kind='introselect', order=None) -- - a.argmax(axis=None, out=None, *, keepdims=False) + a.partition(kth, axis=-1, kind='introselect', order=None) - Return indices of the maximum values along the given axis. + Partially sorts the elements in the array in such a way that the value of + the element in k-th position is in the position it would be in a sorted + array. In the output array, all elements smaller than the k-th element + are located to the left of this element and all equal or greater are + located to its right. The ordering of the elements in the two partitions + on the either side of the k-th element in the output array is undefined. - Refer to `numpy.argmax` for full documentation. + Parameters + ---------- + kth : int or sequence of ints + Element index to partition by. The kth element value will be in its + final sorted position and all smaller elements will be moved before it + and all equal or greater elements behind it. + The order of all elements in the partitions is undefined. + If provided with a sequence of kth it will partition all elements + indexed by kth of them into their sorted position at once. + + .. deprecated:: 1.22.0 + Passing booleans as index is deprecated. + axis : int, optional + Axis along which to sort. Default is -1, which means sort along the + last axis. + kind : {'introselect'}, optional + Selection algorithm. Default is 'introselect'. + order : str or list of str, optional + When `a` is an array with fields defined, this argument specifies + which fields to compare first, second, etc. A single field can + be specified as a string, and not all fields need to be specified, + but unspecified fields will still be used, in the order in which + they come up in the dtype, to break ties. See Also -------- - numpy.argmax : equivalent function + numpy.partition : Return a partitioned copy of an array. + argpartition : Indirect partition. + sort : Full sort. + + Notes + ----- + See ``np.partition`` for notes on the different algorithms. 
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array([3, 4, 2, 1])
+    >>> a.partition(3)
+    >>> a
+    array([2, 1, 3, 4]) # may vary
+
+    >>> a.partition((1, 3))
+    >>> a
+    array([1, 2, 3, 4])
+    """))
+
+
+##############################################################################
+#
+# methods from both `ndarray` and `generic`
+#
+##############################################################################
+
+_METHOD_DOC_TEMPLATE = """{name}({params})
+--
+
+{doc}"""
+
+def _array_method_doc(name: str, params: str, doc: str) -> None:
+    """
+    Internal helper function for adding docstrings to a common method of
+    `numpy.ndarray` and `numpy.generic`.
+
+    The provided docstring will be added to the given `numpy.ndarray` method.
+    For the `numpy.generic` method, a shorter docstring indicating that it is
+    identical to the `ndarray` method will be created.
+    Both methods will have a proper and identical `__text_signature__`.
+
+    Parameters
+    ----------
+    name : str
+        Name of the method.
+    params : str
+        Parameter signature for the method without parentheses, for example,
+        ``"a, /, dtype=None, *, copy=False"``.
+        Parameter defaults must be understood by `ast.literal_eval`, i.e.
+        strings, bytes, numbers, tuples, lists, dicts, sets, booleans, or
+        None.
+    doc : str
+        The full docstring for the `ndarray` method.
+    """
+    # prepend the pos-only `$self` parameter to the method signature
+    if "/" not in params:
+        params = f"/, {params}" if params else "/"
+    params = f"$self, {params}"
+
+    # add docstring to `np.ndarray.{name}`
+    doc = textwrap.dedent(doc).strip()
+    doc_array = _METHOD_DOC_TEMPLATE.format(name=name, params=params, doc=doc)
+    add_newdoc("numpy._core.multiarray", "ndarray", (name, doc_array))
+
+    # add docstring to `np.generic.{name}`
+    doc_scalar = f"Scalar method identical to `ndarray.{name}`."
+    doc_scalar = _METHOD_DOC_TEMPLATE.format(name=name, params=params, doc=doc_scalar)
+    add_newdoc("numpy._core.numerictypes", "generic", (name, doc_scalar))
+
+
+_array_method_doc('__array_namespace__', "*, api_version=None",
+    """
+    a.__array_namespace__(*, api_version=None)
+
+    For Array API compatibility.
+    """)
+
+_array_method_doc('__copy__', "",
+    """
+    a.__copy__()
+
+    Used if :func:`copy.copy` is called on an array. Returns a copy of the array.
+
+    Equivalent to ``a.copy(order='K')``.
+    """)
+
+_array_method_doc('__deepcopy__', "memo, /",
+    """
+    a.__deepcopy__(memo, /)
+
+    Used if :func:`copy.deepcopy` is called on an array.
+    """)
+
+_array_method_doc('all', "axis=None, out=None, keepdims=False, *, where=True",
+    """
+    a.all(axis=None, out=None, *, keepdims=<no value>, where=<no value>)
+
+    Returns True if all elements evaluate to True.
+
+    Refer to `numpy.all` for full documentation.
+
See Also -------- - numpy.argsort : equivalent function + numpy.all : equivalent function + """) - """)) +_array_method_doc('any', "axis=None, out=None, keepdims=False, *, where=True", + """ + a.any(axis=None, out=None, *, keepdims=, where=) + Returns True if any of the elements of `a` evaluate to True. -add_newdoc('numpy._core.multiarray', 'ndarray', ('argpartition', - """ - argpartition($self, kth, /, axis=-1, kind='introselect', order=None) - -- + Refer to `numpy.any` for full documentation. - a.argpartition(kth, axis=-1, kind='introselect', order=None) + See Also + -------- + numpy.any : equivalent function + """) - Returns the indices that would partition this array. +_array_method_doc('argmax', "axis=None, out=None, *, keepdims=False", + """ + a.argmax(axis=None, out=None, *, keepdims=False) - Refer to `numpy.argpartition` for full documentation. + Return indices of the maximum values along the given axis. + + Refer to `numpy.argmax` for full documentation. See Also -------- - numpy.argpartition : equivalent function + numpy.argmax : equivalent function + """) - """)) +_array_method_doc('argmin', "axis=None, out=None, *, keepdims=False", + """ + a.argmin(axis=None, out=None, *, keepdims=False) + + Return indices of the minimum values along the given axis. + + Refer to `numpy.argmin` for detailed documentation. + See Also + -------- + numpy.argmin : equivalent function + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('astype', +_array_method_doc('argsort', "axis=-1, kind=None, order=None, *, stable=None", """ - astype($self, /, dtype, order='K', casting='unsafe', subok=True, copy=True) - -- + a.argsort(axis=-1, kind=None, order=None, *, stable=None) + + Returns the indices that would sort this array. + + Refer to `numpy.argsort` for full documentation. + + See Also + -------- + numpy.argsort : equivalent function + """) +_array_method_doc('astype', "dtype, order='K', casting='unsafe', subok=True, copy=True", + """ a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True) Copy of the array, cast to a specified type. @@ -3495,14 +3583,10 @@ >>> x[:2].astype(int, casting="same_value") array([1, 2]) - """)) - + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('byteswap', +_array_method_doc('byteswap', "inplace=False", """ - byteswap($self, /, inplace=False) - -- - a.byteswap(inplace=False) Swap the bytes of the array elements @@ -3552,15 +3636,10 @@ >>> A.view(np.uint8) array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3], dtype=uint8) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('choose', +_array_method_doc('choose', "choices, out=None, mode='raise'", """ - choose($self, /, choices, out=None, mode='raise') - -- - a.choose(choices, out=None, mode='raise') Use an index array to construct a new array from a set of choices. @@ -3570,16 +3649,11 @@ See Also -------- numpy.choose : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('clip', +_array_method_doc('clip', "min=None, max=None, out=None, **kwargs", """ - clip($self, /, min=None, max=None, out=None, **kwargs) - -- - - a.clip(min=np._NoValue, max=np._NoValue, out=None, **kwargs) + a.clip(min=, max=, out=None, **kwargs) Return an array whose values are limited to ``[min, max]``. One of max or min must be given. 
@@ -3589,15 +3663,10 @@ See Also -------- numpy.clip : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('compress', +_array_method_doc('compress', "condition, axis=None, out=None", """ - compress($self, /, condition, axis=None, out=None) - -- - a.compress(condition, axis=None, out=None) Return selected slices of this array along given axis. @@ -3607,15 +3676,10 @@ See Also -------- numpy.compress : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('conj', +_array_method_doc('conj', "", """ - conj($self, /) - -- - a.conj() Complex-conjugate all elements. @@ -3625,15 +3689,10 @@ See Also -------- numpy.conjugate : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('conjugate', +_array_method_doc('conjugate', "", """ - conjugate($self, /) - -- - a.conjugate() Return the complex conjugate, element-wise. @@ -3643,15 +3702,10 @@ See Also -------- numpy.conjugate : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('copy', +_array_method_doc('copy', "order='C'", """ - copy($self, /, order='C') - -- - a.copy(order='C') Return a copy of the array. @@ -3719,15 +3773,10 @@ array([1, 'm', list([10, 3, 4])], dtype=object) >>> a array([1, 'm', list([2, 3, 4])], dtype=object) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('cumprod', +_array_method_doc('cumprod', "axis=None, dtype=None, out=None", """ - cumprod($self, /, axis=None, dtype=None, out=None) - -- - a.cumprod(axis=None, dtype=None, out=None) Return the cumulative product of the elements along the given axis. @@ -3737,15 +3786,10 @@ See Also -------- numpy.cumprod : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('cumsum', +_array_method_doc('cumsum', "axis=None, dtype=None, out=None", """ - cumsum($self, /, axis=None, dtype=None, out=None) - -- - a.cumsum(axis=None, dtype=None, out=None) Return the cumulative sum of the elements along the given axis. @@ -3755,15 +3799,10 @@ See Also -------- numpy.cumsum : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('diagonal', +_array_method_doc('diagonal', "offset=0, axis1=0, axis2=1", """ - diagonal($self, /, offset=0, axis1=0, axis2=1) - -- - a.diagonal(offset=0, axis1=0, axis2=1) Return specified diagonals. In NumPy 1.9 the returned array is a @@ -3775,27 +3814,10 @@ See Also -------- numpy.diagonal : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('dot', - """ - dot($self, other, /, out=None) - -- - - a.dot(other, /, out=None) - - Refer to :func:`numpy.dot` for full documentation. - - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('dump', +_array_method_doc('dump', "file", """ - dump($self, /, file) - -- - a.dump(file) Dump a pickle of the array to the specified file. @@ -3805,32 +3827,22 @@ ---------- file : str or Path A string naming the dump file. + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('dumps', +_array_method_doc('dumps', "", """ - dumps($self, /) - -- - a.dumps() Returns the pickle of the array as a string. - pickle.loads will convert the string back to an array. + ``pickle.loads`` will convert the string back to an array. Parameters ---------- None + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('fill', +_array_method_doc('fill', "value", """ - fill($self, /, value) - -- - a.fill(value) Fill the array with a scalar value. 
@@ -3869,15 +3881,10 @@ >>> a[...] = np.array(3) >>> a array([3, 3], dtype=object) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('flatten', +_array_method_doc('flatten', "order='C'", """ - flatten($self, /, order='C') - -- - a.flatten(order='C') Return a copy of the array collapsed into one dimension. @@ -3911,15 +3918,10 @@ array([1, 2, 3, 4]) >>> a.flatten('F') array([1, 3, 2, 4]) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('getfield', +_array_method_doc('getfield', "dtype, offset=0", """ - getfield($self, /, dtype, offset=0) - -- - a.getfield(dtype, offset=0) Returns a field of the given array as a certain type. @@ -3957,15 +3959,10 @@ >>> x.getfield(np.float64, offset=8) array([[1., 0.], [0., 4.]]) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('item', +_array_method_doc('item', "*args", """ - item($self, /, *args) - -- - a.item(*args) Copy an element of an array to a standard Python scalar and return it. @@ -4026,16 +4023,13 @@ >>> a = np.array([np.int64(1)], dtype=object) >>> a.item() #return np.int64 np.int64(1) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('max', - """ - max($self, /, axis=None, out=None, **kwargs) - -- +_KWARGS_REDUCE = "keepdims=, initial=, where=" - a.max(axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, where=np._NoValue) +_array_method_doc('max', "axis=None, out=None, **kwargs", + f""" + a.max(axis=None, out=None, *, {_KWARGS_REDUCE}) Return the maximum along a given axis. @@ -4044,107 +4038,89 @@ See Also -------- numpy.amax : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('mean', - """ - mean($self, /, axis=None, dtype=None, out=None, **kwargs) - -- - - a.mean(axis=None, dtype=None, out=None, keepdims=np._NoValue, *, where=np._NoValue) +_array_method_doc('min', "axis=None, out=None, **kwargs", + f""" + a.min(axis=None, out=None, *, {_KWARGS_REDUCE}) - Returns the average of the array elements along given axis. + Return the minimum along a given axis. - Refer to `numpy.mean` for full documentation. + Refer to `numpy.amin` for full documentation. See Also -------- - numpy.mean : equivalent function + numpy.amin : equivalent function + """) - """)) +_array_method_doc('prod', "axis=None, dtype=None, out=None, **kwargs", + f""" + a.prod(axis=None, dtype=None, out=None, *, {_KWARGS_REDUCE}) + Return the product of the array elements over the given axis -add_newdoc('numpy._core.multiarray', 'ndarray', ('min', - """ - min($self, /, axis=None, out=None, **kwargs) - -- + Refer to `numpy.prod` for full documentation. - a.min(axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, where=np._NoValue) + See Also + -------- + numpy.prod : equivalent function + """) - Return the minimum along a given axis. +_array_method_doc('sum', "axis=None, dtype=None, out=None, **kwargs", + f""" + a.sum(axis=None, dtype=None, out=None, *, {_KWARGS_REDUCE}) - Refer to `numpy.amin` for full documentation. + Return the sum of the array elements over the given axis. + + Refer to `numpy.sum` for full documentation. See Also -------- - numpy.amin : equivalent function - - """)) - + numpy.sum : equivalent function + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('nonzero', +_array_method_doc('mean', "axis=None, dtype=None, out=None, **kwargs", """ - nonzero($self, /) - -- + a.mean(axis=None, dtype=None, out=None, *, keepdims=, where=) - a.nonzero() - - Return the indices of the elements that are non-zero. 
+ Returns the average of the array elements along given axis. - Refer to `numpy.nonzero` for full documentation. + Refer to `numpy.mean` for full documentation. See Also -------- - numpy.nonzero : equivalent function - - """)) - + numpy.mean : equivalent function + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('prod', +_array_method_doc('nonzero', "", """ - prod($self, /, axis=None, dtype=None, out=None, **kwargs) - -- - - a.prod(axis=None, dtype=None, out=None, keepdims=np._NoValue, - initial=np._NoValue, where=np._NoValue) + a.nonzero() - Return the product of the array elements over the given axis + Return the indices of the elements that are non-zero. - Refer to `numpy.prod` for full documentation. + Refer to `numpy.nonzero` for full documentation. See Also -------- - numpy.prod : equivalent function - - """)) - + numpy.nonzero : equivalent function + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('put', +_array_method_doc('put', "indices, values, /, mode='raise'", """ - put($self, indices, values, /, mode='raise') - -- - a.put(indices, values, mode='raise') - Set ``a.flat[n] = values[n]`` for all `n` in indices. + Set ``a.flat[n] = values[n]`` for all ``n`` in indices. Refer to `numpy.put` for full documentation. See Also -------- numpy.put : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('ravel', +_array_method_doc('ravel', "order='C'", """ - ravel($self, /, order='C') - -- - - a.ravel([order]) + a.ravel(order='C') Return a flattened array. @@ -4153,17 +4129,11 @@ See Also -------- numpy.ravel : equivalent function - ndarray.flat : a flat iterator on the array. + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('repeat', +_array_method_doc('repeat', "repeats, /, axis=None", """ - repeat($self, repeats, /, axis=None) - -- - a.repeat(repeats, axis=None) Repeat elements of an array. @@ -4173,16 +4143,12 @@ See Also -------- numpy.repeat : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('reshape', +_array_method_doc('reshape', "*shape, order='C', copy=None", """ - reshape($self, /, *shape, order='C', copy=None) - -- - a.reshape(shape, /, *, order='C', copy=None) + a.reshape(*shape, order='C', copy=None) Returns an array containing the same data with a new shape. @@ -4196,18 +4162,13 @@ ----- Unlike the free function `numpy.reshape`, this method on `ndarray` allows the elements of the shape parameter to be passed in as separate arguments. - For example, ``a.reshape(10, 11)`` is equivalent to - ``a.reshape((10, 11))``. - - """)) - + For example, ``a.reshape(4, 2)`` is equivalent to ``a.reshape((4, 2))``. + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('resize', +_array_method_doc('resize', "*new_shape, refcheck=True", """ - resize($self, /, *new_shape, refcheck=True) - -- - - a.resize(new_shape, refcheck=True) + a.resize(new_shape, /, *, refcheck=True) + a.resize(*new_shape, refcheck=True) Change shape and size of array in-place. @@ -4295,15 +4256,10 @@ array([[0]]) >>> c array([[0]]) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('round', +_array_method_doc('round', "decimals=0, out=None", """ - round($self, /, decimals=0, out=None) - -- - a.round(decimals=0, out=None) Return `a` with each element rounded to the given number of decimals. 
@@ -4313,33 +4269,23 @@ See Also -------- numpy.around : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('searchsorted', +_array_method_doc('searchsorted', "v, /, side='left', sorter=None", """ - searchsorted($self, v, /, side='left', sorter=None) - -- - a.searchsorted(v, side='left', sorter=None) - Find indices where elements of v should be inserted in a to maintain order. + Find indices where elements of `v` should be inserted in `a` to maintain order. - For full documentation, see `numpy.searchsorted` + For full documentation, see `numpy.searchsorted`. See Also -------- numpy.searchsorted : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('setfield', +_array_method_doc('setfield', "val, /, dtype, offset=0", """ - setfield($self, val, /, dtype, offset=0) - -- - a.setfield(val, dtype, offset=0) Put a value into a specified place in a field defined by a data-type. @@ -4386,15 +4332,10 @@ array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('setflags', +_array_method_doc('setflags', "*, write=None, align=None, uic=None", """ - setflags($self, /, *, write=None, align=None, uic=None) - -- - a.setflags(write=None, align=None, uic=None) Set array flags WRITEABLE, ALIGNED, WRITEBACKIFCOPY, @@ -4466,15 +4407,10 @@ Traceback (most recent call last): File "", line 1, in ValueError: cannot set WRITEBACKIFCOPY flag to True + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('sort', +_array_method_doc('sort', "axis=-1, kind=None, order=None, *, stable=None", """ - sort($self, /, axis=-1, kind=None, order=None, *, stable=None) - -- - a.sort(axis=-1, kind=None, order=None, *, stable=None) Sort an array in-place. Refer to `numpy.sort` for full documentation. @@ -4536,77 +4472,10 @@ >>> a array([(b'c', 1), (b'a', 2)], dtype=[('x', 'S1'), ('y', '>> import numpy as np - >>> a = np.array([3, 4, 2, 1]) - >>> a.partition(3) - >>> a - array([2, 1, 3, 4]) # may vary - - >>> a.partition((1, 3)) - >>> a - array([1, 2, 3, 4]) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('squeeze', +_array_method_doc('squeeze', "axis=None", """ - squeeze($self, /, axis=None) - -- - a.squeeze(axis=None) Remove axes of length one from `a`. @@ -4616,16 +4485,13 @@ See Also -------- numpy.squeeze : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('std', - """ - std($self, /, axis=None, dtype=None, out=None, ddof=0, **kwargs) - -- +_KWARGS_STD = "*, keepdims=, where=, mean=" - a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, where=np._NoValue, mean=np._NoValue) +_array_method_doc('std', "axis=None, dtype=None, out=None, ddof=0, **kwargs", + f""" + a.std(axis=None, dtype=None, out=None, ddof=0, {_KWARGS_STD}) Returns the standard deviation of the array elements along given axis. @@ -4634,33 +4500,23 @@ See Also -------- numpy.std : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('sum', - """ - sum($self, /, axis=None, dtype=None, out=None, **kwargs) - -- - - a.sum(axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._NoValue, where=np._NoValue) +_array_method_doc('var', "axis=None, dtype=None, out=None, ddof=0, **kwargs", + f""" + a.var(axis=None, dtype=None, out=None, ddof=0, {_KWARGS_STD}) - Return the sum of the array elements over the given axis. + Returns the variance of the array elements, along given axis. 
- Refer to `numpy.sum` for full documentation. + Refer to `numpy.var` for full documentation. See Also -------- - numpy.sum : equivalent function - - """)) - + numpy.var : equivalent function + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('swapaxes', +_array_method_doc('swapaxes', "axis1, axis2, /", """ - swapaxes($self, axis1, axis2, /) - -- - a.swapaxes(axis1, axis2, /) Return a view of the array with `axis1` and `axis2` interchanged. @@ -4670,15 +4526,10 @@ See Also -------- numpy.swapaxes : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('take', +_array_method_doc('take', "indices, /, axis=None, out=None, mode='raise'", """ - take($self, indices, /, axis=None, out=None, mode='raise') - -- - a.take(indices, axis=None, out=None, mode='raise') Return an array formed from the elements of `a` at the given indices. @@ -4688,15 +4539,10 @@ See Also -------- numpy.take : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('to_device', +_array_method_doc('to_device', "device, /, *, stream=None", """ - to_device($self, device, /, *, stream=None) - -- - a.to_device(device, /, *, stream=None) For Array API compatibility. Since NumPy only supports CPU arrays, this @@ -4713,16 +4559,11 @@ ------- out : Self Returns the same array. + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('tofile', +_array_method_doc('tofile', "fid, /, sep='', format='%s'", """ - tofile($self, fid, /, sep='', format='%s') - -- - - a.tofile(fid, sep='', format='%s') + a.tofile(fid, /, sep='', format='%s') Write array to a file as text or binary (default). @@ -4756,22 +4597,17 @@ file, bypassing the file object's ``write`` method. As a result, tofile cannot be used with files objects supporting compression (e.g., GzipFile) or file-like objects that do not support ``fileno()`` (e.g., BytesIO). + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('tolist', +_array_method_doc('tolist', "", """ - tolist($self, /) - -- - a.tolist() Return the array as an ``a.ndim``-levels deep nested list of Python scalars. Return a copy of the array data as a (nested) Python list. Data items are converted to the nearest compatible builtin Python type, via - the `~numpy.ndarray.item` function. + the `~numpy.ndarray.item` method. If ``a.ndim`` is 0, then since the depth of the nested list is 0, it will not be a list at all, but a simple Python scalar. @@ -4825,14 +4661,10 @@ TypeError: iteration over a 0-d array >>> a.tolist() 1 - """)) - + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('tobytes', +_array_method_doc('tobytes', "order='C'", """ - tobytes($self, /, order='C') - -- - a.tobytes(order='C') Construct Python bytes containing the raw data bytes in the array. @@ -4869,15 +4701,10 @@ True >>> x.tobytes('F') b'\\x00\\x00\\x02\\x00\\x01\\x00\\x03\\x00' + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('trace', +_array_method_doc('trace', "offset=0, axis1=0, axis2=1, dtype=None, out=None", """ - trace($self, /, offset=0, axis1=0, axis2=1, dtype=None, out=None) - -- - a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None) Return the sum along diagonals of the array. @@ -4887,15 +4714,10 @@ See Also -------- numpy.trace : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('transpose', +_array_method_doc('transpose', "*axes", """ - transpose($self, /, *axes) - -- - a.transpose(*axes) Returns a view of the array with axes transposed. 
@@ -4947,33 +4769,10 @@ array([1, 2, 3, 4]) >>> a.transpose() array([1, 2, 3, 4]) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('var', - """ - var($self, /, axis=None, dtype=None, out=None, ddof=0, **kwargs) - -- - - a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, where=np._NoValue, mean=np._NoValue) - - Returns the variance of the array elements, along given axis. - - Refer to `numpy.var` for full documentation. - - See Also - -------- - numpy.var : equivalent function - - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('view', +_array_method_doc('view', "*args, **kwargs", """ - view($self, /, *args, **kwargs) - -- - a.view([dtype][, type]) New view of array with the same data. @@ -5089,8 +4888,7 @@ [[2312, 2826], [5396, 5910]]], dtype=int16) - - """)) + """) ############################################################################## @@ -7100,21 +6898,11 @@ # Attributes -def refer_to_array_attribute(attr, method=True): - docstring = """ - Scalar {} identical to the corresponding array attribute. - - Please see `ndarray.{}`. - """ - - return attr, docstring.format("method" if method else "attribute", attr) - +add_newdoc('numpy._core.numerictypes', 'generic', ('T', + """Scalar attribute identical to `ndarray.T`.""")) -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('T', method=False)) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('base', method=False)) +add_newdoc('numpy._core.numerictypes', 'generic', ('base', + """Scalar attribute identical to `ndarray.base`.""")) add_newdoc('numpy._core.numerictypes', 'generic', ('data', """Pointer to start of data.""")) @@ -7151,150 +6939,12 @@ def refer_to_array_attribute(attr, method=True): # Methods -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('all')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('any')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('argmax')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('argmin')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('argsort')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('astype')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('byteswap')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('choose')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('clip')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('compress')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('conjugate')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('copy')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('cumprod')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('cumsum')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('diagonal')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('dump')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('dumps')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('fill')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('flatten')) - 
-add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('getfield')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('item')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('max')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('mean')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('min')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('nonzero')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('prod')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('put')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('ravel')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('repeat')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('reshape')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('resize')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('round')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('searchsorted')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('setfield')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('setflags')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('sort')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('squeeze')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('std')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('sum')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('swapaxes')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('take')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('tofile')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('tolist')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('trace')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('transpose')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('var')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('view')) - add_newdoc('numpy._core.numerictypes', 'number', ('__class_getitem__', """ - __class_getitem__(item, /) + __class_getitem__($cls, item, /) + -- + + number.__class_getitem__(item, /) Return a parametrized wrapper around the `~numpy.number` type. 
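
As a rough illustration of what the shared runtime signatures provide (the
method chosen here is arbitrary), the scalar and array variants now
introspect identically, which is exactly what the new tests below assert:

    >>> import inspect, numpy as np
    >>> inspect.signature(np.generic.mean) == inspect.signature(np.ndarray.mean)
    True
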
diff --git a/numpy/_core/tests/test_scalar_methods.py b/numpy/_core/tests/test_scalar_methods.py index 7f1d7d6d7bdb..b993a8f3df29 100644 --- a/numpy/_core/tests/test_scalar_methods.py +++ b/numpy/_core/tests/test_scalar_methods.py @@ -257,50 +257,72 @@ def test_array_wrap(scalar): @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") -@pytest.mark.parametrize( - "sctype", - [ - *sctypes["int"], - *sctypes["uint"], - *sctypes["float"], - *sctypes["complex"], - *sctypes["others"], - np.datetime64, - np.timedelta64, - ], -) -def test_constructor_signatures(sctype: type[np.generic]) -> None: - try: - sig = inspect.signature(sctype) - except ValueError: - pytest.fail(f"missing signature: {sctype}") - - assert sig.parameters +@pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") +class TestSignature: + # test that scalar types have a valid __text_signature__ or __signature__ set + @pytest.mark.parametrize( + "sctype", + [ + *sctypes["int"], + *sctypes["uint"], + *sctypes["float"], + *sctypes["complex"], + *sctypes["others"], + np.datetime64, + np.timedelta64, + ], + ) + def test_constructor_signatures(self, sctype: type[np.generic]): + try: + sig = inspect.signature(sctype) + except ValueError: + pytest.fail(f"missing signature: {sctype}") -@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") -@pytest.mark.parametrize( - "sctype", - [np.integer, *sctypes["int"], *sctypes["uint"], *sctypes["float"]], -) -def test_method_signatures_is_integer(sctype: type[np.integer | np.floating]) -> None: - try: - sig = inspect.signature(sctype.is_integer) - except ValueError: - pytest.fail(f"missing signature: {sctype.__name__}.is_integer") - - assert len(sig.parameters) == 1 - assert sig.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY + assert sig.parameters -@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") -@pytest.mark.parametrize("sctype", sctypes["float"]) -def test_method_signatures_as_integer_ratio(sctype: type[np.floating]) -> None: - try: - sig = inspect.signature(sctype.as_integer_ratio) - except ValueError: - pytest.fail(f"missing signature: {sctype.__name__}.as_integer_ratio") - - assert len(sig.parameters) == 1 - assert sig.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY + @pytest.mark.parametrize( + "sctype", + [np.integer, *sctypes["int"], *sctypes["uint"], *sctypes["float"]], + ) + def test_method_signatures_is_integer(self, sctype: type[np.integer | np.floating]): + try: + sig = inspect.signature(sctype.is_integer) + except ValueError: + pytest.fail(f"missing signature: {sctype.__name__}.is_integer") + + assert len(sig.parameters) == 1 + assert sig.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY + + @pytest.mark.parametrize("sctype", sctypes["float"]) + def test_method_signatures_as_integer_ratio(self, sctype: type[np.floating]): + try: + sig = inspect.signature(sctype.as_integer_ratio) + except ValueError: + pytest.fail(f"missing signature: {sctype.__name__}.as_integer_ratio") + + assert len(sig.parameters) == 1 + assert sig.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY + + @pytest.mark.parametrize( + "method_name", + [ + "__array_namespace__", "__copy__", "__deepcopy__", "all", "any", "argmax", + "argmin", "argsort", "astype", "byteswap", "choose", "clip", 
"compress", + "conj", "conjugate", "copy", "cumprod", "cumsum", "diagonal", "dump", + "dumps", "fill", "flatten", "getfield", "item", "max", "mean", "min", + "nonzero", "prod", "put", "ravel", "repeat", "reshape", "resize", "round", + "searchsorted", "setfield", "setflags", "sort", "squeeze", "std", "sum", + "swapaxes", "take", "to_device", "tobytes", "tofile", "tolist", "trace", + "transpose", "var", "view", + ], + ) + def test_array_scalar_method_signatures(self, method_name: str): + # methods shared by np.generic and np.ndarray should have the same signature + fn_generic = getattr(np.generic, method_name) + sig_generic = inspect.signature(fn_generic) + assert "self" in sig_generic.parameters + assert sig_generic.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY + + fn_ndarray = getattr(np.ndarray, method_name) + sig_ndarray = inspect.signature(fn_ndarray) + assert sig_generic == sig_ndarray From 42d91bf1883196d59872002b8918d6460a22da79 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 11 Nov 2025 01:33:20 +0100 Subject: [PATCH 0836/1018] TYP: ``np.generic`` method stubs updates --- numpy/__init__.pyi | 87 +++++++++++++++++++++------------------------- 1 file changed, 40 insertions(+), 47 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index b03f67b1e281..bd6c8addcb38 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3682,21 +3682,24 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): def trace( # type: ignore[misc] self: Never, /, - offset: Never = ..., - axis1: Never = ..., - axis2: Never = ..., - dtype: Never = ..., - out: Never = ..., + offset: L[0] = 0, + axis1: L[0] = 0, + axis2: L[1] = 1, + dtype: None = None, + out: None = None, ) -> Never: ... - def diagonal(self: Never, /, offset: Never = ..., axis1: Never = ..., axis2: Never = ...) -> Never: ... # type: ignore[misc] + def diagonal(self: Never, /, offset: L[0] = 0, axis1: L[0] = 0, axis2: L[1] = 1) -> Never: ... # type: ignore[misc] def swapaxes(self: Never, axis1: Never, axis2: Never, /) -> Never: ... # type: ignore[misc] - def sort(self: Never, /, axis: Never = ..., kind: Never = ..., order: Never = ...) -> Never: ... # type: ignore[misc] + def sort(self: Never, /, axis: L[-1] = -1, kind: None = None, order: None = None, *, stable: None = None) -> Never: ... # type: ignore[misc] def nonzero(self: Never, /) -> Never: ... # type: ignore[misc] - def setfield(self: Never, /, val: Never, dtype: Never, offset: Never = ...) -> None: ... # type: ignore[misc] - def searchsorted(self: Never, /, v: Never, side: Never = ..., sorter: Never = ...) -> Never: ... # type: ignore[misc] + def setfield(self: Never, val: Never, /, dtype: Never, offset: L[0] = 0) -> None: ... # type: ignore[misc] + def searchsorted(self: Never, v: Never, /, side: L["left"] = "left", sorter: None = None) -> Never: ... # type: ignore[misc] # NOTE: this wont't raise, but won't do anything either - def resize(self, new_shape: L[0, -1] | tuple[L[0, -1]] | tuple[()], /, *, refcheck: builtins.bool = False) -> None: ... + @overload + def resize(self, /, *, refcheck: builtins.bool = True) -> None: ... + @overload + def resize(self, new_shape: L[0, -1] | tuple[L[0, -1]] | tuple[()], /, *, refcheck: builtins.bool = True) -> None: ... # def byteswap(self, /, inplace: L[False] = False) -> Self: ... 
@@ -3705,84 +3708,74 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @overload def astype( self, + /, dtype: _DTypeLike[_ScalarT], - order: _OrderKACF = ..., - casting: _CastingKind = ..., - subok: builtins.bool = ..., - copy: builtins.bool | _CopyMode = ..., + order: _OrderKACF = "K", + casting: _CastingKind = "unsafe", + subok: builtins.bool = True, + copy: builtins.bool | _CopyMode = True, ) -> _ScalarT: ... @overload def astype( self, + /, dtype: DTypeLike | None, - order: _OrderKACF = ..., - casting: _CastingKind = ..., - subok: builtins.bool = ..., - copy: builtins.bool | _CopyMode = ..., - ) -> Any: ... + order: _OrderKACF = "K", + casting: _CastingKind = "unsafe", + subok: builtins.bool = True, + copy: builtins.bool | _CopyMode = True, + ) -> Incomplete: ... # NOTE: `view` will perform a 0D->scalar cast, # thus the array `type` is irrelevant to the output type @overload - def view(self, type: type[NDArray[Any]] = ...) -> Self: ... + def view(self, type: type[ndarray] = ...) -> Self: ... @overload - def view( - self, - dtype: _DTypeLike[_ScalarT], - type: type[NDArray[Any]] = ..., - ) -> _ScalarT: ... + def view(self, /, dtype: _DTypeLike[_ScalarT], type: type[ndarray] = ...) -> _ScalarT: ... @overload - def view( - self, - dtype: DTypeLike, - type: type[NDArray[Any]] = ..., - ) -> Any: ... + def view(self, /, dtype: DTypeLike, type: type[ndarray] = ...) -> Incomplete: ... @overload - def getfield( - self, - dtype: _DTypeLike[_ScalarT], - offset: SupportsIndex = ... - ) -> _ScalarT: ... + def getfield(self, /, dtype: _DTypeLike[_ScalarT], offset: SupportsIndex = 0) -> _ScalarT: ... @overload - def getfield( - self, - dtype: DTypeLike, - offset: SupportsIndex = ... - ) -> Any: ... + def getfield(self, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> Incomplete: ... @overload def take( self, indices: _IntLike_co, - axis: SupportsIndex | None = ..., + /, + axis: SupportsIndex | None = None, out: None = None, - mode: _ModeKind = ..., + mode: _ModeKind = "raise", ) -> Self: ... @overload def take( self, indices: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., + /, + axis: SupportsIndex | None = None, out: None = None, - mode: _ModeKind = ..., + mode: _ModeKind = "raise", ) -> NDArray[Self]: ... @overload def take( self, indices: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., + /, + axis: SupportsIndex | None = None, *, out: _ArrayT, - mode: _ModeKind = ..., + mode: _ModeKind = "raise", ) -> _ArrayT: ... @overload def take( self, indices: _ArrayLikeInt_co, + /, axis: SupportsIndex | None, out: _ArrayT, - mode: _ModeKind = ..., + mode: _ModeKind = "raise", ) -> _ArrayT: ... def repeat(self, repeats: _ArrayLikeInt_co, /, axis: SupportsIndex | None = None) -> ndarray[tuple[int], dtype[Self]]: ... From 45454c634628be64412035a89d10746a91ba199e Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 11 Nov 2025 01:33:53 +0100 Subject: [PATCH 0837/1018] TYP: update ``_core._add_newdocs`` stubs --- numpy/_core/_add_newdocs.pyi | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/numpy/_core/_add_newdocs.pyi b/numpy/_core/_add_newdocs.pyi index b23c3b1adedd..2d004814fdcf 100644 --- a/numpy/_core/_add_newdocs.pyi +++ b/numpy/_core/_add_newdocs.pyi @@ -1,3 +1,2 @@ +from .function_base import add_newdoc as add_newdoc from .overrides import get_array_function_like_doc as get_array_function_like_doc - -def refer_to_array_attribute(attr: str, method: bool = True) -> tuple[str, str]: ... 
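
A brief sketch of how the updated stubs above are intended to check (the
names are illustrative, and ``reveal_type`` is a static type-checker probe
rather than runtime code):

    import numpy as np

    x = np.float64(1.5)
    reveal_type(x.take(0))             # np.float64: scalar indices return Self
    reveal_type(x.getfield(np.int32))  # np.int32: scalar type taken from the dtype
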
From a2d99b745e397e9e3ea0cf8a4033b7290bb3be3b Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Fri, 26 Sep 2025 20:24:43 -0400 Subject: [PATCH 0838/1018] ENH: Add fast path in ufuncs for numerical scalars. This adds a fast check for numerical scalars, improving the speed by about a factor of 6, with essentially no cost for arrays. At the moment, just for single-input, single-output ufuncs, with the assumption that the output has the same type as the input (if that fails, the regular path is taken). --- .../upcoming_changes/29819.improvement.rst | 6 + numpy/_core/src/umath/ufunc_object.c | 148 +++++++++++++++++- 2 files changed, 149 insertions(+), 5 deletions(-) create mode 100644 doc/release/upcoming_changes/29819.improvement.rst diff --git a/doc/release/upcoming_changes/29819.improvement.rst b/doc/release/upcoming_changes/29819.improvement.rst new file mode 100644 index 000000000000..fa4ac07f2a08 --- /dev/null +++ b/doc/release/upcoming_changes/29819.improvement.rst @@ -0,0 +1,6 @@ +Performance increase for scalar calculations +-------------------------------------------- +The speed of calculations on scalars has been improved by about a factor 6 for +ufuncs that take only one input (like ``np.sin(scalar)``), reducing the speed +difference from their ``math`` equivalents from a factor 19 to 3 (the speed +for arrays is left unchanged). diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 0bdc14e04cde..a9f00f351459 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -66,6 +66,7 @@ #include "npy_static_data.h" #include "multiarraymodule.h" #include "number.h" +#include "scalartypes.h" // for is_anyscalar_exact and scalar_value /********** PRINTF DEBUG TRACING **************/ #define NPY_UF_DBG_TRACING 0 @@ -3721,7 +3722,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, if (ret == NULL) { goto fail; } - + Py_XDECREF(out); Py_DECREF(signature[0]); @@ -4270,6 +4271,135 @@ replace_with_wrapped_result_and_return(PyUFuncObject *ufunc, return NULL; } +/* + * Check whether the input object is a known scalar and whether the ufunc has + * a suitable inner loop for it, which takes and returns the data type of the + * input (this function is not called if output or any other argument was given). + * If a loop was found, call it and store the result. + * + * Returns -2 if a short-cut is not possible, 0 on success and -1 on error. + */ +static int +try_trivial_scalar_call( + PyUFuncObject *ufunc, PyObject *const obj, PyObject **result) +{ + assert(ufunc->nin == 1 && ufunc->nout == 1 && !ufunc->core_enabled); + npy_clongdouble cin, cout; // aligned storage, using longest type. + char *in = (char *)&cin, *out = (char *)&cout; + char *data[] = {in, out}; + int ret = -2; + PyArray_Descr *dt; + /* + * For supported input, get input pointer and descriptor. Otherwise, bail. + */ + if (obj == Py_False || obj == Py_True) { + *(npy_bool *)in = (obj == Py_True); + dt = PyArray_DescrFromType(NPY_BOOL); + } + else if (PyFloat_CheckExact(obj)) { + *(double *)in = PyFloat_AS_DOUBLE(obj); + dt = PyArray_DescrFromType(NPY_FLOAT64); + } + else if (PyLong_CheckExact(obj)) { + int overflow; + npy_intp val = PyLong_AsLongAndOverflow(obj, &overflow); + if (overflow) { + return -2; // bail, main code perhaps deals with this. + } + if (error_converting(val)) { + return -1; // should never happen; pass on it if does. 
+ } + *(npy_intp *)in = val; + dt = PyArray_DescrFromType(NPY_INTP); + } + else if (PyComplex_CheckExact(obj)) { + Py_complex oop = PyComplex_AsCComplex(obj); + if (error_converting(oop.real)) { + return -1; // should never happen; pass on it if does. + } + *(double *)in = oop.real; + *(double *)(in+sizeof(double)) = oop.imag; + dt = PyArray_DescrFromType(NPY_COMPLEX128); + } + else if (is_anyscalar_exact(obj)) { + dt = PyArray_DescrFromScalar(obj); + if (!PyDataType_ISNUMBER(dt)) { + goto bail; + } + data[0] = scalar_value(obj, dt); + } + else { + return -2; + } + /* + * Check the ufunc supports our descriptor, bailing (return -2) if not. + */ + // Try getting info from the (private) cache. Fall back if not found, + // so that the the dtype gets registered and things will work next time. + PyArray_DTypeMeta *op_dtypes[2] = {NPY_DTYPE(dt), NULL}; + PyObject *info = PyArrayIdentityHash_GetItem( // borrowed reference. + (PyArrayIdentityHash *)ufunc->_dispatch_cache, + (PyObject **)op_dtypes); + if (info == NULL) { + goto bail; + } + // Check actual dtype is correct (can be wrong with promotion). + PyObject *all_dtypes = PyTuple_GET_ITEM(info, 0); + if ((PyTuple_GET_ITEM(all_dtypes, 0) != (PyObject *)NPY_DTYPE(dt)) || + (PyTuple_GET_ITEM(all_dtypes, 1) != (PyObject *)NPY_DTYPE(dt))) { + goto bail; + } + // Get method, bailing if not an arraymethod (e.g., a promotor). + PyArrayMethodObject *method = (PyArrayMethodObject *)PyTuple_GET_ITEM(info, 1); + if (!PyObject_TypeCheck(method, &PyArrayMethod_Type)) { + goto bail; + } + // Get loop, requiring that the output and input dtype are the same. + PyArrayMethod_Context context; + PyArray_Descr *descrs[2] = {dt, dt}; + NPY_context_init(&context, descrs); + context.caller = (PyObject *)ufunc; + context.method = method; + npy_intp strides[2] = {0, 0}; // 0 ensures scalar math, not SIMD for half. + PyArrayMethod_StridedLoop *strided_loop; + NpyAuxData *auxdata = NULL; + NPY_ARRAYMETHOD_FLAGS flags = 0; + if (method->get_strided_loop(&context, 1, 0, strides, + &strided_loop, &auxdata, &flags) < 0) { + ret = -1; // Should not happen, so raise error if it does anyway. + goto bail; + } + /* + * Call loop with single element, checking floating point errors. + */ + if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + npy_clear_floatstatus(); + } + npy_intp n = 1; + ret = strided_loop(&context, data, &n, strides, auxdata); + NPY_AUXDATA_FREE(auxdata); + if (ret == 0) { + if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + // Check for any unmasked floating point errors (note: faster + // than _check_ufunc_fperr as one doesn't need mask up front). + int fpe_errors = npy_get_floatstatus(); + if (fpe_errors) { + if (PyUFunc_GiveFloatingpointErrors( + ufunc_get_name_cstr(ufunc), fpe_errors) < 0) { + ret = -1; // Real error, falling back would not help. + goto bail; + } + } + } + *result = PyArray_Scalar(out, dt, NULL); + if (*result == NULL) { + ret = -1; // Real error (should never happen). + } + } + bail: + Py_DECREF(dt); + return ret; +} /* * Main ufunc call implementation. @@ -4288,6 +4418,14 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, int errval; int nin = ufunc->nin, nout = ufunc->nout, nop = ufunc->nargs; + if (len_args == 1 && kwnames == NULL && !PyArray_Check(args[0]) + && nin == 1 && nout == 1 && !ufunc->core_enabled) { + // Possibly scalar input, try the fast path, falling back on failure. 
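+        // (The helper above packs the scalar into aligned scratch space,
+        // looks up a matching same-dtype inner loop in the dispatch cache,
+        // runs it on a single element with zero strides, and wraps the
+        // result via PyArray_Scalar; it returns -2 whenever no shortcut
+        // applies, so the regular path below still handles those cases.)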
+ PyObject *result = NULL; + if (try_trivial_scalar_call(ufunc, args[0], &result) != -2) { + return result; + } + } /* All following variables are cleared in the `fail` error path */ ufunc_full_args full_args = {NULL, NULL}; PyArrayObject *wheremask = NULL; @@ -4301,7 +4439,7 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, return NULL; } memset(scratch_objs, 0, sizeof(void *) * (nop * 4 + 2)); - + PyArray_DTypeMeta **signature = (PyArray_DTypeMeta **)scratch_objs; PyArrayObject **operands = (PyArrayObject **)(signature + nop); PyArray_DTypeMeta **operand_DTypes = (PyArray_DTypeMeta **)(operands + nop + 1); @@ -4366,17 +4504,17 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, /* Extra positional args but no keywords */ /* DEPRECATED NumPy 2.4, 2025-08 */ if ((PyObject *)ufunc == n_ops.maximum || (PyObject *)ufunc == n_ops.minimum) { - + if (DEPRECATE( "Passing more than 2 positional arguments to np.maximum and np.minimum " - "is deprecated. If you meant to use the third argument as an output, " + "is deprecated. If you meant to use the third argument as an output, " "use the `out` keyword argument instead. If you hoped to work with " "more than 2 inputs, combine them into a single array and get the extrema " "for the relevant axis.") < 0) { return NULL; } } - + if (all_none) { Py_SETREF(full_args.out, NULL); } From c86f0420333c1cc97ec000944f45021027ca58aa Mon Sep 17 00:00:00 2001 From: Veit Heller Date: Tue, 11 Nov 2025 13:41:16 +0100 Subject: [PATCH 0839/1018] ENH,MAINT: rewrite np.fix to use np.trunc internally (#30197) * DEP: rewrite np.fix to use np.trunc internally * TST: add trunc to object for ufunclike tests --- numpy/lib/_ufunclike_impl.py | 10 +--------- numpy/typing/tests/data/pass/ufunclike.py | 3 +++ 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/numpy/lib/_ufunclike_impl.py b/numpy/lib/_ufunclike_impl.py index 79cec5aa08b6..569840697d81 100644 --- a/numpy/lib/_ufunclike_impl.py +++ b/numpy/lib/_ufunclike_impl.py @@ -56,15 +56,7 @@ def fix(x, out=None): array([ 2., 2., -2., -2.]) """ - # promote back to an array if flattened - res = nx.ceil(x, out=... 
if out is None else out) - res = nx.floor(x, out=res, where=nx.greater_equal(x, 0)) - - # when no out argument is passed and no subclasses are involved, flatten - # scalars - if out is None and type(res) is nx.ndarray: - res = res[()] - return res + return nx.trunc(x, out=out) @array_function_dispatch(_dispatcher, verify=False, module='numpy') diff --git a/numpy/typing/tests/data/pass/ufunclike.py b/numpy/typing/tests/data/pass/ufunclike.py index c02a68cc062c..f69ec8eaf4ed 100644 --- a/numpy/typing/tests/data/pass/ufunclike.py +++ b/numpy/typing/tests/data/pass/ufunclike.py @@ -12,6 +12,9 @@ def __ceil__(self) -> Object: def __floor__(self) -> Object: return self + def __trunc__(self) -> Object: + return self + def __ge__(self, value: object) -> bool: return True From 3c5121a299045ef5242c9ea984338553859daf4a Mon Sep 17 00:00:00 2001 From: Veit Heller Date: Tue, 11 Nov 2025 15:29:17 +0100 Subject: [PATCH 0840/1018] DOC: update result_type docs to link to promotion rules --- numpy/_core/multiarray.py | 36 ++---------------------------------- 1 file changed, 2 insertions(+), 34 deletions(-) diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index 5599494720b6..4393f096fa9d 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -701,19 +701,8 @@ def result_type(*arrays_and_dtypes): result_type(*arrays_and_dtypes) Returns the type that results from applying the NumPy - type promotion rules to the arguments. - - Type promotion in NumPy works similarly to the rules in languages - like C++, with some slight differences. When both scalars and - arrays are used, the array's type takes precedence and the actual value - of the scalar is taken into account. - - For example, calculating 3*a, where a is an array of 32-bit floats, - intuitively should result in a 32-bit float output. If the 3 is a - 32-bit integer, the NumPy rules indicate it can't convert losslessly - into a 32-bit float, so a 64-bit float should be the result type. - By examining the value of the constant, '3', we see that it fits in - an 8-bit integer, which can be cast losslessly into the 32-bit float. + type promotion rules to the arguments + (`see also `_). Parameters ---------- @@ -729,27 +718,6 @@ def result_type(*arrays_and_dtypes): -------- dtype, promote_types, min_scalar_type, can_cast - Notes - ----- - The specific algorithm used is as follows. - - Categories are determined by first checking which of boolean, - integer (int/uint), or floating point (float/complex) the maximum - kind of all the arrays and the scalars are. - - If there are only scalars or the maximum category of the scalars - is higher than the maximum category of the arrays, - the data types are combined with :func:`promote_types` - to produce the return value. - - Otherwise, `min_scalar_type` is called on each scalar, and - the resulting data types are all combined with :func:`promote_types` - to produce the return value. - - The set of int values is not a subset of the uint values for types - with the same number of bits, something not reflected in - :func:`min_scalar_type`, but handled as a special case in `result_type`. 
- Examples -------- >>> import numpy as np From be14bb7574f7513646d073c72c53f73da60481c2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Nov 2025 17:05:47 +0000 Subject: [PATCH 0841/1018] MAINT: Bump actions/dependency-review-action from 4.8.1 to 4.8.2 Bumps [actions/dependency-review-action](https://github.com/actions/dependency-review-action) from 4.8.1 to 4.8.2. - [Release notes](https://github.com/actions/dependency-review-action/releases) - [Commits](https://github.com/actions/dependency-review-action/compare/40c09b7dc99638e5ddb0bfd91c1673effc064d8a...3c4e3dcb1aa7874d2c16be7d79418e9b7efd6261) --- updated-dependencies: - dependency-name: actions/dependency-review-action dependency-version: 4.8.2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/dependency-review.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 797918b2ceff..b2fd18051ddd 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -19,6 +19,6 @@ jobs: with: persist-credentials: false - name: 'Dependency Review' - uses: actions/dependency-review-action@40c09b7dc99638e5ddb0bfd91c1673effc064d8a # v4.8.1 + uses: actions/dependency-review-action@3c4e3dcb1aa7874d2c16be7d79418e9b7efd6261 # v4.8.2 with: allow-ghsas: GHSA-cx63-2mw6-8hw5 From c46282261eabe3c61288d854e6a316ad64222a9c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Nov 2025 17:05:55 +0000 Subject: [PATCH 0842/1018] MAINT: Bump astral-sh/setup-uv from 7.1.2 to 7.1.3 Bumps [astral-sh/setup-uv](https://github.com/astral-sh/setup-uv) from 7.1.2 to 7.1.3. - [Release notes](https://github.com/astral-sh/setup-uv/releases) - [Commits](https://github.com/astral-sh/setup-uv/compare/85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41...5a7eac68fb9809dea845d802897dc5c723910fa3) --- updated-dependencies: - dependency-name: astral-sh/setup-uv dependency-version: 7.1.3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot]
---
 .github/workflows/mypy.yml     | 2 +-
 .github/workflows/stubtest.yml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml
index 5fbc4afba8e9..0e9e63d53d1a 100644
--- a/.github/workflows/mypy.yml
+++ b/.github/workflows/mypy.yml
@@ -58,7 +58,7 @@ jobs:
           submodules: recursive
           fetch-tags: true
           persist-credentials: false
-      - uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2
+      - uses: astral-sh/setup-uv@5a7eac68fb9809dea845d802897dc5c723910fa3 # v7.1.3
        with:
          python-version: ${{ matrix.os_python[1] }}
          activate-environment: true
diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml
index f5b2fbfbbbae..43e60fae80b9 100644
--- a/.github/workflows/stubtest.yml
+++ b/.github/workflows/stubtest.yml
@@ -43,7 +43,7 @@ jobs:
           fetch-tags: true
           persist-credentials: false

-      - uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2
+      - uses: astral-sh/setup-uv@5a7eac68fb9809dea845d802897dc5c723910fa3 # v7.1.3
        with:
          python-version: ${{ matrix.py }}
          activate-environment: true

From 576e0a10f95e190577f73a760a61abfe7803d2e4 Mon Sep 17 00:00:00 2001
From: M Bussonnier
Date: Tue, 11 Nov 2025 18:50:01 +0100
Subject: [PATCH 0843/1018] DOC: Fix some broken refs and typos. (#30196)

The default role is set to autolink (which falls back to emphasis if not
resolved)

And I believe there is a typo in ALIGNMENT vs ALIGNENT.

I would suggest moving away from autolink for default role, and have it
raise an error if not resolved. This would also make sure users use
double backticks for code.

[skip actions][skip azp][skip cirrus]
---
 doc/source/reference/c-api/array.rst                | 2 +-
 doc/source/reference/c-api/types-and-structures.rst | 2 +-
 doc/source/user/c-info.beyond-basics.rst            | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst
index 7a1bace2c63e..ebed18de2196 100644
--- a/doc/source/reference/c-api/array.rst
+++ b/doc/source/reference/c-api/array.rst
@@ -784,7 +784,7 @@ cannot not be accessed directly.
     Allows setting of the itemsize, this is *only* relevant for string/bytes
     datatypes as it is the current pattern to define one with a new size.

-.. c:function:: npy_intp PyDataType_ALIGNENT(PyArray_Descr *descr)
+.. c:function:: npy_intp PyDataType_ALIGNMENT(PyArray_Descr *descr)

     The alignment of the datatype.

diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst
index 49704913037b..a039af130860 100644
--- a/doc/source/reference/c-api/types-and-structures.rst
+++ b/doc/source/reference/c-api/types-and-structures.rst
@@ -365,7 +365,7 @@ PyArrayDescr_Type and PyArray_Descr
     places an item of this type: ``offsetof(struct {char c; type v;}, v)``

-    See `PyDataType_ALIGNMENT` for a way to access this field in a NumPy 1.x
+    See :c:func:`PyDataType_ALIGNMENT` for a way to access this field in a NumPy 1.x
     compatible way.

 .. c:member:: PyObject *metadata
diff --git a/doc/source/user/c-info.beyond-basics.rst b/doc/source/user/c-info.beyond-basics.rst
index 7bf793ae2e47..eadeafe51e8e 100644
--- a/doc/source/user/c-info.beyond-basics.rst
+++ b/doc/source/user/c-info.beyond-basics.rst
@@ -268,7 +268,7 @@ specifies your data-type. This type number should be stored and made available
 by your module so that other modules can use it to recognize your
 data-type.

-Note that this API is inherently thread-unsafe. See `thread_safety` for more
See `thread_safety` for more +Note that this API is inherently thread-unsafe. See :ref:`thread_safety` for more details about thread safety in NumPy. From 330e2c17f868dd54837c726ac43b9d96a2cb2f2a Mon Sep 17 00:00:00 2001 From: Veit Heller Date: Wed, 12 Nov 2025 12:07:01 +0100 Subject: [PATCH 0844/1018] DOC: simplify link in docstring for result_type [skip azp][skip actions][skip cirrus] --- numpy/_core/multiarray.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index 4393f096fa9d..3b1eed0e82e4 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -701,8 +701,7 @@ def result_type(*arrays_and_dtypes): result_type(*arrays_and_dtypes) Returns the type that results from applying the NumPy - type promotion rules to the arguments - (`see also `_). + :ref:`type promotion ` rules to the arguments. Parameters ---------- From eba60dc3e61528953b2fa0a85d4d7ee009814797 Mon Sep 17 00:00:00 2001 From: Aadya Chinubhai <77720426+aadya940@users.noreply.github.com> Date: Wed, 12 Nov 2025 03:48:33 -0800 Subject: [PATCH 0845/1018] ENH: Reduce compute time for `tobytes` in non-contiguos paths (#30170) * Add optimal copy path for non-contiguos arrays in ToString C Impl. * Add tests for obytes new path * Add imports * fix comments * fix memory issues and add benchmarks * run ruff --fix * simplify function * minor fix * minor typos and move tests --- benchmarks/benchmarks/bench_core.py | 4 ++ numpy/_core/src/multiarray/convert.c | 82 +++++++++++++++++----------- numpy/_core/tests/test_multiarray.py | 13 ++++- 3 files changed, 67 insertions(+), 32 deletions(-) diff --git a/benchmarks/benchmarks/bench_core.py b/benchmarks/benchmarks/bench_core.py index a9a6c88b87a0..ea7aae007fdc 100644 --- a/benchmarks/benchmarks/bench_core.py +++ b/benchmarks/benchmarks/bench_core.py @@ -14,6 +14,7 @@ def setup(self): self.l_view = [memoryview(a) for a in self.l] self.l10x10 = np.ones((10, 10)) self.float64_dtype = np.dtype(np.float64) + self.arr = np.arange(10000).reshape(100, 100) def time_array_1(self): np.array(1) @@ -48,6 +49,9 @@ def time_array_l_view(self): def time_can_cast(self): np.can_cast(self.l10x10, self.float64_dtype) + def time_tobytes_noncontiguous(self): + self.arr.T.tobytes() + def time_can_cast_same_kind(self): np.can_cast(self.l10x10, self.float64_dtype, casting="same_kind") diff --git a/numpy/_core/src/multiarray/convert.c b/numpy/_core/src/multiarray/convert.c index 983d9bc19ce6..ccd883f2b0f4 100644 --- a/numpy/_core/src/multiarray/convert.c +++ b/numpy/_core/src/multiarray/convert.c @@ -335,11 +335,7 @@ NPY_NO_EXPORT PyObject * PyArray_ToString(PyArrayObject *self, NPY_ORDER order) { npy_intp numbytes; - npy_intp i; - char *dptr; - int elsize; PyObject *ret; - PyArrayIterObject *it; if (order == NPY_ANYORDER) order = PyArray_ISFORTRAN(self) ? NPY_FORTRANORDER : NPY_CORDER; @@ -354,41 +350,65 @@ PyArray_ToString(PyArrayObject *self, NPY_ORDER order) numbytes = PyArray_NBYTES(self); if ((PyArray_IS_C_CONTIGUOUS(self) && (order == NPY_CORDER)) || (PyArray_IS_F_CONTIGUOUS(self) && (order == NPY_FORTRANORDER))) { - ret = PyBytes_FromStringAndSize(PyArray_DATA(self), (Py_ssize_t) numbytes); + return PyBytes_FromStringAndSize(PyArray_DATA(self), (Py_ssize_t) numbytes); } - else { - PyObject *new; - if (order == NPY_FORTRANORDER) { - /* iterators are always in C-order */ - new = PyArray_Transpose(self, NULL); - if (new == NULL) { - return NULL; - } + + /* Avoid Ravel where possible for fewer copies. 
*/ + if (!PyDataType_REFCHK(PyArray_DESCR(self)) && + ((PyArray_DESCR(self)->flags & NPY_NEEDS_INIT) == 0)) { + + /* Allocate final Bytes Object */ + ret = PyBytes_FromStringAndSize(NULL, (Py_ssize_t) numbytes); + if (ret == NULL) { + return NULL; } - else { - Py_INCREF(self); - new = (PyObject *)self; + + /* Writable Buffer */ + char* dest = PyBytes_AS_STRING(ret); + + int flags = NPY_ARRAY_WRITEABLE; + if (order == NPY_FORTRANORDER) { + flags |= NPY_ARRAY_F_CONTIGUOUS; } - it = (PyArrayIterObject *)PyArray_IterNew(new); - Py_DECREF(new); - if (it == NULL) { + + Py_INCREF(PyArray_DESCR(self)); + /* Array view */ + PyArrayObject *dest_array = (PyArrayObject *)PyArray_NewFromDescr( + &PyArray_Type, + PyArray_DESCR(self), + PyArray_NDIM(self), + PyArray_DIMS(self), + NULL, // strides + dest, + flags, + NULL + ); + + if (dest_array == NULL) { + Py_DECREF(ret); return NULL; } - ret = PyBytes_FromStringAndSize(NULL, (Py_ssize_t) numbytes); - if (ret == NULL) { - Py_DECREF(it); + + /* Copy directly from source to destination with proper ordering */ + if (PyArray_CopyInto(dest_array, self) < 0) { + Py_DECREF(dest_array); + Py_DECREF(ret); return NULL; } - dptr = PyBytes_AS_STRING(ret); - i = it->size; - elsize = PyArray_ITEMSIZE(self); - while (i--) { - memcpy(dptr, it->dataptr, elsize); - dptr += elsize; - PyArray_ITER_NEXT(it); - } - Py_DECREF(it); + + Py_DECREF(dest_array); + return ret; + + } + + /* Non-contiguous, Has References and/or Init Path. */ + PyArrayObject *contig = (PyArrayObject *)PyArray_Ravel(self, order); + if (contig == NULL) { + return NULL; } + + ret = PyBytes_FromStringAndSize(PyArray_DATA(contig), numbytes); + Py_DECREF(contig); return ret; } diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 2193887cb43f..051a81d99d1f 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -3835,6 +3835,18 @@ class ArraySubclass(np.ndarray): assert_(isinstance(a.ravel('A'), ArraySubclass)) assert_(isinstance(a.ravel('K'), ArraySubclass)) + @pytest.mark.parametrize("shape", [(3, 224, 224), (8, 512, 512)]) + def test_tobytes_no_copy_fastpath(self, shape): + # Test correctness of non-contiguous paths for `tobytes` + rng = np.random.default_rng(0) + arr = rng.standard_normal(shape, dtype=np.float32) + noncontig = arr.transpose(1, 2, 0) + + # correctness + expected = np.ascontiguousarray(noncontig).tobytes() + got = noncontig.tobytes() + assert got == expected + def test_swapaxes(self): a = np.arange(1 * 2 * 3 * 4).reshape(1, 2, 3, 4).copy() idx = np.indices(a.shape) @@ -10546,7 +10558,6 @@ def test_getfield(): pytest.raises(ValueError, a.getfield, 'uint8', 16) pytest.raises(ValueError, a.getfield, 'uint64', 0) - class TestViewDtype: """ Verify that making a view of a non-contiguous array works as expected. From 41c06bc51f5848925ad647bd8dfd1bfc7aee4f64 Mon Sep 17 00:00:00 2001 From: Veit Heller Date: Wed, 12 Nov 2025 12:51:00 +0100 Subject: [PATCH 0846/1018] ENH: Detect Fortran vs C order in array_assign_boolean_subscript (#30201) This PR fixes gh-30156 by detecting when to set KEEPORDER order in array_assign_boolean_subscript and adds a benchmark for it. 
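For reference, a minimal sketch of the access pattern this change speeds up
(illustrative only; the shape, seed, and fill value are arbitrary and mirror
the new benchmark below):

    import numpy as np

    # Fortran-ordered destination with a matching Fortran-ordered mask.
    a = np.zeros((64, 64, 64), dtype=np.uint32, order='F')
    mask = np.asfortranarray(
        np.random.default_rng(0).random(a.shape) > 0.5)

    # A single scalar is broadcast to every selected element, so the result
    # cannot depend on iteration order; with this change the assignment may
    # iterate in memory order (NPY_KEEPORDER) instead of forcing C order.
    a[mask] = np.uint32(7)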
--- benchmarks/benchmarks/bench_indexing.py | 16 ++++++++++++++++ numpy/_core/src/multiarray/mapping.c | 2 ++ 2 files changed, 18 insertions(+) diff --git a/benchmarks/benchmarks/bench_indexing.py b/benchmarks/benchmarks/bench_indexing.py index 81c812e81e19..f1153489f515 100644 --- a/benchmarks/benchmarks/bench_indexing.py +++ b/benchmarks/benchmarks/bench_indexing.py @@ -84,6 +84,22 @@ def time_assign_cast(self, ndim): arr[indx] = val +class BooleanAssignmentOrder(Benchmark): + params = ['C', 'F'] + param_names = ['order'] + + def setup(self, order): + shape = (64, 64, 64) + # emulate gh-30156: boolean assignment into a Fortran/C array + self.base = np.zeros(shape, dtype=np.uint32, order=order) + mask = np.random.RandomState(0).rand(*self.base.shape) > 0.5 + self.mask = mask.copy(order) + self.value = np.uint32(7) + + def time_boolean_assign_scalar(self, order): + self.base[self.mask] = self.value + + class IndexingSeparate(Benchmark): def setup(self): self.tmp_dir = mkdtemp() diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index 997f21928db3..d6128f74621a 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -1173,6 +1173,8 @@ array_assign_boolean_subscript(PyArrayObject *self, } else { v_stride = 0; + /* If the same value is repeated, iteration order does not matter */ + order = NPY_KEEPORDER; } v_data = PyArray_DATA(v); From 011385f3a8d52a32bba62fcf2ac380e92083bf0f Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Fri, 10 Oct 2025 16:19:24 -0400 Subject: [PATCH 0847/1018] MAINT: Avoid "fooling deallocator" in array_descr_set Found this caused problems while trying to implement storing the dimensions and strides on the instance. Splitting it out since it is generally bad form to assume too much about the inner workings of, e.g., the deallocator, and it has no real benefits beyond trying to be clever. 
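The user-visible operation that reaches this code path is in-place dtype
assignment; a rough sketch of what it does (not part of the patch):

    import numpy as np

    a = np.zeros(6, dtype=np.int64)

    # Assigning to `.dtype` reinterprets the existing buffer in place and
    # goes through array_descr_set; the change below copies the new
    # dimensions and strides into a freshly allocated cache instead of
    # stealing them from the temporary view and tricking its deallocator.
    a.dtype = np.int32
    print(a.shape)  # (12,) -- same memory, twice as many 4-byte items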
--- numpy/_core/src/multiarray/getset.c | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c index 16876639897c..1aff38476d50 100644 --- a/numpy/_core/src/multiarray/getset.c +++ b/numpy/_core/src/multiarray/getset.c @@ -500,15 +500,23 @@ array_descr_set(PyArrayObject *self, PyObject *arg, void *NPY_UNUSED(ignored)) if (temp == NULL) { return -1; } + /* create new dimensions cache and fill it */ + npy_intp new_nd = PyArray_NDIM(temp); + npy_intp *new_dims = npy_alloc_cache_dim(2 * new_nd); + if (new_dims == NULL) { + Py_DECREF(temp); + PyErr_NoMemory(); + return -1; + } + memcpy(new_dims, PyArray_DIMS(temp), new_nd * sizeof(npy_intp)); + memcpy(new_dims + new_nd, PyArray_STRIDES(temp), new_nd * sizeof(npy_intp)); + /* Update self with new cache */ npy_free_cache_dim_array(self); - ((PyArrayObject_fields *)self)->dimensions = PyArray_DIMS(temp); - ((PyArrayObject_fields *)self)->nd = PyArray_NDIM(temp); - ((PyArrayObject_fields *)self)->strides = PyArray_STRIDES(temp); + ((PyArrayObject_fields *)self)->nd = new_nd; + ((PyArrayObject_fields *)self)->dimensions = new_dims; + ((PyArrayObject_fields *)self)->strides = new_dims + new_nd; newtype = PyArray_DESCR(temp); - Py_INCREF(PyArray_DESCR(temp)); - /* Fool deallocator not to delete these*/ - ((PyArrayObject_fields *)temp)->nd = 0; - ((PyArrayObject_fields *)temp)->dimensions = NULL; + Py_INCREF(newtype); Py_DECREF(temp); } From 930b0c241b53f71458dd949e809c0c04af559cc8 Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Fri, 10 Oct 2025 16:53:26 -0400 Subject: [PATCH 0848/1018] MAINT,BUG: create PyArray_Resize_int, which returns int, and use it internally. Also fix the minimum allocation used inside, which should be 1, not elsize. And use the default function to deallocate dimension & strides. --- doc/source/reference/c-api/array.rst | 25 ++++++------ numpy/_core/src/multiarray/methods.c | 8 ++-- numpy/_core/src/multiarray/shape.c | 59 +++++++++++++++++----------- numpy/_core/src/multiarray/shape.h | 6 +++ 4 files changed, 56 insertions(+), 42 deletions(-) diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index ebed18de2196..d07a00ebde73 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -2248,19 +2248,18 @@ Shape Manipulation PyArrayObject* self, PyArray_Dims* newshape, int refcheck, \ NPY_ORDER fortran) - Equivalent to :meth:`ndarray.resize` (*self*, *newshape*, refcheck - ``=`` *refcheck*, order= fortran ). This function only works on - single-segment arrays. It changes the shape of *self* inplace and - will reallocate the memory for *self* if *newshape* has a - different total number of elements then the old shape. If - reallocation is necessary, then *self* must own its data, have - *self* - ``>base==NULL``, have *self* - ``>weakrefs==NULL``, and - (unless refcheck is 0) not be referenced by any other array. - The fortran argument can be :c:data:`NPY_ANYORDER`, :c:data:`NPY_CORDER`, - or :c:data:`NPY_FORTRANORDER`. It currently has no effect. Eventually - it could be used to determine how the resize operation should view - the data when constructing a differently-dimensioned array. - Returns None on success and NULL on error. + Equivalent to :meth:`ndarray.resize` (*self*, *newshape*, *refcheck*). + This function only works on single-segment arrays. 
It changes the shape of + *self* inplace and will reallocate the memory for *self* if *newshape* has + a different total number of elements then the old shape. If reallocation is + necessary, then *self* must own its data, have *self* - ``>base==NULL``, + have *self* - ``>weakrefs==NULL``, and (unless refcheck is 0) not be + referenced by any other array. The fortran argument can be + :c:data:`NPY_ANYORDER`, :c:data:`NPY_CORDER`, or + :c:data:`NPY_FORTRANORDER`. It currently has no effect. Eventually it + could be used to determine how the resize operation should view the data + when constructing a differently-dimensioned array. Returns None on success + and NULL on error. .. c:function:: PyObject* PyArray_Transpose( \ PyArrayObject* self, PyArray_Dims* permute) diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 26b5a6e179e0..6dcc349dcd03 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -1175,8 +1175,7 @@ array_resize(PyArrayObject *self, PyObject *args, PyObject *kwds) Py_ssize_t size = PyTuple_Size(args); int refcheck = 1; PyArray_Dims newshape; - PyObject *ret, *obj; - + PyObject *obj; if (!NpyArg_ParseKeywords(kwds, "|i", kwlist, &refcheck)) { return NULL; @@ -1199,12 +1198,11 @@ array_resize(PyArrayObject *self, PyObject *args, PyObject *kwds) return NULL; } - ret = PyArray_Resize(self, &newshape, refcheck, NPY_ANYORDER); + int ret = PyArray_Resize_int(self, &newshape, refcheck); npy_free_cache_dim_obj(newshape); - if (ret == NULL) { + if (ret < 0) { return NULL; } - Py_DECREF(ret); Py_RETURN_NONE; } diff --git a/numpy/_core/src/multiarray/shape.c b/numpy/_core/src/multiarray/shape.c index 83de9f19a574..c98dba9abcb2 100644 --- a/numpy/_core/src/multiarray/shape.c +++ b/numpy/_core/src/multiarray/shape.c @@ -26,15 +26,8 @@ static int _attempt_nocopy_reshape(PyArrayObject *self, int newnd, const npy_intp *newdims, npy_intp *newstrides, int is_f_order); -/*NUMPY_API - * Resize (reallocate data). Only works if nothing else is referencing this - * array and it is contiguous. If refcheck is 0, then the reference count is - * not checked and assumed to be 1. You still must own this data and have no - * weak-references and no base object. - */ -NPY_NO_EXPORT PyObject * -PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, - NPY_ORDER NPY_UNUSED(order)) +NPY_NO_EXPORT int +PyArray_Resize_int(PyArrayObject *self, PyArray_Dims *newshape, int refcheck) { npy_intp oldnbytes, newnbytes; npy_intp oldsize, newsize; @@ -48,7 +41,7 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, if (!PyArray_ISONESEGMENT(self)) { PyErr_SetString(PyExc_ValueError, "resize only works on single-segment arrays"); - return NULL; + return -1; } /* Compute total size of old and new arrays. 
The new size might overflow */ @@ -62,10 +55,11 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, if (new_dimensions[k] < 0) { PyErr_SetString(PyExc_ValueError, "negative dimensions not allowed"); - return NULL; + return -1; } if (npy_mul_sizes_with_overflow(&newsize, newsize, new_dimensions[k])) { - return PyErr_NoMemory(); + PyErr_NoMemory(); + return -1; } } @@ -73,14 +67,15 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, elsize = PyArray_ITEMSIZE(self); oldnbytes = oldsize * elsize; if (npy_mul_sizes_with_overflow(&newnbytes, newsize, elsize)) { - return PyErr_NoMemory(); + PyErr_NoMemory(); + return -1; } if (oldnbytes != newnbytes) { if (!(PyArray_FLAGS(self) & NPY_ARRAY_OWNDATA)) { PyErr_SetString(PyExc_ValueError, "cannot resize this array: it does not own its data"); - return NULL; + return -1; } if (PyArray_BASE(self) != NULL @@ -89,14 +84,14 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, "cannot resize an array that " "references or is referenced\n" "by another array in this way. Use the np.resize function."); - return NULL; + return -1; } if (refcheck) { #ifdef PYPY_VERSION PyErr_SetString(PyExc_ValueError, "cannot resize an array with refcheck=True on PyPy.\n" "Use the np.resize function or refcheck=False"); - return NULL; + return -1; #else refcnt = Py_REFCNT(self); #endif /* PYPY_VERSION */ @@ -110,7 +105,7 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, "references or is referenced\n" "by another array in this way.\n" "Use the np.resize function or refcheck=False"); - return NULL; + return -1; } /* Reallocate space if needed - allocating 0 is forbidden */ @@ -119,24 +114,24 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, /* This can happen if someone arbitrarily sets NPY_ARRAY_OWNDATA */ PyErr_SetString(PyExc_RuntimeError, "no memory handler found but OWNDATA flag set"); - return NULL; + return -1; } if (newnbytes < oldnbytes) { /* Clear now removed data (if dtype has references) */ if (PyArray_ClearBuffer( PyArray_DESCR(self), PyArray_BYTES(self) + newnbytes, elsize, oldsize-newsize, PyArray_ISALIGNED(self)) < 0) { - return NULL; + return -1; } } new_data = PyDataMem_UserRENEW(PyArray_DATA(self), - newnbytes == 0 ? elsize : newnbytes, + newnbytes == 0 ? 
1 : newnbytes, handler); if (new_data == NULL) { PyErr_SetString(PyExc_MemoryError, "cannot allocate memory for array"); - return NULL; + return -1; } ((PyArrayObject_fields *)self)->data = new_data; @@ -148,7 +143,7 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, int aligned = PyArray_ISALIGNED(self); if (PyArray_ZeroContiguousBuffer(PyArray_DESCR(self), data, stride, size, aligned) < 0) { - return NULL; + return -1; } } } @@ -162,7 +157,7 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, if (dimptr == NULL) { PyErr_SetString(PyExc_MemoryError, "cannot allocate memory for array"); - return NULL; + return -1; } ((PyArrayObject_fields *)self)->dimensions = dimptr; ((PyArrayObject_fields *)self)->strides = dimptr + new_nd; @@ -175,11 +170,27 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, memmove(PyArray_STRIDES(self), new_strides, new_nd*sizeof(npy_intp)); } else { - PyDimMem_FREE(((PyArrayObject_fields *)self)->dimensions); + npy_free_cache_dim_array(self); ((PyArrayObject_fields *)self)->nd = 0; ((PyArrayObject_fields *)self)->dimensions = NULL; ((PyArrayObject_fields *)self)->strides = NULL; } + return 0; +} + +/*NUMPY_API + * Resize (reallocate data). Only works if nothing else is referencing this + * array and it is contiguous. If refcheck is 0, then the reference count is + * not checked and assumed to be 1. You still must own this data and have no + * weak-references and no base object. + */ +NPY_NO_EXPORT PyObject * +PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, + NPY_ORDER NPY_UNUSED(order)) +{ + if (PyArray_Resize_int(self, newshape, refcheck) < 0) { + return NULL; + } Py_RETURN_NONE; } diff --git a/numpy/_core/src/multiarray/shape.h b/numpy/_core/src/multiarray/shape.h index a9b91feb0b4a..5e87116f08df 100644 --- a/numpy/_core/src/multiarray/shape.h +++ b/numpy/_core/src/multiarray/shape.h @@ -3,6 +3,12 @@ #include "conversion_utils.h" +/* + * Internal version of PyArray_Resize that returns -1 on error, 0 otherwise. + */ +NPY_NO_EXPORT int +PyArray_Resize_int(PyArrayObject *self, PyArray_Dims *newshape, int refcheck); + /* * Creates a sorted stride perm matching the KEEPORDER behavior * of the NpyIter object. Because this operates based on multiple From c70bf1e7f45d53dce81970ca565a7235a5b445db Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Fri, 10 Oct 2025 16:28:29 -0400 Subject: [PATCH 0849/1018] MAINT: Use PyArray_Resize to grow arrays in FromFile and FromIter Found this while trying to implement storage on the instance, but factored it out since it seems better form to just have one place in numpy where arrays are resized inplace (which ideally would not happen at all, but here is quite logical). For PyArray_FromFile, there is little benefit beyond that, but in PyArray_FromIter, it allows removing quite a bit of code that duplicated what PyArray_Resize does. 
--- numpy/_core/src/multiarray/ctors.c | 92 ++++++------------- numpy/_core/src/multiarray/textreading/rows.c | 27 ++---- 2 files changed, 36 insertions(+), 83 deletions(-) diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 0a66b7fa3be1..3a43e1bd983b 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -2390,7 +2390,7 @@ PyArray_FromInterface(PyObject *origin) goto fail; } if (use_scalar_assign) { - /* + /* * NOTE(seberg): I honestly doubt anyone is using this scalar path and we * could probably just deprecate (or just remove it in a 3.0 version). */ @@ -3732,21 +3732,20 @@ PyArray_FromFile(FILE *fp, PyArray_Descr *dtype, npy_intp num, char *sep) } if (((npy_intp) nread) < num) { /* - * Realloc memory for smaller number of elements, use original dtype - * which may have include a subarray (and is used for `nread`). + * Resize array to smaller number of elements. Note that original + * dtype may have included a subarray, so we may not be 1-d. */ - const size_t nsize = PyArray_MAX(nread,1) * dtype->elsize; - char *tmp; - - /* The handler is always valid */ - if((tmp = PyDataMem_UserRENEW(PyArray_DATA(ret), nsize, - PyArray_HANDLER(ret))) == NULL) { + npy_intp dims[NPY_MAXDIMS]; + dims[0] = (npy_intp)nread; + for (int i = 1; i < PyArray_NDIM(ret); i++) { + dims[i] = PyArray_DIMS(ret)[i]; + } + PyArray_Dims new_dims = {dims, PyArray_NDIM(ret)}; + if (PyArray_Resize_int(ret, &new_dims, 0) < 0) { Py_DECREF(dtype); Py_DECREF(ret); - return PyErr_NoMemory(); + return NULL; } - ((PyArrayObject_fields *)ret)->data = tmp; - PyArray_DIMS(ret)[0] = nread; } Py_DECREF(dtype); return (PyObject *)ret; @@ -3998,6 +3997,7 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) PyObject *iter = NULL; PyArrayObject *ret = NULL; npy_intp i, elsize, elcount; + npy_intp dims[NPY_MAXDIMS]; if (dtype == NULL) { return NULL; @@ -4037,6 +4037,9 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) if (ret == NULL) { goto done; } + /* set up for possible resizing */ + memcpy(dims, PyArray_DIMS(ret), PyArray_NDIM(ret)*sizeof(npy_intp)); + PyArray_Dims new_dims = {dims, PyArray_NDIM(ret)}; char *item = PyArray_BYTES(ret); for (i = 0; i < count || count == -1; i++, item += elsize) { @@ -4044,14 +4047,12 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) if (value == NULL) { if (PyErr_Occurred()) { /* Fetching next item failed perhaps due to exhausting iterator */ - goto done; + goto fail; } break; } if (NPY_UNLIKELY(i >= elcount) && elsize != 0) { - char *new_data = NULL; - npy_intp nbytes; /* Grow PyArray_DATA(ret): this is similar for the strategy for PyListObject, but we use @@ -4060,31 +4061,18 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) be suitable to reuse here. */ elcount = (i >> 1) + (i < 4 ? 
4 : 2) + i; - if (!npy_mul_sizes_with_overflow(&nbytes, elcount, elsize)) { - /* The handler is always valid */ - new_data = PyDataMem_UserRENEW( - PyArray_BYTES(ret), nbytes, PyArray_HANDLER(ret)); - } - if (new_data == NULL) { - PyErr_SetString(PyExc_MemoryError, - "cannot allocate array memory"); + dims[0] = elcount; + if (PyArray_Resize_int(ret, &new_dims, 0) < 0) { Py_DECREF(value); - goto done; + goto fail; } - ((PyArrayObject_fields *)ret)->data = new_data; - /* resize array for cleanup: */ - PyArray_DIMS(ret)[0] = elcount; /* Reset `item` pointer to point into realloc'd chunk */ - item = new_data + i * elsize; - if (PyDataType_FLAGCHK(dtype, NPY_NEEDS_INIT)) { - /* Initialize new chunk: */ - memset(item, 0, nbytes - i * elsize); - } + item = ((char *)PyArray_DATA(ret)) + i * elsize; } if (PyArray_Pack(dtype, item, value) < 0) { Py_DECREF(value); - goto done; + goto fail; } Py_DECREF(value); } @@ -4093,46 +4081,22 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) PyErr_Format(PyExc_ValueError, "iterator too short: Expected %zd but iterator had only %zd " "items.", (Py_ssize_t)count, (Py_ssize_t)i); - goto done; + goto fail; } /* * Realloc the data so that don't keep extra memory tied up and fix * the arrays first dimension (there could be more than one). */ - if (i == 0 || elsize == 0) { - /* The size cannot be zero for realloc. */ + dims[0] = i; + if (!PyArray_Resize_int(ret, &new_dims, 0)) { + goto done; } - else { - /* Resize array to actual final size (it may be too large) */ - /* The handler is always valid */ - char *new_data = PyDataMem_UserRENEW( - PyArray_DATA(ret), i * elsize, PyArray_HANDLER(ret)); - - if (new_data == NULL) { - PyErr_SetString(PyExc_MemoryError, - "cannot allocate array memory"); - goto done; - } - ((PyArrayObject_fields *)ret)->data = new_data; - if (count < 0) { - /* - * If the count was smaller than zero, the strides may be all 0 - * (even in the later dimensions for `count < 0`! - * Thus, fix all strides here again for C-contiguity. - */ - int oflags; - _array_fill_strides( - PyArray_STRIDES(ret), PyArray_DIMS(ret), PyArray_NDIM(ret), - PyArray_ITEMSIZE(ret), NPY_ARRAY_C_CONTIGUOUS, &oflags); - PyArray_STRIDES(ret)[0] = elsize; - assert(oflags & NPY_ARRAY_C_CONTIGUOUS); - } - } - PyArray_DIMS(ret)[0] = i; + fail: + Py_CLEAR(ret); - done: + done: Py_XDECREF(iter); Py_XDECREF(dtype); if (PyErr_Occurred()) { diff --git a/numpy/_core/src/multiarray/textreading/rows.c b/numpy/_core/src/multiarray/textreading/rows.c index 2fa1fb948553..90eea23b22e2 100644 --- a/numpy/_core/src/multiarray/textreading/rows.c +++ b/numpy/_core/src/multiarray/textreading/rows.c @@ -118,7 +118,7 @@ create_conv_funcs( if (error) { goto error; } - + return conv_funcs; error: @@ -175,8 +175,6 @@ read_rows(stream *s, npy_intp row_size = out_descr->elsize; PyObject **conv_funcs = NULL; - bool needs_init = PyDataType_FLAGCHK(out_descr, NPY_NEEDS_INIT); - int ndim = homogeneous ? 2 : 1; npy_intp result_shape[2] = {0, 1}; @@ -311,9 +309,6 @@ read_rows(stream *s, if (data_array == NULL) { goto error; } - if (needs_init) { - memset(PyArray_BYTES(data_array), 0, PyArray_NBYTES(data_array)); - } } else { assert(max_rows >=0); @@ -354,22 +349,16 @@ read_rows(stream *s, "providing a maximum number of rows to read may help."); goto error; } - - char *new_data = PyDataMem_UserRENEW( - PyArray_BYTES(data_array), alloc_size ? alloc_size : 1, - PyArray_HANDLER(data_array)); - if (new_data == NULL) { - PyErr_NoMemory(); + /* + * Resize the array. 
+ */ + result_shape[0] = new_rows; + PyArray_Dims new_dims = {dims, 2}; + if (PyArray_Resize(data_array, &new_dims, 0, 0) == NULL) { goto error; } - /* Replace the arrays data since it may have changed */ - ((PyArrayObject_fields *)data_array)->data = new_data; - ((PyArrayObject_fields *)data_array)->dimensions[0] = new_rows; - data_ptr = new_data + row_count * row_size; + data_ptr = PyArray_DATA(data_array) + row_count * row_size; data_allocated_rows = new_rows; - if (needs_init) { - memset(data_ptr, '\0', (new_rows - row_count) * row_size); - } } for (Py_ssize_t i = 0; i < actual_num_fields; ++i) { From 9b8027f0c9e6cc3132d54f133d1bf925435e5250 Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Sat, 11 Oct 2025 22:07:32 -0400 Subject: [PATCH 0850/1018] MAINT: use PyArray_Resize inside text reader. --- numpy/_core/src/multiarray/textreading/rows.c | 22 +++++++------------ 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/numpy/_core/src/multiarray/textreading/rows.c b/numpy/_core/src/multiarray/textreading/rows.c index 90eea23b22e2..7f3797b58928 100644 --- a/numpy/_core/src/multiarray/textreading/rows.c +++ b/numpy/_core/src/multiarray/textreading/rows.c @@ -8,6 +8,7 @@ #include "numpy/npy_3kcompat.h" #include "npy_pycompat.h" #include "alloc.h" +#include "shape.h" // For PyArray_Resize_int #include #include @@ -177,6 +178,7 @@ read_rows(stream *s, int ndim = homogeneous ? 2 : 1; npy_intp result_shape[2] = {0, 1}; + PyArray_Dims new_dims = {result_shape, ndim}; /* for resizing */ bool data_array_allocated = data_array == NULL; /* Make sure we own `data_array` for the purpose of error handling */ @@ -353,11 +355,10 @@ read_rows(stream *s, * Resize the array. */ result_shape[0] = new_rows; - PyArray_Dims new_dims = {dims, 2}; - if (PyArray_Resize(data_array, &new_dims, 0, 0) == NULL) { + if (PyArray_Resize_int(data_array, &new_dims, 0) < 0) { goto error; } - data_ptr = PyArray_DATA(data_array) + row_count * row_size; + data_ptr = (char *)PyArray_DATA(data_array) + row_count * row_size; data_allocated_rows = new_rows; } @@ -463,20 +464,13 @@ read_rows(stream *s, /* * Note that if there is no data, `data_array` may still be NULL and - * row_count is 0. In that case, always realloc just in case. + * row_count is 0. In that case, always resize just in case. */ if (data_array_allocated && data_allocated_rows != row_count) { - size_t size = row_count * row_size; - char *new_data = PyDataMem_UserRENEW( - PyArray_BYTES(data_array), size ? size : 1, - PyArray_HANDLER(data_array)); - if (new_data == NULL) { - Py_DECREF(data_array); - PyErr_NoMemory(); - return NULL; + result_shape[0] = row_count; + if (PyArray_Resize_int(data_array, &new_dims, 0) < 0) { + goto error; } - ((PyArrayObject_fields *)data_array)->data = new_data; - ((PyArrayObject_fields *)data_array)->dimensions[0] = row_count; } /* From fb22f55e6fae12836eab6ec7e34b7c1d8999155e Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Wed, 12 Nov 2025 19:07:11 -0800 Subject: [PATCH 0851/1018] DOC: an Mercurial -> a Mercurial [skip actions][skip azp][skip cirrus] --- numpy/distutils/misc_util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py index 09145e1ddf52..ca7bcf0fbdd0 100644 --- a/numpy/distutils/misc_util.py +++ b/numpy/distutils/misc_util.py @@ -2069,7 +2069,7 @@ def make_hg_version_py(self, delete=True): If __hg_version__.py existed before, nothing is done. 
This is intended for working with source directories that are - in an Mercurial repository. + in a Mercurial repository. """ target = njoin(self.local_path, '__hg_version__.py') revision = self._get_hg_revision(self.local_path) From ae098dd478faa191eac81b42c968a5549962a450 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 13 Nov 2025 11:36:16 -0700 Subject: [PATCH 0852/1018] MAINT: Bump pypa/cibuildwheel from 3.2.1 to 3.3.0 (#30209) Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 3.2.1 to 3.3.0. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/9c00cb4f6b517705a3794b22395aedc36257242c...63fd63b352a9a8bdcc24791c9dbee952ee9a8abc) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-version: 3.3.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 159f6b719fde..c765855825a3 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -34,7 +34,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@9c00cb4f6b517705a3794b22395aedc36257242c # v3.2.1 + - uses: pypa/cibuildwheel@63fd63b352a9a8bdcc24791c9dbee952ee9a8abc # v3.3.0 env: CIBW_PLATFORM: pyodide CIBW_BUILD: cp312-* diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 8049b79edef0..1ceefc2e540d 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -94,7 +94,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@9c00cb4f6b517705a3794b22395aedc36257242c # v3.2.1 + uses: pypa/cibuildwheel@63fd63b352a9a8bdcc24791c9dbee952ee9a8abc # v3.3.0 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From fabf1844667c81344ec0d0ab4958cde9cb9a845c Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 13 Nov 2025 19:55:45 +0100 Subject: [PATCH 0853/1018] DOC: Release notes for the runtime signature changes (#30208) --- doc/release/upcoming_changes/30208.highlight.rst | 2 ++ doc/release/upcoming_changes/30208.improvement.rst | 12 ++++++++++++ 2 files changed, 14 insertions(+) create mode 100644 doc/release/upcoming_changes/30208.highlight.rst create mode 100644 doc/release/upcoming_changes/30208.improvement.rst diff --git a/doc/release/upcoming_changes/30208.highlight.rst b/doc/release/upcoming_changes/30208.highlight.rst new file mode 100644 index 000000000000..c13c46c056cc --- /dev/null +++ b/doc/release/upcoming_changes/30208.highlight.rst @@ -0,0 +1,2 @@ +* Runtime signature introspection support has been significantly improved. See the + corresponding improvement note for details. 
diff --git a/doc/release/upcoming_changes/30208.improvement.rst b/doc/release/upcoming_changes/30208.improvement.rst new file mode 100644 index 000000000000..065e91eca33d --- /dev/null +++ b/doc/release/upcoming_changes/30208.improvement.rst @@ -0,0 +1,12 @@ +Runtime signature introspection support has been significantly improved +----------------------------------------------------------------------- +Many NumPy functions, classes, and methods that previously raised ``ValueError`` when passed +to ``inspect.signature()`` now return meaningful signatures. This improves support for runtime type +checking, IDE autocomplete, documentation generation, and runtime introspection capabilities across +the NumPy API. + +Over three hundred classes and functions have been updated in total, including, but not limited to, +core classes such as `ndarray`, `generic`, `dtype`, `ufunc`, `broadcast`, `nditer`, etc., +most methods of `ndarray` and scalar types, array constructor functions (`array`, `empty`, +`arange`, `fromiter`, etc.), and many other commonly used functions, including `dot`, `concat`, +`where`, `bincount`, `can_cast`, and numerous others. From 7c8625e6d56f6938529346b6d24dc34a7d9f5446 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 13 Nov 2025 20:24:56 +0100 Subject: [PATCH 0854/1018] Move test to Python --- numpy/__init__.py | 17 ++++++ numpy/_core/src/common/blas_utils.c | 52 +++---------------- numpy/_core/src/common/blas_utils.h | 9 ++-- numpy/_core/src/multiarray/multiarraymodule.c | 23 ++++---- numpy/testing/_private/utils.py | 2 +- 5 files changed, 41 insertions(+), 62 deletions(-) diff --git a/numpy/__init__.py b/numpy/__init__.py index a0178b211258..ef7c1ed7678a 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -870,6 +870,23 @@ def _mac_os_check(): del w del _mac_os_check + def blas_fpe_check(): + # Check if BLAS adds spurious FPEs, mostly seen on M4 arms with Accelerate. + with errstate(all='raise'): + x = ones((20, 20)) + try: + x @ x + except FloatingPointError: + res = _core._multiarray_umath._blas_supports_fpe(False) + if res: # res was not modified (hardcoded to True for now) + warnings.warn( + "Spurious warnings given by blas but suppression not " + "set up on this platform. Please open a NumPy issue.", + UserWarning, stacklevel=2) + + blas_fpe_check() + del blas_fpe_check + def hugepage_setup(): """ We usually use madvise hugepages support, but on some old kernels it diff --git a/numpy/_core/src/common/blas_utils.c b/numpy/_core/src/common/blas_utils.c index 365289067b88..cbf8e0dc05c5 100644 --- a/numpy/_core/src/common/blas_utils.c +++ b/numpy/_core/src/common/blas_utils.c @@ -1,5 +1,3 @@ -#include - #include "numpy/npy_math.h" // npy_get_floatstatus_barrier #include "numpy/numpyconfig.h" // NPY_VISIBILITY_HIDDEN #include "blas_utils.h" @@ -17,46 +15,7 @@ /* * Static variable to cache runtime check of BLAS FPE support. */ - static bool blas_supports_fpe = true; - -/* - * ARM Scalable Matrix Extension (SME) raises all floating-point error flags - * when it's used regardless of values or operations. As a consequence, - * when SME is used, all FPE state is lost and special handling is needed. - * - * For NumPy, SME is not currently used directly, but can be used via - * BLAS / LAPACK libraries. This function does a runtime check for whether - * BLAS / LAPACK can use SME and special handling around FPE is required. 
- * - * This may be an Accelerate bug (at least OpenBLAS consider it that way) - * but when we find an ARM system with SVE we do a runtime check for whether - * FPEs are spuriously given. - */ -static inline int -set_BLAS_causes_spurious_FPEs(void) -{ - // These are all small, so just work on stack to not worry about error - // handling. - double *x = PyMem_Calloc(20 * 20 * 3, sizeof(double)); - if (x == NULL) { - PyErr_NoMemory(); - return -1; - } - double *y = x + 20 * 20; - double *res = y + 20 * 20; - - npy_clear_floatstatus_barrier((char *)x); - - CBLAS_FUNC(cblas_dgemm)( - CblasRowMajor, CblasNoTrans, CblasNoTrans, 20, 20, 20, 1., - x, 20, y, 20, 0., res, 20); - PyMem_Free(x); - - int fpe_status = npy_get_floatstatus_barrier((char *)x); - // Entries were all zero, so we shouldn't see any FPEs - blas_supports_fpe = fpe_status != 0; - return 0; -} +static bool blas_supports_fpe = true; #endif // NPY_BLAS_CHECK_FPE_SUPPORT @@ -71,13 +30,14 @@ npy_blas_supports_fpe(void) #endif } -NPY_VISIBILITY_HIDDEN int -npy_blas_init(void) +NPY_VISIBILITY_HIDDEN bool +npy_set_blas_supports_fpe(bool value) { #if NPY_BLAS_CHECK_FPE_SUPPORT - return set_BLAS_causes_spurious_FPEs(); + blas_supports_fpe = (bool)value; + return blas_supports_fpe; #endif - return 0; + return true; // ignore input not set up on this platform } NPY_VISIBILITY_HIDDEN int diff --git a/numpy/_core/src/common/blas_utils.h b/numpy/_core/src/common/blas_utils.h index 3f5bb735281d..840c4940a87e 100644 --- a/numpy/_core/src/common/blas_utils.h +++ b/numpy/_core/src/common/blas_utils.h @@ -15,11 +15,6 @@ #define NPY_BLAS_CHECK_FPE_SUPPORT 0 #endif -/* Initialize BLAS environment, if needed - */ -NPY_VISIBILITY_HIDDEN int -npy_blas_init(void); - /* Runtime check if BLAS supports floating-point errors. * true - BLAS supports FPE and one can rely on them to indicate errors * false - BLAS does not support FPE. Special handling needed for FPE state @@ -27,6 +22,10 @@ npy_blas_init(void); NPY_VISIBILITY_HIDDEN bool npy_blas_supports_fpe(void); +/* Allow setting the BLAS FPE flag from Python.*/ +NPY_VISIBILITY_HIDDEN bool +npy_set_blas_supports_fpe(bool value); + /* If BLAS supports FPE, exactly the same as npy_get_floatstatus_barrier(). * Otherwise, we can't rely on FPE state and need special handling. 
*/ diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 44ac8a678bbb..73ef0760d979 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -4429,7 +4429,6 @@ _populate_finfo_constants(PyObject *NPY_UNUSED(self), PyObject *args) } - static PyObject * _set_numpy_warn_if_no_mem_policy(PyObject *NPY_UNUSED(self), PyObject *arg) { @@ -4449,12 +4448,20 @@ _set_numpy_warn_if_no_mem_policy(PyObject *NPY_UNUSED(self), PyObject *arg) static PyObject * -_blas_supports_fpe(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { - if (npy_blas_supports_fpe()) { - Py_RETURN_TRUE; +_blas_supports_fpe(PyObject *NPY_UNUSED(self), PyObject *arg) { + if (arg == Py_None) { + return PyBool_FromLong(npy_blas_supports_fpe()); + } + else if (arg == Py_True) { + return PyBool_FromLong(npy_set_blas_supports_fpe(true)); + } + else if (arg == Py_False) { + return PyBool_FromLong(npy_set_blas_supports_fpe(false)); } else { - Py_RETURN_FALSE; + PyErr_SetString(PyExc_TypeError, + "BLAS FPE support must be None, True, or False"); + return NULL; } } @@ -4700,7 +4707,7 @@ static struct PyMethodDef array_module_methods[] = { {"_set_madvise_hugepage", (PyCFunction)_set_madvise_hugepage, METH_O, NULL}, {"_blas_supports_fpe", (PyCFunction)_blas_supports_fpe, - METH_NOARGS, NULL}, + METH_O, "BLAS FPE support pass None, True, or False and returns new value"}, {"_reload_guard", (PyCFunction)_reload_guard, METH_NOARGS, "Give a warning on reload and big warning in sub-interpreters."}, @@ -4917,10 +4924,6 @@ _multiarray_umath_exec(PyObject *m) { return -1; } - if (npy_blas_init() < 0) { - return -1; - } - #if defined(MS_WIN64) && defined(__GNUC__) PyErr_WarnEx(PyExc_Warning, "Numpy built with MINGW-W64 on Windows 64 bits is experimental, " \ diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 967d67e14a13..ed928a5ec7b4 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -90,7 +90,7 @@ class KnownFailureException(Exception): IS_PYPY = sys.implementation.name == 'pypy' IS_PYSTON = hasattr(sys, "pyston_version_info") HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None and not IS_PYSTON -BLAS_SUPPORTS_FPE = np._core._multiarray_umath._blas_supports_fpe() +BLAS_SUPPORTS_FPE = np._core._multiarray_umath._blas_supports_fpe(None) HAS_LAPACK64 = numpy.linalg._umath_linalg._ilp64 From 96fc947fdecddf33bb21fec32ac94adfb561d03f Mon Sep 17 00:00:00 2001 From: mattip Date: Fri, 14 Nov 2025 10:43:24 +0200 Subject: [PATCH 0855/1018] DOC: fix links for newly rebuilt numpy-tutorials site [skip azp][skip cirrus][skip actions] --- doc/source/user/how-to-how-to.rst | 2 +- doc/source/user/quickstart.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/user/how-to-how-to.rst b/doc/source/user/how-to-how-to.rst index 91a5c2afe09b..07640f881141 100644 --- a/doc/source/user/how-to-how-to.rst +++ b/doc/source/user/how-to-how-to.rst @@ -105,7 +105,7 @@ deep dives intended to give understanding rather than immediate assistance, and *References*, which give complete, authoritative data on some concrete part of NumPy (like its API) but aren't obligated to paint a broader picture. 
-For more on tutorials, see :doc:`numpy-tutorials:content/tutorial-style-guide` +For more on tutorials, see :doc:`numpy-tutorials:tutorial-style-guide` ****************************************************************************** Is this page an example of a how-to? diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst index 3f97f005898b..ef4a0467c706 100644 --- a/doc/source/user/quickstart.rst +++ b/doc/source/user/quickstart.rst @@ -1479,4 +1479,4 @@ Further reading - `SciPy Tutorial `__ - `SciPy Lecture Notes `__ - A `matlab, R, IDL, NumPy/SciPy dictionary `__ -- :doc:`tutorial-svd ` +- :doc:`tutorial-svd ` From 7e17ceb3384facaad9cc7d3ba93ec995e9c9219a Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 14 Nov 2025 10:58:26 +0100 Subject: [PATCH 0856/1018] Report FPE ignoring in matmul in `show_runtime()` --- numpy/lib/_utils_impl.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/numpy/lib/_utils_impl.py b/numpy/lib/_utils_impl.py index 2e1ee23d7d58..164aa4ee3d8c 100644 --- a/numpy/lib/_utils_impl.py +++ b/numpy/lib/_utils_impl.py @@ -61,6 +61,11 @@ def show_runtime(): "not_found": features_not_found } }) + config_found.append({ + "ignore_floating_point_errors_in_matmul": + not np._core._multiarray_umath._blas_supports_fpe(None), + }) + try: from threadpoolctl import threadpool_info config_found.extend(threadpool_info()) From c3e5a2039558e7bc2c17982dc5bdbba2611f7326 Mon Sep 17 00:00:00 2001 From: yasiribmcon <107119183+yasiribmcon@users.noreply.github.com> Date: Fri, 14 Nov 2025 22:41:26 +0530 Subject: [PATCH 0857/1018] BUG: Fix build on s390x with clang (#30214) For clang 20+ (_VEC_ >= 10305), fallback check relied on macro definitions for vector operations, but clang provides regular functions instead. This caused the fallback to redefine them as macros, overriding the actual vector operation logic. Signed-off-by: Yasir Ashfaq --- numpy/_core/src/common/simd/vec/utils.h | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/numpy/_core/src/common/simd/vec/utils.h b/numpy/_core/src/common/simd/vec/utils.h index f8b28cfebd8c..7e4a7b8de8fa 100644 --- a/numpy/_core/src/common/simd/vec/utils.h +++ b/numpy/_core/src/common/simd/vec/utils.h @@ -25,14 +25,16 @@ #ifndef vec_neg #define vec_neg(a) (-(a)) #endif - #ifndef vec_and - #define vec_and(a, b) ((a) & (b)) // Vector AND - #endif - #ifndef vec_or - #define vec_or(a, b) ((a) | (b)) // Vector OR - #endif - #ifndef vec_xor - #define vec_xor(a, b) ((a) ^ (b)) // Vector XOR + #if !(defined(__clang__) && __VEC__ >= 10305) + #ifndef vec_and + #define vec_and(a, b) ((a) & (b)) // Vector AND + #endif + #ifndef vec_or + #define vec_or(a, b) ((a) | (b)) // Vector OR + #endif + #ifndef vec_xor + #define vec_xor(a, b) ((a) ^ (b)) // Vector XOR + #endif #endif #ifndef vec_sl #define vec_sl(a, b) ((a) << (b)) // Vector Shift Left From c83c349d20ca009743b72d1cf4618a89d3c7dfdb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 14 Nov 2025 11:36:11 -0700 Subject: [PATCH 0858/1018] MAINT: Bump github/codeql-action from 4.31.2 to 4.31.3 (#30219) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 4.31.2 to 4.31.3. 
- [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/0499de31b99561a6d14a36a5f662c2a54f91beee...014f16e7ab1402f30e7c3329d33797e7948572db) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.31.3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 45b88b851312..1546cdc10adf 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2 + uses: github/codeql-action/init@014f16e7ab1402f30e7c3329d33797e7948572db # v4.31.3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2 + uses: github/codeql-action/autobuild@014f16e7ab1402f30e7c3329d33797e7948572db # v4.31.3 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2 + uses: github/codeql-action/analyze@014f16e7ab1402f30e7c3329d33797e7948572db # v4.31.3 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 0a65e932b12d..227f2f3788b0 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@0499de31b99561a6d14a36a5f662c2a54f91beee # v2.1.27 + uses: github/codeql-action/upload-sarif@014f16e7ab1402f30e7c3329d33797e7948572db # v2.1.27 with: sarif_file: results.sarif From 73af2357f38fdef50b1e2919fa2bfa3c012edce2 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 15 Nov 2025 04:58:51 +0100 Subject: [PATCH 0859/1018] STY: fix ``ma.MaskedAArray.tolist`` docstring indentation --- numpy/ma/core.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index a374410209f5..9b27d90852e7 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -6653,14 +6653,14 @@ def filled(self, fill_value=None): def tolist(self): """ - Transforms the mvoid object into a tuple. + Transforms the mvoid object into a tuple. - Masked fields are replaced by None. + Masked fields are replaced by None. 
- Returns - ------- - returned_tuple - Tuple of fields + Returns + ------- + returned_tuple + Tuple of fields """ _mask = self._mask if _mask is nomask: From 106a7c7051989c4416ef871f7159c8dbec7649a3 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 15 Nov 2025 06:29:12 +0100 Subject: [PATCH 0860/1018] TYP: stub ``ma.core.get_masked_subclass`` --- numpy/ma/core.pyi | 3 +++ 1 file changed, 3 insertions(+) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 15e6b58762b4..31d66fff0ab8 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -574,6 +574,9 @@ def fix_invalid( fill_value: _ScalarLike_co | None = None, ) -> _MaskedArray[Incomplete]: ... +# +def get_masked_subclass(*arrays: object) -> type[MaskedArray]: ... + # @overload def getdata(a: np.ndarray[_ShapeT, _DTypeT], subok: bool = True) -> np.ndarray[_ShapeT, _DTypeT]: ... From cc0f02c7346e3dcc7fdd1395ab0c3f2e5dc9a05b Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Sat, 15 Nov 2025 18:49:12 +1100 Subject: [PATCH 0861/1018] CI: fixes https://github.com/numpy/numpy/security/code-scanning/364 --- .github/workflows/emscripten.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index c765855825a3..0d1936b55b60 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -10,6 +10,9 @@ on: - '**.md' - '**.rst' +permissions: + contents: read # to fetch code (actions/checkout) + env: FORCE_COLOR: 3 @@ -20,8 +23,6 @@ concurrency: jobs: build-wasm-emscripten: - permissions: - contents: read # to fetch code (actions/checkout) name: Pyodide test runs-on: ubuntu-22.04 # To enable this workflow on a fork, comment out: From 467897cd5eee996f8552c7d5ab10823242350954 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sun, 16 Nov 2025 07:06:25 +0100 Subject: [PATCH 0862/1018] Update numpy/_core/src/common/blas_utils.h Co-authored-by: Matti Picus --- numpy/_core/src/common/blas_utils.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/common/blas_utils.h b/numpy/_core/src/common/blas_utils.h index 840c4940a87e..79d1e5ce274c 100644 --- a/numpy/_core/src/common/blas_utils.h +++ b/numpy/_core/src/common/blas_utils.h @@ -9,7 +9,7 @@ * quite clear. * This checks always on all ARM (it is a small check overall). */ -#if defined(__aarch64__) && defined(HAVE_CBLAS) +#if defined(__APPLE__) && defined(__aarch64__) && defined(HAVE_CBLAS) #define NPY_BLAS_CHECK_FPE_SUPPORT 1 #else #define NPY_BLAS_CHECK_FPE_SUPPORT 0 From 42d1a0d6832e6cf52420da71cee133584d0bc049 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 16 Nov 2025 12:33:25 +0100 Subject: [PATCH 0863/1018] MAINT: undo change to `fromstring` text signature for 2.4.0 This reverts a very small part of gh-30147, because it broken Pythran (see pythran#2368). While a new Pythran 0.18.1 release is available, that doesn't help for the SciPy 1.15.x releases which allow numpy<2.5 and pythran<0.18, see https://github.com/scipy/scipy/blob/maintenance/1.15.x/pyproject.toml#L30-L41 So introducing this signature change only for 2.5.0 is much better. In addition, doing so doesn't leave us only a very narrow window with compatible NumPy and Pythran releases which would otherwise bite distro packagers as well. 
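For context, a minimal sketch of the call the kept signature describes (an
illustration, not part of the patch; only the text mode with an explicit
``sep`` is shown, since the binary mode of ``fromstring`` is deprecated):

    import numpy as np

    # dtype=float is the documented default that the 2.4.0 docstring keeps;
    # sep=" " selects the text mode, parsing whitespace-separated numbers.
    a = np.fromstring("1 2 3", dtype=float, sep=" ")
    print(a)  # [1. 2. 3.]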
--- numpy/_core/_add_newdocs.py | 4 +--- numpy/_core/tests/test_multiarray.py | 1 - 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index eded752b2721..6b937389defe 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -1482,11 +1482,9 @@ """) +# Signature can be updated for 2.5.0 release, see gh-30235 for details add_newdoc('numpy._core.multiarray', 'fromstring', """ - fromstring(string, dtype=None, count=-1, *, sep, like=None) - -- - fromstring(string, dtype=float, count=-1, *, sep, like=None) A new 1-D array initialized from text data in a string. diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 051a81d99d1f..2cba4eb3ec75 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -10982,7 +10982,6 @@ def test_c_func_dispatcher_signature(self, func): (np.fromfile, ("file", "dtype", "count", "sep", "offset", "like")), (np.fromiter, ("iter", "dtype", "count", "like")), (np.frompyfunc, ("func", "nin", "nout", "kwargs")), - (np.fromstring, ("string", "dtype", "count", "sep", "like")), (np.nested_iters, ( "op", "axes", "flags", "op_flags", "op_dtypes", "order", "casting", "buffersize", From e468d88310a65c16ba9181b06462f78400e80257 Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Mon, 17 Nov 2025 00:17:33 +1100 Subject: [PATCH 0864/1018] CI: remove (mainly windows) jobs from Azure pipelines (#30222) --- .github/workflows/linux.yml | 4 +++ .github/workflows/windows.yml | 62 +++++++++++++++++++++++++++++++++-- azure-pipelines.yml | 52 +++++------------------------ 3 files changed, 73 insertions(+), 45 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 74edb50d9273..bbca13925342 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -53,6 +53,10 @@ jobs: BASE_REF: ${{ github.base_ref }} run: python tools/linter.py + - name: Check Python.h is first file included + run: | + python tools/check_python_h_first.py + smoke_test: # To enable this job on a fork, comment out: diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 96c175c07725..3302e054d438 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -19,7 +19,7 @@ permissions: jobs: clangcl_python64bit_openblas32: - name: x86-64, LP64 OpenBLAS + name: Clang-cl, x86-64, fast, openblas32 runs-on: windows-2022 # To enable this job on a fork, comment out: if: github.repository == 'numpy/numpy' @@ -66,8 +66,10 @@ jobs: run: | spin test -- --timeout=600 --durations=10 + + #======================================================================================= msvc_python32bit_no_openblas: - name: MSVC, ${{ matrix.architecture }} Python , no BLAS + name: MSVC, ${{ matrix.architecture }}, fast, no BLAS runs-on: ${{ matrix.os }} strategy: fail-fast: false @@ -117,3 +119,59 @@ jobs: run: | cd tools python -m pytest --pyargs numpy -m "not slow" -n2 --timeout=600 --durations=10 + + #======================================================================================= + msvc_python64bit_openblas: + name: MSVC, x86_64, ${{ matrix.TEST_MODE }}, openblas${{ matrix.BLAS }} + runs-on: windows-2022 + strategy: + fail-fast: false + matrix: + include: + - BLAS: 64 + TEST_MODE: full + pyver: '3.14' + - BLAS: 32 + TEST_MODE: fast + pyver: '3.11' + + # To enable this job on a fork, comment out: + if: github.repository == 'numpy/numpy' + steps: + - name: Checkout + uses: 
actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + + - name: Setup Python + uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + with: + python-version: ${{ matrix.pyver }} + + - name: pkg-config + run: | + choco install -y --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite + + - name: Dependencies + run: | + python -m pip install -r requirements/test_requirements.txt + + - name: Build and install + run: | + pip install -r requirements/ci_requirements.txt + spin config-openblas --with-scipy-openblas=${{ matrix.BLAS }} + $env:PKG_CONFIG_PATH="$pwd/.openblas" + python -m pip install . -v -Csetup-args="--vsenv" -Csetup-args="-Dallow-noblas=false" + + - name: Run test suite ${{ matrix.TEST_MODE }} + run: | + cd tools + # Get a gfortran onto the path for f2py tests + $env:PATH = "c:\\rtools45\\x86_64-w64-mingw32.static.posix\\bin;$env:PATH" + If ( "${{ matrix.TEST_MODE }}" -eq "full" ) { + python -m pytest --pyargs numpy -rsx -n2 --durations=10 + } else { + python -m pytest --pyargs numpy -m "not slow" -n2 --timeout=600 --durations=10 -rsx + } diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 89dee2267352..11dab7f42c6b 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -37,29 +37,6 @@ stages: dependsOn: Check jobs: - - job: Lint - condition: and(succeeded(), eq(variables['Build.Reason'], 'PullRequest')) - pool: - vmImage: 'ubuntu-22.04' - steps: - - task: UsePythonVersion@0 - inputs: - versionSpec: '3.11' - addToPath: true - architecture: 'x64' - - script: >- - python -m pip install -r requirements/linter_requirements.txt - displayName: 'Install tools' - # pip 21.1 emits a pile of garbage messages to annoy users :) - # failOnStderr: true - - script: | - python tools/linter.py - displayName: 'Run Lint Checks' - failOnStderr: true - - script: | - python tools/check_python_h_first.py - displayName: 'Check Python.h is first file included' - - job: Linux_Python_311_32bit_full_with_asserts pool: vmImage: 'ubuntu-22.04' @@ -75,24 +52,13 @@ stages: /bin/bash -xc "source /numpy/tools/ci/run_32_bit_linux_docker.sh" displayName: 'Run 32-bit manylinux2014 Docker Build / Tests' - - job: Windows - timeoutInMinutes: 120 - pool: - vmImage: 'windows-2022' - strategy: - maxParallel: 3 - matrix: - Python311-64bit-fast: - PYTHON_VERSION: '3.11' - PYTHON_ARCH: 'x64' - TEST_MODE: fast - BITS: 64 - Python312-64bit-full: - PYTHON_VERSION: '3.12' - PYTHON_ARCH: 'x64' - TEST_MODE: full - BITS: 64 - _USE_BLAS_ILP64: '1' +# - job: Windows +# timeoutInMinutes: 120 +# pool: +# vmImage: 'windows-2022' +# strategy: +# maxParallel: 3 +# matrix: # TODO pypy: uncomment when pypy3.11 comes out # PyPy311-64bit-fast: # PYTHON_VERSION: 'pypy3.11' @@ -101,5 +67,5 @@ stages: # BITS: 64 # _USE_BLAS_ILP64: '1' - steps: - - template: azure-steps-windows.yml +# steps: +# - template: azure-steps-windows.yml From 13ce032f519ef88be58a9523d5a84112765603cb Mon Sep 17 00:00:00 2001 From: "Christine P. 
Chai" Date: Sun, 16 Nov 2025 14:41:21 -0800 Subject: [PATCH 0865/1018] DOC: Correct an equation error in numpy.random.Generator.pareto --- numpy/random/_generator.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index bace56b59422..e19c3919b49e 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -2083,7 +2083,7 @@ cdef class Generator: ----- The probability density for the Pareto II distribution is - .. math:: p(x) = \\frac{a}{{x+1}^{a+1}} , x \ge 0 + .. math:: p(x) = \\frac{a}{{(x+1)}^{a+1}} , x \ge 0 where :math:`a > 0` is the shape. From 952843f526556f59707a2f690963d79d45c276be Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Sun, 16 Nov 2025 14:43:13 -0800 Subject: [PATCH 0866/1018] DOC: Add labels to save CI resources [skip actions][skip azp][skip cirrus] From 54b69fb71a8d4bbad53f9d55fcd49f779cb3fbf3 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 16 Nov 2025 16:57:21 -0700 Subject: [PATCH 0867/1018] MAINT: Update main after 2.3.5 release. - Forwardport 2.3.5-changelog.rst - Forwardport 2.3.5-notes.rst - Update .mailmap - Update release.rst [skip azp] [skip cirrus] [skip actions] --- .mailmap | 2 ++ doc/changelog/2.3.5-changelog.rst | 40 ++++++++++++++++++++++++ doc/source/release.rst | 1 + doc/source/release/2.3.5-notes.rst | 50 ++++++++++++++++++++++++++++++ 4 files changed, 93 insertions(+) create mode 100644 doc/changelog/2.3.5-changelog.rst create mode 100644 doc/source/release/2.3.5-notes.rst diff --git a/.mailmap b/.mailmap index ddadaf22a62f..18cfb272618f 100644 --- a/.mailmap +++ b/.mailmap @@ -752,6 +752,8 @@ Yash Pethe Yash Pethe <83630710+patient74@users.noreply.github.com> Yashasvi Misra Yashasvi Misra <54177363+yashasvimisra2798@users.noreply.github.com> +Yasir Ashfaq +Yasir Ashfaq <107119183+yasiribmcon@users.noreply.github.com> Yichi Zhang Yogesh Raisinghani <46864533+raisinghanii@users.noreply.github.com> Younes Sandi diff --git a/doc/changelog/2.3.5-changelog.rst b/doc/changelog/2.3.5-changelog.rst new file mode 100644 index 000000000000..123e1e9d0453 --- /dev/null +++ b/doc/changelog/2.3.5-changelog.rst @@ -0,0 +1,40 @@ + +Contributors +============ + +A total of 10 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Aaron Kollasch + +* Charles Harris +* Joren Hammudoglu +* Matti Picus +* Nathan Goldbaum +* Rafael Laboissière + +* Sayed Awad +* Sebastian Berg +* Warren Weckesser +* Yasir Ashfaq + + +Pull requests merged +==================== + +A total of 16 pull requests were merged for this release. + +* `#29979 `__: MAINT: Prepare 2.3.x for further development +* `#30026 `__: SIMD, BLD: Backport FPMATH mode on x86-32 and filter successor... +* `#30029 `__: MAINT: Backport write_release.py +* `#30041 `__: TYP: Various typing updates +* `#30059 `__: BUG: Fix np.strings.slice if stop=None or start and stop >= len... +* `#30063 `__: BUG: Fix np.strings.slice if start > stop +* `#30076 `__: BUG: avoid negating INT_MIN in PyArray_Round implementation (#30071) +* `#30090 `__: BUG: Fix resize when it contains references (#29970) +* `#30129 `__: BLD: update scipy-openblas, use -Dpkg_config_path (#30049) +* `#30130 `__: BUG: Avoid compilation error of wrapper file generated with SWIG... +* `#30157 `__: BLD: use scipy-openblas 0.3.30.7 (#30132) +* `#30158 `__: DOC: Remove nonexistent ``order`` parameter docs of ``ma.asanyarray``... +* `#30185 `__: BUG: Fix check of PyMem_Calloc return value. 
(#30176) +* `#30217 `__: DOC: fix links for newly rebuilt numpy-tutorials site +* `#30218 `__: BUG: Fix build on s390x with clang (#30214) +* `#30237 `__: ENH: Make FPE blas check a runtime check for all apple arm systems + diff --git a/doc/source/release.rst b/doc/source/release.rst index 6f1c1a22f11d..190d0512faeb 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -6,6 +6,7 @@ Release notes :maxdepth: 2 2.4.0 + 2.3.5 2.3.4 2.3.3 2.3.2 diff --git a/doc/source/release/2.3.5-notes.rst b/doc/source/release/2.3.5-notes.rst new file mode 100644 index 000000000000..8013ef468055 --- /dev/null +++ b/doc/source/release/2.3.5-notes.rst @@ -0,0 +1,50 @@ +.. currentmodule:: numpy + +========================= +NumPy 2.3.5 Release Notes +========================= + +The NumPy 2.3.5 release is a patch release split between a number of maintenance +updates and bug fixes. This release supports Python versions 3.11-3.14. + + +Contributors +============ + +A total of 10 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Aaron Kollasch + +* Charles Harris +* Joren Hammudoglu +* Matti Picus +* Nathan Goldbaum +* Rafael Laboissière + +* Sayed Awad +* Sebastian Berg +* Warren Weckesser +* Yasir Ashfaq + + + +Pull requests merged +==================== + +A total of 16 pull requests were merged for this release. + +* `#29979 `__: MAINT: Prepare 2.3.x for further development +* `#30026 `__: SIMD, BLD: Backport FPMATH mode on x86-32 and filter successor... +* `#30029 `__: MAINT: Backport write_release.py +* `#30041 `__: TYP: Various typing updates +* `#30059 `__: BUG: Fix np.strings.slice if stop=None or start and stop >= len... +* `#30063 `__: BUG: Fix np.strings.slice if start > stop +* `#30076 `__: BUG: avoid negating INT_MIN in PyArray_Round implementation (#30071) +* `#30090 `__: BUG: Fix resize when it contains references (#29970) +* `#30129 `__: BLD: update scipy-openblas, use -Dpkg_config_path (#30049) +* `#30130 `__: BUG: Avoid compilation error of wrapper file generated with SWIG... +* `#30157 `__: BLD: use scipy-openblas 0.3.30.7 (#30132) +* `#30158 `__: DOC: Remove nonexistent ``order`` parameter docs of ``ma.asanyarray``... +* `#30185 `__: BUG: Fix check of PyMem_Calloc return value. (#30176) +* `#30217 `__: DOC: fix links for newly rebuilt numpy-tutorials site +* `#30218 `__: BUG: Fix build on s390x with clang (#30214) +* `#30237 `__: ENH: Make FPE blas check a runtime check for all apple arm systems + From 697def6914200d61a5b64f1d7310488fcf082d78 Mon Sep 17 00:00:00 2001 From: Johnnie Gray Date: Sun, 16 Nov 2025 18:39:51 -0800 Subject: [PATCH 0868/1018] fix parsing --- numpy/_core/einsumfunc.py | 61 +++++++++++++++++++-------------------- 1 file changed, 30 insertions(+), 31 deletions(-) diff --git a/numpy/_core/einsumfunc.py b/numpy/_core/einsumfunc.py index dd5c65bc8307..2e38e0cb52c6 100644 --- a/numpy/_core/einsumfunc.py +++ b/numpy/_core/einsumfunc.py @@ -988,15 +988,11 @@ def _parse_eq_to_batch_matmul(eq, shape_a, shape_b): if len(b_term) != len(shape_b): raise ValueError(f"Term '{b_term}' does not match shape {shape_b}.") - bat_inds = [] # appears on A, B, O - con_inds = [] # appears on A, B, . 
- a_keep = [] # appears on A, ., O - b_keep = [] # appears on ., B, O sizes = {} singletons = set() - # parse left term - seen = set() + # parse left term to unique indices with size > 1 + left = dict() for ix, d in zip(a_term, shape_a): if d == 1: # everything (including broadcasting) works nicely if simply ignore @@ -1004,47 +1000,50 @@ def _parse_eq_to_batch_matmul(eq, shape_a, shape_b): # and thus should be reintroduced later singletons.add(ix) continue - - # set or check size if sizes.setdefault(ix, d) != d: + # set or check size raise ValueError( f"Index {ix} has mismatched sizes {sizes[ix]} and {d}." ) + left[ix] = True - if ix in seen: - continue - seen.add(ix) - - if ix in b_term: - if ix in out: - bat_inds.append(ix) - else: - con_inds.append(ix) - elif ix in out: - a_keep.append(ix) - - # parse right term - seen.clear() + # parse right term to unique indices with size > 1 + right = dict() for ix, d in zip(b_term, shape_b): + # broadcast indices (size 1 on one input and size != 1 + # on the other) should not be treated as singletons if d == 1: - singletons.add(ix) + if ix not in left: + singletons.add(ix) continue - # broadcast indices don't appear as singletons in output singletons.discard(ix) - # set or check size if sizes.setdefault(ix, d) != d: + # set or check size raise ValueError( f"Index {ix} has mismatched sizes {sizes[ix]} and {d}." ) + right[ix] = True - if ix in seen: - continue - seen.add(ix) - - if ix not in a_term: + # now we classify the unique size > 1 indices only + bat_inds = [] # appears on A, B, O + con_inds = [] # appears on A, B, . + a_keep = [] # appears on A, ., O + b_keep = [] # appears on ., B, O + # other indices (appearing on A or B only) will + # be summed or traced out prior to the matmul + for ix in left: + if right.pop(ix, False): if ix in out: - b_keep.append(ix) + bat_inds.append(ix) + else: + con_inds.append(ix) + elif ix in out: + a_keep.append(ix) + # now only indices unique to right remain + for ix in right: + if ix in out: + b_keep.append(ix) if not con_inds: # contraction is pure multiplication, prepare inputs differently From b0aaffba0571c28427ad988a3df90404b3ce71bb Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Sun, 16 Nov 2025 18:48:58 -0800 Subject: [PATCH 0869/1018] Update _generator.pyx [skip actions][skip azp][skip cirrus] Co-authored-by: Joren Hammudoglu --- numpy/random/_generator.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index e19c3919b49e..ed9b6a3d12a8 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -2083,7 +2083,7 @@ cdef class Generator: ----- The probability density for the Pareto II distribution is - .. math:: p(x) = \\frac{a}{{(x+1)}^{a+1}} , x \ge 0 + .. math:: p(x) = \\frac{a}{(x+1)^{a+1}} , x \ge 0 where :math:`a > 0` is the shape. 
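A quick numerical sanity check of the corrected density (a sketch, assuming
shape ``a = 3.0``; ``Generator.pareto`` draws from the Pareto II / Lomax
distribution the docstring describes):

    import numpy as np

    rng = np.random.default_rng(0)
    a = 3.0
    x = rng.pareto(a, 1_000_000)

    # Compare a sample histogram against p(x) = a / (x + 1)**(a + 1).
    hist, edges = np.histogram(x, bins=50, range=(0.0, 5.0), density=True)
    mid = 0.5 * (edges[:-1] + edges[1:])
    print(np.max(np.abs(hist - a / (mid + 1.0) ** (a + 1.0))))

The difference stays small, up to binning error, sampling noise, and the
tail mass truncated by ``range=(0.0, 5.0)``.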
From 59ae764e3391226952a0d6d6bd4eef7da751316f Mon Sep 17 00:00:00 2001 From: Johnnie Gray Date: Sun, 16 Nov 2025 18:54:24 -0800 Subject: [PATCH 0870/1018] test_einsum, add singleton broadcasting tests --- numpy/_core/tests/test_einsum.py | 35 ++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/numpy/_core/tests/test_einsum.py b/numpy/_core/tests/test_einsum.py index 6403d47034d7..46660e59792b 100644 --- a/numpy/_core/tests/test_einsum.py +++ b/numpy/_core/tests/test_einsum.py @@ -1119,6 +1119,41 @@ def test_output_order(self): tmp = np.einsum('...ft,mf->...mt', d, c, order='a', optimize=opt) assert_(tmp.flags.c_contiguous) + def test_singleton_broadcasting(self): + eq = "ijp,ipq,ikq->ijk" + shapes = (3, 1, 1), (3, 1, 3), (1, 3, 3) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq = "jhcabhijaci,dfijejgh->fgje" + shapes = ( + (1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 1), + (3, 1, 3, 1, 1, 1, 1, 2), + ) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq = "baegffahgc,hdggeff->dhg" + shapes = ((2, 1, 4, 1, 1, 1, 1, 2, 1, 1), (1, 1, 1, 1, 4, 1, 1)) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq = "cehgbaifff,fhhdegih->cdghbi" + shapes = ((1, 1, 1, 1, 1, 1, 1, 1, 1, 1), (2, 1, 1, 2, 4, 1, 1, 1)) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq = "gah,cdbcghefg->ef" + shapes = ((2, 3, 1), (1, 3, 1, 1, 1, 2, 1, 4, 1)) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq ="'cacc,bcb->" + shapes = ((1, 1, 1, 1), (1, 4, 1)) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + class TestEinsumPath: def build_operands(self, string, size_dict=global_size_dict): From 72bce0bc18d6bb1796de5328ff1935d1e871ad60 Mon Sep 17 00:00:00 2001 From: Johnnie Gray Date: Sun, 16 Nov 2025 19:07:59 -0800 Subject: [PATCH 0871/1018] fix stray character --- numpy/_core/tests/test_einsum.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_einsum.py b/numpy/_core/tests/test_einsum.py index 46660e59792b..619db6f02897 100644 --- a/numpy/_core/tests/test_einsum.py +++ b/numpy/_core/tests/test_einsum.py @@ -1148,7 +1148,7 @@ def test_singleton_broadcasting(self): arrays = [np.random.rand(*shape) for shape in shapes] self.optimize_compare(eq, operands=arrays) - eq ="'cacc,bcb->" + eq = "cacc,bcb->" shapes = ((1, 1, 1, 1), (1, 4, 1)) arrays = [np.random.rand(*shape) for shape in shapes] self.optimize_compare(eq, operands=arrays) From 0a0bcdb4b3fc2bccd9724bc1f8431e20870e385c Mon Sep 17 00:00:00 2001 From: Johnnie Gray Date: Sun, 16 Nov 2025 21:28:38 -0800 Subject: [PATCH 0872/1018] address review comments --- numpy/_core/einsumfunc.py | 8 ++++---- numpy/_core/tests/test_einsum.py | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/numpy/_core/einsumfunc.py b/numpy/_core/einsumfunc.py index 2e38e0cb52c6..9461994f5795 100644 --- a/numpy/_core/einsumfunc.py +++ b/numpy/_core/einsumfunc.py @@ -992,7 +992,7 @@ def _parse_eq_to_batch_matmul(eq, shape_a, shape_b): singletons = set() # parse left term to unique indices with size > 1 - left = dict() + left = {} for ix, d in zip(a_term, shape_a): if d == 1: # everything (including broadcasting) works nicely if simply ignore @@ -1001,14 +1001,14 @@ def _parse_eq_to_batch_matmul(eq, 
shape_a, shape_b): singletons.add(ix) continue if sizes.setdefault(ix, d) != d: - # set or check size + # set and check size raise ValueError( f"Index {ix} has mismatched sizes {sizes[ix]} and {d}." ) left[ix] = True # parse right term to unique indices with size > 1 - right = dict() + right = {} for ix, d in zip(b_term, shape_b): # broadcast indices (size 1 on one input and size != 1 # on the other) should not be treated as singletons @@ -1019,7 +1019,7 @@ def _parse_eq_to_batch_matmul(eq, shape_a, shape_b): singletons.discard(ix) if sizes.setdefault(ix, d) != d: - # set or check size + # set and check size raise ValueError( f"Index {ix} has mismatched sizes {sizes[ix]} and {d}." ) diff --git a/numpy/_core/tests/test_einsum.py b/numpy/_core/tests/test_einsum.py index 619db6f02897..375ef03c1dd7 100644 --- a/numpy/_core/tests/test_einsum.py +++ b/numpy/_core/tests/test_einsum.py @@ -1121,7 +1121,7 @@ def test_output_order(self): def test_singleton_broadcasting(self): eq = "ijp,ipq,ikq->ijk" - shapes = (3, 1, 1), (3, 1, 3), (1, 3, 3) + shapes = ((3, 1, 1), (3, 1, 3), (1, 3, 3)) arrays = [np.random.rand(*shape) for shape in shapes] self.optimize_compare(eq, operands=arrays) From 02c8c3d4e8cf1987d8121e80828fda420b97ad1d Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Mon, 17 Nov 2025 21:04:36 +1100 Subject: [PATCH 0873/1018] CI: removes azure pipelines (#30232) --- .github/workflows/linux.yml | 44 ++++++++++++++++++ azure-pipelines.yml | 71 ----------------------------- azure-steps-windows.yml | 55 ---------------------- tools/ci/run_32_bit_linux_docker.sh | 14 ------ 4 files changed, 44 insertions(+), 140 deletions(-) delete mode 100644 azure-pipelines.yml delete mode 100644 azure-steps-windows.yml delete mode 100644 tools/ci/run_32_bit_linux_docker.sh diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index bbca13925342..5d1f29693155 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -366,3 +366,47 @@ jobs: rm -rf build-install ./vendored-meson/meson/meson.py install -C build --destdir ../build-install --tags=runtime,python-runtime,devel python tools/check_installed_files.py $(find ./build-install -path '*/site-packages/numpy') --no-tests + + + Linux_Python_311_32bit_full: + name: i686, cp311, full + needs: [smoke_test] + runs-on: ubuntu-latest + container: + # There are few options for i686 images at https://quay.io/organization/pypa, + # use the glibc2.28 one + image: quay.io/pypa/manylinux_2_28_i686 + + steps: + - name: Checkout and initialize submodules + # actions/checkout doesn't work in a container image + run: | + git config --global --add safe.directory $PWD + if [ $GITHUB_EVENT_NAME != pull_request ]; then + git clone --recursive --branch=$GITHUB_REF_NAME https://github.com/${GITHUB_REPOSITORY}.git $GITHUB_WORKSPACE + git reset --hard $GITHUB_SHA + else + git clone --recursive https://github.com/${GITHUB_REPOSITORY}.git $GITHUB_WORKSPACE + git fetch origin $GITHUB_REF:my_ref_name + git checkout $GITHUB_BASE_REF + git -c user.email="you@example.com" merge --no-commit my_ref_name + fi + git submodule update --init --recursive + + - name: build + run: | + python3.11 -m venv venv + source venv/bin/activate + pip install --upgrade pip + pip install -r requirements/ci32_requirements.txt + pip install -r requirements/test_requirements.txt + + spin config-openblas --with-scipy-openblas=32 + export PKG_CONFIG_PATH=$(pwd)/.openblas + python -m pip install . 
-v -Csetup-args="-Dallow-noblas=false" + + - name: test + run: | + source venv/bin/activate + cd tools + python -m pytest --pyargs numpy diff --git a/azure-pipelines.yml b/azure-pipelines.yml deleted file mode 100644 index 11dab7f42c6b..000000000000 --- a/azure-pipelines.yml +++ /dev/null @@ -1,71 +0,0 @@ -trigger: - # start a new build for every push - batch: False - branches: - include: - - main - - maintenance/* - - -pr: - branches: - include: - - '*' # must quote since "*" is a YAML reserved character; we want a string - - -stages: - -- stage: Check - jobs: - - job: Skip - pool: - vmImage: 'ubuntu-22.04' - variables: - DECODE_PERCENTS: 'false' - RET: 'true' - steps: - - bash: | - git_log=`git log --max-count=1 --skip=1 --pretty=format:"%B" | tr "\n" " "` - echo "##vso[task.setvariable variable=log]$git_log" - - bash: echo "##vso[task.setvariable variable=RET]false" - condition: or(contains(variables.log, '[skip azp]'), contains(variables.log, '[azp skip]'), contains(variables.log, '[skip ci]'), contains(variables.log, '[ci skip]')) - - bash: echo "##vso[task.setvariable variable=start_main;isOutput=true]$RET" - name: result - -- stage: ComprehensiveTests - condition: and(succeeded(), eq(dependencies.Check.outputs['Skip.result.start_main'], 'true')) - dependsOn: Check - jobs: - - - job: Linux_Python_311_32bit_full_with_asserts - pool: - vmImage: 'ubuntu-22.04' - steps: - - script: | - git submodule update --init - displayName: 'Fetch submodules' - - script: | - # There are few options for i686 images at https://quay.io/organization/pypa, - # use the glibc2.17 one (manylinux2014) - docker run -v $(pwd):/numpy -e CFLAGS="-msse2 -std=c99 -UNDEBUG" \ - -e F77=gfortran-5 -e F90=gfortran-5 quay.io/pypa/manylinux2014_i686 \ - /bin/bash -xc "source /numpy/tools/ci/run_32_bit_linux_docker.sh" - displayName: 'Run 32-bit manylinux2014 Docker Build / Tests' - -# - job: Windows -# timeoutInMinutes: 120 -# pool: -# vmImage: 'windows-2022' -# strategy: -# maxParallel: 3 -# matrix: -# TODO pypy: uncomment when pypy3.11 comes out -# PyPy311-64bit-fast: -# PYTHON_VERSION: 'pypy3.11' -# PYTHON_ARCH: 'x64' -# TEST_MODE: fast -# BITS: 64 -# _USE_BLAS_ILP64: '1' - -# steps: -# - template: azure-steps-windows.yml diff --git a/azure-steps-windows.yml b/azure-steps-windows.yml deleted file mode 100644 index 0baf374e1e3f..000000000000 --- a/azure-steps-windows.yml +++ /dev/null @@ -1,55 +0,0 @@ -steps: -- script: git submodule update --init - displayName: 'Fetch submodules' -- task: UsePythonVersion@0 - inputs: - versionSpec: $(PYTHON_VERSION) - addToPath: true - architecture: $(PYTHON_ARCH) - -- script: python -m pip install --upgrade pip wheel - displayName: 'Install tools' - -- script: python -m pip install -r requirements/test_requirements.txt - displayName: 'Install dependencies; some are optional to avoid test skips' - -- powershell: | - choco install -y --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite - displayName: 'Install utilities' - -- powershell: | - # Note: ensure the `pip install .` command remains the last one here, - # to avoid "green on failure" issues - If ( Test-Path env:DISABLE_BLAS ) { - python -m pip install . -v -Csetup-args="--vsenv" -Csetup-args="-Dblas=none" -Csetup-args="-Dlapack=none" -Csetup-args="-Dallow-noblas=true" - } - elseif ( Test-Path env:_USE_BLAS_ILP64 ) { - pip install -r requirements/ci_requirements.txt - spin config-openblas --with-scipy-openblas=64 - $env:PKG_CONFIG_PATH="$pwd/.openblas" - python -m pip install . 
-v -Csetup-args="--vsenv" - } else { - pip install -r requirements/ci_requirements.txt - spin config-openblas --with-scipy-openblas=32 - $env:PKG_CONFIG_PATH="$pwd/.openblas" - python -m pip install . -v -Csetup-args="--vsenv" - } - displayName: 'Build NumPy' - -- powershell: | - cd tools # avoid root dir to not pick up source tree - # Get a gfortran onto the path for f2py tests - $env:PATH = "c:\\rtools43\\x86_64-w64-mingw32.static.posix\\bin;$env:PATH" - If ( $env:TEST_MODE -eq "full" ) { - pytest --pyargs numpy -rsx --junitxml=junit/test-results.xml - } else { - pytest --pyargs numpy -m "not slow" -rsx --junitxml=junit/test-results.xml - } - displayName: 'Run NumPy Test Suite' - -- task: PublishTestResults@2 - condition: succeededOrFailed() - inputs: - testResultsFiles: '**/test-*.xml' - failTaskOnFailedTests: true - testRunTitle: 'Publish test results for Python $(PYTHON_VERSION) $(BITS)-bit $(TEST_MODE) Windows' diff --git a/tools/ci/run_32_bit_linux_docker.sh b/tools/ci/run_32_bit_linux_docker.sh deleted file mode 100644 index bb0aedf88fcf..000000000000 --- a/tools/ci/run_32_bit_linux_docker.sh +++ /dev/null @@ -1,14 +0,0 @@ -set -xe - -git config --global --add safe.directory /numpy -cd /numpy -/opt/python/cp311-cp311/bin/python -mvenv venv -source venv/bin/activate -pip install -r requirements/ci32_requirements.txt -python3 -m pip install -r requirements/test_requirements.txt -echo CFLAGS \$CFLAGS -spin config-openblas --with-scipy-openblas=32 -export PKG_CONFIG_PATH=/numpy/.openblas -python3 -m pip install . -cd tools -python3 -m pytest --pyargs numpy From e684ebbee9aa01649c5d913ad3527fa1f7200135 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 17 Nov 2025 10:59:47 +0100 Subject: [PATCH 0874/1018] BUG: Add missing `PyErr_Occurred()` check to fast-path We could side-step this check unless it's a legacy ufunc, but there is little point as this is barely measureable. Since someone would otherwise ask: No you can't move it into the legacy wrapping stuff because this has to happen at finalization when the GIL is held again. 
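From Python, the regression is visible through NumPy's internal test-only
extension module (a sketch mirroring the test added below; ``_umath_tests``
is built for NumPy's own test suite and is not public API):

    import numpy._core._umath_tests as ncu_tests

    # The inner loop of always_error_unary sets a Python exception while
    # reporting success; without the PyErr_Occurred() check, the scalar
    # fast path silently dropped that error.
    try:
        ncu_tests.always_error_unary(1.5)
    except RuntimeError as exc:
        print("exception propagated:", exc)  # How unexpected :)!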
--- numpy/_core/src/umath/_umath_tests.c.src | 9 +++++++++ numpy/_core/src/umath/ufunc_object.c | 4 ++++ numpy/_core/tests/test_umath.py | 9 +++++++++ 3 files changed, 22 insertions(+) diff --git a/numpy/_core/src/umath/_umath_tests.c.src b/numpy/_core/src/umath/_umath_tests.c.src index 845f51ebc94f..a1b64ecc0444 100644 --- a/numpy/_core/src/umath/_umath_tests.c.src +++ b/numpy/_core/src/umath/_umath_tests.c.src @@ -460,6 +460,15 @@ addUfuncs(PyObject *dictionary) { } PyDict_SetItemString(dictionary, "always_error", f); Py_DECREF(f); + f = PyUFunc_FromFuncAndData(always_error_functions, always_error_data, + always_error_signatures, 1, 1, 1, PyUFunc_None, "always_error_unary", + "simply, broken, ufunc that sets an error (but releases the GIL).", + 0); + if (f == NULL) { + return -1; + } + PyDict_SetItemString(dictionary, "always_error_unary", f); + Py_DECREF(f); f = PyUFunc_FromFuncAndDataAndSignature(always_error_functions, always_error_data, always_error_signatures, 1, 2, 1, PyUFunc_None, "always_error_gufunc", diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index a9f00f351459..6bf6832dfb9b 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -4379,6 +4379,10 @@ try_trivial_scalar_call( ret = strided_loop(&context, data, &n, strides, auxdata); NPY_AUXDATA_FREE(auxdata); if (ret == 0) { + if (PyErr_Occurred()) { + ret = -1; + goto bail; + } if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { // Check for any unmasked floating point errors (note: faster // than _check_ufunc_fperr as one doesn't need mask up front). diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index cd0ec7d04f69..f99441180182 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -4893,6 +4893,15 @@ def test_bad_legacy_ufunc_silent_errors(): ncu_tests.always_error.at(arr, [0, 1, 2], arr) +def test_bad_legacy_unary_ufunc_silent_errors(): + # Unary has a special scalar path right now, so test it explicitly. + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error_unary(np.arange(3).astype(np.float64)) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error_unary(1.5) + + @pytest.mark.parametrize('x1', [np.arange(3.0), [0.0, 1.0, 2.0]]) def test_bad_legacy_gufunc_silent_errors(x1): # Verify that an exception raised in a gufunc loop propagates correctly. From a50c9d1d3440a412729c2479159a19d2b214792e Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 17 Nov 2025 21:14:41 +0100 Subject: [PATCH 0875/1018] TYP: ``ravel``: less awkward return types (#30224) --- numpy/_core/fromnumeric.pyi | 9 +++------ numpy/typing/tests/data/reveal/fromnumeric.pyi | 2 +- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 29431efef532..2a9762240e3d 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -561,14 +561,11 @@ def ravel(a: str | _NestedSequence[str], order: _OrderKACF = "C") -> _Array1D[np @overload def ravel(a: bool | _NestedSequence[bool], order: _OrderKACF = "C") -> _Array1D[np.bool]: ... @overload -def ravel(a: int | _NestedSequence[int], order: _OrderKACF = "C") -> _Array1D[np.int_ | np.bool]: ... +def ravel(a: int | _NestedSequence[int], order: _OrderKACF = "C") -> _Array1D[np.int_ | Any]: ... 
@overload -def ravel(a: float | _NestedSequence[float], order: _OrderKACF = "C") -> _Array1D[np.float64 | np.int_ | np.bool]: ... +def ravel(a: float | _NestedSequence[float], order: _OrderKACF = "C") -> _Array1D[np.float64 | Any]: ... @overload -def ravel( - a: complex | _NestedSequence[complex], - order: _OrderKACF = "C", -) -> _Array1D[np.complex128 | np.float64 | np.int_ | np.bool]: ... +def ravel(a: complex | _NestedSequence[complex], order: _OrderKACF = "C") -> _Array1D[np.complex128 | Any]: ... @overload def ravel(a: ArrayLike, order: _OrderKACF = "C") -> np.ndarray[tuple[int], np.dtype]: ... diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index f6cc9b9fe0d7..58ea2c5f8732 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -127,7 +127,7 @@ assert_type(np.trace(AR_f4, out=AR_subclass, dtype=None), NDArraySubclass) assert_type(np.ravel(b), np.ndarray[tuple[int], np.dtype[np.bool]]) assert_type(np.ravel(f4), np.ndarray[tuple[int], np.dtype[np.float32]]) -assert_type(np.ravel(f), np.ndarray[tuple[int], np.dtype[np.float64 | np.int_ | np.bool]]) +assert_type(np.ravel(f), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) assert_type(np.ravel(AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) assert_type(np.ravel(AR_f4), np.ndarray[tuple[int], np.dtype[np.float32]]) From 25c22b43eda3c0808efcd897131c95fa53d410b2 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 17 Nov 2025 21:19:41 +0100 Subject: [PATCH 0876/1018] TYP, DEP: ``numpy.fix`` pending deprecation (#30168) * TYP, DEP: ``numpy.fix`` pending deprecation * DOC: release note for pending deprecation of ``np.fix`` --- .../upcoming_changes/30168.deprecation.rst | 5 ++++ numpy/lib/_ufunclike_impl.pyi | 25 +++++++------------ numpy/typing/tests/data/pass/ufunclike.py | 12 ++++----- numpy/typing/tests/data/reveal/ufunclike.pyi | 12 ++++----- 4 files changed, 26 insertions(+), 28 deletions(-) create mode 100644 doc/release/upcoming_changes/30168.deprecation.rst diff --git a/doc/release/upcoming_changes/30168.deprecation.rst b/doc/release/upcoming_changes/30168.deprecation.rst new file mode 100644 index 000000000000..81673397590d --- /dev/null +++ b/doc/release/upcoming_changes/30168.deprecation.rst @@ -0,0 +1,5 @@ +``np.fix`` is pending deprecation +--------------------------------- +The `numpy.fix` function will be deprecated in a future release. It is recommended to use +`numpy.trunc` instead, as it provides the same functionality of truncating decimal values to their +integer parts. Static type checkers might already report a warning for the use of `numpy.fix`. diff --git a/numpy/lib/_ufunclike_impl.pyi b/numpy/lib/_ufunclike_impl.pyi index d5c3151fb892..0f00767356e0 100644 --- a/numpy/lib/_ufunclike_impl.pyi +++ b/numpy/lib/_ufunclike_impl.pyi @@ -1,4 +1,5 @@ from typing import Any, TypeVar, overload +from typing_extensions import deprecated import numpy as np from numpy import floating, object_ @@ -14,25 +15,17 @@ __all__ = ["fix", "isneginf", "isposinf"] _ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) @overload -def fix( # type: ignore[misc] - x: _FloatLike_co, - out: None = None, -) -> floating: ... +@deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) +def fix(x: _FloatLike_co, out: None = None) -> floating: ... @overload -def fix( - x: _ArrayLikeFloat_co, - out: None = None, -) -> NDArray[floating]: ... 
+@deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) +def fix(x: _ArrayLikeFloat_co, out: None = None) -> NDArray[floating]: ... @overload -def fix( - x: _ArrayLikeObject_co, - out: None = None, -) -> NDArray[object_]: ... +@deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) +def fix(x: _ArrayLikeObject_co, out: None = None) -> NDArray[object_]: ... @overload -def fix( - x: _ArrayLikeFloat_co | _ArrayLikeObject_co, - out: _ArrayT, -) -> _ArrayT: ... +@deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) +def fix(x: _ArrayLikeFloat_co | _ArrayLikeObject_co, out: _ArrayT) -> _ArrayT: ... @overload def isposinf( # type: ignore[misc] diff --git a/numpy/typing/tests/data/pass/ufunclike.py b/numpy/typing/tests/data/pass/ufunclike.py index f69ec8eaf4ed..7e556d10bef7 100644 --- a/numpy/typing/tests/data/pass/ufunclike.py +++ b/numpy/typing/tests/data/pass/ufunclike.py @@ -32,12 +32,12 @@ def __array__(self, dtype: np.typing.DTypeLike | None = None, AR_LIKE_O = [Object(), Object(), Object()] AR_U: np.ndarray[Any, np.dtype[np.str_]] = np.zeros(3, dtype="U5") -np.fix(AR_LIKE_b) -np.fix(AR_LIKE_u) -np.fix(AR_LIKE_i) -np.fix(AR_LIKE_f) -np.fix(AR_LIKE_O) -np.fix(AR_LIKE_f, out=AR_U) +np.fix(AR_LIKE_b) # type: ignore[deprecated] +np.fix(AR_LIKE_u) # type: ignore[deprecated] +np.fix(AR_LIKE_i) # type: ignore[deprecated] +np.fix(AR_LIKE_f) # type: ignore[deprecated] +np.fix(AR_LIKE_O) # type: ignore[deprecated] +np.fix(AR_LIKE_f, out=AR_U) # type: ignore[deprecated] np.isposinf(AR_LIKE_b) np.isposinf(AR_LIKE_u) diff --git a/numpy/typing/tests/data/reveal/ufunclike.pyi b/numpy/typing/tests/data/reveal/ufunclike.pyi index aaae5e80e470..c679b82d2836 100644 --- a/numpy/typing/tests/data/reveal/ufunclike.pyi +++ b/numpy/typing/tests/data/reveal/ufunclike.pyi @@ -11,12 +11,12 @@ AR_LIKE_O: list[np.object_] AR_U: npt.NDArray[np.str_] -assert_type(np.fix(AR_LIKE_b), npt.NDArray[np.floating]) -assert_type(np.fix(AR_LIKE_u), npt.NDArray[np.floating]) -assert_type(np.fix(AR_LIKE_i), npt.NDArray[np.floating]) -assert_type(np.fix(AR_LIKE_f), npt.NDArray[np.floating]) -assert_type(np.fix(AR_LIKE_O), npt.NDArray[np.object_]) -assert_type(np.fix(AR_LIKE_f, out=AR_U), npt.NDArray[np.str_]) +assert_type(np.fix(AR_LIKE_b), npt.NDArray[np.floating]) # type: ignore[deprecated] +assert_type(np.fix(AR_LIKE_u), npt.NDArray[np.floating]) # type: ignore[deprecated] +assert_type(np.fix(AR_LIKE_i), npt.NDArray[np.floating]) # type: ignore[deprecated] +assert_type(np.fix(AR_LIKE_f), npt.NDArray[np.floating]) # type: ignore[deprecated] +assert_type(np.fix(AR_LIKE_O), npt.NDArray[np.object_]) # type: ignore[deprecated] +assert_type(np.fix(AR_LIKE_f, out=AR_U), npt.NDArray[np.str_]) # type: ignore[deprecated] assert_type(np.isposinf(AR_LIKE_b), npt.NDArray[np.bool]) assert_type(np.isposinf(AR_LIKE_u), npt.NDArray[np.bool]) From 6521abc70eca5a6543f864575a6ffed2c8a8dda0 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 17 Nov 2025 21:25:27 +0100 Subject: [PATCH 0877/1018] ENH, TYP: transparent ``ma.extras._fromnxfunction`` runtime signatures (#30229) * ENH: transparent ``ma.extras._fromnxfunction`` runtime signatures * TYP: stub the ``ma.extras._fromnxfunction`` functions * STY: appease ruff --- numpy/_core/shape_base.pyi | 9 +- numpy/lib/_shape_base_impl.pyi | 4 +- numpy/lib/_twodim_base_impl.pyi | 1 + numpy/ma/core.py | 14 --- numpy/ma/extras.py | 203 
+++++++++++------------------- numpy/ma/extras.pyi | 211 ++++++++++++++++++++++++++++---- numpy/ma/tests/test_extras.py | 13 ++ 7 files changed, 281 insertions(+), 174 deletions(-) diff --git a/numpy/_core/shape_base.pyi b/numpy/_core/shape_base.pyi index b4e8b473ed71..a0fbefce0b3c 100644 --- a/numpy/_core/shape_base.pyi +++ b/numpy/_core/shape_base.pyi @@ -22,6 +22,7 @@ _ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) ### +# keep in sync with `numpy.ma.extras.atleast_1d` @overload def atleast_1d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... @overload @@ -35,7 +36,7 @@ def atleast_1d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[A @overload def atleast_1d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... -# +# keep in sync with `numpy.ma.extras.atleast_2d` @overload def atleast_2d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... @overload @@ -49,7 +50,7 @@ def atleast_2d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[A @overload def atleast_2d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... -# +# keep in sync with `numpy.ma.extras.atleast_3d` @overload def atleast_3d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... @overload @@ -63,7 +64,7 @@ def atleast_3d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[A @overload def atleast_3d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... -# +# keep in sync with `numpy.ma.extras.vstack` @overload def vstack( tup: Sequence[_ArrayLike[_ScalarT]], @@ -86,6 +87,7 @@ def vstack( casting: _CastingKind = "same_kind" ) -> NDArray[Any]: ... +# keep in sync with `numpy.ma.extras.hstack` @overload def hstack( tup: Sequence[_ArrayLike[_ScalarT]], @@ -108,6 +110,7 @@ def hstack( casting: _CastingKind = "same_kind" ) -> NDArray[Any]: ... +# keep in sync with `numpy.ma.extras.stack` @overload def stack( arrays: Sequence[_ArrayLike[_ScalarT]], diff --git a/numpy/lib/_shape_base_impl.pyi b/numpy/lib/_shape_base_impl.pyi index f8ba31d0f774..352f57dd810a 100644 --- a/numpy/lib/_shape_base_impl.pyi +++ b/numpy/lib/_shape_base_impl.pyi @@ -132,12 +132,13 @@ def row_stack( casting: _CastingKind = "same_kind", ) -> NDArray[Any]: ... -# +# keep in sync with `numpy.ma.extras.column_stack` @overload def column_stack(tup: Sequence[_ArrayLike[_ScalarT]]) -> NDArray[_ScalarT]: ... @overload def column_stack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ... +# keep in sync with `numpy.ma.extras.dstack` @overload def dstack(tup: Sequence[_ArrayLike[_ScalarT]]) -> NDArray[_ScalarT]: ... @overload @@ -169,6 +170,7 @@ def split( axis: SupportsIndex = 0, ) -> list[NDArray[Any]]: ... +# keep in sync with `numpy.ma.extras.hsplit` @overload def hsplit( ary: _ArrayLike[_ScalarT], diff --git a/numpy/lib/_twodim_base_impl.pyi b/numpy/lib/_twodim_base_impl.pyi index 7396fa1b0370..cc1d6b6245a9 100644 --- a/numpy/lib/_twodim_base_impl.pyi +++ b/numpy/lib/_twodim_base_impl.pyi @@ -131,6 +131,7 @@ def diag(v: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... @overload def diag(v: ArrayLike, k: int = 0) -> NDArray[Any]: ... +# keep in sync with `numpy.ma.extras.diagflat` @overload def diagflat(v: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... 
@overload diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 9b27d90852e7..e0ead317d9f6 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -46,7 +46,6 @@ from numpy._core import multiarray as mu from numpy._core.numeric import normalize_axis_tuple from numpy._utils import set_module -from numpy._utils._inspect import formatargspec, getargspec __all__ = [ 'MAError', 'MaskError', 'MaskType', 'MaskedArray', 'abs', 'absolute', @@ -134,19 +133,6 @@ def doc_note(initialdoc, note): return ''.join(notesplit[:1] + [notedoc] + notesplit[1:]) -# TODO: remove/deprecate once `ma.extras._fromnxfunction.getdoc` no longer uses it -def get_object_signature(obj): - """ - Get the signature from obj - - """ - try: - sig = formatargspec(*getargspec(obj)) - except TypeError: - sig = '' - return sig - - ############################################################################### # Exceptions # ############################################################################### diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 6d5569e833dd..7387d4f9beb7 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -19,6 +19,7 @@ 'setdiff1d', 'setxor1d', 'stack', 'unique', 'union1d', 'vander', 'vstack', ] +import functools import itertools import warnings @@ -244,151 +245,93 @@ def masked_all_like(arr): #####-------------------------------------------------------------------------- #---- --- Standard functions --- #####-------------------------------------------------------------------------- -class _fromnxfunction: - """ - Defines a wrapper to adapt NumPy functions to masked arrays. - - - An instance of `_fromnxfunction` can be called with the same parameters - as the wrapped NumPy function. The docstring of `newfunc` is adapted from - the wrapped function as well, see `getdoc`. - This class should not be used directly. Instead, one of its extensions that - provides support for a specific type of input should be used. +def _fromnxfunction_function(_fromnxfunction): + """ + Decorator to wrap a "_fromnxfunction" function, wrapping a numpy function as a + masked array function, with proper docstring and name. Parameters ---------- - funcname : str - The name of the function to be adapted. The function should be - in the NumPy namespace (i.e. ``np.funcname``). + _fromnxfunction : ({params}) -> ndarray, {params}) -> masked_array + Wrapper function that calls the wrapped numpy function - """ + Returns + ------- + decorator : (f: ({params}) -> ndarray) -> ({params}) -> masked_array + Function that accepts a numpy function and returns a masked array function - def __init__(self, funcname): - self.__name__ = funcname - self.__qualname__ = funcname - self.__doc__ = self.getdoc() + """ + def decorator(npfunc, /): + def wrapper(*args, **kwargs): + return _fromnxfunction(npfunc, *args, **kwargs) - def getdoc(self): - """ - Retrieve the docstring and signature from the function. + functools.update_wrapper(wrapper, npfunc, assigned=("__name__", "__qualname__")) + wrapper.__doc__ = ma.doc_note( + npfunc.__doc__, + "The function is applied to both the ``_data`` and the ``_mask``, if any.", + ) + return wrapper - The ``__doc__`` attribute of the function is used as the docstring for - the new masked array version of the function. A note on application - of the function to the mask is appended. 
+ return decorator - Parameters - ---------- - None - """ - npfunc = getattr(np, self.__name__, None) - doc = getattr(npfunc, '__doc__', None) - if doc: - sig = ma.get_object_signature(npfunc) - doc = ma.doc_note(doc, "The function is applied to both the _data " - "and the _mask, if any.") - if sig: - sig = self.__name__ + sig + "\n\n" - return sig + doc - return +@_fromnxfunction_function +def _fromnxfunction_single(npfunc, a, /, *args, **kwargs): + """ + Wraps a NumPy function that can be called with a single array argument followed by + auxiliary args that are passed verbatim for both the data and mask calls. + """ + return masked_array( + data=npfunc(np.asarray(a), *args, **kwargs), + mask=npfunc(getmaskarray(a), *args, **kwargs), + ) - def __call__(self, *args, **params): - pass +@_fromnxfunction_function +def _fromnxfunction_seq(npfunc, arys, /, *args, **kwargs): + """ + Wraps a NumPy function that can be called with a single sequence of arrays followed + by auxiliary args that are passed verbatim for both the data and mask calls. + """ + return masked_array( + data=npfunc(tuple(np.asarray(a) for a in arys), *args, **kwargs), + mask=npfunc(tuple(getmaskarray(a) for a in arys), *args, **kwargs), + ) -class _fromnxfunction_single(_fromnxfunction): +@_fromnxfunction_function +def _fromnxfunction_allargs(npfunc, /, *arys, **kwargs): """ - A version of `_fromnxfunction` that is called with a single array - argument followed by auxiliary args that are passed verbatim for - both the data and mask calls. + Wraps a NumPy function that can be called with multiple array arguments. + All args are converted to arrays even if they are not so already. + This makes it possible to process scalars as 1-D arrays. + Only keyword arguments are passed through verbatim for the data and mask calls. + Arrays arguments are processed independently and the results are returned in a list. + If only one arg is present, the return value is just the processed array instead of + a list. """ - def __call__(self, x, *args, **params): - func = getattr(np, self.__name__) - if isinstance(x, ndarray): - _d = func(x.__array__(), *args, **params) - _m = func(getmaskarray(x), *args, **params) - return masked_array(_d, mask=_m) - else: - _d = func(np.asarray(x), *args, **params) - _m = func(getmaskarray(x), *args, **params) - return masked_array(_d, mask=_m) - - -class _fromnxfunction_seq(_fromnxfunction): - """ - A version of `_fromnxfunction` that is called with a single sequence - of arrays followed by auxiliary args that are passed verbatim for - both the data and mask calls. - """ - def __call__(self, x, *args, **params): - func = getattr(np, self.__name__) - _d = func(tuple(np.asarray(a) for a in x), *args, **params) - _m = func(tuple(getmaskarray(a) for a in x), *args, **params) - return masked_array(_d, mask=_m) - - -class _fromnxfunction_args(_fromnxfunction): - """ - A version of `_fromnxfunction` that is called with multiple array - arguments. The first non-array-like input marks the beginning of the - arguments that are passed verbatim for both the data and mask calls. - Array arguments are processed independently and the results are - returned in a list. If only one array is found, the return value is - just the processed array instead of a list. 
- """ - def __call__(self, *args, **params): - func = getattr(np, self.__name__) - arrays = [] - args = list(args) - while len(args) > 0 and issequence(args[0]): - arrays.append(args.pop(0)) - res = [] - for x in arrays: - _d = func(np.asarray(x), *args, **params) - _m = func(getmaskarray(x), *args, **params) - res.append(masked_array(_d, mask=_m)) - if len(arrays) == 1: - return res[0] - return res - - -class _fromnxfunction_allargs(_fromnxfunction): - """ - A version of `_fromnxfunction` that is called with multiple array - arguments. Similar to `_fromnxfunction_args` except that all args - are converted to arrays even if they are not so already. This makes - it possible to process scalars as 1-D arrays. Only keyword arguments - are passed through verbatim for the data and mask calls. Arrays - arguments are processed independently and the results are returned - in a list. If only one arg is present, the return value is just the - processed array instead of a list. - """ - def __call__(self, *args, **params): - func = getattr(np, self.__name__) - res = [] - for x in args: - _d = func(np.asarray(x), **params) - _m = func(getmaskarray(x), **params) - res.append(masked_array(_d, mask=_m)) - if len(args) == 1: - return res[0] - return res - - -atleast_1d = _fromnxfunction_allargs('atleast_1d') -atleast_2d = _fromnxfunction_allargs('atleast_2d') -atleast_3d = _fromnxfunction_allargs('atleast_3d') - -vstack = row_stack = _fromnxfunction_seq('vstack') -hstack = _fromnxfunction_seq('hstack') -column_stack = _fromnxfunction_seq('column_stack') -dstack = _fromnxfunction_seq('dstack') -stack = _fromnxfunction_seq('stack') - -hsplit = _fromnxfunction_single('hsplit') - -diagflat = _fromnxfunction_single('diagflat') + out = tuple( + masked_array( + data=npfunc(np.asarray(a), **kwargs), + mask=npfunc(getmaskarray(a), **kwargs), + ) + for a in arys + ) + return out[0] if len(out) == 1 else out + + +atleast_1d = _fromnxfunction_allargs(np.atleast_1d) +atleast_2d = _fromnxfunction_allargs(np.atleast_2d) +atleast_3d = _fromnxfunction_allargs(np.atleast_3d) + +vstack = row_stack = _fromnxfunction_seq(np.vstack) +hstack = _fromnxfunction_seq(np.hstack) +column_stack = _fromnxfunction_seq(np.column_stack) +dstack = _fromnxfunction_seq(np.dstack) +stack = _fromnxfunction_seq(np.stack) + +hsplit = _fromnxfunction_single(np.hsplit) +diagflat = _fromnxfunction_single(np.diagflat) #####-------------------------------------------------------------------------- diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index afd03300ff5f..70881bd15c8a 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -1,6 +1,17 @@ from _typeshed import Incomplete +from collections.abc import Sequence +from typing import SupportsIndex, TypeAlias, TypeVar, overload import numpy as np +from numpy import _CastingKind +from numpy._typing import ( + ArrayLike, + DTypeLike, + _AnyShape, + _ArrayLike, + _DTypeLike, + _ShapeLike, +) from numpy.lib._function_base_impl import average from numpy.lib._index_tricks_impl import AxisConcatenator @@ -55,39 +66,187 @@ __all__ = [ "vstack", ] -def count_masked(arr, axis=None): ... -def masked_all(shape, dtype=float): ... # noqa: PYI014 -def masked_all_like(arr): ... 
+_ScalarT = TypeVar("_ScalarT", bound=np.generic)
+_ScalarT1 = TypeVar("_ScalarT1", bound=np.generic)
+_ScalarT2 = TypeVar("_ScalarT2", bound=np.generic)
+_MArrayT = TypeVar("_MArrayT", bound=MaskedArray)
+
+_MArray: TypeAlias = MaskedArray[_AnyShape, np.dtype[_ScalarT]]
+
+###
+
+# keep in sync with `numpy._core.shape_base.atleast_1d`
+@overload
+def atleast_1d(a0: _ArrayLike[_ScalarT], /) -> _MArray[_ScalarT]: ...
+@overload
+def atleast_1d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[_MArray[_ScalarT1], _MArray[_ScalarT2]]: ...
+@overload
+def atleast_1d(
+    a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]
+) -> tuple[_MArray[_ScalarT], ...]: ...
+@overload
+def atleast_1d(a0: ArrayLike, /) -> _MArray[Incomplete]: ...
+@overload
+def atleast_1d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[_MArray[Incomplete], _MArray[Incomplete]]: ...
+@overload
+def atleast_1d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[_MArray[Incomplete], ...]: ...
+
+# keep in sync with `numpy._core.shape_base.atleast_2d`
+@overload
+def atleast_2d(a0: _ArrayLike[_ScalarT], /) -> _MArray[_ScalarT]: ...
+@overload
+def atleast_2d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[_MArray[_ScalarT1], _MArray[_ScalarT2]]: ...
+@overload
+def atleast_2d(
+    a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]
+) -> tuple[_MArray[_ScalarT], ...]: ...
+@overload
+def atleast_2d(a0: ArrayLike, /) -> _MArray[Incomplete]: ...
+@overload
+def atleast_2d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[_MArray[Incomplete], _MArray[Incomplete]]: ...
+@overload
+def atleast_2d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[_MArray[Incomplete], ...]: ...

-class _fromnxfunction:
-    __name__: Incomplete
-    __doc__: Incomplete
-    def __init__(self, funcname) -> None: ...
-    def getdoc(self): ...
-    def __call__(self, *args, **params): ...
+# keep in sync with `numpy._core.shape_base.atleast_3d`
+@overload
+def atleast_3d(a0: _ArrayLike[_ScalarT], /) -> _MArray[_ScalarT]: ...
+@overload
+def atleast_3d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[_MArray[_ScalarT1], _MArray[_ScalarT2]]: ...
+@overload
+def atleast_3d(
+    a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]
+) -> tuple[_MArray[_ScalarT], ...]: ...
+@overload
+def atleast_3d(a0: ArrayLike, /) -> _MArray[Incomplete]: ...
+@overload
+def atleast_3d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[_MArray[Incomplete], _MArray[Incomplete]]: ...
+@overload
+def atleast_3d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[_MArray[Incomplete], ...]: ...

-class _fromnxfunction_single(_fromnxfunction):
-    def __call__(self, x, *args, **params): ...
+# keep in sync with `numpy._core.shape_base.vstack`
+@overload
+def vstack(
+    tup: Sequence[_ArrayLike[_ScalarT]],
+    *,
+    dtype: None = None,
+    casting: _CastingKind = "same_kind"
+) -> _MArray[_ScalarT]: ...
+@overload
+def vstack(
+    tup: Sequence[ArrayLike],
+    *,
+    dtype: _DTypeLike[_ScalarT],
+    casting: _CastingKind = "same_kind"
+) -> _MArray[_ScalarT]: ...
+@overload
+def vstack(
+    tup: Sequence[ArrayLike],
+    *,
+    dtype: DTypeLike | None = None,
+    casting: _CastingKind = "same_kind"
+) -> _MArray[Incomplete]: ...

-class _fromnxfunction_seq(_fromnxfunction):
-    def __call__(self, x, *args, **params): ...
+row_stack = vstack

-class _fromnxfunction_allargs(_fromnxfunction):
-    def __call__(self, *args, **params): ...
+# keep in sync with `numpy._core.shape_base.hstack`
+@overload
+def hstack(
+    tup: Sequence[_ArrayLike[_ScalarT]],
+    *,
+    dtype: None = None,
+    casting: _CastingKind = "same_kind"
+) -> _MArray[_ScalarT]: ...
+@overload
+def hstack(
+    tup: Sequence[ArrayLike],
+    *,
+    dtype: _DTypeLike[_ScalarT],
+    casting: _CastingKind = "same_kind"
+) -> _MArray[_ScalarT]: ...
+@overload
+def hstack(
+    tup: Sequence[ArrayLike],
+    *,
+    dtype: DTypeLike | None = None,
+    casting: _CastingKind = "same_kind"
+) -> _MArray[Incomplete]: ...

-atleast_1d: _fromnxfunction_allargs
-atleast_2d: _fromnxfunction_allargs
-atleast_3d: _fromnxfunction_allargs
+# keep in sync with `numpy.lib._shape_base_impl.column_stack`
+@overload
+def column_stack(tup: Sequence[_ArrayLike[_ScalarT]]) -> _MArray[_ScalarT]: ...
+@overload
+def column_stack(tup: Sequence[ArrayLike]) -> _MArray[Incomplete]: ...

-vstack: _fromnxfunction_seq
-row_stack: _fromnxfunction_seq
-hstack: _fromnxfunction_seq
-column_stack: _fromnxfunction_seq
-dstack: _fromnxfunction_seq
-stack: _fromnxfunction_seq
+# keep in sync with `numpy.lib._shape_base_impl.dstack`
+@overload
+def dstack(tup: Sequence[_ArrayLike[_ScalarT]]) -> _MArray[_ScalarT]: ...
+@overload
+def dstack(tup: Sequence[ArrayLike]) -> _MArray[Incomplete]: ...

-hsplit: _fromnxfunction_single
-diagflat: _fromnxfunction_single
+# keep in sync with `numpy._core.shape_base.stack`
+@overload
+def stack(
+    arrays: Sequence[_ArrayLike[_ScalarT]],
+    axis: SupportsIndex = 0,
+    out: None = None,
+    *,
+    dtype: None = None,
+    casting: _CastingKind = "same_kind"
+) -> _MArray[_ScalarT]: ...
+@overload
+def stack(
+    arrays: Sequence[ArrayLike],
+    axis: SupportsIndex = 0,
+    out: None = None,
+    *,
+    dtype: _DTypeLike[_ScalarT],
+    casting: _CastingKind = "same_kind"
+) -> _MArray[_ScalarT]: ...
+@overload
+def stack(
+    arrays: Sequence[ArrayLike],
+    axis: SupportsIndex = 0,
+    out: None = None,
+    *,
+    dtype: DTypeLike | None = None,
+    casting: _CastingKind = "same_kind"
+) -> _MArray[Incomplete]: ...
+@overload
+def stack(
+    arrays: Sequence[ArrayLike],
+    axis: SupportsIndex,
+    out: _MArrayT,
+    *,
+    dtype: DTypeLike | None = None,
+    casting: _CastingKind = "same_kind",
+) -> _MArrayT: ...
+@overload
+def stack(
+    arrays: Sequence[ArrayLike],
+    axis: SupportsIndex = 0,
+    *,
+    out: _MArrayT,
+    dtype: DTypeLike | None = None,
+    casting: _CastingKind = "same_kind",
+) -> _MArrayT: ...
+
+# keep in sync with `numpy.lib._shape_base_impl.hsplit`
+@overload
+def hsplit(ary: _ArrayLike[_ScalarT], indices_or_sections: _ShapeLike) -> list[_MArray[_ScalarT]]: ...
+@overload
+def hsplit(ary: ArrayLike, indices_or_sections: _ShapeLike) -> list[_MArray[Incomplete]]: ...
+
+# keep in sync with `numpy.lib._twodim_base_impl.diagflat`
+@overload
+def diagflat(v: _ArrayLike[_ScalarT], k: int = 0) -> _MArray[_ScalarT]: ...
+@overload
+def diagflat(v: ArrayLike, k: int = 0) -> _MArray[Incomplete]: ...
+
+# TODO: everything below
+
+def count_masked(arr, axis=None): ...
+def masked_all(shape, dtype=float): ...  # noqa: PYI014
+def masked_all_like(arr): ...
 def apply_along_axis(func1d, axis, arr, *args, **kwargs): ...
 def apply_over_axes(func, a, axes): ...
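(For orientation, a minimal usage sketch of the wrapping behavior specified above —
illustrative only, and relying on nothing beyond the public `numpy.ma` API: the
wrapped NumPy function runs once on the data and once on the mask, and the two
results are recombined into a masked array.)

    import numpy as np
    import numpy.ma as ma

    a = ma.masked_array([1, 2, 3], mask=[False, True, False])
    b = ma.masked_array([4, 5, 6], mask=[True, False, False])

    # np.vstack is applied separately to the raw data and to the mask
    # arrays, so the per-element masks survive the stacking.
    out = ma.vstack([a, b])
    assert out.shape == (2, 3)
    assert out.mask.tolist() == [[False, True, False], [True, False, False]]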
diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index 07f0fbdd7f97..1993ffe3e90d 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -5,6 +5,7 @@ :contact: pierregm_at_uga_dot_edu """ +import inspect import itertools import pytest @@ -1811,6 +1812,18 @@ def test_shape_scalar(self): assert_equal(b.shape, (1, 1)) assert_equal(b.mask.shape, b.data.shape) + @pytest.mark.parametrize("fn", [atleast_1d, vstack, diagflat]) + def test_inspect_signature(self, fn): + name = fn.__name__ + assert getattr(np.ma, name) is fn + + assert fn.__module__ == "numpy.ma.extras" + + wrapped = getattr(np, fn.__name__) + sig_wrapped = inspect.signature(wrapped) + sig = inspect.signature(fn) + assert sig == sig_wrapped + class TestNDEnumerate: From f1b6878d9a48d2c1bb208a0be0b00b63ae977f53 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 17 Nov 2025 21:28:04 +0100 Subject: [PATCH 0878/1018] TYP: ``_core.numeric``: shape-typing and fixed overlapping overloads (#30233) --- numpy/_core/numeric.pyi | 525 ++++++++---------- .../tests/data/reveal/array_constructors.pyi | 20 +- numpy/typing/tests/data/reveal/numeric.pyi | 153 +++-- 3 files changed, 343 insertions(+), 355 deletions(-) diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index 48576c77fd56..9be6000f0a8a 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -1,11 +1,10 @@ +from _typeshed import Incomplete from builtins import bool as py_bool from collections.abc import Callable, Sequence from typing import ( Any, Final, Literal as L, - Never, - NoReturn, SupportsAbs, SupportsIndex, TypeAlias, @@ -36,14 +35,21 @@ from numpy._typing import ( _ArrayLikeComplex_co, _ArrayLikeFloat_co, _ArrayLikeInt_co, - _ArrayLikeObject_co, + _ArrayLikeNumber_co, _ArrayLikeTD64_co, - _ArrayLikeUInt_co, + _CDoubleCodes, + _Complex128Codes, + _DoubleCodes, _DTypeLike, + _DTypeLikeBool, + _Float64Codes, + _IntCodes, _NestedSequence, + _NumberLike_co, _ScalarLike_co, _Shape, _ShapeLike, + _SupportsArray, _SupportsArrayFunc, _SupportsDType, ) @@ -628,10 +634,12 @@ __all__ = [ _T = TypeVar("_T") _ScalarT = TypeVar("_ScalarT", bound=generic) +_NumberObjectT = TypeVar("_NumberObjectT", bound=number | object_) _NumericScalarT = TypeVar("_NumericScalarT", bound=number | timedelta64 | object_) _DTypeT = TypeVar("_DTypeT", bound=dtype) _ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) _ShapeT = TypeVar("_ShapeT", bound=_Shape) + _AnyShapeT = TypeVar( "_AnyShapeT", tuple[()], @@ -641,9 +649,42 @@ _AnyShapeT = TypeVar( tuple[int, int, int, int], tuple[int, ...], ) +_AnyNumericScalarT = TypeVar( + "_AnyNumericScalarT", + np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64, + np.float16, np.float32, np.float64, np.longdouble, + np.complex64, np.complex128, np.clongdouble, + np.timedelta64, + np.object_, +) _CorrelateMode: TypeAlias = L["valid", "same", "full"] +_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] +_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] +_Array3D: TypeAlias = np.ndarray[tuple[int, int, int], np.dtype[_ScalarT]] +_Array4D: TypeAlias = np.ndarray[tuple[int, int, int, int], np.dtype[_ScalarT]] + +_Int_co: TypeAlias = np.integer | np.bool +_Float_co: TypeAlias = np.floating | _Int_co +_Number_co: TypeAlias = np.number | np.bool +_TD64_co: TypeAlias = np.timedelta64 | _Int_co + +_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_ScalarT]] | Sequence[_ScalarT] +_ArrayLike1DBool_co: TypeAlias = 
_SupportsArray[np.dtype[np.bool]] | Sequence[py_bool | np.bool] +_ArrayLike1DInt_co: TypeAlias = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co] +_ArrayLike1DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co] +_ArrayLike1DNumber_co: TypeAlias = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co] +_ArrayLike1DTD64_co: TypeAlias = _ArrayLike1D[_TD64_co] +_ArrayLike1DObject_co: TypeAlias = _ArrayLike1D[np.object_] + +_DTypeLikeInt: TypeAlias = type[int] | _IntCodes +_DTypeLikeFloat64: TypeAlias = type[float] | _Float64Codes | _DoubleCodes +_DTypeLikeComplex128: TypeAlias = type[complex] | _Complex128Codes | _CDoubleCodes + +### + # keep in sync with `ones_like` @overload def zeros_like( @@ -913,249 +954,100 @@ def count_nonzero( def count_nonzero(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: py_bool = False) -> Any: ... # -def isfortran(a: NDArray[Any] | generic) -> py_bool: ... - -def argwhere(a: ArrayLike) -> NDArray[intp]: ... +def isfortran(a: ndarray | generic) -> py_bool: ... -def flatnonzero(a: ArrayLike) -> NDArray[intp]: ... +# +def argwhere(a: ArrayLike) -> _Array2D[np.intp]: ... +def flatnonzero(a: ArrayLike) -> _Array1D[np.intp]: ... +# keep in sync with `convolve` @overload def correlate( - a: _ArrayLike[Never], - v: _ArrayLike[Never], - mode: _CorrelateMode = "valid", -) -> NDArray[Any]: ... -@overload -def correlate( - a: _ArrayLikeBool_co, - v: _ArrayLikeBool_co, - mode: _CorrelateMode = "valid", -) -> NDArray[np.bool]: ... + a: _ArrayLike1D[_AnyNumericScalarT], v: _ArrayLike1D[_AnyNumericScalarT], mode: _CorrelateMode = "valid" +) -> _Array1D[_AnyNumericScalarT]: ... @overload -def correlate( - a: _ArrayLikeUInt_co, - v: _ArrayLikeUInt_co, - mode: _CorrelateMode = "valid", -) -> NDArray[unsignedinteger]: ... -@overload -def correlate( - a: _ArrayLikeInt_co, - v: _ArrayLikeInt_co, - mode: _CorrelateMode = "valid", -) -> NDArray[signedinteger]: ... +def correlate(a: _ArrayLike1DBool_co, v: _ArrayLike1DBool_co, mode: _CorrelateMode = "valid") -> _Array1D[np.bool]: ... @overload -def correlate( - a: _ArrayLikeFloat_co, - v: _ArrayLikeFloat_co, - mode: _CorrelateMode = "valid", -) -> NDArray[floating]: ... +def correlate(a: _ArrayLike1DInt_co, v: _ArrayLike1DInt_co, mode: _CorrelateMode = "valid") -> _Array1D[np.int_ | Any]: ... @overload -def correlate( - a: _ArrayLikeComplex_co, - v: _ArrayLikeComplex_co, - mode: _CorrelateMode = "valid", -) -> NDArray[complexfloating]: ... +def correlate(a: _ArrayLike1DFloat_co, v: _ArrayLike1DFloat_co, mode: _CorrelateMode = "valid") -> _Array1D[np.float64 | Any]: ... @overload def correlate( - a: _ArrayLikeTD64_co, - v: _ArrayLikeTD64_co, - mode: _CorrelateMode = "valid", -) -> NDArray[timedelta64]: ... + a: _ArrayLike1DNumber_co, v: _ArrayLike1DNumber_co, mode: _CorrelateMode = "valid" +) -> _Array1D[np.complex128 | Any]: ... @overload def correlate( - a: _ArrayLikeObject_co, - v: _ArrayLikeObject_co, - mode: _CorrelateMode = "valid", -) -> NDArray[object_]: ... + a: _ArrayLike1DTD64_co, v: _ArrayLike1DTD64_co, mode: _CorrelateMode = "valid" +) -> _Array1D[np.timedelta64 | Any]: ... +# keep in sync with `correlate` @overload def convolve( - a: _ArrayLike[Never], - v: _ArrayLike[Never], - mode: _CorrelateMode = "full", -) -> NDArray[Any]: ... -@overload -def convolve( - a: _ArrayLikeBool_co, - v: _ArrayLikeBool_co, - mode: _CorrelateMode = "full", -) -> NDArray[np.bool]: ... 
-@overload -def convolve( - a: _ArrayLikeUInt_co, - v: _ArrayLikeUInt_co, - mode: _CorrelateMode = "full", -) -> NDArray[unsignedinteger]: ... + a: _ArrayLike1D[_AnyNumericScalarT], v: _ArrayLike1D[_AnyNumericScalarT], mode: _CorrelateMode = "valid" +) -> _Array1D[_AnyNumericScalarT]: ... @overload -def convolve( - a: _ArrayLikeInt_co, - v: _ArrayLikeInt_co, - mode: _CorrelateMode = "full", -) -> NDArray[signedinteger]: ... +def convolve(a: _ArrayLike1DBool_co, v: _ArrayLike1DBool_co, mode: _CorrelateMode = "valid") -> _Array1D[np.bool]: ... @overload -def convolve( - a: _ArrayLikeFloat_co, - v: _ArrayLikeFloat_co, - mode: _CorrelateMode = "full", -) -> NDArray[floating]: ... +def convolve(a: _ArrayLike1DInt_co, v: _ArrayLike1DInt_co, mode: _CorrelateMode = "valid") -> _Array1D[np.int_ | Any]: ... @overload -def convolve( - a: _ArrayLikeComplex_co, - v: _ArrayLikeComplex_co, - mode: _CorrelateMode = "full", -) -> NDArray[complexfloating]: ... +def convolve(a: _ArrayLike1DFloat_co, v: _ArrayLike1DFloat_co, mode: _CorrelateMode = "valid") -> _Array1D[np.float64 | Any]: ... @overload def convolve( - a: _ArrayLikeTD64_co, - v: _ArrayLikeTD64_co, - mode: _CorrelateMode = "full", -) -> NDArray[timedelta64]: ... + a: _ArrayLike1DNumber_co, v: _ArrayLike1DNumber_co, mode: _CorrelateMode = "valid" +) -> _Array1D[np.complex128 | Any]: ... @overload def convolve( - a: _ArrayLikeObject_co, - v: _ArrayLikeObject_co, - mode: _CorrelateMode = "full", -) -> NDArray[object_]: ... + a: _ArrayLike1DTD64_co, v: _ArrayLike1DTD64_co, mode: _CorrelateMode = "valid" +) -> _Array1D[np.timedelta64 | Any]: ... +# keep roughly in sync with `convolve` and `correlate`, but for 2-D output and an additional `out` overload @overload def outer( - a: _ArrayLike[Never], - b: _ArrayLike[Never], - out: None = None, -) -> NDArray[Any]: ... -@overload -def outer( - a: _ArrayLikeBool_co, - b: _ArrayLikeBool_co, - out: None = None, -) -> NDArray[np.bool]: ... -@overload -def outer( - a: _ArrayLikeUInt_co, - b: _ArrayLikeUInt_co, - out: None = None, -) -> NDArray[unsignedinteger]: ... + a: _ArrayLike[_AnyNumericScalarT], b: _ArrayLike[_AnyNumericScalarT], out: None = None +) -> _Array2D[_AnyNumericScalarT]: ... @overload -def outer( - a: _ArrayLikeInt_co, - b: _ArrayLikeInt_co, - out: None = None, -) -> NDArray[signedinteger]: ... +def outer(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, out: None = None) -> _Array2D[np.bool]: ... @overload -def outer( - a: _ArrayLikeFloat_co, - b: _ArrayLikeFloat_co, - out: None = None, -) -> NDArray[floating]: ... +def outer(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, out: None = None) -> _Array2D[np.int_ | Any]: ... @overload -def outer( - a: _ArrayLikeComplex_co, - b: _ArrayLikeComplex_co, - out: None = None, -) -> NDArray[complexfloating]: ... +def outer(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, out: None = None) -> _Array2D[np.float64 | Any]: ... @overload -def outer( - a: _ArrayLikeTD64_co, - b: _ArrayLikeTD64_co, - out: None = None, -) -> NDArray[timedelta64]: ... +def outer(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, out: None = None) -> _Array2D[np.complex128 | Any]: ... @overload -def outer( - a: _ArrayLikeObject_co, - b: _ArrayLikeObject_co, - out: None = None, -) -> NDArray[object_]: ... +def outer(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, out: None = None) -> _Array2D[np.timedelta64 | Any]: ... 
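+# note: `_AnyNumericScalarT` is a constrained (not `bound=`) TypeVar, so the first
+# overload of `correlate`/`convolve`/`outer` only matches when both inputs share a
+# single exact scalar type; mixed inputs fall through to the `*_co` overloads, whose
+# `X | Any` return types leave the promoted dtype gradual.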
@overload -def outer( - a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - b: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - out: _ArrayT, -) -> _ArrayT: ... +def outer(a: _ArrayLikeNumber_co | _ArrayLikeTD64_co, b: _ArrayLikeNumber_co | _ArrayLikeTD64_co, out: _ArrayT) -> _ArrayT: ... # keep in sync with numpy.linalg._linalg.tensordot (ignoring `/, *`) @overload def tensordot( - a: _ArrayLike[_NumericScalarT], - b: _ArrayLike[_NumericScalarT], - axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[_NumericScalarT]: ... + a: _ArrayLike[_AnyNumericScalarT], b: _ArrayLike[_AnyNumericScalarT], axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[_AnyNumericScalarT]: ... @overload -def tensordot( - a: _ArrayLikeBool_co, - b: _ArrayLikeBool_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[bool_]: ... +def tensordot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2) -> NDArray[np.bool]: ... @overload def tensordot( - a: _ArrayLikeInt_co, - b: _ArrayLikeInt_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[int_ | Any]: ... + a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[np.int_ | Any]: ... @overload def tensordot( - a: _ArrayLikeFloat_co, - b: _ArrayLikeFloat_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[float64 | Any]: ... + a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[np.float64 | Any]: ... @overload def tensordot( - a: _ArrayLikeComplex_co, - b: _ArrayLikeComplex_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[complex128 | Any]: ... - -@overload -def roll( - a: _ArrayLike[_ScalarT], - shift: _ShapeLike, - axis: _ShapeLike | None = None, -) -> NDArray[_ScalarT]: ... -@overload -def roll( - a: ArrayLike, - shift: _ShapeLike, - axis: _ShapeLike | None = None, -) -> NDArray[Any]: ... - -def rollaxis( - a: NDArray[_ScalarT], - axis: int, - start: int = 0, -) -> NDArray[_ScalarT]: ... + a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[np.complex128 | Any]: ... -def moveaxis( - a: NDArray[_ScalarT], - source: _ShapeLike, - destination: _ShapeLike, -) -> NDArray[_ScalarT]: ... - -@overload -def cross( - a: _ArrayLike[Never], - b: _ArrayLike[Never], - axisa: int = -1, - axisb: int = -1, - axisc: int = -1, - axis: int | None = None, -) -> NDArray[Any]: ... -@overload -def cross( - a: _ArrayLikeBool_co, - b: _ArrayLikeBool_co, - axisa: int = -1, - axisb: int = -1, - axisc: int = -1, - axis: int | None = None, -) -> NoReturn: ... +# @overload def cross( - a: _ArrayLikeUInt_co, - b: _ArrayLikeUInt_co, + a: _ArrayLike[_AnyNumericScalarT], + b: _ArrayLike[_AnyNumericScalarT], axisa: int = -1, axisb: int = -1, axisc: int = -1, axis: int | None = None, -) -> NDArray[unsignedinteger]: ... +) -> NDArray[_AnyNumericScalarT]: ... @overload def cross( a: _ArrayLikeInt_co, @@ -1164,7 +1056,7 @@ def cross( axisb: int = -1, axisc: int = -1, axis: int | None = None, -) -> NDArray[signedinteger]: ... +) -> NDArray[np.int_ | Any]: ... @overload def cross( a: _ArrayLikeFloat_co, @@ -1173,7 +1065,7 @@ def cross( axisb: int = -1, axisc: int = -1, axis: int | None = None, -) -> NDArray[floating]: ... +) -> NDArray[np.float64 | Any]: ... 
 @overload
 def cross(
     a: _ArrayLikeComplex_co,
@@ -1182,109 +1074,104 @@ def cross(
     axisb: int = -1,
     axisc: int = -1,
     axis: int | None = None,
-) -> NDArray[complexfloating]: ...
-@overload
-def cross(
-    a: _ArrayLikeObject_co,
-    b: _ArrayLikeObject_co,
-    axisa: int = -1,
-    axisb: int = -1,
-    axisc: int = -1,
-    axis: int | None = None,
-) -> NDArray[object_]: ...
+) -> NDArray[np.complex128 | Any]: ...

+#
 @overload
-def indices(
-    dimensions: Sequence[int],
-    dtype: type[int] = ...,
-    sparse: L[False] = False,
-) -> NDArray[int_]: ...
-@overload
-def indices(
-    dimensions: Sequence[int],
-    dtype: type[int],
-    sparse: L[True],
-) -> tuple[NDArray[int_], ...]: ...
-@overload
-def indices(
-    dimensions: Sequence[int],
-    dtype: type[int] = ...,
-    *,
-    sparse: L[True],
-) -> tuple[NDArray[int_], ...]: ...
-@overload
-def indices(
-    dimensions: Sequence[int],
-    dtype: _DTypeLike[_ScalarT],
-    sparse: L[False] = False,
-) -> NDArray[_ScalarT]: ...
-@overload
-def indices(
-    dimensions: Sequence[int],
-    dtype: _DTypeLike[_ScalarT],
-    sparse: L[True],
-) -> tuple[NDArray[_ScalarT], ...]: ...
+def roll(a: _ArrayT, shift: _ShapeLike, axis: _ShapeLike | None = None) -> _ArrayT: ...
 @overload
-def indices(
-    dimensions: Sequence[int],
-    dtype: DTypeLike | None = ...,
-    sparse: L[False] = False,
-) -> NDArray[Any]: ...
+def roll(a: _ArrayLike[_ScalarT], shift: _ShapeLike, axis: _ShapeLike | None = None) -> NDArray[_ScalarT]: ...
 @overload
+def roll(a: ArrayLike, shift: _ShapeLike, axis: _ShapeLike | None = None) -> NDArray[Any]: ...
+
+#
+def rollaxis(a: _ArrayT, axis: int, start: int = 0) -> _ArrayT: ...
+def moveaxis(a: _ArrayT, source: _ShapeLike, destination: _ShapeLike) -> _ArrayT: ...
+
+#
+@overload  # 0d, dtype=int (default), sparse=False (default)
+def indices(dimensions: tuple[()], dtype: type[int] = int, sparse: L[False] = False) -> _Array1D[np.intp]: ...
+@overload  # 0d, dtype=<any>, sparse=True
+def indices(dimensions: tuple[()], dtype: DTypeLike | None = int, *, sparse: L[True]) -> tuple[()]: ...
+@overload  # 0d, dtype=<known>, sparse=False (default)
+def indices(dimensions: tuple[()], dtype: _DTypeLike[_ScalarT], sparse: L[False] = False) -> _Array1D[_ScalarT]: ...
+@overload  # 0d, dtype=<unknown>, sparse=False (default)
+def indices(dimensions: tuple[()], dtype: DTypeLike, sparse: L[False] = False) -> _Array1D[Any]: ...
+@overload  # 1d, dtype=int (default), sparse=False (default)
+def indices(dimensions: tuple[int], dtype: type[int] = int, sparse: L[False] = False) -> _Array2D[np.intp]: ...
+@overload  # 1d, dtype=int (default), sparse=True
+def indices(dimensions: tuple[int], dtype: type[int] = int, *, sparse: L[True]) -> tuple[_Array1D[np.intp]]: ...
+@overload  # 1d, dtype=<known>, sparse=False (default)
+def indices(dimensions: tuple[int], dtype: _DTypeLike[_ScalarT], sparse: L[False] = False) -> _Array2D[_ScalarT]: ...
+@overload  # 1d, dtype=<known>, sparse=True
+def indices(dimensions: tuple[int], dtype: _DTypeLike[_ScalarT], sparse: L[True]) -> tuple[_Array1D[_ScalarT]]: ...
+@overload  # 1d, dtype=<unknown>, sparse=False (default)
+def indices(dimensions: tuple[int], dtype: DTypeLike, sparse: L[False] = False) -> _Array2D[Any]: ...
+@overload  # 1d, dtype=<unknown>, sparse=True
+def indices(dimensions: tuple[int], dtype: DTypeLike, sparse: L[True]) -> tuple[_Array1D[Any]]: ...
+@overload  # 2d, dtype=int (default), sparse=False (default)
+def indices(dimensions: tuple[int, int], dtype: type[int] = int, sparse: L[False] = False) -> _Array3D[np.intp]: ...
+@overload  # 2d, dtype=int (default), sparse=True
 def indices(
-    dimensions: Sequence[int],
-    dtype: DTypeLike | None,
-    sparse: L[True],
-) -> tuple[NDArray[Any], ...]: ...
-@overload
+    dimensions: tuple[int, int], dtype: type[int] = int, *, sparse: L[True]
+) -> tuple[_Array2D[np.intp], _Array2D[np.intp]]: ...
+@overload  # 2d, dtype=<known>, sparse=False (default)
+def indices(dimensions: tuple[int, int], dtype: _DTypeLike[_ScalarT], sparse: L[False] = False) -> _Array3D[_ScalarT]: ...
+@overload  # 2d, dtype=<known>, sparse=True
 def indices(
-    dimensions: Sequence[int],
-    dtype: DTypeLike | None = ...,
-    *,
-    sparse: L[True],
-) -> tuple[NDArray[Any], ...]: ...
+    dimensions: tuple[int, int], dtype: _DTypeLike[_ScalarT], sparse: L[True]
+) -> tuple[_Array2D[_ScalarT], _Array2D[_ScalarT]]: ...
+@overload  # 2d, dtype=<unknown>, sparse=False (default)
+def indices(dimensions: tuple[int, int], dtype: DTypeLike, sparse: L[False] = False) -> _Array3D[Any]: ...
+@overload  # 2d, dtype=<unknown>, sparse=True
+def indices(dimensions: tuple[int, int], dtype: DTypeLike, sparse: L[True]) -> tuple[_Array2D[Any], _Array2D[Any]]: ...
+@overload  # ?d, dtype=int (default), sparse=False (default)
+def indices(dimensions: Sequence[int], dtype: type[int] = int, sparse: L[False] = False) -> NDArray[np.intp]: ...
+@overload  # ?d, dtype=int (default), sparse=True
+def indices(dimensions: Sequence[int], dtype: type[int] = int, *, sparse: L[True]) -> tuple[NDArray[np.intp], ...]: ...
+@overload  # ?d, dtype=<known>, sparse=False (default)
+def indices(dimensions: Sequence[int], dtype: _DTypeLike[_ScalarT], sparse: L[False] = False) -> NDArray[_ScalarT]: ...
+@overload  # ?d, dtype=<known>, sparse=True
+def indices(dimensions: Sequence[int], dtype: _DTypeLike[_ScalarT], sparse: L[True]) -> tuple[NDArray[_ScalarT], ...]: ...
+@overload  # ?d, dtype=<unknown>, sparse=False (default)
+def indices(dimensions: Sequence[int], dtype: DTypeLike, sparse: L[False] = False) -> ndarray: ...
+@overload  # ?d, dtype=<unknown>, sparse=True
+def indices(dimensions: Sequence[int], dtype: DTypeLike, sparse: L[True]) -> tuple[ndarray, ...]: ...

+#
 def fromfunction(
     function: Callable[..., _T],
     shape: Sequence[int],
     *,
-    dtype: DTypeLike | None = ...,
+    dtype: DTypeLike | None = float,
     like: _SupportsArrayFunc | None = None,
-    **kwargs: Any,
+    **kwargs: object,
 ) -> _T: ...

+#
 def isscalar(element: object) -> TypeGuard[generic | complex | str | bytes | memoryview]: ...

+#
 def binary_repr(num: SupportsIndex, width: int | None = None) -> str: ...
+def base_repr(number: SupportsAbs[float], base: float = 2, padding: SupportsIndex | None = 0) -> str: ...

-def base_repr(
-    number: SupportsAbs[float],
-    base: float = 2,
-    padding: SupportsIndex | None = 0,
-) -> str: ...
-
-@overload
-def identity(
-    n: int,
-    dtype: None = None,
-    *,
-    like: _SupportsArrayFunc | None = None,
-) -> NDArray[float64]: ...
-@overload
-def identity(
-    n: int,
-    dtype: _DTypeLike[_ScalarT],
-    *,
-    like: _SupportsArrayFunc | None = None,
-) -> NDArray[_ScalarT]: ...
-@overload
-def identity(
-    n: int,
-    dtype: DTypeLike | None = None,
-    *,
-    like: _SupportsArrayFunc | None = None,
-) -> NDArray[Any]: ...
+#
+@overload  # dtype: None (default)
+def identity(n: int, dtype: None = None, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.float64]: ...
+@overload  # dtype: known scalar type
+def identity(n: int, dtype: _DTypeLike[_ScalarT], *, like: _SupportsArrayFunc | None = None) -> _Array2D[_ScalarT]: ...
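+# note: the builtin-based dtype aliases used below are inexact matches (for example,
+# `bool` is a subclass of `int`, so a value typed `type[int]` may actually carry
+# `bool` at runtime), which is presumably why these overloads return `np.int_ | Any`
+# and friends instead of a single concrete scalar type.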
+@overload  # dtype: like bool
+def identity(n: int, dtype: _DTypeLikeBool, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.bool]: ...
+@overload  # dtype: like int_
+def identity(n: int, dtype: _DTypeLikeInt, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.int_ | Any]: ...
+@overload  # dtype: like float64
+def identity(n: int, dtype: _DTypeLikeFloat64, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.float64 | Any]: ...
+@overload  # dtype: like complex128
+def identity(n: int, dtype: _DTypeLikeComplex128, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.complex128 | Any]: ...
+@overload  # dtype: unknown
+def identity(n: int, dtype: DTypeLike, *, like: _SupportsArrayFunc | None = None) -> _Array2D[Incomplete]: ...

+#
 def allclose(
     a: ArrayLike,
     b: ArrayLike,
@@ -1293,30 +1180,80 @@ def allclose(
     equal_nan: py_bool = False,
 ) -> py_bool: ...

-@overload
+#
+@overload  # scalar, scalar
 def isclose(
-    a: _ScalarLike_co,
-    b: _ScalarLike_co,
+    a: _NumberLike_co,
+    b: _NumberLike_co,
     rtol: ArrayLike = 1e-5,
     atol: ArrayLike = 1e-8,
     equal_nan: py_bool = False,
 ) -> np.bool: ...
-@overload
+@overload  # known shape, same shape or scalar
+def isclose(
+    a: np.ndarray[_ShapeT],
+    b: np.ndarray[_ShapeT] | _NumberLike_co,
+    rtol: ArrayLike = 1e-5,
+    atol: ArrayLike = 1e-8,
+    equal_nan: py_bool = False,
+) -> np.ndarray[_ShapeT, np.dtype[np.bool]]: ...
+@overload  # same shape or scalar, known shape
+def isclose(
+    a: np.ndarray[_ShapeT] | _NumberLike_co,
+    b: np.ndarray[_ShapeT],
+    rtol: ArrayLike = 1e-5,
+    atol: ArrayLike = 1e-8,
+    equal_nan: py_bool = False,
+) -> np.ndarray[_ShapeT, np.dtype[np.bool]]: ...
+@overload  # 1d sequence, <=1d array-like
+def isclose(
+    a: Sequence[_NumberLike_co],
+    b: Sequence[_NumberLike_co] | _NumberLike_co | np.ndarray[tuple[int]],
+    rtol: ArrayLike = 1e-5,
+    atol: ArrayLike = 1e-8,
+    equal_nan: py_bool = False,
+) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ...
+@overload  # <=1d array-like, 1d sequence
+def isclose(
+    a: Sequence[_NumberLike_co] | _NumberLike_co | np.ndarray[tuple[int]],
+    b: Sequence[_NumberLike_co],
+    rtol: ArrayLike = 1e-5,
+    atol: ArrayLike = 1e-8,
+    equal_nan: py_bool = False,
+) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ...
+@overload  # 2d sequence, <=2d array-like
+def isclose(
+    a: Sequence[Sequence[_NumberLike_co]],
+    b: Sequence[Sequence[_NumberLike_co]] | Sequence[_NumberLike_co] | _NumberLike_co | np.ndarray[tuple[int] | tuple[int, int]],
+    rtol: ArrayLike = 1e-5,
+    atol: ArrayLike = 1e-8,
+    equal_nan: py_bool = False,
+) -> np.ndarray[tuple[int, int], np.dtype[np.bool]]: ...
+@overload  # <=2d array-like, 2d sequence
+def isclose(
+    a: Sequence[Sequence[_NumberLike_co]] | Sequence[_NumberLike_co] | _NumberLike_co | np.ndarray[tuple[int] | tuple[int, int]],
+    b: Sequence[Sequence[_NumberLike_co]],
+    rtol: ArrayLike = 1e-5,
+    atol: ArrayLike = 1e-8,
+    equal_nan: py_bool = False,
+) -> np.ndarray[tuple[int, int], np.dtype[np.bool]]: ...
+@overload  # unknown shape, unknown shape
 def isclose(
     a: ArrayLike,
     b: ArrayLike,
     rtol: ArrayLike = 1e-5,
     atol: ArrayLike = 1e-8,
     equal_nan: py_bool = False,
-) -> NDArray[np.bool]: ...
+) -> NDArray[np.bool] | Any: ...

+#
 def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: py_bool = False) -> py_bool: ...
-
 def array_equiv(a1: ArrayLike, a2: ArrayLike) -> py_bool: ...

+#
 @overload
 def astype(
-    x: ndarray[_ShapeT, dtype],
+    x: ndarray[_ShapeT],
     dtype: _DTypeLike[_ScalarT],
     /,
     *,
     copy: py_bool = True,
     device: L["cpu"] | None = None,
 ) -> ndarray[_ShapeT, dtype[_ScalarT]]: ...
@overload def astype( - x: ndarray[_ShapeT, dtype], + x: ndarray[_ShapeT], dtype: DTypeLike | None, /, *, copy: py_bool = True, device: L["cpu"] | None = None, -) -> ndarray[_ShapeT, dtype]: ... +) -> ndarray[_ShapeT]: ... diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index b0ba67d6a087..ba8fc4db23c9 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -216,9 +216,23 @@ assert_type(np.indices([1, 2, 3], sparse=True), tuple[npt.NDArray[np.int_], ...] assert_type(np.fromfunction(func, (3, 5)), SubClass[np.float64]) -assert_type(np.identity(10), npt.NDArray[np.float64]) -assert_type(np.identity(10, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.identity(10, dtype=int), npt.NDArray[Any]) +assert_type(np.identity(3), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.identity(3, dtype=np.int8), np.ndarray[tuple[int, int], np.dtype[np.int8]]) +assert_type(np.identity(3, dtype=bool), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.identity(3, dtype="bool"), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.identity(3, dtype="b1"), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.identity(3, dtype="?"), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.identity(3, dtype=int), np.ndarray[tuple[int, int], np.dtype[np.int_ | Any]]) +assert_type(np.identity(3, dtype="int"), np.ndarray[tuple[int, int], np.dtype[np.int_ | Any]]) +assert_type(np.identity(3, dtype="n"), np.ndarray[tuple[int, int], np.dtype[np.int_ | Any]]) +assert_type(np.identity(3, dtype=float), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.identity(3, dtype="float"), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.identity(3, dtype="f8"), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.identity(3, dtype="d"), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.identity(3, dtype=complex), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) +assert_type(np.identity(3, dtype="complex"), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) +assert_type(np.identity(3, dtype="c16"), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) +assert_type(np.identity(3, dtype="D"), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) assert_type(np.atleast_1d(A), npt.NDArray[np.float64]) assert_type(np.atleast_1d(C), npt.NDArray[Any]) diff --git a/numpy/typing/tests/data/reveal/numeric.pyi b/numpy/typing/tests/data/reveal/numeric.pyi index 247294cf34c6..24f97d2d0784 100644 --- a/numpy/typing/tests/data/reveal/numeric.pyi +++ b/numpy/typing/tests/data/reveal/numeric.pyi @@ -22,88 +22,125 @@ AR_c16: npt.NDArray[np.complex128] AR_m: npt.NDArray[np.timedelta64] AR_O: npt.NDArray[np.object_] -B: list[int] -C: SubClass +_sub_nd_i8: SubClass + +_to_1d_bool: list[bool] +_to_1d_int: list[int] +_to_1d_float: list[float] +_to_1d_complex: list[complex] + +### assert_type(np.count_nonzero(i8), np.intp) assert_type(np.count_nonzero(AR_i8), np.intp) -assert_type(np.count_nonzero(B), np.intp) +assert_type(np.count_nonzero(_to_1d_int), np.intp) assert_type(np.count_nonzero(AR_i8, keepdims=True), npt.NDArray[np.intp]) assert_type(np.count_nonzero(AR_i8, axis=0), Any) assert_type(np.isfortran(i8), bool) assert_type(np.isfortran(AR_i8), bool) -assert_type(np.argwhere(i8), npt.NDArray[np.intp]) 
-assert_type(np.argwhere(AR_i8), npt.NDArray[np.intp]) - -assert_type(np.flatnonzero(i8), npt.NDArray[np.intp]) -assert_type(np.flatnonzero(AR_i8), npt.NDArray[np.intp]) - -assert_type(np.correlate(B, AR_i8, mode="valid"), npt.NDArray[np.signedinteger]) -assert_type(np.correlate(AR_i8, AR_i8, mode="same"), npt.NDArray[np.signedinteger]) -assert_type(np.correlate(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.correlate(AR_b, AR_u8), npt.NDArray[np.unsignedinteger]) -assert_type(np.correlate(AR_i8, AR_b), npt.NDArray[np.signedinteger]) -assert_type(np.correlate(AR_i8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.correlate(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.correlate(AR_i8, AR_m), npt.NDArray[np.timedelta64]) -assert_type(np.correlate(AR_O, AR_O), npt.NDArray[np.object_]) - -assert_type(np.convolve(B, AR_i8, mode="valid"), npt.NDArray[np.signedinteger]) -assert_type(np.convolve(AR_i8, AR_i8, mode="same"), npt.NDArray[np.signedinteger]) -assert_type(np.convolve(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.convolve(AR_b, AR_u8), npt.NDArray[np.unsignedinteger]) -assert_type(np.convolve(AR_i8, AR_b), npt.NDArray[np.signedinteger]) -assert_type(np.convolve(AR_i8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.convolve(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.convolve(AR_i8, AR_m), npt.NDArray[np.timedelta64]) -assert_type(np.convolve(AR_O, AR_O), npt.NDArray[np.object_]) - -assert_type(np.outer(i8, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.outer(B, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.outer(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.outer(AR_i8, AR_i8, out=C), SubClass) -assert_type(np.outer(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.outer(AR_b, AR_u8), npt.NDArray[np.unsignedinteger]) -assert_type(np.outer(AR_i8, AR_b), npt.NDArray[np.signedinteger]) -assert_type(np.convolve(AR_i8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.outer(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.outer(AR_i8, AR_m), npt.NDArray[np.timedelta64]) -assert_type(np.outer(AR_O, AR_O), npt.NDArray[np.object_]) - -assert_type(np.tensordot(B, AR_i8), npt.NDArray[np.int_ | Any]) +assert_type(np.argwhere(i8), np.ndarray[tuple[int, int], np.dtype[np.intp]]) +assert_type(np.argwhere(AR_i8), np.ndarray[tuple[int, int], np.dtype[np.intp]]) + +assert_type(np.flatnonzero(i8), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.flatnonzero(AR_i8), np.ndarray[tuple[int], np.dtype[np.intp]]) + +# correlate +assert_type(np.correlate(AR_i8, AR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.correlate(AR_b, AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.correlate(AR_u8, AR_u8), np.ndarray[tuple[int], np.dtype[np.uint64]]) +assert_type(np.correlate(AR_i8, AR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.correlate(AR_f8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.correlate(AR_f8, AR_i8), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.correlate(AR_c16, AR_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.correlate(AR_c16, AR_f8), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) +assert_type(np.correlate(AR_m, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) +assert_type(np.correlate(AR_i8, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64 | Any]]) +assert_type(np.correlate(AR_O, AR_O), np.ndarray[tuple[int], np.dtype[np.object_]]) 
+assert_type(np.correlate(_to_1d_bool, _to_1d_bool), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.correlate(_to_1d_int, _to_1d_int), np.ndarray[tuple[int], np.dtype[np.int_ | Any]]) +assert_type(np.correlate(_to_1d_float, _to_1d_float), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.correlate(_to_1d_complex, _to_1d_complex), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) + +# convolve (same as correlate) +assert_type(np.convolve(AR_i8, AR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.convolve(AR_b, AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.convolve(AR_u8, AR_u8), np.ndarray[tuple[int], np.dtype[np.uint64]]) +assert_type(np.convolve(AR_i8, AR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.convolve(AR_f8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.convolve(AR_f8, AR_i8), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.convolve(AR_c16, AR_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.convolve(AR_c16, AR_f8), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) +assert_type(np.convolve(AR_m, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) +assert_type(np.convolve(AR_i8, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64 | Any]]) +assert_type(np.convolve(AR_O, AR_O), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(np.convolve(_to_1d_bool, _to_1d_bool), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.convolve(_to_1d_int, _to_1d_int), np.ndarray[tuple[int], np.dtype[np.int_ | Any]]) +assert_type(np.convolve(_to_1d_float, _to_1d_float), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.convolve(_to_1d_complex, _to_1d_complex), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) + +# outer (very similar to above, but 2D output) +assert_type(np.outer(AR_i8, AR_i8), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(np.outer(AR_b, AR_b), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.outer(AR_u8, AR_u8), np.ndarray[tuple[int, int], np.dtype[np.uint64]]) +assert_type(np.outer(AR_i8, AR_i8), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(np.outer(AR_f8, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.outer(AR_f8, AR_i8), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.outer(AR_c16, AR_c16), np.ndarray[tuple[int, int], np.dtype[np.complex128]]) +assert_type(np.outer(AR_c16, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) +assert_type(np.outer(AR_m, AR_m), np.ndarray[tuple[int, int], np.dtype[np.timedelta64]]) +assert_type(np.outer(AR_i8, AR_m), np.ndarray[tuple[int, int], np.dtype[np.timedelta64 | Any]]) +assert_type(np.outer(AR_O, AR_O), np.ndarray[tuple[int, int], np.dtype[np.object_]]) +assert_type(np.outer(AR_i8, AR_i8, out=_sub_nd_i8), SubClass) +assert_type(np.outer(_to_1d_bool, _to_1d_bool), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.outer(_to_1d_int, _to_1d_int), np.ndarray[tuple[int, int], np.dtype[np.int_ | Any]]) +assert_type(np.outer(_to_1d_float, _to_1d_float), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.outer(_to_1d_complex, _to_1d_complex), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) + +# tensordot +assert_type(np.tensordot(AR_i8, AR_i8), npt.NDArray[np.int64]) assert_type(np.tensordot(AR_b, AR_b), npt.NDArray[np.bool]) assert_type(np.tensordot(AR_u8, AR_u8), npt.NDArray[np.uint64]) 
assert_type(np.tensordot(AR_i8, AR_i8), npt.NDArray[np.int64]) -assert_type(np.tensordot(AR_i8, AR_i8, axes=0), npt.NDArray[np.int64]) -assert_type(np.tensordot(AR_i8, AR_i8, axes=(0, 1)), npt.NDArray[np.int64]) assert_type(np.tensordot(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.tensordot(AR_f8, AR_i8), npt.NDArray[np.float64 | Any]) assert_type(np.tensordot(AR_c16, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.tensordot(AR_c16, AR_f8), npt.NDArray[np.complex128 | Any]) assert_type(np.tensordot(AR_m, AR_m), npt.NDArray[np.timedelta64]) assert_type(np.tensordot(AR_O, AR_O), npt.NDArray[np.object_]) +assert_type(np.tensordot(_to_1d_bool, _to_1d_bool), npt.NDArray[np.bool]) +assert_type(np.tensordot(_to_1d_int, _to_1d_int), npt.NDArray[np.int_ | Any]) +assert_type(np.tensordot(_to_1d_float, _to_1d_float), npt.NDArray[np.float64 | Any]) +assert_type(np.tensordot(_to_1d_complex, _to_1d_complex), npt.NDArray[np.complex128 | Any]) + +# cross +assert_type(np.cross(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.cross(AR_u8, AR_u8), npt.NDArray[np.uint64]) +assert_type(np.cross(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.cross(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.cross(AR_f8, AR_i8), npt.NDArray[np.float64 | Any]) +assert_type(np.cross(AR_c16, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.cross(AR_c16, AR_f8), npt.NDArray[np.complex128 | Any]) +assert_type(np.cross(AR_m, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.cross(AR_O, AR_O), npt.NDArray[np.object_]) +assert_type(np.cross(_to_1d_int, _to_1d_int), npt.NDArray[np.int_ | Any]) +assert_type(np.cross(_to_1d_float, _to_1d_float), npt.NDArray[np.float64 | Any]) +assert_type(np.cross(_to_1d_complex, _to_1d_complex), npt.NDArray[np.complex128 | Any]) assert_type(np.isscalar(i8), bool) assert_type(np.isscalar(AR_i8), bool) -assert_type(np.isscalar(B), bool) +assert_type(np.isscalar(_to_1d_int), bool) assert_type(np.roll(AR_i8, 1), npt.NDArray[np.int64]) assert_type(np.roll(AR_i8, (1, 2)), npt.NDArray[np.int64]) -assert_type(np.roll(B, 1), npt.NDArray[Any]) +assert_type(np.roll(_to_1d_int, 1), npt.NDArray[Any]) assert_type(np.rollaxis(AR_i8, 0, 1), npt.NDArray[np.int64]) assert_type(np.moveaxis(AR_i8, 0, 1), npt.NDArray[np.int64]) assert_type(np.moveaxis(AR_i8, (0, 1), (1, 2)), npt.NDArray[np.int64]) -assert_type(np.cross(B, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.cross(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.cross(AR_b, AR_u8), npt.NDArray[np.unsignedinteger]) -assert_type(np.cross(AR_i8, AR_b), npt.NDArray[np.signedinteger]) -assert_type(np.cross(AR_i8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.cross(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.cross(AR_O, AR_O), npt.NDArray[np.object_]) - assert_type(np.indices([0, 1, 2]), npt.NDArray[np.int_]) assert_type(np.indices([0, 1, 2], sparse=True), tuple[npt.NDArray[np.int_], ...]) assert_type(np.indices([0, 1, 2], dtype=np.float64), npt.NDArray[np.float64]) @@ -116,18 +153,18 @@ assert_type(np.binary_repr(1), str) assert_type(np.base_repr(1), str) assert_type(np.allclose(i8, AR_i8), bool) -assert_type(np.allclose(B, AR_i8), bool) +assert_type(np.allclose(_to_1d_int, AR_i8), bool) assert_type(np.allclose(AR_i8, AR_i8), bool) assert_type(np.isclose(i8, i8), np.bool) assert_type(np.isclose(i8, AR_i8), npt.NDArray[np.bool]) -assert_type(np.isclose(B, AR_i8), npt.NDArray[np.bool]) +assert_type(np.isclose(_to_1d_int, _to_1d_int), np.ndarray[tuple[int], np.dtype[np.bool]]) 
assert_type(np.isclose(AR_i8, AR_i8), npt.NDArray[np.bool]) assert_type(np.array_equal(i8, AR_i8), bool) -assert_type(np.array_equal(B, AR_i8), bool) +assert_type(np.array_equal(_to_1d_int, AR_i8), bool) assert_type(np.array_equal(AR_i8, AR_i8), bool) assert_type(np.array_equiv(i8, AR_i8), bool) -assert_type(np.array_equiv(B, AR_i8), bool) +assert_type(np.array_equiv(_to_1d_int, AR_i8), bool) assert_type(np.array_equiv(AR_i8, AR_i8), bool) From ab06e8c3c96b280fd9747667342cd79fffb03d8e Mon Sep 17 00:00:00 2001 From: Amelia Thurdekoos Date: Mon, 17 Nov 2025 16:11:56 -0800 Subject: [PATCH 0879/1018] DOC: Update wording in numpy.coremath --- doc/source/reference/c-api/coremath.rst | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/doc/source/reference/c-api/coremath.rst b/doc/source/reference/c-api/coremath.rst index c07abb47bc10..cc46ba744a49 100644 --- a/doc/source/reference/c-api/coremath.rst +++ b/doc/source/reference/c-api/coremath.rst @@ -1,8 +1,7 @@ NumPy core math library ======================= -The numpy core math library (``npymath``) is a first step in this direction. This -library contains most math-related C99 functionality, which can be used on +This library contains most math-related C99 functionality, which can be used on platforms where C99 is not well supported. The core math functions have the same API as the C99 ones, except for the ``npy_*`` prefix. From a91953965abeb918fe925d82d49bdf50e0ef5dc2 Mon Sep 17 00:00:00 2001 From: mattip Date: Tue, 18 Nov 2025 09:05:17 +0200 Subject: [PATCH 0880/1018] DOC: remove mention of 'skip azp' since we no longer use azure [skip actions][skip cirrus] --- doc/RELEASE_WALKTHROUGH.rst | 2 +- doc/source/dev/development_workflow.rst | 7 ------- 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/doc/RELEASE_WALKTHROUGH.rst b/doc/RELEASE_WALKTHROUGH.rst index ac306c682148..c8c1f129c0b2 100644 --- a/doc/RELEASE_WALKTHROUGH.rst +++ b/doc/RELEASE_WALKTHROUGH.rst @@ -290,7 +290,7 @@ Update the ``version`` in ``pyproject.toml``:: $ gvim pyproject.toml Commit the result, edit the commit message, note the files in the commit, and -add a line ``[skip azp] [skip cirrus] [skip actions]``, then push:: +add a line ``[skip cirrus] [skip actions]``, then push:: $ git commit -a -m"MAINT: Prepare 2.4.x for further development" $ git rebase -i HEAD^ diff --git a/doc/source/dev/development_workflow.rst b/doc/source/dev/development_workflow.rst index fa5f8b1e65b7..10b07cc1f437 100644 --- a/doc/source/dev/development_workflow.rst +++ b/doc/source/dev/development_workflow.rst @@ -205,13 +205,6 @@ these fragments in each commit message of a PR: settings. `See the configuration files for these checks. `__ -* ``[skip azp]``: skip Azure jobs - - `Azure `__ is - where all comprehensive tests are run. This is an expensive run, and one you - could typically skip if you do documentation-only changes, for example. - `See the main configuration file for these checks. 
`__ - * ``[skip circle]``: skip CircleCI jobs `CircleCI `__ is where we build the documentation and From 195c4b2b8991356c4b9d5afe15d897c29c1ad058 Mon Sep 17 00:00:00 2001 From: Simola Nayak <14366306+simolanayak@users.noreply.github.com> Date: Tue, 18 Nov 2025 01:28:12 -0800 Subject: [PATCH 0881/1018] Merge pull request #30184 from simolanayak/test-numeric-lines MAINT: Enable linting with ruff E501 #28947 (test-numeric.py) --- numpy/_core/tests/test_numeric.py | 28 ++++++++++++++++++++-------- ruff.toml | 1 - 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 485147500461..2c5535a19c77 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -1660,8 +1660,10 @@ def test_nonzero_onedim(self): # x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)], # dtype=[('a', 'i4'), ('b', 'i2')]) - x = np.array([(1, 2, -5, -3), (0, 0, 2, 7), (1, 1, 0, 1), (-1, 3, 1, 0), (0, 7, 0, 4)], - dtype=[('a', 'i4'), ('b', 'i2'), ('c', 'i1'), ('d', 'i8')]) + x = np.array( + [(1, 2, -5, -3), (0, 0, 2, 7), (1, 1, 0, 1), (-1, 3, 1, 0), (0, 7, 0, 4)], + dtype=[('a', 'i4'), ('b', 'i2'), ('c', 'i1'), ('d', 'i8')] + ) assert_equal(np.count_nonzero(x['a']), 3) assert_equal(np.count_nonzero(x['b']), 4) assert_equal(np.count_nonzero(x['c']), 3) @@ -1995,7 +1997,9 @@ def test_boolean(self): g1 = randint(0, 5, size=15) g2 = randint(0, 8, size=15) V[g1, g2] = -V[g1, g2] - assert_((np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all()) + assert_( + (np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all() + ) def test_boolean_edgecase(self): a = np.array([], dtype='int32') @@ -2261,7 +2265,10 @@ def test_array_equiv(self): res = np.array_equiv(np.array([1, 2]), np.array([[1], [2]])) assert_(not res) assert_(type(res) is bool) - res = np.array_equiv(np.array([1, 2]), np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])) + res = np.array_equiv( + np.array([1, 2]), + np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), + ) assert_(not res) assert_(type(res) is bool) @@ -3116,7 +3123,9 @@ def tst_isclose_allclose(self, x, y): if np.isscalar(x) and np.isscalar(y): assert_(np.isclose(x, y) == np.allclose(x, y), msg=msg2 % (x, y)) else: - assert_array_equal(np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y)) + assert_array_equal( + np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y) + ) def test_ip_all_isclose(self): self._setup() @@ -3210,7 +3219,8 @@ def test_tol_warnings(self): for i in b: for j in b: - # Making sure that i and j are not both numbers, because that won't create a warning + # Making sure that i and j are not both numbers, + # because that won't create a warning if (i == 1) and (j == 1): continue @@ -3220,7 +3230,8 @@ def test_tol_warnings(self): c = np.isclose(a, a, atol=i, rtol=j) assert len(w) == 1 assert issubclass(w[-1].category, RuntimeWarning) - assert f"One of rtol or atol is not valid, atol: {i}, rtol: {j}" in str(w[-1].message) + expected = f"One of rtol or atol is not valid, atol: {i}, rtol: {j}" + assert expected in str(w[-1].message) class TestStdVar: @@ -4244,7 +4255,8 @@ def test_zero_dimension(self): def test_zero_dimensional(self): # gh-12130 arr_0d = np.array(1) - ret = np.tensordot(arr_0d, arr_0d, ([], [])) # contracting no axes is well defined + # contracting no axes is well defined + ret = np.tensordot(arr_0d, arr_0d, ([], [])) assert_array_equal(ret, arr_0d) diff --git a/ruff.toml b/ruff.toml index 8b4d8358ba4a..b25a34d45984 100644 --- a/ruff.toml +++ 
b/ruff.toml
@@ -90,7 +90,6 @@ ignore = [
 "numpy/_core/tests/test_multiarray.py" = ["E501"]
 "numpy/_core/tests/test_nditer*py" = ["E501"]
 "numpy/_core/tests/test_umath.py" = ["E501"]
-"numpy/_core/tests/test_numeric.py" = ["E501"]
 "numpy/_core/tests/test_numerictypes.py" = ["E501"]
 "numpy/_core/tests/test_regression.py" = ["E501"]
 "numpy/_core/_add_newdocs.py" = ["E501"]

From e4a68f45ad0be0ad34e03fb960559aa3f98ddb64 Mon Sep 17 00:00:00 2001
From: Ralf Gommers
Date: Thu, 6 Nov 2025 17:28:25 +0000
Subject: [PATCH 0882/1018] CI: add check for numpy-release version of
 scipy-openblas

---
 .github/workflows/linux.yml      |   8 +++
 .github/workflows/linux_blas.yml |   2 +-
 tools/check_openblas_version.py  | 113 +++++++++++++++++++++++++++----
 3 files changed, 109 insertions(+), 14 deletions(-)

diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml
index 5d1f29693155..c0effa244646 100644
--- a/.github/workflows/linux.yml
+++ b/.github/workflows/linux.yml
@@ -342,6 +342,14 @@ jobs:
       - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: '3.11'
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+        with:
+          repository: numpy/numpy-release
+          path: numpy-release
+          persist-credentials: false
+      - name: Check scipy-openblas version in release pipelines
+        run: |
+          python tools/check_openblas_version.py --req-files numpy-release/requirements/openblas_requirements.txt
       - name: Install build and test dependencies from PyPI
         run: |
           pip install -r requirements/build_requirements.txt
diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml
index 7b7647044002..fccc7e3ad84b 100644
--- a/.github/workflows/linux_blas.yml
+++ b/.github/workflows/linux_blas.yml
@@ -107,7 +107,7 @@ jobs:
       - name: Ensure scipy-openblas
         run: |
           set -ex
-          spin python tools/check_openblas_version.py 0.3.26
+          spin python tools/check_openblas_version.py -- --min-version 0.3.30

       - name: Test
         shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"'
diff --git a/tools/check_openblas_version.py b/tools/check_openblas_version.py
index 9aa0b265dea5..085308b71171 100644
--- a/tools/check_openblas_version.py
+++ b/tools/check_openblas_version.py
@@ -1,20 +1,107 @@
 """
-usage: check_openblas_version.py <min_version>
+Checks related to the OpenBLAS version used in CI.

-Check the blas version is blas from scipy-openblas and is higher than
-min_version
-example: check_openblas_version.py 0.3.26
+Options:
+1. Check that the BLAS used at build time is (a) scipy-openblas, and (b) its version is
+   higher than a given minimum version. Note: this method only seems to give
+   the first 3 version components, so 0.3.30.0.7 gets translated to 0.3.30 when reading
+   it back out from `numpy.show_config()`.
+2. Check requirements files in the main numpy repo and compare with the numpy-release
+   repo. Goal is to ensure that `numpy-release` is not behind.
+
+Both of these checks are primarily useful in a CI job.
+
+Examples:
+
+    # Requires an installed numpy
+    $ python check_openblas_version.py --min-version 0.3.30
+
+    # Only needs the requirements files
+    $ python check_openblas_version.py --req-files \
+        ../numpy-release/requirements/openblas_requirements.txt
 """
+import argparse
+import os.path
 import pprint
-import sys

-import numpy

-version = sys.argv[1]
-deps = numpy.show_config('dicts')['Build Dependencies']
-assert "blas" in deps
-print("Build Dependencies: blas")
-pprint.pprint(deps["blas"])
-assert deps["blas"]["version"].split(".") >= version.split(".")
-assert deps["blas"]["name"] == "scipy-openblas"
+
+def check_built_version(min_version):
+    import numpy
+    deps = numpy.show_config('dicts')['Build Dependencies']
+    assert "blas" in deps
+    print("Build Dependencies: blas")
+    pprint.pprint(deps["blas"])
+    assert deps["blas"]["version"].split(".") >= min_version.split(".")
+    assert deps["blas"]["name"] == "scipy-openblas"
+
+
+def check_requirements_files(reqfile):
+    if not os.path.exists(reqfile):
+        print(f"Path does not exist: {reqfile}")
+
+    def get_version(line):
+        req = line.split(";")[0].split("==")[1].split(".")[:5]
+        return tuple(int(s) for s in req)
+
+    def parse_reqs(reqfile):
+        with open(reqfile) as f:
+            lines = f.readlines()
+
+        v32 = None
+        v64 = None
+        for line in lines:
+            if "scipy-openblas32" in line:
+                v32 = get_version(line)
+            if "scipy-openblas64" in line:
+                v64 = get_version(line)
+        if v32 is None or v64 is None:
+            raise AssertionError("Expected `scipy-openblas32` and "
+                                 "`scipy-openblas64` in `ci_requirements.txt`, "
+                                 f"got:\n {' '.join(lines)}")
+        return v32, v64
+
+    this_dir = os.path.abspath(os.path.dirname(__file__))
+    reqfile_thisrepo = os.path.join(this_dir, '..', 'requirements',
+                                    'ci_requirements.txt')
+
+    v32_thisrepo, v64_thisrepo = parse_reqs(reqfile_thisrepo)
+    v32_rel, v64_rel = parse_reqs(reqfile)
+
+    def compare_versions(v_rel, v_thisrepo, bits):
+        if not v_rel >= v_thisrepo:
+            raise AssertionError(f"`numpy-release` version of scipy-openblas{bits} "
+                                 f"{v_rel} is behind this repo: {v_thisrepo}")
+
+    compare_versions(v64_rel, v64_thisrepo, "64")
+    compare_versions(v32_rel, v32_thisrepo, "32")
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--req-files",
+        type=str,
+        help="Path to the requirements file to compare with the one in this repo"
+    )
+    parser.add_argument(
+        "--min-version",
+        type=str,
+        help="The minimum version that should have been used at build time for "
+             "the installed `numpy` package"
+    )
+    args = parser.parse_args()
+
+    if args.min_version is None and args.req_files is None:
+        raise ValueError("One of `--req-files` or `--min-version` needs to be "
+                         "specified")
+
+    if args.min_version:
+        check_built_version(args.min_version)
+
+    if args.req_files:
+        check_requirements_files(args.req_files)
+
+
+if __name__ == '__main__':
+    main()

From 31ea0a31800645c6e0fcf2e5e54b714d1ff99ddd Mon Sep 17 00:00:00 2001
From: Kumar Aditya
Date: Tue, 18 Nov 2025 21:45:28 +0530
Subject: [PATCH 0883/1018] BUG: fix data race in wrapping_auxdata_freelist by
 making it thread-local (#30228)

---
 numpy/_core/src/umath/wrapping_array_method.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/numpy/_core/src/umath/wrapping_array_method.c b/numpy/_core/src/umath/wrapping_array_method.c
index b340e72bdbfd..924bac9524e9 100644
--- a/numpy/_core/src/umath/wrapping_array_method.c
+++ b/numpy/_core/src/umath/wrapping_array_method.c
@@ -83,8 +83,8 @@ typedef struct {

 #define WRAPPING_AUXDATA_FREELIST_SIZE 5
-static int
wrapping_auxdata_freenum = 0; -static wrapping_auxdata *wrapping_auxdata_freelist[WRAPPING_AUXDATA_FREELIST_SIZE] = {NULL}; +static NPY_TLS int wrapping_auxdata_freenum = 0; +static NPY_TLS wrapping_auxdata *wrapping_auxdata_freelist[WRAPPING_AUXDATA_FREELIST_SIZE] = {NULL}; static void From 0c7ddfc343ee0b7937e893227d2090ee4dce090e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Nov 2025 17:06:33 +0000 Subject: [PATCH 0884/1018] MAINT: Bump github/codeql-action from 4.31.3 to 4.31.4 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 4.31.3 to 4.31.4. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/014f16e7ab1402f30e7c3329d33797e7948572db...e12f0178983d466f2f6028f5cc7a6d786fd97f4b) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.31.4 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 1546cdc10adf..bc4ff9f9cdc5 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@014f16e7ab1402f30e7c3329d33797e7948572db # v4.31.3 + uses: github/codeql-action/init@e12f0178983d466f2f6028f5cc7a6d786fd97f4b # v4.31.4 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@014f16e7ab1402f30e7c3329d33797e7948572db # v4.31.3 + uses: github/codeql-action/autobuild@e12f0178983d466f2f6028f5cc7a6d786fd97f4b # v4.31.4 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@014f16e7ab1402f30e7c3329d33797e7948572db # v4.31.3 + uses: github/codeql-action/analyze@e12f0178983d466f2f6028f5cc7a6d786fd97f4b # v4.31.4 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 227f2f3788b0..165a172b9ee4 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@014f16e7ab1402f30e7c3329d33797e7948572db # v2.1.27 + uses: github/codeql-action/upload-sarif@e12f0178983d466f2f6028f5cc7a6d786fd97f4b # v2.1.27 with: sarif_file: results.sarif From 6faf12368c02e465d0980db9ff5b344944e752e9 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 18 Nov 2025 18:48:58 +0100 Subject: [PATCH 0885/1018] TYP: Shape-typing in ``lib._twodim_base_impl`` (#30231) --- numpy/lib/_twodim_base_impl.pyi | 307 ++++++++---------- numpy/typing/tests/data/fail/twodim_base.pyi | 33 +- .../typing/tests/data/reveal/twodim_base.pyi | 262 +++++++++------ 3 files changed, 333 insertions(+), 269 deletions(-) diff --git a/numpy/lib/_twodim_base_impl.pyi b/numpy/lib/_twodim_base_impl.pyi index cc1d6b6245a9..58582119429a 100644 --- a/numpy/lib/_twodim_base_impl.pyi +++ b/numpy/lib/_twodim_base_impl.pyi @@ -1,31 +1,26 @@ +from _typeshed import Incomplete from collections.abc import Callable, Sequence -from typing import Any, Literal as L, TypeAlias, TypeVar, overload +from typing import ( + Any, + Literal as L, + Never, + Protocol, + TypeAlias, + TypeVar, + overload, + type_check_only, +) import numpy as np -from numpy import ( - _OrderCF, - complex128, - complexfloating, - datetime64, - float64, - floating, - generic, - int_, - intp, - object_, - signedinteger, - timedelta64, -) +from numpy import _OrderCF from numpy._typing import ( ArrayLike, DTypeLike, NDArray, _ArrayLike, - _ArrayLikeComplex_co, - _ArrayLikeFloat_co, - _ArrayLikeInt_co, - _ArrayLikeObject_co, _DTypeLike, + _NumberLike_co, + _ScalarLike_co, _SupportsArray, _SupportsArrayFunc, ) @@ -51,36 +46,63 @@ __all__ = [ ### _T = TypeVar("_T") -_ScalarT = TypeVar("_ScalarT", bound=generic) -_ComplexFloatingT = TypeVar("_ComplexFloatingT", bound=np.complexfloating) +_ArrayT = TypeVar("_ArrayT", bound=np.ndarray) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_ComplexT = TypeVar("_ComplexT", bound=np.complexfloating) _InexactT = TypeVar("_InexactT", bound=np.inexact) +_NumberT = TypeVar("_NumberT", bound=np.number) +_NumberObjectT = TypeVar("_NumberObjectT", bound=np.number | np.object_) _NumberCoT = TypeVar("_NumberCoT", bound=_Number_co) -# The returned arrays dtype must be compatible with `np.equal` -_MaskFunc: TypeAlias = Callable[[NDArray[int_], _T], NDArray[_Number_co | timedelta64 | datetime64 | object_]] - _Int_co: TypeAlias = np.integer | np.bool _Float_co: TypeAlias = np.floating | _Int_co _Number_co: TypeAlias = np.number | np.bool +_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] +_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] +# Workaround for mypy's and pyright's lack of compliance with the typing spec for +# overloads for gradual types. This works because only `Any` and `Never` are assignable +# to `Never`. 
+_ArrayNoD: TypeAlias = np.ndarray[tuple[Never] | tuple[Never, Never], np.dtype[_ScalarT]] + _ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_ScalarT]] | Sequence[_ScalarT] _ArrayLike1DInt_co: TypeAlias = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co] _ArrayLike1DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co] _ArrayLike2DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[_ArrayLike1DFloat_co] _ArrayLike1DNumber_co: TypeAlias = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co] +# The returned arrays dtype must be compatible with `np.equal` +_MaskFunc: TypeAlias = Callable[[NDArray[np.int_], _T], NDArray[_Number_co | np.timedelta64 | np.datetime64 | np.object_]] + +_Indices2D: TypeAlias = tuple[_Array1D[np.intp], _Array1D[np.intp]] +_Histogram2D: TypeAlias = tuple[_Array1D[np.float64], _Array1D[_ScalarT], _Array1D[_ScalarT]] + +@type_check_only +class _HasShapeAndNDim(Protocol): + @property # TODO: require 2d shape once shape-typing has matured + def shape(self) -> tuple[int, ...]: ... + @property + def ndim(self) -> int: ... + ### +# keep in sync with `flipud` +@overload +def fliplr(m: _ArrayT) -> _ArrayT: ... @overload def fliplr(m: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... @overload def fliplr(m: ArrayLike) -> NDArray[Any]: ... +# keep in sync with `fliplr` +@overload +def flipud(m: _ArrayT) -> _ArrayT: ... @overload def flipud(m: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... @overload def flipud(m: ArrayLike) -> NDArray[Any]: ... +# @overload def eye( N: int, @@ -91,7 +113,7 @@ def eye( *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> NDArray[float64]: ... +) -> _Array2D[np.float64]: ... @overload def eye( N: int, @@ -102,7 +124,7 @@ def eye( *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> NDArray[_ScalarT]: ... +) -> _Array2D[_ScalarT]: ... @overload def eye( N: int, @@ -113,7 +135,7 @@ def eye( order: _OrderCF = "C", device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> NDArray[_ScalarT]: ... +) -> _Array2D[_ScalarT]: ... @overload def eye( N: int, @@ -124,19 +146,31 @@ def eye( *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> NDArray[Any]: ... +) -> _Array2D[Incomplete]: ... +# +@overload +def diag(v: _ArrayNoD[_ScalarT] | Sequence[Sequence[_ScalarT]], k: int = 0) -> NDArray[_ScalarT]: ... +@overload +def diag(v: _Array2D[_ScalarT] | Sequence[Sequence[_ScalarT]], k: int = 0) -> _Array1D[_ScalarT]: ... +@overload +def diag(v: _Array1D[_ScalarT] | Sequence[_ScalarT], k: int = 0) -> _Array2D[_ScalarT]: ... +@overload +def diag(v: Sequence[Sequence[_ScalarLike_co]], k: int = 0) -> _Array1D[Incomplete]: ... +@overload +def diag(v: Sequence[_ScalarLike_co], k: int = 0) -> _Array2D[Incomplete]: ... @overload def diag(v: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... @overload -def diag(v: ArrayLike, k: int = 0) -> NDArray[Any]: ... +def diag(v: ArrayLike, k: int = 0) -> NDArray[Incomplete]: ... # keep in sync with `numpy.ma.extras.diagflat` @overload -def diagflat(v: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... +def diagflat(v: _ArrayLike[_ScalarT], k: int = 0) -> _Array2D[_ScalarT]: ... @overload -def diagflat(v: ArrayLike, k: int = 0) -> NDArray[Any]: ... +def diagflat(v: ArrayLike, k: int = 0) -> _Array2D[Incomplete]: ... 
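+# NOTE: `diagflat` flattens its input before building the diagonal, which is
+# why every overload above returns a 2-D array whatever the input's shape.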
+# @overload def tri( N: int, @@ -145,7 +179,7 @@ def tri( dtype: None = ..., # = float # stubdefaulter: ignore[missing-default] *, like: _SupportsArrayFunc | None = None -) -> NDArray[float64]: ... +) -> _Array2D[np.float64]: ... @overload def tri( N: int, @@ -154,7 +188,7 @@ def tri( dtype: _DTypeLike[_ScalarT], *, like: _SupportsArrayFunc | None = None -) -> NDArray[_ScalarT]: ... +) -> _Array2D[_ScalarT]: ... @overload def tri( N: int, @@ -163,7 +197,7 @@ def tri( *, dtype: _DTypeLike[_ScalarT], like: _SupportsArrayFunc | None = None -) -> NDArray[_ScalarT]: ... +) -> _Array2D[_ScalarT]: ... @overload def tri( N: int, @@ -172,69 +206,55 @@ def tri( dtype: DTypeLike | None = ..., # = float *, like: _SupportsArrayFunc | None = None -) -> NDArray[Any]: ... +) -> _Array2D[Any]: ... +# keep in sync with `triu` +@overload +def tril(m: _ArrayT, k: int = 0) -> _ArrayT: ... @overload def tril(m: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... @overload def tril(m: ArrayLike, k: int = 0) -> NDArray[Any]: ... +# keep in sync with `tril` +@overload +def triu(m: _ArrayT, k: int = 0) -> _ArrayT: ... @overload def triu(m: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... @overload def triu(m: ArrayLike, k: int = 0) -> NDArray[Any]: ... +# we use `list` (invariant) instead of `Sequence` (covariant) to avoid overlap +@overload +def vander(x: _ArrayLike1D[_NumberObjectT], N: int | None = None, increasing: bool = False) -> _Array2D[_NumberObjectT]: ... +@overload +def vander(x: _ArrayLike1D[np.bool] | list[int], N: int | None = None, increasing: bool = False) -> _Array2D[np.int_]: ... +@overload +def vander(x: list[float], N: int | None = None, increasing: bool = False) -> _Array2D[np.float64]: ... @overload -def vander( # type: ignore[misc] - x: _ArrayLikeInt_co, - N: int | None = None, - increasing: bool = False, -) -> NDArray[signedinteger]: ... -@overload -def vander( # type: ignore[misc] - x: _ArrayLikeFloat_co, - N: int | None = None, - increasing: bool = False, -) -> NDArray[floating]: ... -@overload -def vander( - x: _ArrayLikeComplex_co, - N: int | None = None, - increasing: bool = False, -) -> NDArray[complexfloating]: ... -@overload -def vander( - x: _ArrayLikeObject_co, - N: int | None = None, - increasing: bool = False, -) -> NDArray[object_]: ... +def vander(x: list[complex], N: int | None = None, increasing: bool = False) -> _Array2D[np.complex128]: ... +@overload # fallback +def vander(x: Sequence[_NumberLike_co], N: int | None = None, increasing: bool = False) -> _Array2D[Any]: ... +# @overload def histogram2d( - x: _ArrayLike1D[_ComplexFloatingT], - y: _ArrayLike1D[_ComplexFloatingT | _Float_co], + x: _ArrayLike1D[_ComplexT], + y: _ArrayLike1D[_ComplexT | _Float_co], bins: int | Sequence[int] = 10, range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> tuple[ - NDArray[float64], - NDArray[_ComplexFloatingT], - NDArray[_ComplexFloatingT], -]: ... +) -> _Histogram2D[_ComplexT]: ... @overload def histogram2d( - x: _ArrayLike1D[_ComplexFloatingT | _Float_co], - y: _ArrayLike1D[_ComplexFloatingT], + x: _ArrayLike1D[_ComplexT | _Float_co], + y: _ArrayLike1D[_ComplexT], bins: int | Sequence[int] = 10, range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> tuple[ - NDArray[float64], - NDArray[_ComplexFloatingT], - NDArray[_ComplexFloatingT], -]: ... +) -> _Histogram2D[_ComplexT]: ... 
@overload def histogram2d( x: _ArrayLike1D[_InexactT], @@ -243,11 +263,7 @@ def histogram2d( range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> tuple[ - NDArray[float64], - NDArray[_InexactT], - NDArray[_InexactT], -]: ... +) -> _Histogram2D[_InexactT]: ... @overload def histogram2d( x: _ArrayLike1D[_InexactT | _Int_co], @@ -256,11 +272,7 @@ def histogram2d( range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> tuple[ - NDArray[float64], - NDArray[_InexactT], - NDArray[_InexactT], -]: ... +) -> _Histogram2D[_InexactT]: ... @overload def histogram2d( x: _ArrayLike1DInt_co | Sequence[float], @@ -269,11 +281,7 @@ def histogram2d( range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> tuple[ - NDArray[float64], - NDArray[float64], - NDArray[float64], -]: ... +) -> _Histogram2D[np.float64]: ... @overload def histogram2d( x: Sequence[complex], @@ -282,11 +290,7 @@ def histogram2d( range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> tuple[ - NDArray[float64], - NDArray[complex128 | float64], - NDArray[complex128 | float64], -]: ... +) -> _Histogram2D[np.complex128 | Any]: ... @overload def histogram2d( x: _ArrayLike1DNumber_co, @@ -295,11 +299,7 @@ def histogram2d( range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> tuple[ - NDArray[float64], - NDArray[_NumberCoT], - NDArray[_NumberCoT], -]: ... +) -> _Histogram2D[_NumberCoT]: ... @overload def histogram2d( x: _ArrayLike1D[_InexactT], @@ -308,11 +308,16 @@ def histogram2d( range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> tuple[ - NDArray[float64], - NDArray[_NumberCoT | _InexactT], - NDArray[_NumberCoT | _InexactT], -]: ... +) -> _Histogram2D[_InexactT | _NumberCoT]: ... +@overload +def histogram2d( + x: _ArrayLike1D[_InexactT], + y: _ArrayLike1D[_InexactT], + bins: Sequence[_ArrayLike1DNumber_co | int], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[_InexactT | Any]: ... @overload def histogram2d( x: _ArrayLike1DInt_co | Sequence[float], @@ -321,11 +326,16 @@ def histogram2d( range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> tuple[ - NDArray[float64], - NDArray[_NumberCoT | float64], - NDArray[_NumberCoT | float64], -]: ... +) -> _Histogram2D[np.float64 | _NumberCoT]: ... +@overload +def histogram2d( + x: _ArrayLike1DInt_co | Sequence[float], + y: _ArrayLike1DInt_co | Sequence[float], + bins: Sequence[_ArrayLike1DNumber_co | int], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.float64 | Any]: ... @overload def histogram2d( x: Sequence[complex], @@ -334,24 +344,16 @@ def histogram2d( range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> tuple[ - NDArray[float64], - NDArray[_NumberCoT | complex128 | float64], - NDArray[_NumberCoT | complex128 | float64], -]: ... +) -> _Histogram2D[np.complex128 | _NumberCoT]: ... 
@overload def histogram2d( - x: _ArrayLike1DNumber_co, - y: _ArrayLike1DNumber_co, - bins: Sequence[Sequence[bool]], + x: Sequence[complex], + y: Sequence[complex], + bins: Sequence[_ArrayLike1DNumber_co | int], range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> tuple[ - NDArray[float64], - NDArray[np.bool], - NDArray[np.bool], -]: ... +) -> _Histogram2D[np.complex128 | Any]: ... @overload def histogram2d( x: _ArrayLike1DNumber_co, @@ -360,11 +362,7 @@ def histogram2d( range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> tuple[ - NDArray[float64], - NDArray[np.int_ | np.bool], - NDArray[np.int_ | np.bool], -]: ... +) -> _Histogram2D[np.int_]: ... @overload def histogram2d( x: _ArrayLike1DNumber_co, @@ -373,11 +371,7 @@ def histogram2d( range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> tuple[ - NDArray[float64], - NDArray[np.float64 | np.int_ | np.bool], - NDArray[np.float64 | np.int_ | np.bool], -]: ... +) -> _Histogram2D[np.float64 | Any]: ... @overload def histogram2d( x: _ArrayLike1DNumber_co, @@ -386,46 +380,29 @@ def histogram2d( range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> tuple[ - NDArray[float64], - NDArray[np.complex128 | np.float64 | np.int_ | np.bool], - NDArray[np.complex128 | np.float64 | np.int_ | np.bool], -]: ... +) -> _Histogram2D[np.complex128 | Any]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[_ArrayLike1DNumber_co | int] | int, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[Any]: ... # NOTE: we're assuming/demanding here the `mask_func` returns # an ndarray of shape `(n, n)`; otherwise there is the possibility # of the output tuple having more or less than 2 elements @overload -def mask_indices( - n: int, - mask_func: _MaskFunc[int], - k: int = 0, -) -> tuple[NDArray[intp], NDArray[intp]]: ... +def mask_indices(n: int, mask_func: _MaskFunc[int], k: int = 0) -> _Indices2D: ... @overload -def mask_indices( - n: int, - mask_func: _MaskFunc[_T], - k: _T, -) -> tuple[NDArray[intp], NDArray[intp]]: ... +def mask_indices(n: int, mask_func: _MaskFunc[_T], k: _T) -> _Indices2D: ... -def tril_indices( - n: int, - k: int = 0, - m: int | None = None, -) -> tuple[NDArray[int_], NDArray[int_]]: ... - -def tril_indices_from( - arr: NDArray[Any], - k: int = 0, -) -> tuple[NDArray[int_], NDArray[int_]]: ... +# +def tril_indices(n: int, k: int = 0, m: int | None = None) -> _Indices2D: ... +def triu_indices(n: int, k: int = 0, m: int | None = None) -> _Indices2D: ... -def triu_indices( - n: int, - k: int = 0, - m: int | None = None, -) -> tuple[NDArray[int_], NDArray[int_]]: ... - -def triu_indices_from( - arr: NDArray[Any], - k: int = 0, -) -> tuple[NDArray[int_], NDArray[int_]]: ... +# these will accept anything with `shape: tuple[int, int]` and `ndim: int` attributes +def tril_indices_from(arr: _HasShapeAndNDim, k: int = 0) -> _Indices2D: ... +def triu_indices_from(arr: _HasShapeAndNDim, k: int = 0) -> _Indices2D: ... 
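For illustration, a minimal sketch of what the `_HasShapeAndNDim` protocol admits (the `Grid` class here is hypothetical, not part of the patch): any object exposing `shape` and `ndim` attributes is accepted, since `tril_indices_from`/`triu_indices_from` read nothing else.

    import numpy as np

    class Grid:  # duck-typed stand-in for a 2-D array
        shape = (3, 4)
        ndim = 2

    # reads only `.ndim` and `.shape`, so this type-checks and runs
    rows, cols = np.tril_indices_from(Grid())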
diff --git a/numpy/typing/tests/data/fail/twodim_base.pyi b/numpy/typing/tests/data/fail/twodim_base.pyi index e146d68c7418..473419cecef1 100644 --- a/numpy/typing/tests/data/fail/twodim_base.pyi +++ b/numpy/typing/tests/data/fail/twodim_base.pyi @@ -1,32 +1,39 @@ -from typing import Any +from typing import type_check_only import numpy as np import numpy.typing as npt -def func1(ar: npt.NDArray[Any], a: int) -> npt.NDArray[np.str_]: ... +_0d_bool: np.bool +_nd_bool: npt.NDArray[np.bool] +_nd_td64: npt.NDArray[np.timedelta64] +_to_2d_bool: list[list[bool]] -def func2(ar: npt.NDArray[Any], a: float) -> float: ... +@type_check_only +def func1(ar: np.ndarray, a: int) -> npt.NDArray[np.str_]: ... +@type_check_only +def func2(ar: np.ndarray, a: float) -> float: ... -AR_b: npt.NDArray[np.bool] -AR_m: npt.NDArray[np.timedelta64] - -AR_LIKE_b: list[bool] +### np.eye(10, M=20.0) # type: ignore[call-overload] np.eye(10, k=2.5, dtype=int) # type: ignore[call-overload] -np.diag(AR_b, k=0.5) # type: ignore[call-overload] -np.diagflat(AR_b, k=0.5) # type: ignore[call-overload] +np.diag(_nd_bool, k=0.5) # type: ignore[call-overload] +np.diagflat(_nd_bool, k=0.5) # type: ignore[call-overload] np.tri(10, M=20.0) # type: ignore[call-overload] np.tri(10, k=2.5, dtype=int) # type: ignore[call-overload] -np.tril(AR_b, k=0.5) # type: ignore[call-overload] -np.triu(AR_b, k=0.5) # type: ignore[call-overload] +np.tril(_nd_bool, k=0.5) # type: ignore[call-overload] +np.triu(_nd_bool, k=0.5) # type: ignore[call-overload] -np.vander(AR_m) # type: ignore[arg-type] +np.vander(_nd_td64) # type: ignore[type-var] -np.histogram2d(AR_m) # type: ignore[call-overload] +np.histogram2d(_nd_td64) # type: ignore[call-overload] np.mask_indices(10, func1) # type: ignore[arg-type] np.mask_indices(10, func2, 10.5) # type: ignore[arg-type] + +np.tril_indices(3.14) # type: ignore[arg-type] + +np.tril_indices_from(_to_2d_bool) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/reveal/twodim_base.pyi b/numpy/typing/tests/data/reveal/twodim_base.pyi index 7e9563a38611..30d4f408f1a9 100644 --- a/numpy/typing/tests/data/reveal/twodim_base.pyi +++ b/numpy/typing/tests/data/reveal/twodim_base.pyi @@ -1,145 +1,225 @@ -from typing import Any, TypeVar, assert_type +from typing import Any, TypeAlias, TypeVar, assert_type, type_check_only import numpy as np import numpy.typing as npt _ScalarT = TypeVar("_ScalarT", bound=np.generic) -def func1(ar: npt.NDArray[_ScalarT], a: int) -> npt.NDArray[_ScalarT]: ... - -def func2(ar: npt.NDArray[np.number], a: str) -> npt.NDArray[np.float64]: ... - -AR_b: npt.NDArray[np.bool] -AR_u: npt.NDArray[np.uint64] -AR_i: npt.NDArray[np.int64] -AR_f: npt.NDArray[np.float64] -AR_c: npt.NDArray[np.complex128] -AR_O: npt.NDArray[np.object_] - -AR_LIKE_b: list[bool] -AR_LIKE_c: list[complex] - -assert_type(np.fliplr(AR_b), npt.NDArray[np.bool]) -assert_type(np.fliplr(AR_LIKE_b), npt.NDArray[Any]) +_1D: TypeAlias = tuple[int] +_2D: TypeAlias = tuple[int, int] +_ND: TypeAlias = tuple[Any, ...] 
-assert_type(np.flipud(AR_b), npt.NDArray[np.bool]) -assert_type(np.flipud(AR_LIKE_b), npt.NDArray[Any]) +_Indices2D: TypeAlias = tuple[ + np.ndarray[_1D, np.dtype[np.intp]], + np.ndarray[_1D, np.dtype[np.intp]], +] -assert_type(np.eye(10), npt.NDArray[np.float64]) -assert_type(np.eye(10, M=20, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.eye(10, k=2, dtype=int), npt.NDArray[Any]) +### -assert_type(np.diag(AR_b), npt.NDArray[np.bool]) -assert_type(np.diag(AR_LIKE_b, k=0), npt.NDArray[Any]) +_nd_bool: np.ndarray[_ND, np.dtype[np.bool]] +_1d_bool: np.ndarray[_1D, np.dtype[np.bool]] +_2d_bool: np.ndarray[_2D, np.dtype[np.bool]] +_nd_u64: np.ndarray[_ND, np.dtype[np.uint64]] +_nd_i64: np.ndarray[_ND, np.dtype[np.int64]] +_nd_f64: np.ndarray[_ND, np.dtype[np.float64]] +_nd_c128: np.ndarray[_ND, np.dtype[np.complex128]] +_nd_obj: np.ndarray[_ND, np.dtype[np.object_]] -assert_type(np.diagflat(AR_b), npt.NDArray[np.bool]) -assert_type(np.diagflat(AR_LIKE_b, k=0), npt.NDArray[Any]) +_to_nd_bool: list[bool] | list[list[bool]] +_to_1d_bool: list[bool] +_to_2d_bool: list[list[bool]] -assert_type(np.tri(10), npt.NDArray[np.float64]) -assert_type(np.tri(10, M=20, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.tri(10, k=2, dtype=int), npt.NDArray[Any]) +_to_1d_f64: list[float] +_to_1d_c128: list[complex] -assert_type(np.tril(AR_b), npt.NDArray[np.bool]) -assert_type(np.tril(AR_LIKE_b, k=0), npt.NDArray[Any]) - -assert_type(np.triu(AR_b), npt.NDArray[np.bool]) -assert_type(np.triu(AR_LIKE_b, k=0), npt.NDArray[Any]) - -assert_type(np.vander(AR_b), npt.NDArray[np.signedinteger]) -assert_type(np.vander(AR_u), npt.NDArray[np.signedinteger]) -assert_type(np.vander(AR_i, N=2), npt.NDArray[np.signedinteger]) -assert_type(np.vander(AR_f, increasing=True), npt.NDArray[np.floating]) -assert_type(np.vander(AR_c), npt.NDArray[np.complexfloating]) -assert_type(np.vander(AR_O), npt.NDArray[np.object_]) +@type_check_only +def func1(ar: npt.NDArray[_ScalarT], a: int) -> npt.NDArray[_ScalarT]: ... +@type_check_only +def func2(ar: npt.NDArray[np.number], a: str) -> npt.NDArray[np.float64]: ... 
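+# minimal duck-typed object for `tril_indices_from`/`triu_indices_from`,
+# which only require `shape` and `ndim` attributes (`_HasShapeAndNDim`)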
+@type_check_only +class _Cube: + shape = 3, 4 + ndim = 2 + +### + +# fliplr +assert_type(np.fliplr(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.fliplr(_1d_bool), np.ndarray[_1D, np.dtype[np.bool]]) +assert_type(np.fliplr(_2d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.fliplr(_to_nd_bool), np.ndarray) +assert_type(np.fliplr(_to_1d_bool), np.ndarray) +assert_type(np.fliplr(_to_2d_bool), np.ndarray) + +# flipud +assert_type(np.flipud(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.flipud(_1d_bool), np.ndarray[_1D, np.dtype[np.bool]]) +assert_type(np.flipud(_2d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.flipud(_to_nd_bool), np.ndarray) +assert_type(np.flipud(_to_1d_bool), np.ndarray) +assert_type(np.flipud(_to_2d_bool), np.ndarray) + +# eye +assert_type(np.eye(10), np.ndarray[_2D, np.dtype[np.float64]]) +assert_type(np.eye(10, M=20, dtype=np.int64), np.ndarray[_2D, np.dtype[np.int64]]) +assert_type(np.eye(10, k=2, dtype=int), np.ndarray[_2D]) + +# diag +assert_type(np.diag(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.diag(_1d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.diag(_2d_bool), np.ndarray[_1D, np.dtype[np.bool]]) +assert_type(np.diag(_to_nd_bool, k=0), np.ndarray) +assert_type(np.diag(_to_1d_bool, k=0), np.ndarray[_2D]) +assert_type(np.diag(_to_2d_bool, k=0), np.ndarray[_1D]) + +# diagflat +assert_type(np.diagflat(_nd_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.diagflat(_1d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.diagflat(_2d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.diagflat(_to_nd_bool, k=0), np.ndarray[_2D]) +assert_type(np.diagflat(_to_1d_bool, k=0), np.ndarray[_2D]) +assert_type(np.diagflat(_to_2d_bool, k=0), np.ndarray[_2D]) + +# tri +assert_type(np.tri(10), np.ndarray[_2D, np.dtype[np.float64]]) +assert_type(np.tri(10, M=20, dtype=np.int64), np.ndarray[_2D, np.dtype[np.int64]]) +assert_type(np.tri(10, k=2, dtype=int), np.ndarray[_2D]) + +# tril +assert_type(np.tril(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.tril(_to_nd_bool, k=0), np.ndarray) +assert_type(np.tril(_to_1d_bool, k=0), np.ndarray) +assert_type(np.tril(_to_2d_bool, k=0), np.ndarray) + +# triu +assert_type(np.triu(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.triu(_to_nd_bool, k=0), np.ndarray) +assert_type(np.triu(_to_1d_bool, k=0), np.ndarray) +assert_type(np.triu(_to_2d_bool, k=0), np.ndarray) + +# vander +assert_type(np.vander(_nd_bool), np.ndarray[_2D, np.dtype[np.int_]]) +assert_type(np.vander(_nd_u64), np.ndarray[_2D, np.dtype[np.uint64]]) +assert_type(np.vander(_nd_i64, N=2), np.ndarray[_2D, np.dtype[np.int64]]) +assert_type(np.vander(_nd_f64, increasing=True), np.ndarray[_2D, np.dtype[np.float64]]) +assert_type(np.vander(_nd_c128), np.ndarray[_2D, np.dtype[np.complex128]]) +assert_type(np.vander(_nd_obj), np.ndarray[_2D, np.dtype[np.object_]]) + +# histogram2d assert_type( - np.histogram2d(AR_LIKE_c, AR_LIKE_c), + np.histogram2d(_to_1d_f64, _to_1d_f64), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.complex128 | np.float64], - npt.NDArray[np.complex128 | np.float64], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], ], ) assert_type( - np.histogram2d(AR_i, AR_b), + np.histogram2d(_to_1d_c128, _to_1d_c128), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.float64], - npt.NDArray[np.float64], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, 
np.dtype[np.complex128 | Any]], + np.ndarray[_1D, np.dtype[np.complex128 | Any]], ], ) assert_type( - np.histogram2d(AR_f, AR_i), + np.histogram2d(_nd_i64, _nd_bool), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.float64], - npt.NDArray[np.float64], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], ], ) assert_type( - np.histogram2d(AR_i, AR_f), + np.histogram2d(_nd_f64, _nd_i64), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.float64], - npt.NDArray[np.float64], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], ], ) assert_type( - np.histogram2d(AR_f, AR_c, weights=AR_LIKE_b), + np.histogram2d(_nd_i64, _nd_f64), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.complex128], - npt.NDArray[np.complex128], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], ], ) assert_type( - np.histogram2d(AR_f, AR_c, bins=8), + np.histogram2d(_nd_f64, _nd_c128, weights=_to_1d_bool), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.complex128], - npt.NDArray[np.complex128], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128]], + np.ndarray[_1D, np.dtype[np.complex128]], ], ) assert_type( - np.histogram2d(AR_c, AR_f, bins=(8, 5)), + np.histogram2d(_nd_f64, _nd_c128, bins=8), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.complex128], - npt.NDArray[np.complex128], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128]], + np.ndarray[_1D, np.dtype[np.complex128]], ], ) assert_type( - np.histogram2d(AR_c, AR_i, bins=AR_u), + np.histogram2d(_nd_c128, _nd_f64, bins=(8, 5)), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.uint64], - npt.NDArray[np.uint64], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128]], + np.ndarray[_1D, np.dtype[np.complex128]], ], ) assert_type( - np.histogram2d(AR_c, AR_c, bins=(AR_u, AR_u)), + np.histogram2d(_nd_c128, _nd_i64, bins=_nd_u64), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.uint64], - npt.NDArray[np.uint64], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.uint64]], + np.ndarray[_1D, np.dtype[np.uint64]], ], ) assert_type( - np.histogram2d(AR_c, AR_c, bins=(AR_b, 8)), + np.histogram2d(_nd_c128, _nd_c128, bins=(_nd_u64, _nd_u64)), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.bool | np.complex128], - npt.NDArray[np.bool | np.complex128], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.uint64]], + np.ndarray[_1D, np.dtype[np.uint64]], + ], +) +assert_type( + np.histogram2d(_nd_c128, _nd_c128, bins=(_nd_bool, 8)), + tuple[ + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128 | np.bool]], + np.ndarray[_1D, np.dtype[np.complex128 | np.bool]], + ], +) +assert_type( + np.histogram2d(_nd_c128, _nd_c128, bins=(_to_1d_f64, 8)), + tuple[ + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128 | Any]], + np.ndarray[_1D, np.dtype[np.complex128 | Any]], ], ) -assert_type(np.mask_indices(10, func1), tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]) -assert_type(np.mask_indices(8, func2, "0"), tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]) - -assert_type(np.tril_indices(10), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) - -assert_type(np.tril_indices_from(AR_b), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) - -assert_type(np.triu_indices(10), 
tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]])
-
-assert_type(np.triu_indices_from(AR_b), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]])
+# mask_indices
+assert_type(np.mask_indices(10, func1), _Indices2D)
+assert_type(np.mask_indices(8, func2, "0"), _Indices2D)
+
+# tril_indices
+assert_type(np.tril_indices(3), _Indices2D)
+assert_type(np.tril_indices(3, 1), _Indices2D)
+assert_type(np.tril_indices(3, 1, 2), _Indices2D)
+# triu_indices
+assert_type(np.triu_indices(3), _Indices2D)
+assert_type(np.triu_indices(3, 1), _Indices2D)
+assert_type(np.triu_indices(3, 1, 2), _Indices2D)
+
+# tril_indices_from
+assert_type(np.tril_indices_from(_2d_bool), _Indices2D)
+assert_type(np.tril_indices_from(_Cube()), _Indices2D)
+# triu_indices_from
+assert_type(np.triu_indices_from(_2d_bool), _Indices2D)
+assert_type(np.triu_indices_from(_Cube()), _Indices2D)

From dd69ebad5c3816535af0660df7568d0936b4200b Mon Sep 17 00:00:00 2001
From: Joren Hammudoglu
Date: Tue, 18 Nov 2025 18:51:12 +0100
Subject: [PATCH 0886/1018] TYP: Annotate remaining ``ma.MaskedArray`` methods
 (#30221)

---
 numpy/__init__.pyi |  6 +++-
 numpy/ma/core.pyi  | 74 +++++++++++++++++++++++++++++++++++++---------
 2 files changed, 65 insertions(+), 15 deletions(-)

diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index a04ec3139250..b973e43093ca 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -2273,6 +2273,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
         *args: SupportsIndex,
     ) -> str: ...

+    # keep in sync with `ma.MaskedArray.tolist`
     @overload  # this first overload prevents mypy from over-eagerly selecting `tuple[()]` in case of `_AnyShape`
     def tolist(self: ndarray[tuple[Never], dtype[generic[_T]]], /) -> Any: ...
     @overload
@@ -2291,6 +2292,7 @@
     @overload
     def resize(self, /, *new_shape: SupportsIndex, refcheck: builtins.bool = True) -> None: ...

+    # keep in sync with `ma.MaskedArray.squeeze`
     def squeeze(
         self,
         /,
@@ -2418,7 +2420,7 @@
         order: str | Sequence[str] | None = None,
     ) -> NDArray[intp]: ...

-    #
+    # keep in sync with `ma.MaskedArray.diagonal`
     def diagonal(
         self,
         offset: SupportsIndex = 0,
@@ -2536,11 +2538,13 @@
         mode: _ModeKind = ...,
     ) -> _ArrayT: ...

+    # keep in sync with `ma.MaskedArray.repeat`
     @overload
     def repeat(self, repeats: _ArrayLikeInt_co, /, axis: None = None) -> ndarray[tuple[int], _DTypeT_co]: ...
     @overload
     def repeat(self, repeats: _ArrayLikeInt_co, /, axis: SupportsIndex) -> ndarray[_AnyShape, _DTypeT_co]: ...

+    # keep in sync with `ma.MaskedArray.flatten` and `ma.MaskedArray.ravel`
     def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ...
     def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ...

diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi
index 31d66fff0ab8..62dc32c13d97 100644
--- a/numpy/ma/core.pyi
+++ b/numpy/ma/core.pyi
@@ -960,7 +960,8 @@ class MaskedIterator(Generic[_ShapeT_co, _DTypeT_co]):
     def __next__(self: MaskedIterator[Any, np.dtype[_ScalarT]]) -> _ScalarT: ...
class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): - __array_priority__: Any + __array_priority__: Final[Literal[15]] = 15 + @overload def __new__( cls, @@ -1615,10 +1616,12 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # @property # type: ignore[misc] def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... # type: ignore[override] - get_imag: Any + def get_imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... + + # @property # type: ignore[misc] def real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... # type: ignore[override] - get_real: Any + def get_real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... # keep in sync with `np.ma.count` @overload @@ -1630,9 +1633,6 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def count(self, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... - # keep roughly in sync with `ma.core.ravel` - def ravel(self, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... - # Keep in sync with `ndarray.reshape` # NOTE: reshape also accepts negative integers, so we can't use integer literals @overload # (None) @@ -2317,34 +2317,80 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): mode: _ModeKind = "raise", ) -> _ArrayT: ... - copy: Any - diagonal: Any - flatten: Any + # keep in sync with `ndarray.diagonal` + @override + def diagonal( + self, + /, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... # keep in sync with `ndarray.repeat` + @override @overload def repeat( self, + /, repeats: _ArrayLikeInt_co, axis: None = None, ) -> MaskedArray[tuple[int], _DTypeT_co]: ... @overload def repeat( self, + /, repeats: _ArrayLikeInt_co, axis: SupportsIndex, ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... - squeeze: Any + # keep in sync with `ndarray.flatten` and `ndarray.ravel` + @override + def flatten(self, /, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... + @override + def ravel(self, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... + + # keep in sync with `ndarray.squeeze` + @override + def squeeze( + self, + /, + axis: SupportsIndex | tuple[SupportsIndex, ...] | None = None, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... # - def toflex(self) -> Incomplete: ... - def torecords(self) -> Incomplete: ... - def tolist(self, fill_value: Incomplete | None = None) -> Incomplete: ... + def toflex(self) -> MaskedArray[_ShapeT_co, np.dtype[np.void]]: ... + def torecords(self) -> MaskedArray[_ShapeT_co, np.dtype[np.void]]: ... + + # + @override def tobytes(self, /, fill_value: Incomplete | None = None, order: _OrderKACF = "C") -> bytes: ... # type: ignore[override] - def tofile(self, /, fid: Incomplete, sep: str = "", format: str = "%s") -> Incomplete: ... + + # keep in sync with `ndarray.tolist` + @override + @overload + def tolist(self: MaskedArray[tuple[Never], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None) -> Any: ... + @overload + def tolist(self: MaskedArray[tuple[()], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None) -> _T: ... + @overload + def tolist(self: MaskedArray[tuple[int], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None) -> list[_T]: ... 
+ @overload + def tolist( + self: MaskedArray[tuple[int, int], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None + ) -> list[list[_T]]: ... + @overload + def tolist( + self: MaskedArray[tuple[int, int, int], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None + ) -> list[list[list[_T]]]: ... + @overload + def tolist(self, /, fill_value: _ScalarLike_co | None = None) -> Any: ... + + # NOTE: will raise `NotImplementedError` + @override + def tofile(self, /, fid: Never, sep: str = "", format: str = "%s") -> NoReturn: ... # type: ignore[override] # + @override def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Self: ... # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` From 4f97a4e64e6389f5b1be66954b98d5f2078afacc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Nov 2025 11:27:10 -0700 Subject: [PATCH 0887/1018] MAINT: Bump actions/checkout from 5.0.0 to 5.0.1 (#30252) Bumps [actions/checkout](https://github.com/actions/checkout) from 5.0.0 to 5.0.1. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/08c6903cd8c0fde910a37f88322edcfb5dd907a8...93cb6efe18208431cddfb8368fd83d5badbf9bfd) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: 5.0.1 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 2 +- .github/workflows/compiler_sanitizers.yml | 4 ++-- .github/workflows/cygwin.yml | 2 +- .github/workflows/dependency-review.yml | 2 +- .github/workflows/emscripten.yml | 2 +- .github/workflows/linux-ppc64le.yml | 2 +- .github/workflows/linux.yml | 24 +++++++++++------------ .github/workflows/linux_blas.yml | 18 ++++++++--------- .github/workflows/linux_qemu.yml | 4 ++-- .github/workflows/linux_simd.yml | 12 ++++++------ .github/workflows/macos.yml | 4 ++-- .github/workflows/mypy.yml | 2 +- .github/workflows/mypy_primer.yml | 2 +- .github/workflows/scorecards.yml | 2 +- .github/workflows/stubtest.yml | 2 +- .github/workflows/wheels.yml | 2 +- .github/workflows/windows.yml | 6 +++--- 17 files changed, 46 insertions(+), 46 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 1546cdc10adf..ba75ce30ce33 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -41,7 +41,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: persist-credentials: false diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index 290ddfa75ffe..54e136839053 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -30,7 +30,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: macos-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -86,7 +86,7 @@ jobs: options: --shm-size=2g # increase memory for large matrix ops steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - 
uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 - name: Trust working directory and initialize submodules run: | git config --global --add safe.directory /__w/numpy/numpy diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index c010cd5c9783..279020cbe365 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -22,7 +22,7 @@ jobs: # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index b2fd18051ddd..c27e9ef4e88c 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - name: 'Checkout Repository' - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: persist-credentials: false - name: 'Dependency Review' diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 0d1936b55b60..906dec766b24 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -29,7 +29,7 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout NumPy - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux-ppc64le.yml b/.github/workflows/linux-ppc64le.yml index 9b23c1939b00..93426a95e8c0 100644 --- a/.github/workflows/linux-ppc64le.yml +++ b/.github/workflows/linux-ppc64le.yml @@ -27,7 +27,7 @@ jobs: runs-on: ubuntu-24.04-ppc64le-p10 name: "Native PPC64LE" steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index c0effa244646..4e3e01235e65 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -37,7 +37,7 @@ jobs: runs-on: ubuntu-latest continue-on-error: true steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-depth: 0 @@ -68,7 +68,7 @@ jobs: matrix: version: ["3.11", "3.12", "3.13", "3.14", "3.14t"] steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -83,7 +83,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -102,7 +102,7 @@ jobs: runs-on: ubuntu-24.04 if: github.event_name != 'push' steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -132,7 +132,7 @@ jobs: needs: 
[smoke_test] runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -172,7 +172,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: ubuntu-24.04-arm steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -217,7 +217,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -256,7 +256,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -296,13 +296,13 @@ jobs: if: github.event_name != 'push' steps: - name: Checkout NumPy - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true persist-credentials: false - name: Checkout array-api-tests - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: repository: data-apis/array-api-tests ref: '3c273cd34d51c64ed893737306d36adab23a94a1' # v2025.05.23 @@ -334,7 +334,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -342,7 +342,7 @@ jobs: - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: repository: numpy/numpy-release path: numpy-release diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index fccc7e3ad84b..b141c33165cf 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -69,7 +69,7 @@ jobs: USE_NIGHTLY_OPENBLAS: ${{ matrix.USE_NIGHTLY_OPENBLAS }} name: "Test Linux (${{ matrix.USE_NIGHTLY_OPENBLAS && 'nightly' || 'stable' }} OpenBLAS)" steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -127,7 +127,7 @@ jobs: run: | dnf install git gcc-gfortran g++ python3-devel openblas-devel -y - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -162,7 +162,7 @@ jobs: run: | dnf install git gcc-gfortran g++ python3-devel flexiblas-devel -y - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -192,7 +192,7 @@ jobs: runs-on: 
ubuntu-latest name: "OpenBLAS with CMake" steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -220,7 +220,7 @@ jobs: runs-on: ubuntu-latest name: "Debian libblas/liblapack" steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -256,7 +256,7 @@ jobs: # If it is needed in the future, use install name `pkgconf-pkg-config` zypper install -y git gcc-c++ python3-pip python3-devel blas cblas lapack - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -283,7 +283,7 @@ jobs: runs-on: ubuntu-latest name: "MKL (LP64, ILP64, SDL)" steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -347,7 +347,7 @@ jobs: runs-on: ubuntu-latest name: "BLIS" steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -384,7 +384,7 @@ jobs: runs-on: ubuntu-latest name: "ATLAS" steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index 5fd10b73b9be..b27a665c0e2b 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -95,7 +95,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -202,7 +202,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index de77a8410f18..74cc00f52e7d 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -58,7 +58,7 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true -Dcpu-dispatch=none" steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -76,7 +76,7 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true" steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -123,7 +123,7 @@ jobs: args: "-Dallow-noblas=true -Dcpu-baseline=native -Dcpu-dispatch=none" name: "ARM64 SIMD - ${{ matrix.config.name }}" steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive 
fetch-tags: true @@ -170,7 +170,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -185,7 +185,7 @@ jobs: needs: [baseline_only] runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -235,7 +235,7 @@ jobs: needs: [baseline_only] runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 399bec631713..8dc888aa94c4 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -32,7 +32,7 @@ jobs: python-version: ["3.12"] steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -121,7 +121,7 @@ jobs: version: ["3.11", "3.14t"] steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 0e9e63d53d1a..34780b0fdd8c 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -53,7 +53,7 @@ jobs: - [ubuntu-latest, '3.12'] - [windows-latest, '3.11'] steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml index d8e4a7eb5817..74b3fac96b6d 100644 --- a/.github/workflows/mypy_primer.yml +++ b/.github/workflows/mypy_primer.yml @@ -28,7 +28,7 @@ jobs: shard-index: [0] # e.g. 
change this to [0, 1, 2] and --num-shards below to 3 fail-fast: false steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: path: numpy_to_test fetch-depth: 0 diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 227f2f3788b0..20d0c642849e 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -25,7 +25,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v3.1.0 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v3.1.0 with: persist-credentials: false diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml index 43e60fae80b9..d9f72a61b199 100644 --- a/.github/workflows/stubtest.yml +++ b/.github/workflows/stubtest.yml @@ -37,7 +37,7 @@ jobs: py: ["3.11", "3.14"] steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 1ceefc2e540d..bc89ffab3423 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -44,7 +44,7 @@ jobs: IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} # used in cibw_test_command.sh steps: - name: Checkout numpy - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: true persist-credentials: false diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 3302e054d438..8136d80c6a88 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -25,7 +25,7 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -83,7 +83,7 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -139,7 +139,7 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true From 68da3c3ef579e20c3d4e745abbe94e46f36c7a80 Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Wed, 19 Nov 2025 00:14:07 +0530 Subject: [PATCH 0888/1018] BUG: fix data race in `PyArray_DescrHash` (#30234) * BUG: fix data race in PyArray_DescrHash * fix return type --- numpy/_core/src/common/npy_atomic.h | 12 ++++++++++ numpy/_core/src/multiarray/hashdescr.c | 32 +++++++++++--------------- 2 files changed, 26 insertions(+), 18 deletions(-) diff --git a/numpy/_core/src/common/npy_atomic.h b/numpy/_core/src/common/npy_atomic.h index f5b41d7068be..61a31acc13e0 100644 --- a/numpy/_core/src/common/npy_atomic.h +++ b/numpy/_core/src/common/npy_atomic.h @@ -79,6 +79,12 @@ npy_atomic_load_ptr(const void *obj) { #endif } +static inline npy_hash_t +npy_atomic_load_hash_t(const npy_hash_t *obj) { + assert(sizeof(npy_hash_t) == sizeof(void *)); + 
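+    /* npy_hash_t is pointer-sized (asserted above), so the existing
+     * pointer atomic load doubles as an atomic load of the hash value. */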
return (npy_hash_t)npy_atomic_load_ptr((const void *)obj); +} + static inline void npy_atomic_store_uint8(npy_uint8 *obj, npy_uint8 value) { #ifdef STDC_ATOMICS @@ -104,6 +110,12 @@ npy_atomic_store_ptr(void *obj, void *value) #endif } +static inline void +npy_atomic_store_hash_t(npy_hash_t *obj, npy_hash_t value) { + assert(sizeof(npy_hash_t) == sizeof(void *)); + npy_atomic_store_ptr((void *)obj, (void *)value); +} + #undef MSC_ATOMICS #undef STDC_ATOMICS #undef GCC_ATOMICS diff --git a/numpy/_core/src/multiarray/hashdescr.c b/numpy/_core/src/multiarray/hashdescr.c index d5359d86390f..0117ef218a81 100644 --- a/numpy/_core/src/multiarray/hashdescr.c +++ b/numpy/_core/src/multiarray/hashdescr.c @@ -6,6 +6,7 @@ #include +#include "npy_atomic.h" #include "npy_config.h" @@ -256,12 +257,13 @@ static int _array_descr_walk(PyArray_Descr* descr, PyObject *l) } /* - * Return 0 if successful + * Return hash on success, -1 on failure */ -static int _PyArray_DescrHashImp(PyArray_Descr *descr, npy_hash_t *hash) +static npy_hash_t _PyArray_DescrHashImp(PyArray_Descr *descr) { PyObject *l, *tl; int st; + npy_hash_t hash; l = PyList_New(0); if (l == NULL) { @@ -283,25 +285,16 @@ static int _PyArray_DescrHashImp(PyArray_Descr *descr, npy_hash_t *hash) if (tl == NULL) return -1; - *hash = PyObject_Hash(tl); + hash = PyObject_Hash(tl); Py_DECREF(tl); - if (*hash == -1) { - /* XXX: does PyObject_Hash set an exception on failure ? */ -#if 0 - PyErr_SetString(PyExc_SystemError, - "(Hash) Error while hashing final tuple"); -#endif - return -1; - } - - return 0; + return hash; } NPY_NO_EXPORT npy_hash_t PyArray_DescrHash(PyObject* odescr) { PyArray_Descr *descr; - int st; + npy_hash_t hash; if (!PyArray_DescrCheck(odescr)) { PyErr_SetString(PyExc_ValueError, @@ -310,12 +303,15 @@ PyArray_DescrHash(PyObject* odescr) } descr = (PyArray_Descr*)odescr; - if (descr->hash == -1) { - st = _PyArray_DescrHashImp(descr, &descr->hash); - if (st) { + hash = npy_atomic_load_hash_t(&descr->hash); + + if (hash == -1) { + hash = _PyArray_DescrHashImp(descr); + if (hash == -1) { return -1; } + npy_atomic_store_hash_t(&descr->hash, hash); } - return descr->hash; + return hash; } From 676d6c1d927484847f065736d83c4163b287e5e1 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 18 Nov 2025 20:30:05 +0100 Subject: [PATCH 0889/1018] CI: update ``paths-ignore`` for mypy and wheels workflows --- .github/workflows/mypy.yml | 19 +++++++++++-------- .github/workflows/wheels.yml | 4 ++++ 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 34780b0fdd8c..2b41011315da 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -18,14 +18,17 @@ on: - main - maintenance/** paths-ignore: - - '.circlecl/' - - '.devcontainer/' - - 'benchmarks/' - - 'branding/' - - 'docs/' - - 'meson_cpu/' - - 'tools/' - - 'vendored-meson/' + - '**.md' + - '**.rst' + - '.circlecl/**' + - '.devcontainer/**' + - '.spin/**' + - 'benchmarks/**' + - 'branding/**' + - 'doc/**' + - 'meson_cpu/**' + - 'tools/**' + - 'vendored-meson/**' workflow_dispatch: defaults: diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index bc89ffab3423..044020adeff8 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -9,6 +9,10 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' workflow_dispatch: concurrency: From a79233cfa506d96477afed0d900d3f7995cbac71 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Wed, 19 Nov 
2025 02:00:42 +0200
Subject: [PATCH 0890/1018] BUG: always ignore FPE when Accelerate is the BLAS
 backend (#30255)

---
 numpy/_core/src/common/blas_utils.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/numpy/_core/src/common/blas_utils.c b/numpy/_core/src/common/blas_utils.c
index cbf8e0dc05c5..43d4b1e845f0 100644
--- a/numpy/_core/src/common/blas_utils.c
+++ b/numpy/_core/src/common/blas_utils.c
@@ -14,8 +14,13 @@
 #if NPY_BLAS_CHECK_FPE_SUPPORT
 /*
  * Static variable to cache runtime check of BLAS FPE support.
+ * Will always be false (ignore all FPE) when Accelerate is the compiled backend.
  */
+#if defined(ACCELERATE_NEW_LAPACK)
+static bool blas_supports_fpe = false;
+#else
 static bool blas_supports_fpe = true;
+#endif  // ACCELERATE_NEW_LAPACK
 #endif  // NPY_BLAS_CHECK_FPE_SUPPORT

From 8eb840a074ced9678f7c5bc1257842131892fcb1 Mon Sep 17 00:00:00 2001
From: Joren Hammudoglu
Date: Wed, 19 Nov 2025 03:08:43 +0100
Subject: [PATCH 0891/1018] ENH: ``ufunc.__signature__`` (#30211)

* ENH: ``ufunc.__signature__``

* DOC: document ``ufunc.__signature__``

* MAINT: apply review suggestions

Co-authored-by: Marten van Kerkwijk

* TST: more and better testing of `ufunc.__signature__`

* STY: consistent `ufunc_getset[]` ordering

Co-authored-by: Marten van Kerkwijk

* DOC: fix table alignment

* ENH: Cache the `ufunc.__signature__` signature object on access

* TST: fix test failure caused by the new `ufunc.__signature__`

* BUG: avoid adding `ufunc.__signature__` as class attribute

* TYP: appease stubtest

* DOC: mention ufuncs in the `inspect.signature` release notes

* MAINT: interned `__signature__` comparison in `ufunc` attribute access

Co-authored-by: Marten van Kerkwijk

---------

Co-authored-by: Marten van Kerkwijk
---
 .../upcoming_changes/30208.improvement.rst   |  4 +-
 doc/source/reference/ufuncs.rst              | 19 +++---
 numpy/__init__.pyi                           |  3 +
 numpy/_core/_internal.py                     | 50 ++++++++++++++
 numpy/_core/src/common/npy_import.h          |  1 +
 numpy/_core/src/multiarray/npy_static_data.c |  1 +
 numpy/_core/src/multiarray/npy_static_data.h |  1 +
 numpy/_core/src/umath/ufunc_object.c         | 63 +++++++++++++++--
 numpy/_core/tests/test_ufunc.py              | 67 +++++++++++++++++++
 numpy/_core/tests/test_umath.py              |  2 +
 10 files changed, 195 insertions(+), 16 deletions(-)

diff --git a/doc/release/upcoming_changes/30208.improvement.rst b/doc/release/upcoming_changes/30208.improvement.rst
index 065e91eca33d..ad9faaedfb6b 100644
--- a/doc/release/upcoming_changes/30208.improvement.rst
+++ b/doc/release/upcoming_changes/30208.improvement.rst
@@ -8,5 +8,5 @@ the NumPy API. Over three hundred classes and functions have been updated in
 total, including, but not limited to, core classes such as `ndarray`,
 `generic`, `dtype`, `ufunc`, `broadcast`, `nditer`, etc., most methods of
 `ndarray` and scalar types, array constructor functions (`array`, `empty`,
-`arange`, `fromiter`, etc.), and many other commonly used functions, including `dot`, `concat`,
-`where`, `bincount`, `can_cast`, and numerous others.
+`arange`, `fromiter`, etc.), all :ref:`ufuncs`, and many other commonly used functions, including
+`dot`, `concat`, `where`, `bincount`, `can_cast`, and numerous others.
diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst
index 03d86cd057d2..0c675718818b 100644
--- a/doc/source/reference/ufuncs.rst
+++ b/doc/source/reference/ufuncs.rst
@@ -52,7 +52,7 @@ tuple holding a single array) is also valid.

 If 'out' is None (the default), a uninitialized output array is created,
 which will be filled in the ufunc.
At the end, this array is returned unless it is zero-dimensional, in which
 case it is converted to a scalar;
-this conversion can be avoided by passing in ``out=...``.  This can also be
+this conversion can be avoided by passing in ``out=...``. This can also be
 spelled `out=Ellipsis` if you think that is clearer.

 Note that the output is filled only in the places that the broadcast
@@ -183,14 +183,17 @@ possess. None of the attributes can be set.
    pair: ufunc; attributes

-============  =================================================================
-**__doc__**   A docstring for each ufunc. The first part of the docstring is
-              dynamically generated from the number of outputs, the name, and
-              the number of inputs. The second part of the docstring is
-              provided at creation time and stored with the ufunc.
+=================  =================================================================
+**__doc__**        A docstring for each ufunc. The first part of the docstring is
+                   dynamically generated from the number of outputs, the name, and
+                   the number of inputs. The second part of the docstring is
+                   provided at creation time and stored with the ufunc.

-**__name__**  The name of the ufunc.
-============  =================================================================
+**__name__**       The name of the ufunc.
+
+**__signature__**  The call signature of the ufunc, as an :class:`inspect.Signature`
+                   object.
+=================  =================================================================

 .. autosummary::
    :toctree: generated/
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index b973e43093ca..7d91e3d384de 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -5,6 +5,7 @@ import mmap
 import ctypes as ct
 import array as _array
 import datetime as dt
+import inspect
 from abc import abstractmethod
 from types import EllipsisType, ModuleType, TracebackType, MappingProxyType, GenericAlias
 from decimal import Decimal
@@ -5731,6 +5732,8 @@ class str_(character[str], str):  # type: ignore[misc]
 # See `numpy._typing._ufunc` for more concrete nin-/nout-specific stubs
 @final
 class ufunc:
+    __signature__: Final[inspect.Signature]
+
     @property
     def __name__(self) -> LiteralString: ...
     @property
diff --git a/numpy/_core/_internal.py b/numpy/_core/_internal.py
index 490b3407997a..7c64daf30dbd 100644
--- a/numpy/_core/_internal.py
+++ b/numpy/_core/_internal.py
@@ -855,6 +855,8 @@ def _ufunc_doc_signature_formatter(ufunc):
     Builds a signature string which resembles PEP 457

     This is used to construct the first line of the docstring
+
+    Keep in sync with `_ufunc_inspect_signature_builder`.
     """

     # input arguments are simple
@@ -893,6 +895,54 @@ def _ufunc_doc_signature_formatter(ufunc):
     return f'{ufunc.__name__}({in_args}{out_args}, *{kwargs})'


+def _ufunc_inspect_signature_builder(ufunc):
+    """
+    Builds a ``__signature__`` object (an :class:`inspect.Signature`).
+
+    Should be kept in sync with `_ufunc_doc_signature_formatter`.
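+
+    For example, for a binary ufunc such as ``np.add`` the built signature
+    renders roughly as (illustrative sketch, not a verified doctest)::
+
+        (x1, x2, /, out=None, *, where=True, casting='same_kind',
+         order='K', dtype=None, subok=True, signature=None)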
+ """ + + from inspect import Parameter, Signature + + params = [] + + # positional-only input parameters + if ufunc.nin == 1: + params.append(Parameter("x", Parameter.POSITIONAL_ONLY)) + else: + params.extend( + Parameter(f"x{i}", Parameter.POSITIONAL_ONLY) + for i in range(1, ufunc.nin + 1) + ) + + # for the sake of simplicity, we only consider a single output parameter + if ufunc.nout == 1: + out_default = None + else: + out_default = (None,) * ufunc.nout + params.append( + Parameter("out", Parameter.POSITIONAL_OR_KEYWORD, default=out_default), + ) + + if ufunc.signature is None: + params.append(Parameter("where", Parameter.KEYWORD_ONLY, default=True)) + else: + # NOTE: not all gufuncs support the `axis` parameters + params.append(Parameter("axes", Parameter.KEYWORD_ONLY, default=_NoValue)) + params.append(Parameter("axis", Parameter.KEYWORD_ONLY, default=_NoValue)) + params.append(Parameter("keepdims", Parameter.KEYWORD_ONLY, default=False)) + + params.extend(( + Parameter("casting", Parameter.KEYWORD_ONLY, default='same_kind'), + Parameter("order", Parameter.KEYWORD_ONLY, default='K'), + Parameter("dtype", Parameter.KEYWORD_ONLY, default=None), + Parameter("subok", Parameter.KEYWORD_ONLY, default=True), + Parameter("signature", Parameter.KEYWORD_ONLY, default=None), + )) + + return Signature(params) + + def npy_ctypes_check(cls): # determine if a class comes from ctypes, in order to work around # a bug in the buffer protocol for those objects, bpo-10746 diff --git a/numpy/_core/src/common/npy_import.h b/numpy/_core/src/common/npy_import.h index d7fb8d20857d..fec1b22f3975 100644 --- a/numpy/_core/src/common/npy_import.h +++ b/numpy/_core/src/common/npy_import.h @@ -45,6 +45,7 @@ typedef struct npy_runtime_imports_struct { PyObject *_std; PyObject *_sum; PyObject *_ufunc_doc_signature_formatter; + PyObject *_ufunc_inspect_signature_builder; PyObject *_usefields; PyObject *_var; PyObject *_view_is_safe; diff --git a/numpy/_core/src/multiarray/npy_static_data.c b/numpy/_core/src/multiarray/npy_static_data.c index 9fd321a375a2..c7f4c51c6675 100644 --- a/numpy/_core/src/multiarray/npy_static_data.c +++ b/numpy/_core/src/multiarray/npy_static_data.c @@ -64,6 +64,7 @@ intern_strings(void) INTERN_STRING(pyvals_name, "UFUNC_PYVALS_NAME"); INTERN_STRING(legacy, "legacy"); INTERN_STRING(__doc__, "__doc__"); + INTERN_STRING(__signature__, "__signature__"); INTERN_STRING(copy, "copy"); INTERN_STRING(dl_device, "dl_device"); INTERN_STRING(max_version, "max_version"); diff --git a/numpy/_core/src/multiarray/npy_static_data.h b/numpy/_core/src/multiarray/npy_static_data.h index a6901c858374..365673ffb0b4 100644 --- a/numpy/_core/src/multiarray/npy_static_data.h +++ b/numpy/_core/src/multiarray/npy_static_data.h @@ -43,6 +43,7 @@ typedef struct npy_interned_str_struct { PyObject *pyvals_name; PyObject *legacy; PyObject *__doc__; + PyObject *__signature__; PyObject *copy; PyObject *dl_device; PyObject *max_version; diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 6bf6832dfb9b..9c0c0a4f8eab 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -6575,8 +6575,8 @@ static struct PyMethodDef ufunc_methods[] = { }; -/****************************************************************************** - *** UFUNC GETSET *** +/***************************************************************************** + *** UFUNC GETSET *** *****************************************************************************/ @@ -6639,6 +6639,57 @@ 
ufunc_set_doc(PyUFuncObject *ufunc, PyObject *doc, void *NPY_UNUSED(ignored)) } } +static PyObject * +ufunc_get_inspect_signature(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) +{ + PyObject *signature; + + // If there is a __signature__ in the instance __dict__, use it. + int result = PyDict_GetItemRef(ufunc->dict, npy_interned_str.__signature__, + &signature); + if (result == -1) { + return NULL; + } + else if (result == 1) { + return signature; + } + + if (npy_cache_import_runtime( + "numpy._core._internal", "_ufunc_inspect_signature_builder", + &npy_runtime_imports._ufunc_inspect_signature_builder) == -1) { + return NULL; + } + + signature = PyObject_CallFunctionObjArgs( + npy_runtime_imports._ufunc_inspect_signature_builder, + (PyObject *)ufunc, NULL); + if (signature == NULL) { + return NULL; + } + + // Cache the result in the instance dict for next time + if (PyDict_SetItem(ufunc->dict, npy_interned_str.__signature__, + signature) < 0) { + Py_DECREF(signature); + return NULL; + } + + return signature; +} + +static PyObject * +ufunc_getattro(PyObject *obj, PyObject *name) +{ + // __signature__ special-casing to prevent class attribute access + if (PyUnicode_Check(name) && + PyUnicode_Compare(name, npy_interned_str.__signature__) == 0) { + return ufunc_get_inspect_signature((PyUFuncObject *)obj, NULL); + } + + // For all other attributes, use default behavior + return PyObject_GenericGetAttr(obj, name); +} + static PyObject * ufunc_get_nin(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) { @@ -6725,6 +6776,9 @@ static PyGetSetDef ufunc_getset[] = { {"__doc__", (getter)ufunc_get_doc, (setter)ufunc_set_doc, NULL, NULL}, + {"__name__", + (getter)ufunc_get_name, + NULL, NULL, NULL}, {"nin", (getter)ufunc_get_nin, NULL, NULL, NULL}, @@ -6740,9 +6794,6 @@ static PyGetSetDef ufunc_getset[] = { {"types", (getter)ufunc_get_types, NULL, NULL, NULL}, - {"__name__", - (getter)ufunc_get_name, - NULL, NULL, NULL}, {"identity", (getter)ufunc_get_identity, NULL, NULL, NULL}, @@ -6783,7 +6834,7 @@ NPY_NO_EXPORT PyTypeObject PyUFunc_Type = { .tp_traverse = (traverseproc)ufunc_traverse, .tp_methods = ufunc_methods, .tp_getset = ufunc_getset, - .tp_getattro = PyObject_GenericGetAttr, + .tp_getattro = ufunc_getattro, .tp_setattro = PyObject_GenericSetAttr, // TODO when Python 3.12 is the minimum supported version, // use Py_TPFLAGS_MANAGED_DICT diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index a29fae0539e4..09d01eab8186 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -3336,3 +3336,70 @@ def test_long_arrays(self): t[28][414] = 1 tc = np.cos(t) assert_equal(tc[0][0], tc[28][414]) + + +class TestUFuncInspectSignature: + PARAMS_COMMON = { + "casting": "same_kind", + "order": "K", + "dtype": None, + "subok": True, + "signature": None, + } + + PARAMS_UFUNC = { + "where": True, + } | PARAMS_COMMON + + PARAMS_GUFUNC = { + "axes": np._NoValue, + "axis": np._NoValue, + "keepdims": False, + } | PARAMS_COMMON + + @pytest.mark.parametrize("ufunc", [np.log, np.gcd, np.frexp, np.divmod, np.matvec]) + def test_dunder_signature_attr(self, ufunc: np.ufunc): + assert hasattr(ufunc, "__signature__") + assert isinstance(ufunc.__signature__, inspect.Signature) + assert inspect.signature(ufunc) == ufunc.__signature__ + + @pytest.mark.parametrize("ufunc", [np.exp, np.mod, np.frexp, np.divmod, np.vecmat]) + def test_params_common_positional(self, ufunc: np.ufunc): + sig = inspect.signature(ufunc) + + # check positional-only parameters + posonly_params = 
{name: param.default + for name, param in sig.parameters.items() + if param.kind is param.POSITIONAL_ONLY} + assert len(posonly_params) == ufunc.nin + assert all(default is inspect.Parameter.empty + for default in posonly_params.values()) + + # check 'out' parameter + out_param = sig.parameters.get("out") + assert out_param is not None + assert out_param.kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + + @pytest.mark.parametrize("ufunc", [np.sin, np.add, np.frexp, np.divmod]) + def test_params_common_ufunc(self, ufunc: np.ufunc): + assert ufunc.signature is None # sanity check + + sig = inspect.signature(ufunc) + + # check keyword-only parameters + keyword_params = {name: param.default + for name, param in sig.parameters.items() + if param.kind is param.KEYWORD_ONLY} + assert keyword_params == self.PARAMS_UFUNC + + @pytest.mark.parametrize("gufunc", [np.matmul, np.matvec, np.vecdot, np.vecmat]) + def test_params_common_gufunc(self, gufunc: np.ufunc): + assert gufunc.signature is not None # sanity check + + sig = inspect.signature(gufunc) + + # check keyword-only parameters + keyword_params = {name: param.default + for name, param in sig.parameters.items() + if param.kind is param.KEYWORD_ONLY} + assert keyword_params == self.PARAMS_GUFUNC diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index f99441180182..c10e4d1cf387 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -1,4 +1,5 @@ import fnmatch +import inspect import itertools import operator import platform @@ -4053,6 +4054,7 @@ def test_ufunc_docstring(self): expected_dict = ( {} if IS_PYPY else {"__module__": "numpy", "__qualname__": "add"} ) + expected_dict["__signature__"] = inspect.signature(np.add) np.add.__doc__ = new_doc assert np.add.__doc__ == new_doc From 0ecf7805870507d6c0bbfa97f529eb82741284d0 Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Wed, 19 Nov 2025 17:40:59 +0530 Subject: [PATCH 0892/1018] BUG: fix data race in ufunc scalar fast path This PR adds read lock while accessing the dispatch cache in try_trivial_scalar_call to fix the data race. 
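A minimal reproducer sketch (hypothetical, in the spirit of gh-30251):
several threads take the scalar fast path of the same ufunc while the
dispatch cache is still being filled, so lock-free reads race with
concurrent inserts:

    import threading
    import numpy as np

    def work():
        # Each call goes through the scalar fast path and reads the
        # dispatch cache; early calls may still be inserting into it.
        for _ in range(1000):
            np.exp(np.float64(0.5))

    threads = [threading.Thread(target=work) for _ in range(8)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()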
Fixes gh-30251 --- numpy/_core/src/common/npy_hashtable.cpp | 17 +++++++++++++++++ numpy/_core/src/common/npy_hashtable.h | 5 +++++ numpy/_core/src/umath/dispatching.cpp | 6 +----- numpy/_core/src/umath/ufunc_object.c | 6 ++++++ 4 files changed, 29 insertions(+), 5 deletions(-) diff --git a/numpy/_core/src/common/npy_hashtable.cpp b/numpy/_core/src/common/npy_hashtable.cpp index ffd67d403853..27e014ca00e0 100644 --- a/numpy/_core/src/common/npy_hashtable.cpp +++ b/numpy/_core/src/common/npy_hashtable.cpp @@ -240,3 +240,20 @@ PyArrayIdentityHash_GetItem(PyArrayIdentityHash *tb, PyObject *const *key) PyObject *res = find_item(tb, key)[0]; return res; } + +#ifdef Py_GIL_DISABLED + +NPY_NO_EXPORT PyObject * +PyArrayIdentityHash_GetItemWithLock(PyArrayIdentityHash *tb, PyObject *const *key) +{ + PyObject *res; + std::shared_mutex *mutex = (std::shared_mutex *)tb->mutex; + NPY_BEGIN_ALLOW_THREADS + mutex->lock_shared(); + NPY_END_ALLOW_THREADS + res = find_item(tb, key)[0]; + mutex->unlock_shared(); + return res; +} + +#endif // Py_GIL_DISABLED diff --git a/numpy/_core/src/common/npy_hashtable.h b/numpy/_core/src/common/npy_hashtable.h index cd061ba6fa11..aed805e1791d 100644 --- a/numpy/_core/src/common/npy_hashtable.h +++ b/numpy/_core/src/common/npy_hashtable.h @@ -27,6 +27,11 @@ NPY_NO_EXPORT int PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, PyObject *const *key, PyObject *value, int replace); +#ifdef Py_GIL_DISABLED +NPY_NO_EXPORT PyObject * +PyArrayIdentityHash_GetItemWithLock(PyArrayIdentityHash *tb, PyObject *const *key); +#endif // Py_GIL_DISABLED + NPY_NO_EXPORT PyObject * PyArrayIdentityHash_GetItem(PyArrayIdentityHash *tb, PyObject *const *key); diff --git a/numpy/_core/src/umath/dispatching.cpp b/numpy/_core/src/umath/dispatching.cpp index 3ce6624bbf4a..0857f7c8a768 100644 --- a/numpy/_core/src/umath/dispatching.cpp +++ b/numpy/_core/src/umath/dispatching.cpp @@ -982,13 +982,9 @@ promote_and_get_info_and_ufuncimpl_with_locking( npy_bool legacy_promotion_is_possible) { std::shared_mutex *mutex = ((std::shared_mutex *)((PyArrayIdentityHash *)ufunc->_dispatch_cache)->mutex); - NPY_BEGIN_ALLOW_THREADS - mutex->lock_shared(); - NPY_END_ALLOW_THREADS - PyObject *info = PyArrayIdentityHash_GetItem( + PyObject *info = PyArrayIdentityHash_GetItemWithLock( (PyArrayIdentityHash *)ufunc->_dispatch_cache, (PyObject **)op_dtypes); - mutex->unlock_shared(); if (info != NULL && PyObject_TypeCheck( PyTuple_GET_ITEM(info, 1), &PyArrayMethod_Type)) { diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 9c0c0a4f8eab..eae5b666197a 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -4337,9 +4337,15 @@ try_trivial_scalar_call( // Try getting info from the (private) cache. Fall back if not found, // so that the the dtype gets registered and things will work next time. PyArray_DTypeMeta *op_dtypes[2] = {NPY_DTYPE(dt), NULL}; +#ifdef Py_GIL_DISABLED + PyObject *info = PyArrayIdentityHash_GetItemWithLock( // borrowed reference. + (PyArrayIdentityHash *)ufunc->_dispatch_cache, + (PyObject **)op_dtypes); +#else PyObject *info = PyArrayIdentityHash_GetItem( // borrowed reference. 
(PyArrayIdentityHash *)ufunc->_dispatch_cache, (PyObject **)op_dtypes); +#endif if (info == NULL) { goto bail; } From a684abe04c930457410db79564ae5dc58e7bb0d4 Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Wed, 19 Nov 2025 22:57:44 +0530 Subject: [PATCH 0893/1018] TST: mark tests which call gc.collect() as thread unsafe (#30259) --- numpy/_core/tests/test_multiarray.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 2cba4eb3ec75..5410f7677b9f 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -4652,6 +4652,7 @@ def test_non_contiguous_array(self): assert_equal(len(buffers), 0) assert_equal(non_contiguous_array, depickled_non_contiguous_array) + @pytest.mark.thread_unsafe(reason="calls gc.collect()") def test_roundtrip(self): for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): carray = np.array([[2, 9], [7, 0], [3, 8]]) @@ -9845,6 +9846,7 @@ def _make_readonly(x): x.flags.writeable = False return x + @pytest.mark.thread_unsafe(reason="calls gc.collect()") @pytest.mark.parametrize('arr', [ np.array([1, 2, 3]), np.array([['one', 'two'], ['three', 'four']]), @@ -9893,6 +9895,7 @@ def test_ctypes_data_as_holds_reference(self, arr): break_cycles() assert_(arr_ref() is None, "unknowable whether ctypes pointer holds a reference") + @pytest.mark.thread_unsafe(reason="calls gc.collect()") def test_ctypes_as_parameter_holds_reference(self): arr = np.array([None]).copy() @@ -10216,6 +10219,7 @@ def __array_finalize__(self, obj): with pytest.raises(RuntimeError, match="boohoo!"): np.arange(10).view(BadAttributeArray) + @pytest.mark.thread_unsafe(reason="calls gc.collect()") def test_lifetime_on_error(self): # gh-11237 class RaisesInFinalize(np.ndarray): From 598dbf69a66efafa310b6edc3abe634fba970c1f Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 20 Nov 2025 03:44:51 -0700 Subject: [PATCH 0894/1018] TST: scalar fast path multithreaded test (#30263) * TST: Add a multithreaded test for the scalar fast path * MAINT: add comments explaining the locking in the dispatch cache * TST: make the test more exhaustive --- numpy/_core/src/common/npy_hashtable.h | 2 +- numpy/_core/src/umath/dispatching.cpp | 7 +++++++ numpy/_core/src/umath/ufunc_object.c | 3 +++ numpy/_core/tests/test_multithreading.py | 9 ++++----- 4 files changed, 15 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/common/npy_hashtable.h b/numpy/_core/src/common/npy_hashtable.h index aed805e1791d..02acc12d3191 100644 --- a/numpy/_core/src/common/npy_hashtable.h +++ b/numpy/_core/src/common/npy_hashtable.h @@ -18,7 +18,7 @@ typedef struct { npy_intp size; /* current size */ npy_intp nelem; /* number of elements */ #ifdef Py_GIL_DISABLED - void *mutex; + void *mutex; /* std::shared_mutex, prevents races to fill the cache */ #endif } PyArrayIdentityHash; diff --git a/numpy/_core/src/umath/dispatching.cpp b/numpy/_core/src/umath/dispatching.cpp index 0857f7c8a768..db5698d8a819 100644 --- a/numpy/_core/src/umath/dispatching.cpp +++ b/numpy/_core/src/umath/dispatching.cpp @@ -1093,6 +1093,13 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, } } + /* + * We hold the GIL here, so on the GIL-enabled build the GIL prevents + * races to fill the promotion cache. + * + * On the free-threaded build we need to set up our own locking to prevent + * races to fill the promotion cache. 
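+     * (Concretely: a reader must not observe the table while another
+     * thread is concurrently inserting into or resizing it.)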
+ */ #ifdef Py_GIL_DISABLED PyObject *info = promote_and_get_info_and_ufuncimpl_with_locking(ufunc, ops, signature, op_dtypes, legacy_promotion_is_possible); diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index eae5b666197a..c4e941f65bfe 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -4338,6 +4338,9 @@ try_trivial_scalar_call( // so that the the dtype gets registered and things will work next time. PyArray_DTypeMeta *op_dtypes[2] = {NPY_DTYPE(dt), NULL}; #ifdef Py_GIL_DISABLED + // Other threads may be in the process of filling the dispatch cache, + // so we need to acquire the free-threading-specific dispatch cache mutex + // before reading the cache PyObject *info = PyArrayIdentityHash_GetItemWithLock( // borrowed reference. (PyArrayIdentityHash *)ufunc->_dispatch_cache, (PyObject **)op_dtypes); diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 149a216fa1cd..b78f476d33e2 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -29,11 +29,10 @@ def test_parallel_ufunc_execution(): # if the loop data cache or dispatch cache are not thread-safe # computing ufuncs simultaneously in multiple threads leads # to a data race that causes crashes or spurious exceptions - def func(): - arr = np.random.random((25,)) - np.isnan(arr) - - run_threaded(func, 500) + for dtype in [np.float32, np.float64, np.int32]: + for op in [np.random.random((25,)).astype(dtype), dtype(25)]: + for ufunc in [np.isnan, np.sin]: + run_threaded(lambda: ufunc(op), 500) # see gh-26690 NUM_THREADS = 50 From 229c0170a935fe37964380dbe9b6d82d0708593d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 20 Nov 2025 17:06:19 +0000 Subject: [PATCH 0895/1018] MAINT: Bump actions/checkout from 5.0.1 to 6.0.0 Bumps [actions/checkout](https://github.com/actions/checkout) from 5.0.1 to 6.0.0. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/93cb6efe18208431cddfb8368fd83d5badbf9bfd...1af3b93b6815bc44a9784bd300feb67ff0d1eeb3) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: 6.0.0 dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 2 +- .github/workflows/compiler_sanitizers.yml | 4 ++-- .github/workflows/cygwin.yml | 2 +- .github/workflows/dependency-review.yml | 2 +- .github/workflows/emscripten.yml | 2 +- .github/workflows/linux-ppc64le.yml | 2 +- .github/workflows/linux.yml | 24 +++++++++++------------ .github/workflows/linux_blas.yml | 18 ++++++++--------- .github/workflows/linux_qemu.yml | 4 ++-- .github/workflows/linux_simd.yml | 12 ++++++------ .github/workflows/macos.yml | 4 ++-- .github/workflows/mypy.yml | 2 +- .github/workflows/mypy_primer.yml | 2 +- .github/workflows/scorecards.yml | 2 +- .github/workflows/stubtest.yml | 2 +- .github/workflows/wheels.yml | 2 +- .github/workflows/windows.yml | 6 +++--- 17 files changed, 46 insertions(+), 46 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 318ad87039f2..ae31d4f33040 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -41,7 +41,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: persist-credentials: false diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index 54e136839053..bb185282b083 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -30,7 +30,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: macos-latest steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -86,7 +86,7 @@ jobs: options: --shm-size=2g # increase memory for large matrix ops steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Trust working directory and initialize submodules run: | git config --global --add safe.directory /__w/numpy/numpy diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index 279020cbe365..03dc0b41f987 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -22,7 +22,7 @@ jobs: # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index c27e9ef4e88c..28fe31642faa 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - name: 'Checkout Repository' - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: persist-credentials: false - name: 'Dependency Review' diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 906dec766b24..2b7eb24f9812 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -29,7 +29,7 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout NumPy - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + uses: 
actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux-ppc64le.yml b/.github/workflows/linux-ppc64le.yml index 93426a95e8c0..3817f3ebc0d8 100644 --- a/.github/workflows/linux-ppc64le.yml +++ b/.github/workflows/linux-ppc64le.yml @@ -27,7 +27,7 @@ jobs: runs-on: ubuntu-24.04-ppc64le-p10 name: "Native PPC64LE" steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 4e3e01235e65..1d8cb119705b 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -37,7 +37,7 @@ jobs: runs-on: ubuntu-latest continue-on-error: true steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-depth: 0 @@ -68,7 +68,7 @@ jobs: matrix: version: ["3.11", "3.12", "3.13", "3.14", "3.14t"] steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -83,7 +83,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -102,7 +102,7 @@ jobs: runs-on: ubuntu-24.04 if: github.event_name != 'push' steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -132,7 +132,7 @@ jobs: needs: [smoke_test] runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -172,7 +172,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: ubuntu-24.04-arm steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -217,7 +217,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -256,7 +256,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -296,13 +296,13 @@ jobs: if: github.event_name != 'push' steps: - name: Checkout NumPy - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true persist-credentials: false - name: Checkout array-api-tests - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 
# v6.0.0 with: repository: data-apis/array-api-tests ref: '3c273cd34d51c64ed893737306d36adab23a94a1' # v2025.05.23 @@ -334,7 +334,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -342,7 +342,7 @@ jobs: - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: repository: numpy/numpy-release path: numpy-release diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index b141c33165cf..8dba9281443b 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -69,7 +69,7 @@ jobs: USE_NIGHTLY_OPENBLAS: ${{ matrix.USE_NIGHTLY_OPENBLAS }} name: "Test Linux (${{ matrix.USE_NIGHTLY_OPENBLAS && 'nightly' || 'stable' }} OpenBLAS)" steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -127,7 +127,7 @@ jobs: run: | dnf install git gcc-gfortran g++ python3-devel openblas-devel -y - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -162,7 +162,7 @@ jobs: run: | dnf install git gcc-gfortran g++ python3-devel flexiblas-devel -y - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -192,7 +192,7 @@ jobs: runs-on: ubuntu-latest name: "OpenBLAS with CMake" steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -220,7 +220,7 @@ jobs: runs-on: ubuntu-latest name: "Debian libblas/liblapack" steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -256,7 +256,7 @@ jobs: # If it is needed in the future, use install name `pkgconf-pkg-config` zypper install -y git gcc-c++ python3-pip python3-devel blas cblas lapack - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -283,7 +283,7 @@ jobs: runs-on: ubuntu-latest name: "MKL (LP64, ILP64, SDL)" steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -347,7 +347,7 @@ jobs: runs-on: ubuntu-latest name: "BLIS" steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -384,7 +384,7 @@ jobs: runs-on: ubuntu-latest name: "ATLAS" steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # 
v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index b27a665c0e2b..e4688e50e5bc 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -95,7 +95,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -202,7 +202,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index 74cc00f52e7d..6078e2f15378 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -58,7 +58,7 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true -Dcpu-dispatch=none" steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -76,7 +76,7 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true" steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -123,7 +123,7 @@ jobs: args: "-Dallow-noblas=true -Dcpu-baseline=native -Dcpu-dispatch=none" name: "ARM64 SIMD - ${{ matrix.config.name }}" steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -170,7 +170,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -185,7 +185,7 @@ jobs: needs: [baseline_only] runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -235,7 +235,7 @@ jobs: needs: [baseline_only] runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 8dc888aa94c4..76d87a5e5022 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -32,7 +32,7 @@ jobs: python-version: ["3.12"] steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -121,7 +121,7 @@ jobs: version: ["3.11", "3.14t"] steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml 
index 2b41011315da..51c03285c990 100644
--- a/.github/workflows/mypy.yml
+++ b/.github/workflows/mypy.yml
@@ -56,7 +56,7 @@ jobs:
           - [ubuntu-latest, '3.12']
           - [windows-latest, '3.11']
     steps:
-      - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
        with:
          submodules: recursive
          fetch-tags: true
diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml
index 74b3fac96b6d..dcdad4ff36ba 100644
--- a/.github/workflows/mypy_primer.yml
+++ b/.github/workflows/mypy_primer.yml
@@ -28,7 +28,7 @@ jobs:
         shard-index: [0]  # e.g. change this to [0, 1, 2] and --num-shards below to 3
       fail-fast: false
     steps:
-      - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
        with:
          path: numpy_to_test
          fetch-depth: 0
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index a43e793e1e98..5b5a78c59029 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -25,7 +25,7 @@ jobs:
     steps:
       - name: "Checkout code"
-        uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1
+        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
         with:
           persist-credentials: false
diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml
index d9f72a61b199..0cfbd6197de0 100644
--- a/.github/workflows/stubtest.yml
+++ b/.github/workflows/stubtest.yml
@@ -37,7 +37,7 @@ jobs:
         py: ["3.11", "3.14"]
     steps:
-      - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
        with:
          submodules: recursive
          fetch-tags: true
diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml
index 044020adeff8..24061ae7b014 100644
--- a/.github/workflows/wheels.yml
+++ b/.github/workflows/wheels.yml
@@ -48,7 +48,7 @@ jobs:
       IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }}  # used in cibw_test_command.sh
     steps:
       - name: Checkout numpy
-        uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1
+        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
         with:
           submodules: true
           persist-credentials: false
diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml
index 8136d80c6a88..20584f60219f 100644
--- a/.github/workflows/windows.yml
+++ b/.github/workflows/windows.yml
@@ -25,7 +25,7 @@ jobs:
     if: github.repository == 'numpy/numpy'
     steps:
     - name: Checkout
-      uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1
+      uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
      with:
        submodules: recursive
        fetch-tags: true
@@ -83,7 +83,7 @@ jobs:
     if: github.repository == 'numpy/numpy'
     steps:
     - name: Checkout
-      uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1
+      uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
      with:
        submodules: recursive
        fetch-tags: true
@@ -139,7 +139,7 @@ jobs:
     if: github.repository == 'numpy/numpy'
     steps:
     - name: Checkout
-      uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1
+      uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
      with:
        submodules: recursive
        fetch-tags: true

From 1aa31099403cbf5ba17dab857666986d57749834 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Thu, 20 Nov 2025 21:35:03 +0100
Subject: [PATCH 0896/1018] ENH: Use descriptor rather than custom `tp_getattro`
 (#30270)

* ENH: Use descriptor rather than custom `tp_getattro`

To me a custom `tp_getattro` feels a bit wrong, so instead use a descriptor
protocol to solve this.

That is unfortunately slightly annoying/complex. Static types do however
have a `tp_dict` (a fact that I had missed...).
So while normal properties don't behave well enough for signatures,
putting a `__signature__` into the `__dict__` does work perfectly fine,
although admittedly it is a bit awkward to set up.

This should ensure that fast-paths which rely on the default getattr not
being replaced remain active.

* Add comment about __signature__ definition based on review

* Skip test on PyPy and rename to _signature_descriptor
---
 numpy/_core/src/multiarray/multiarraymodule.c | 11 ++++
 numpy/_core/src/umath/ufunc_object.c          | 54 +------------------
 numpy/_core/tests/test_umath.py               |  1 +
 numpy/_globals.py                             | 25 +++++++++
 4 files changed, 39 insertions(+), 52 deletions(-)

diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c
index 73ef0760d979..8bede253a22f 100644
--- a/numpy/_core/src/multiarray/multiarraymodule.c
+++ b/numpy/_core/src/multiarray/multiarraymodule.c
@@ -4966,7 +4966,18 @@ _multiarray_umath_exec(PyObject *m) {
         return -1;
     }

+    /* Put a `__signature__` descriptor into the type's dict (each instance
+       caches its own signature on first access) */
+    s = npy_import("numpy._globals", "_signature_descriptor");
+    if (s == NULL) {
+        return -1;
+    }
+    PyUFunc_Type.tp_dict = Py_BuildValue(
+            "{ON}", npy_interned_str.__signature__, s);
+    if (PyUFunc_Type.tp_dict == NULL) {
+        return -1;
+    }
     if (PyType_Ready(&PyUFunc_Type) < 0) {
+        Py_CLEAR(PyUFunc_Type.tp_dict);
         return -1;
     }

diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c
index 9c0c0a4f8eab..63ac438eabc4 100644
--- a/numpy/_core/src/umath/ufunc_object.c
+++ b/numpy/_core/src/umath/ufunc_object.c
@@ -6648,57 +6648,6 @@ ufunc_set_doc(PyUFuncObject *ufunc, PyObject *doc, void *NPY_UNUSED(ignored))
     }
 }

-static PyObject *
-ufunc_get_inspect_signature(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored))
-{
-    PyObject *signature;
-
-    // If there is a __signature__ in the instance __dict__, use it.
-    int result = PyDict_GetItemRef(ufunc->dict, npy_interned_str.__signature__,
-                                   &signature);
-    if (result == -1) {
-        return NULL;
-    }
-    else if (result == 1) {
-        return signature;
-    }
-
-    if (npy_cache_import_runtime(
-            "numpy._core._internal", "_ufunc_inspect_signature_builder",
-            &npy_runtime_imports._ufunc_inspect_signature_builder) == -1) {
-        return NULL;
-    }
-
-    signature = PyObject_CallFunctionObjArgs(
-            npy_runtime_imports._ufunc_inspect_signature_builder,
-            (PyObject *)ufunc, NULL);
-    if (signature == NULL) {
-        return NULL;
-    }
-
-    // Cache the result in the instance dict for next time
-    if (PyDict_SetItem(ufunc->dict, npy_interned_str.__signature__,
-                       signature) < 0) {
-        Py_DECREF(signature);
-        return NULL;
-    }
-
-    return signature;
-}
-
-static PyObject *
-ufunc_getattro(PyObject *obj, PyObject *name)
-{
-    // __signature__ special-casing to prevent class attribute access
-    if (PyUnicode_Check(name) &&
-            PyUnicode_Compare(name, npy_interned_str.__signature__) == 0) {
-        return ufunc_get_inspect_signature((PyUFuncObject *)obj, NULL);
-    }
-
-    // For all other attributes, use default behavior
-    return PyObject_GenericGetAttr(obj, name);
-}
-
 static PyObject *
 ufunc_get_nin(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored))
 {
@@ -6809,6 +6758,7 @@ static PyGetSetDef ufunc_getset[] = {
     {"signature",
         (getter)ufunc_get_signature,
         NULL, NULL, NULL},
+    // __signature__ stored in `__dict__`, see `_globals._SignatureDescriptor`
     {NULL, NULL, NULL, NULL, NULL},  /* Sentinel */
 };
@@ -6843,7 +6793,7 @@ NPY_NO_EXPORT PyTypeObject PyUFunc_Type = {
     .tp_traverse = (traverseproc)ufunc_traverse,
     .tp_methods = ufunc_methods,
     .tp_getset = ufunc_getset,
-    .tp_getattro = ufunc_getattro,
+    .tp_getattro = PyObject_GenericGetAttr,
     .tp_setattro = PyObject_GenericSetAttr,
     // TODO when Python 3.12 is the minimum supported version,
     // use Py_TPFLAGS_MANAGED_DICT
diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py
index c10e4d1cf387..40b815f88984 100644
--- a/numpy/_core/tests/test_umath.py
+++ b/numpy/_core/tests/test_umath.py
@@ -4048,6 +4048,7 @@ def test_array_ufunc_direct_call(self):
         assert_array_equal(res, a + a)

     @pytest.mark.thread_unsafe(reason="modifies global module")
+    @pytest.mark.skipif(IS_PYPY, reason="__signature__ descriptor dance fails")
     def test_ufunc_docstring(self):
         original_doc = np.add.__doc__
         new_doc = "new docs"
diff --git a/numpy/_globals.py b/numpy/_globals.py
index 5f838ba91544..ada8d5c41af0 100644
--- a/numpy/_globals.py
+++ b/numpy/_globals.py
@@ -94,3 +94,28 @@ def __bool__(self):
             return False

         raise ValueError(f"{self} is neither True nor False.")
+
+
+class _SignatureDescriptor:
+    # A descriptor to store on the ufunc __dict__ that avoids defining a
+    # signature for the ufunc class/type but allows the instance to have one.
+    # This is needed because inspect.signature() chokes on normal properties
+    # (as of 3.14 at least).
+    # We could also set __signature__ on the instance but this allows deferred
+    # computation of the signature.
+    def __get__(self, obj, objtype=None):
+        # Delay import, not a critical path but need to avoid circular import.
+        from numpy._core._internal import _ufunc_inspect_signature_builder
+
+        if obj is None:
+            # could also return None, which is accepted as "not set" by
+            # inspect.signature().
+            raise AttributeError(
+                "type object 'numpy.ufunc' has no attribute '__signature__'")
+
+        # Store on the instance, after this the descriptor won't be used.
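+        # (The instance attribute shadows this non-data descriptor on
+        # later lookups, so the signature is built at most once per ufunc.)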
+ obj.__signature__ = _ufunc_inspect_signature_builder(obj) + return obj.__signature__ + + +_signature_descriptor = _SignatureDescriptor() From 396db9b09e43425c6de64db415c668a848c7dea1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 21 Nov 2025 10:56:27 -0700 Subject: [PATCH 0897/1018] MAINT: Bump astral-sh/setup-uv from 7.1.3 to 7.1.4 (#30276) Bumps [astral-sh/setup-uv](https://github.com/astral-sh/setup-uv) from 7.1.3 to 7.1.4. - [Release notes](https://github.com/astral-sh/setup-uv/releases) - [Commits](https://github.com/astral-sh/setup-uv/compare/5a7eac68fb9809dea845d802897dc5c723910fa3...1e862dfacbd1d6d858c55d9b792c756523627244) --- updated-dependencies: - dependency-name: astral-sh/setup-uv dependency-version: 7.1.4 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/mypy.yml | 2 +- .github/workflows/stubtest.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 51c03285c990..e3c6e7beba13 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -61,7 +61,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@5a7eac68fb9809dea845d802897dc5c723910fa3 # v7.1.3 + - uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4 with: python-version: ${{ matrix.os_python[1] }} activate-environment: true diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml index 0cfbd6197de0..731d9091ec69 100644 --- a/.github/workflows/stubtest.yml +++ b/.github/workflows/stubtest.yml @@ -43,7 +43,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@5a7eac68fb9809dea845d802897dc5c723910fa3 # v7.1.3 + - uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4 with: python-version: ${{ matrix.py }} activate-environment: true From dd80e20bc4b7f78ba92177c87d11c236bb7d194d Mon Sep 17 00:00:00 2001 From: Paresh Joshi Date: Sat, 22 Nov 2025 00:05:24 +0530 Subject: [PATCH 0898/1018] Fix race condition in test_printoptions_thread_safety (#30271) The test_printoptions_thread_safety function spawned threads but exited immediately without joining them. This created a race condition where: 1. The test could return 'Passed' before the assertions inside the threads actually ran. 2. "Zombie" threads were left running in the background, potentially interfering with subsequent tests. Added .join() calls for both threads to ensure proper execution and teardown. --- numpy/_core/tests/test_multithreading.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index b78f476d33e2..9b10974839a4 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -122,6 +122,8 @@ def legacy_125(): task1.start() task2.start() + task1.join() + task2.join() def test_parallel_reduction(): From 0957720b27c9f5f6c9f1d43f1fa0d7dd43bd19db Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Sat, 22 Nov 2025 00:33:27 -0700 Subject: [PATCH 0899/1018] BUG: fix np.resize refchk on python 3.14 (#30278) Use new API on Python 3.14 for unique reference check (as it changed and for free-threading). 
On older Python's there is an additional reference on the stack for the method call, though. Fixes gh-30265 --- numpy/_core/src/multiarray/shape.c | 27 +++++++++++++-------------- numpy/_core/tests/test_multiarray.py | 7 +++++++ 2 files changed, 20 insertions(+), 14 deletions(-) diff --git a/numpy/_core/src/multiarray/shape.c b/numpy/_core/src/multiarray/shape.c index c98dba9abcb2..fce61ef36e63 100644 --- a/numpy/_core/src/multiarray/shape.c +++ b/numpy/_core/src/multiarray/shape.c @@ -32,7 +32,6 @@ PyArray_Resize_int(PyArrayObject *self, PyArray_Dims *newshape, int refcheck) npy_intp oldnbytes, newnbytes; npy_intp oldsize, newsize; int new_nd=newshape->len, k, elsize; - int refcnt; npy_intp* new_dimensions=newshape->ptr; npy_intp new_strides[NPY_MAXDIMS]; npy_intp *dimptr; @@ -93,21 +92,21 @@ PyArray_Resize_int(PyArrayObject *self, PyArray_Dims *newshape, int refcheck) "Use the np.resize function or refcheck=False"); return -1; #else - refcnt = Py_REFCNT(self); +#if PY_VERSION_HEX >= 0x030E00B0 + if (!PyUnstable_Object_IsUniquelyReferenced((PyObject *)self)) { +#else + if (Py_REFCNT(self) > 2) { +#endif + PyErr_SetString( + PyExc_ValueError, + "cannot resize an array that " + "references or is referenced\n" + "by another array in this way.\n" + "Use the np.resize function or refcheck=False"); + return -1; + } #endif /* PYPY_VERSION */ } - else { - refcnt = 1; - } - if (refcnt > 2) { - PyErr_SetString(PyExc_ValueError, - "cannot resize an array that " - "references or is referenced\n" - "by another array in this way.\n" - "Use the np.resize function or refcheck=False"); - return -1; - } - /* Reallocate space if needed - allocating 0 is forbidden */ PyObject *handler = PyArray_HANDLER(self); if (handler == NULL) { diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 5410f7677b9f..218fcc79592e 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -6287,6 +6287,13 @@ def test_check_reference(self): y = x assert_raises(ValueError, x.resize, (5, 1)) + def test_check_reference_2(self): + # see gh-30265 + x = np.zeros((2, 2)) + y = x + with pytest.raises(ValueError): + x.resize((5, 5)) + @_no_tracing def test_int_shape(self): x = np.eye(3) From aa6eb42e5f8b79f62d057f981770a29421233c06 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Sat, 22 Nov 2025 00:40:01 -0700 Subject: [PATCH 0900/1018] MAINT: refactor unary temporary elision check (#30279) --- numpy/_core/src/multiarray/temp_elide.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/multiarray/temp_elide.c b/numpy/_core/src/multiarray/temp_elide.c index dbd469b05654..ea6cac08f78b 100644 --- a/numpy/_core/src/multiarray/temp_elide.c +++ b/numpy/_core/src/multiarray/temp_elide.c @@ -387,12 +387,12 @@ NPY_NO_EXPORT int can_elide_temp_unary(PyArrayObject * m1) { int cannot; - if (Py_REFCNT(m1) != 1 || !PyArray_CheckExact(m1) || + if (!check_unique_temporary((PyObject *)m1) || + !PyArray_CheckExact(m1) || !PyArray_ISNUMBER(m1) || !PyArray_CHKFLAGS(m1, NPY_ARRAY_OWNDATA) || !PyArray_ISWRITEABLE(m1) || - PyArray_NBYTES(m1) < NPY_MIN_ELIDE_BYTES || - !check_unique_temporary((PyObject *)m1)) { + PyArray_NBYTES(m1) < NPY_MIN_ELIDE_BYTES) { return 0; } if (check_callers(&cannot)) { From debb9a3f4bfb7ce8db68b687d71e5287c4cee069 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 24 Nov 2025 17:31:35 +0100 Subject: [PATCH 0901/1018] TYP: ``lib._function_base_impl``: many typing improvements (#30246) * TYP: 
``lib._function_base_impl``: many typing improvements

* TYP: use the any trick when calling ``gradient`` with arrays of unknown shape

* TYP: fix regression in ``append`` caught by mypy_primer

* TYP: Apply Copilot's review suggestions (yes, they actually made sense)
---
 numpy/lib/_function_base_impl.pyi             | 2225 +++++++++++++----
 .../tests/data/fail/lib_function_base.pyi     |   36 +-
 .../tests/data/reveal/lib_function_base.pyi   |  428 +++-
 3 files changed, 2030 insertions(+), 659 deletions(-)

diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi
index bad00641072c..48d3743983b3 100644
--- a/numpy/lib/_function_base_impl.pyi
+++ b/numpy/lib/_function_base_impl.pyi
@@ -1,37 +1,22 @@
-# ruff: noqa: ANN401
-from _typeshed import Incomplete
+from _typeshed import ConvertibleToInt, Incomplete
 from collections.abc import Callable, Iterable, Sequence
 from typing import (
     Any,
     Concatenate,
     Literal as L,
+    Never,
     ParamSpec,
     Protocol,
     SupportsIndex,
     SupportsInt,
     TypeAlias,
-    TypeVar,
     overload,
     type_check_only,
 )
-from typing_extensions import TypeIs
+from typing_extensions import TypeIs, TypeVar
 
 import numpy as np
-from numpy import (
-    _OrderKACF,
-    bool_,
-    complex128,
-    complexfloating,
-    datetime64,
-    float64,
-    floating,
-    generic,
-    integer,
-    intp,
-    object_,
-    timedelta64,
-    vectorize,
-)
+from numpy import _OrderKACF, vectorize
 from numpy._core.multiarray import bincount
 from numpy._globals import _NoValueType
 from numpy._typing import (
@@ -41,19 +26,18 @@
     _ArrayLike,
     _ArrayLikeBool_co,
     _ArrayLikeComplex_co,
-    _ArrayLikeDT64_co,
     _ArrayLikeFloat_co,
     _ArrayLikeInt_co,
     _ArrayLikeNumber_co,
     _ArrayLikeObject_co,
-    _ArrayLikeTD64_co,
     _ComplexLike_co,
     _DTypeLike,
     _FloatLike_co,
-    _NestedSequence,
+    _NestedSequence as _SeqND,
     _NumberLike_co,
     _ScalarLike_co,
     _ShapeLike,
+    _SupportsArray,
 )
 
 __all__ = [
@@ -98,15 +82,94 @@ __all__ = [
 
 _T = TypeVar("_T")
 _T_co = TypeVar("_T_co", covariant=True)
-# The `{}ss` suffix refers to the Python 3.12 syntax: `**P`
-_Pss = ParamSpec("_Pss")
-_ScalarT = TypeVar("_ScalarT", bound=generic)
-_ScalarT1 = TypeVar("_ScalarT1", bound=generic)
-_ScalarT2 = TypeVar("_ScalarT2", bound=generic)
+# The `{}ss` suffix refers to the PEP 695 (Python 3.12) `ParamSpec` syntax, `**P`.
+_Tss = ParamSpec("_Tss")
+
+_ScalarT = TypeVar("_ScalarT", bound=np.generic)
+_ScalarT1 = TypeVar("_ScalarT1", bound=np.generic)
+_ScalarT2 = TypeVar("_ScalarT2", bound=np.generic)
+_FloatingT = TypeVar("_FloatingT", bound=np.floating)
+_InexactT = TypeVar("_InexactT", bound=np.inexact)
+_InexactTimeT = TypeVar("_InexactTimeT", bound=np.inexact | np.timedelta64)
+_InexactDateTimeT = TypeVar("_InexactDateTimeT", bound=np.inexact | np.timedelta64 | np.datetime64)
+_ScalarNumericT = TypeVar("_ScalarNumericT", bound=np.inexact | np.timedelta64 | np.object_)
+_AnyDoubleT = TypeVar("_AnyDoubleT", bound=np.float64 | np.longdouble | np.complex128 | np.clongdouble)
+
 _ArrayT = TypeVar("_ArrayT", bound=np.ndarray)
+_ArrayFloatingT = TypeVar("_ArrayFloatingT", bound=NDArray[np.floating])
+_ArrayFloatObjT = TypeVar("_ArrayFloatObjT", bound=NDArray[np.floating | np.object_])
+_ArrayComplexT = TypeVar("_ArrayComplexT", bound=NDArray[np.complexfloating])
+_ArrayInexactT = TypeVar("_ArrayInexactT", bound=NDArray[np.inexact])
+_ArrayNumericT = TypeVar("_ArrayNumericT", bound=NDArray[np.inexact | np.timedelta64 | np.object_])
+
+_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_ScalarT]] | Sequence[_ScalarT]
+
+_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...])
+
+_integer_co: TypeAlias = np.integer | np.bool
+_float64_co: TypeAlias = np.float64 | _integer_co
+_floating_co: TypeAlias = np.floating | _integer_co
+
+# non-trivial scalar-types that will become `complex128` in `sort_complex()`,
+# i.e. all numeric scalar types except for `[u]int{8,16} | longdouble`
+_SortsToComplex128: TypeAlias = (
+    np.bool
+    | np.int32
+    | np.uint32
+    | np.int64
+    | np.uint64
+    | np.float16
+    | np.float32
+    | np.float64
+    | np.timedelta64
+    | np.object_
+)
+
+_Array: TypeAlias = np.ndarray[_ShapeT, np.dtype[_ScalarT]]
+_Array0D: TypeAlias = np.ndarray[tuple[()], np.dtype[_ScalarT]]
+_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]]
+_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]]
+_Array3D: TypeAlias = np.ndarray[tuple[int, int, int], np.dtype[_ScalarT]]
+_ArrayMax2D: TypeAlias = np.ndarray[tuple[int] | tuple[int, int], np.dtype[_ScalarT]]
+# workaround for mypy and pyright not following the typing spec for overloads
+_ArrayNoD: TypeAlias = np.ndarray[tuple[Never, Never, Never, Never], np.dtype[_ScalarT]]
+
+_Seq1D: TypeAlias = Sequence[_T]
+_Seq2D: TypeAlias = Sequence[Sequence[_T]]
+_Seq3D: TypeAlias = Sequence[Sequence[Sequence[_T]]]
+_ListSeqND: TypeAlias = list[_T] | _SeqND[list[_T]]
+
+_Tuple2: TypeAlias = tuple[_T, _T]
+_Tuple3: TypeAlias = tuple[_T, _T, _T]
+_Tuple4: TypeAlias = tuple[_T, _T, _T, _T]
+
+_Mesh1: TypeAlias = tuple[_Array1D[_ScalarT]]
+_Mesh2: TypeAlias = tuple[_Array2D[_ScalarT], _Array2D[_ScalarT1]]
+_Mesh3: TypeAlias = tuple[_Array3D[_ScalarT], _Array3D[_ScalarT1], _Array3D[_ScalarT2]]
+
+_IndexLike: TypeAlias = slice | _ArrayLikeInt_co
+
+_Indexing: TypeAlias = L["ij", "xy"]
+_InterpolationMethod = L[
+    "inverted_cdf",
+    "averaged_inverted_cdf",
+    "closest_observation",
+    "interpolated_inverted_cdf",
+    "hazen",
+    "weibull",
+    "linear",
+    "median_unbiased",
+    "normal_unbiased",
+    "lower",
+    "higher",
+    "midpoint",
+    "nearest",
+]
 
-_2Tuple: TypeAlias = tuple[_T, _T]
-_MeshgridIdx: TypeAlias = L["ij", "xy"]
+# The resulting value will be used as `y[cond] = func(vals, *args, **kw)`, so it can
+_PiecewiseFunction: TypeAlias = Callable[Concatenate[NDArray[_ScalarT], _Tss], ArrayLike] +_PiecewiseFunctions: TypeAlias = _SizedIterable[_PiecewiseFunction[_ScalarT, _Tss] | _ScalarLike_co] @type_check_only class _TrimZerosSequence(Protocol[_T_co]): @@ -116,192 +179,574 @@ class _TrimZerosSequence(Protocol[_T_co]): @overload def __getitem__(self, key: slice, /) -> _T_co: ... +@type_check_only +class _SupportsRMulFloat(Protocol[_T_co]): + def __rmul__(self, other: float, /) -> _T_co: ... + +@type_check_only +class _SizedIterable(Protocol[_T_co]): + def __iter__(self) -> Iterable[_T_co]: ... + def __len__(self) -> int: ... + ### @overload -def rot90( - m: _ArrayLike[_ScalarT], - k: int = 1, - axes: tuple[int, int] = (0, 1), -) -> NDArray[_ScalarT]: ... +def rot90(m: _ArrayT, k: int = 1, axes: tuple[int, int] = (0, 1)) -> _ArrayT: ... @overload -def rot90( - m: ArrayLike, - k: int = 1, - axes: tuple[int, int] = (0, 1), -) -> NDArray[Any]: ... - +def rot90(m: _ArrayLike[_ScalarT], k: int = 1, axes: tuple[int, int] = (0, 1)) -> NDArray[_ScalarT]: ... @overload -def flip(m: _ScalarT, axis: None = None) -> _ScalarT: ... +def rot90(m: ArrayLike, k: int = 1, axes: tuple[int, int] = (0, 1)) -> NDArray[Incomplete]: ... + +# NOTE: Technically `flip` also accept scalars, but that has no effect and complicates +# the overloads significantly, so we ignore that case here. @overload -def flip(m: _ScalarLike_co, axis: None = None) -> Any: ... +def flip(m: _ArrayT, axis: int | tuple[int, ...] | None = None) -> _ArrayT: ... @overload -def flip(m: _ArrayLike[_ScalarT], axis: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... +def flip(m: _ArrayLike[_ScalarT], axis: int | tuple[int, ...] | None = None) -> NDArray[_ScalarT]: ... @overload -def flip(m: ArrayLike, axis: _ShapeLike | None = None) -> NDArray[Any]: ... +def flip(m: ArrayLike, axis: int | tuple[int, ...] | None = None) -> NDArray[Incomplete]: ... +# def iterable(y: object) -> TypeIs[Iterable[Any]]: ... -@overload +# NOTE: This assumes that if `axis` is given the input is at least 2d, and will +# therefore always return an array. +# NOTE: This assumes that if `keepdims=True` the input is at least 1d, and will +# therefore always return an array. +@overload # inexact array, keepdims=True +def average( + a: _ArrayInexactT, + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[True], +) -> _ArrayInexactT: ... +@overload # inexact array, returned=True keepdims=True +def average( + a: _ArrayInexactT, + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[True], +) -> _Tuple2[_ArrayInexactT]: ... +@overload # inexact array-like, axis=None +def average( + a: _ArrayLike[_InexactT], + axis: None = None, + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> _InexactT: ... +@overload # inexact array-like, axis= +def average( + a: _ArrayLike[_InexactT], + axis: int | tuple[int, ...], + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> NDArray[_InexactT]: ... +@overload # inexact array-like, keepdims=True +def average( + a: _ArrayLike[_InexactT], + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[True], +) -> NDArray[_InexactT]: ... 
+@overload # inexact array-like, axis=None, returned=True +def average( + a: _ArrayLike[_InexactT], + axis: None = None, + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[_InexactT]: ... +@overload # inexact array-like, axis=, returned=True +def average( + a: _ArrayLike[_InexactT], + axis: int | tuple[int, ...], + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[NDArray[_InexactT]]: ... +@overload # inexact array-like, returned=True, keepdims=True +def average( + a: _ArrayLike[_InexactT], + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[True], +) -> _Tuple2[NDArray[_InexactT]]: ... +@overload # bool or integer array-like, axis=None def average( - a: _ArrayLikeFloat_co, + a: _SeqND[float] | _ArrayLikeInt_co, axis: None = None, weights: _ArrayLikeFloat_co | None = None, returned: L[False] = False, *, keepdims: L[False] | _NoValueType = ..., -) -> floating: ... -@overload +) -> np.float64: ... +@overload # bool or integer array-like, axis= +def average( + a: _SeqND[float] | _ArrayLikeInt_co, + axis: int | tuple[int, ...], + weights: _ArrayLikeFloat_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> NDArray[np.float64]: ... +@overload # bool or integer array-like, keepdims=True +def average( + a: _SeqND[float] | _ArrayLikeInt_co, + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeFloat_co | None = None, + returned: L[False] = False, + *, + keepdims: L[True], +) -> NDArray[np.float64]: ... +@overload # bool or integer array-like, axis=None, returned=True def average( - a: _ArrayLikeFloat_co, + a: _SeqND[float] | _ArrayLikeInt_co, axis: None = None, weights: _ArrayLikeFloat_co | None = None, *, returned: L[True], keepdims: L[False] | _NoValueType = ..., -) -> _2Tuple[floating]: ... -@overload +) -> _Tuple2[np.float64]: ... +@overload # bool or integer array-like, axis=, returned=True +def average( + a: _SeqND[float] | _ArrayLikeInt_co, + axis: int | tuple[int, ...], + weights: _ArrayLikeFloat_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[NDArray[np.float64]]: ... +@overload # bool or integer array-like, returned=True, keepdims=True +def average( + a: _SeqND[float] | _ArrayLikeInt_co, + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeFloat_co | None = None, + *, + returned: L[True], + keepdims: L[True], +) -> _Tuple2[NDArray[np.float64]]: ... +@overload # complex array-like, axis=None def average( - a: _ArrayLikeComplex_co, + a: _ListSeqND[complex], axis: None = None, weights: _ArrayLikeComplex_co | None = None, returned: L[False] = False, *, keepdims: L[False] | _NoValueType = ..., -) -> complexfloating: ... -@overload +) -> np.complex128: ... +@overload # complex array-like, axis= +def average( + a: _ListSeqND[complex], + axis: int | tuple[int, ...], + weights: _ArrayLikeComplex_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> NDArray[np.complex128]: ... +@overload # complex array-like, keepdims=True +def average( + a: _ListSeqND[complex], + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeComplex_co | None = None, + returned: L[False] = False, + *, + keepdims: L[True], +) -> NDArray[np.complex128]: ... 
+@overload # complex array-like, axis=None, returned=True def average( - a: _ArrayLikeComplex_co, + a: _ListSeqND[complex], axis: None = None, weights: _ArrayLikeComplex_co | None = None, *, returned: L[True], keepdims: L[False] | _NoValueType = ..., -) -> _2Tuple[complexfloating]: ... -@overload +) -> _Tuple2[np.complex128]: ... +@overload # complex array-like, axis=, returned=True def average( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, - weights: object | None = None, + a: _ListSeqND[complex], + axis: int | tuple[int, ...], + weights: _ArrayLikeComplex_co | None = None, *, returned: L[True], - keepdims: bool | bool_ | _NoValueType = ..., -) -> _2Tuple[Incomplete]: ... -@overload + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[NDArray[np.complex128]]: ... +@overload # complex array-like, keepdims=True, returned=True def average( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, - weights: object | None = None, - returned: bool | bool_ = False, + a: _ListSeqND[complex], + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeComplex_co | None = None, *, - keepdims: bool | bool_ | _NoValueType = ..., -) -> Incomplete: ... + returned: L[True], + keepdims: L[True], +) -> _Tuple2[NDArray[np.complex128]]: ... +@overload # unknown, axis=None +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: None = None, + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> Any: ... +@overload # unknown, axis= +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: int | tuple[int, ...], + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> np.ndarray: ... +@overload # unknown, keepdims=True +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[True], +) -> np.ndarray: ... +@overload # unknown, axis=None, returned=True +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: None = None, + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[Any]: ... +@overload # unknown, axis=, returned=True +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: int | tuple[int, ...], + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[np.ndarray]: ... +@overload # unknown, returned=True, keepdims=True +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[True], +) -> _Tuple2[np.ndarray]: ... +# @overload -def asarray_chkfinite( - a: _ArrayLike[_ScalarT], - dtype: None = None, - order: _OrderKACF = None, -) -> NDArray[_ScalarT]: ... +def asarray_chkfinite(a: _ArrayT, dtype: None = None, order: _OrderKACF = None) -> _ArrayT: ... @overload def asarray_chkfinite( - a: object, - dtype: None = None, - order: _OrderKACF = None, -) -> NDArray[Any]: ... + a: np.ndarray[_ShapeT], dtype: _DTypeLike[_ScalarT], order: _OrderKACF = None +) -> _Array[_ShapeT, _ScalarT]: ... @overload -def asarray_chkfinite( - a: Any, - dtype: _DTypeLike[_ScalarT], - order: _OrderKACF = None, -) -> NDArray[_ScalarT]: ... 
+def asarray_chkfinite(a: _ArrayLike[_ScalarT], dtype: None = None, order: _OrderKACF = None) -> NDArray[_ScalarT]: ... @overload -def asarray_chkfinite( - a: Any, - dtype: DTypeLike | None, - order: _OrderKACF = None, -) -> NDArray[Any]: ... +def asarray_chkfinite(a: object, dtype: _DTypeLike[_ScalarT], order: _OrderKACF = None) -> NDArray[_ScalarT]: ... +@overload +def asarray_chkfinite(a: object, dtype: DTypeLike | None = None, order: _OrderKACF = None) -> NDArray[Incomplete]: ... +# NOTE: Contrary to the documentation, scalars are also accepted and treated as +# `[condlist]`. And even though the documentation says these should be boolean, in +# practice anything that `np.array(condlist, dtype=bool)` accepts will work, i.e. any +# array-like. +@overload +def piecewise( + x: _Array[_ShapeT, _ScalarT], + condlist: ArrayLike, + funclist: _PiecewiseFunctions[Any, _Tss], + *args: _Tss.args, + **kw: _Tss.kwargs, +) -> _Array[_ShapeT, _ScalarT]: ... @overload def piecewise( x: _ArrayLike[_ScalarT], - condlist: _ArrayLike[bool_] | Sequence[_ArrayLikeBool_co], - funclist: Sequence[ - Callable[Concatenate[NDArray[_ScalarT], _Pss], NDArray[_ScalarT | Any]] - | _ScalarT | object - ], - *args: _Pss.args, - **kw: _Pss.kwargs, + condlist: ArrayLike, + funclist: _PiecewiseFunctions[Any, _Tss], + *args: _Tss.args, + **kw: _Tss.kwargs, ) -> NDArray[_ScalarT]: ... @overload def piecewise( x: ArrayLike, - condlist: _ArrayLike[bool_] | Sequence[_ArrayLikeBool_co], - funclist: Sequence[ - Callable[Concatenate[NDArray[Any], _Pss], NDArray[Any]] - | object - ], - *args: _Pss.args, - **kw: _Pss.kwargs, -) -> NDArray[Any]: ... + condlist: ArrayLike, + funclist: _PiecewiseFunctions[_ScalarT, _Tss], + *args: _Tss.args, + **kw: _Tss.kwargs, +) -> NDArray[_ScalarT]: ... +# NOTE: condition is usually boolean, but anything with zero/non-zero semantics works +@overload +def extract(condition: ArrayLike, arr: _ArrayLike[_ScalarT]) -> _Array1D[_ScalarT]: ... +@overload +def extract(condition: ArrayLike, arr: _SeqND[bool]) -> _Array1D[np.bool]: ... +@overload +def extract(condition: ArrayLike, arr: _ListSeqND[int]) -> _Array1D[np.int_]: ... +@overload +def extract(condition: ArrayLike, arr: _ListSeqND[float]) -> _Array1D[np.float64]: ... +@overload +def extract(condition: ArrayLike, arr: _ListSeqND[complex]) -> _Array1D[np.complex128]: ... +@overload +def extract(condition: ArrayLike, arr: _SeqND[bytes]) -> _Array1D[np.bytes_]: ... +@overload +def extract(condition: ArrayLike, arr: _SeqND[str]) -> _Array1D[np.str_]: ... +@overload +def extract(condition: ArrayLike, arr: ArrayLike) -> _Array1D[Incomplete]: ... + +# NOTE: unlike `extract`, passing non-boolean conditions for `condlist` will raise an +# error at runtime +@overload +def select( + condlist: _SizedIterable[_ArrayLikeBool_co], + choicelist: Sequence[_ArrayT], + default: _ScalarLike_co = 0, +) -> _ArrayT: ... +@overload +def select( + condlist: _SizedIterable[_ArrayLikeBool_co], + choicelist: Sequence[_ArrayLike[_ScalarT]] | NDArray[_ScalarT], + default: _ScalarLike_co = 0, +) -> NDArray[_ScalarT]: ... +@overload def select( - condlist: Sequence[ArrayLike], + condlist: _SizedIterable[_ArrayLikeBool_co], choicelist: Sequence[ArrayLike], - default: ArrayLike = 0, -) -> NDArray[Any]: ... + default: _ScalarLike_co = 0, +) -> np.ndarray: ... # keep roughly in sync with `ma.core.copy` @overload -def copy( - a: _ArrayT, - order: _OrderKACF, - subok: L[True], -) -> _ArrayT: ... +def copy(a: _ArrayT, order: _OrderKACF, subok: L[True]) -> _ArrayT: ... 
@overload -def copy( - a: _ArrayT, - order: _OrderKACF = "K", - *, - subok: L[True], -) -> _ArrayT: ... +def copy(a: _ArrayT, order: _OrderKACF = "K", *, subok: L[True]) -> _ArrayT: ... @overload -def copy( - a: _ArrayLike[_ScalarT], - order: _OrderKACF = "K", - subok: L[False] = False, -) -> NDArray[_ScalarT]: ... +def copy(a: _ArrayLike[_ScalarT], order: _OrderKACF = "K", subok: L[False] = False) -> NDArray[_ScalarT]: ... @overload -def copy( - a: ArrayLike, - order: _OrderKACF = "K", - subok: L[False] = False, -) -> NDArray[Any]: ... +def copy(a: ArrayLike, order: _OrderKACF = "K", subok: L[False] = False) -> NDArray[Incomplete]: ... +# +@overload # ?d, known inexact scalar-type +def gradient( + f: _ArrayNoD[_InexactTimeT], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, + # `| Any` instead of ` | tuple` is returned to avoid several mypy_primer errors +) -> _Array1D[_InexactTimeT] | Any: ... +@overload # 1d, known inexact scalar-type +def gradient( + f: _Array1D[_InexactTimeT], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Array1D[_InexactTimeT]: ... +@overload # 2d, known inexact scalar-type +def gradient( + f: _Array2D[_InexactTimeT], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh2[_InexactTimeT, _InexactTimeT]: ... +@overload # 3d, known inexact scalar-type +def gradient( + f: _Array3D[_InexactTimeT], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh3[_InexactTimeT, _InexactTimeT, _InexactTimeT]: ... +@overload # ?d, datetime64 scalar-type +def gradient( + f: _ArrayNoD[np.datetime64], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Array1D[np.timedelta64] | tuple[NDArray[np.timedelta64], ...]: ... +@overload # 1d, datetime64 scalar-type +def gradient( + f: _Array1D[np.datetime64], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Array1D[np.timedelta64]: ... +@overload # 2d, datetime64 scalar-type +def gradient( + f: _Array2D[np.datetime64], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh2[np.timedelta64, np.timedelta64]: ... +@overload # 3d, datetime64 scalar-type +def gradient( + f: _Array3D[np.datetime64], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh3[np.timedelta64, np.timedelta64, np.timedelta64]: ... +@overload # 1d float-like +def gradient( + f: _Seq1D[float], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Array1D[np.float64]: ... +@overload # 2d float-like +def gradient( + f: _Seq2D[float], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh2[np.float64, np.float64]: ... +@overload # 3d float-like +def gradient( + f: _Seq3D[float], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh3[np.float64, np.float64, np.float64]: ... +@overload # 1d complex-like (the `list` avoids overlap with the float-like overload) +def gradient( + f: list[complex], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Array1D[np.complex128]: ... 
+@overload # 2d complex-like
+def gradient(
+    f: _Seq1D[list[complex]],
+    *varargs: _ArrayLikeNumber_co,
+    axis: _ShapeLike | None = None,
+    edge_order: L[1, 2] = 1,
+) -> _Mesh2[np.complex128, np.complex128]: ...
+@overload # 3d complex-like
+def gradient(
+    f: _Seq2D[list[complex]],
+    *varargs: _ArrayLikeNumber_co,
+    axis: _ShapeLike | None = None,
+    edge_order: L[1, 2] = 1,
+) -> _Mesh3[np.complex128, np.complex128, np.complex128]: ...
+@overload # fallback
 def gradient(
     f: ArrayLike,
-    *varargs: ArrayLike,
+    *varargs: _ArrayLikeNumber_co,
     axis: _ShapeLike | None = None,
     edge_order: L[1, 2] = 1,
-) -> Any: ...
+) -> Incomplete: ...
 
-@overload
-def diff(  # type: ignore[overload-overlap]
+#
+@overload # n == 0; return input unchanged
+def diff(
     a: _T,
     n: L[0],
     axis: SupportsIndex = -1,
     prepend: ArrayLike | _NoValueType = ...,  # = _NoValue
     append: ArrayLike | _NoValueType = ...,  # = _NoValue
 ) -> _T: ...
-@overload
+@overload # known array-type
+def diff(
+    a: _ArrayNumericT,
+    n: int = 1,
+    axis: SupportsIndex = -1,
+    prepend: ArrayLike | _NoValueType = ...,
+    append: ArrayLike | _NoValueType = ...,
+) -> _ArrayNumericT: ...
+@overload # known shape, datetime64
+def diff(
+    a: _Array[_ShapeT, np.datetime64],
+    n: int = 1,
+    axis: SupportsIndex = -1,
+    prepend: ArrayLike | _NoValueType = ...,
+    append: ArrayLike | _NoValueType = ...,
+) -> _Array[_ShapeT, np.timedelta64]: ...
+@overload # unknown shape, known scalar-type
+def diff(
+    a: _ArrayLike[_ScalarNumericT],
+    n: int = 1,
+    axis: SupportsIndex = -1,
+    prepend: ArrayLike | _NoValueType = ...,
+    append: ArrayLike | _NoValueType = ...,
+) -> NDArray[_ScalarNumericT]: ...
+@overload # unknown shape, datetime64
+def diff(
+    a: _ArrayLike[np.datetime64],
+    n: int = 1,
+    axis: SupportsIndex = -1,
+    prepend: ArrayLike | _NoValueType = ...,
+    append: ArrayLike | _NoValueType = ...,
+) -> NDArray[np.timedelta64]: ...
+@overload # 1d int
+def diff(
+    a: _Seq1D[int],
+    n: int = 1,
+    axis: SupportsIndex = -1,
+    prepend: ArrayLike | _NoValueType = ...,
+    append: ArrayLike | _NoValueType = ...,
+) -> _Array1D[np.int_]: ...
+@overload # 2d int
+def diff(
+    a: _Seq2D[int],
+    n: int = 1,
+    axis: SupportsIndex = -1,
+    prepend: ArrayLike | _NoValueType = ...,
+    append: ArrayLike | _NoValueType = ...,
+) -> _Array2D[np.int_]: ...
+@overload # 1d float (the `list` avoids overlap with the `int` overloads)
+def diff(
+    a: list[float],
+    n: int = 1,
+    axis: SupportsIndex = -1,
+    prepend: ArrayLike | _NoValueType = ...,
+    append: ArrayLike | _NoValueType = ...,
+) -> _Array1D[np.float64]: ...
+@overload # 2d float
+def diff(
+    a: _Seq1D[list[float]],
+    n: int = 1,
+    axis: SupportsIndex = -1,
+    prepend: ArrayLike | _NoValueType = ...,
+    append: ArrayLike | _NoValueType = ...,
+) -> _Array2D[np.float64]: ...
+@overload # 1d complex (the `list` avoids overlap with the `int` overloads)
+def diff(
+    a: list[complex],
+    n: int = 1,
+    axis: SupportsIndex = -1,
+    prepend: ArrayLike | _NoValueType = ...,
+    append: ArrayLike | _NoValueType = ...,
+) -> _Array1D[np.complex128]: ...
+@overload # 2d complex
+def diff(
+    a: _Seq1D[list[complex]],
+    n: int = 1,
+    axis: SupportsIndex = -1,
+    prepend: ArrayLike | _NoValueType = ...,
+    append: ArrayLike | _NoValueType = ...,
+) -> _Array2D[np.complex128]: ...
+@overload # unknown shape, unknown scalar-type def diff( a: ArrayLike, n: int = 1, axis: SupportsIndex = -1, - prepend: ArrayLike | _NoValueType = ..., # = _NoValue - append: ArrayLike | _NoValueType = ..., # = _NoValue + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., ) -> NDArray[Incomplete]: ... +# @overload # float scalar def interp( x: _FloatLike_co, @@ -310,71 +755,89 @@ def interp( left: _FloatLike_co | None = None, right: _FloatLike_co | None = None, period: _FloatLike_co | None = None, -) -> float64: ... -@overload # float array +) -> np.float64: ... +@overload # complex scalar def interp( - x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co], + x: _FloatLike_co, xp: _ArrayLikeFloat_co, - fp: _ArrayLikeFloat_co, - left: _FloatLike_co | None = None, - right: _FloatLike_co | None = None, + fp: _ArrayLike1D[np.complexfloating] | list[complex], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, period: _FloatLike_co | None = None, -) -> NDArray[float64]: ... -@overload # float scalar or array +) -> np.complex128: ... +@overload # float array def interp( - x: _ArrayLikeFloat_co, + x: _Array[_ShapeT, _floating_co], xp: _ArrayLikeFloat_co, fp: _ArrayLikeFloat_co, left: _FloatLike_co | None = None, right: _FloatLike_co | None = None, period: _FloatLike_co | None = None, -) -> NDArray[float64] | float64: ... -@overload # complex scalar +) -> _Array[_ShapeT, np.float64]: ... +@overload # complex array def interp( - x: _FloatLike_co, + x: _Array[_ShapeT, _floating_co], xp: _ArrayLikeFloat_co, - fp: _ArrayLike[complexfloating], + fp: _ArrayLike1D[np.complexfloating] | list[complex], left: _NumberLike_co | None = None, right: _NumberLike_co | None = None, period: _FloatLike_co | None = None, -) -> complex128: ... -@overload # complex or float scalar +) -> _Array[_ShapeT, np.complex128]: ... +@overload # float sequence def interp( - x: _FloatLike_co, + x: _Seq1D[_FloatLike_co], xp: _ArrayLikeFloat_co, - fp: Sequence[complex | complexfloating], - left: _NumberLike_co | None = None, - right: _NumberLike_co | None = None, + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, period: _FloatLike_co | None = None, -) -> complex128 | float64: ... -@overload # complex array +) -> _Array1D[np.float64]: ... +@overload # complex sequence def interp( - x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co], + x: _Seq1D[_FloatLike_co], xp: _ArrayLikeFloat_co, - fp: _ArrayLike[complexfloating], + fp: _ArrayLike1D[np.complexfloating] | list[complex], left: _NumberLike_co | None = None, right: _NumberLike_co | None = None, period: _FloatLike_co | None = None, -) -> NDArray[complex128]: ... -@overload # complex or float array +) -> _Array1D[np.complex128]: ... +@overload # float array-like +def interp( + x: _SeqND[_FloatLike_co], + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[np.float64]: ... +@overload # complex array-like def interp( - x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co], + x: _SeqND[_FloatLike_co], xp: _ArrayLikeFloat_co, - fp: Sequence[complex | complexfloating], + fp: _ArrayLike1D[np.complexfloating] | list[complex], left: _NumberLike_co | None = None, right: _NumberLike_co | None = None, period: _FloatLike_co | None = None, -) -> NDArray[complex128 | float64]: ... 
-@overload # complex scalar or array +) -> NDArray[np.complex128]: ... +@overload # float scalar/array-like +def interp( + x: _ArrayLikeFloat_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[np.float64] | np.float64: ... +@overload # complex scalar/array-like def interp( x: _ArrayLikeFloat_co, xp: _ArrayLikeFloat_co, - fp: _ArrayLike[complexfloating], + fp: _ArrayLike1D[np.complexfloating], left: _NumberLike_co | None = None, right: _NumberLike_co | None = None, period: _FloatLike_co | None = None, -) -> NDArray[complex128] | complex128: ... -@overload # complex or float scalar or array +) -> NDArray[np.complex128] | np.complex128: ... +@overload # float/complex scalar/array-like def interp( x: _ArrayLikeFloat_co, xp: _ArrayLikeFloat_co, @@ -382,662 +845,1356 @@ def interp( left: _NumberLike_co | None = None, right: _NumberLike_co | None = None, period: _FloatLike_co | None = None, -) -> NDArray[complex128 | float64] | complex128 | float64: ... +) -> NDArray[np.complex128 | np.float64] | np.complex128 | np.float64: ... -@overload -def angle(z: _ComplexLike_co, deg: bool = False) -> floating: ... -@overload -def angle(z: object_, deg: bool = False) -> Any: ... -@overload -def angle(z: _ArrayLikeComplex_co, deg: bool = False) -> NDArray[floating]: ... -@overload -def angle(z: _ArrayLikeObject_co, deg: bool = False) -> NDArray[object_]: ... +# +@overload # 0d T: floating -> 0d T +def angle(z: _FloatingT, deg: bool = False) -> _FloatingT: ... +@overload # 0d complex | float | ~integer -> 0d float64 +def angle(z: complex | _integer_co, deg: bool = False) -> np.float64: ... +@overload # 0d complex64 -> 0d float32 +def angle(z: np.complex64, deg: bool = False) -> np.float32: ... +@overload # 0d clongdouble -> 0d longdouble +def angle(z: np.clongdouble, deg: bool = False) -> np.longdouble: ... +@overload # T: nd floating -> T +def angle(z: _ArrayFloatingT, deg: bool = False) -> _ArrayFloatingT: ... +@overload # nd T: complex128 | ~integer -> nd float64 +def angle(z: _Array[_ShapeT, np.complex128 | _integer_co], deg: bool = False) -> _Array[_ShapeT, np.float64]: ... +@overload # nd T: complex64 -> nd float32 +def angle(z: _Array[_ShapeT, np.complex64], deg: bool = False) -> _Array[_ShapeT, np.float32]: ... +@overload # nd T: clongdouble -> nd longdouble +def angle(z: _Array[_ShapeT, np.clongdouble], deg: bool = False) -> _Array[_ShapeT, np.longdouble]: ... +@overload # 1d complex -> 1d float64 +def angle(z: _Seq1D[complex], deg: bool = False) -> _Array1D[np.float64]: ... +@overload # 2d complex -> 2d float64 +def angle(z: _Seq2D[complex], deg: bool = False) -> _Array2D[np.float64]: ... +@overload # 3d complex -> 3d float64 +def angle(z: _Seq3D[complex], deg: bool = False) -> _Array3D[np.float64]: ... +@overload # fallback +def angle(z: _ArrayLikeComplex_co, deg: bool = False) -> NDArray[np.floating] | Any: ... -@overload +# +@overload # known array-type def unwrap( - p: _ArrayLikeFloat_co, + p: _ArrayFloatObjT, discont: float | None = None, axis: int = -1, *, - period: float = ..., -) -> NDArray[floating]: ... -@overload + period: float = ..., # = τ +) -> _ArrayFloatObjT: ... +@overload # known shape, float64 def unwrap( - p: _ArrayLikeObject_co, + p: _Array[_ShapeT, _float64_co], discont: float | None = None, axis: int = -1, *, - period: float = ..., -) -> NDArray[object_]: ... - -def sort_complex(a: ArrayLike) -> NDArray[complexfloating]: ... 
- -def trim_zeros( - filt: _TrimZerosSequence[_T], - trim: L["f", "b", "fb", "bf"] = "fb", - axis: _ShapeLike | None = None, -) -> _T: ... + period: float = ..., # = τ +) -> _Array[_ShapeT, np.float64]: ... +@overload # 1d float64-like +def unwrap( + p: _Seq1D[float | _float64_co], + discont: float | None = None, + axis: int = -1, + *, + period: float = ..., # = τ +) -> _Array1D[np.float64]: ... +@overload # 2d float64-like +def unwrap( + p: _Seq2D[float | _float64_co], + discont: float | None = None, + axis: int = -1, + *, + period: float = ..., # = τ +) -> _Array2D[np.float64]: ... +@overload # 3d float64-like +def unwrap( + p: _Seq3D[float | _float64_co], + discont: float | None = None, + axis: int = -1, + *, + period: float = ..., # = τ +) -> _Array3D[np.float64]: ... +@overload # ?d, float64 +def unwrap( + p: _SeqND[float] | _ArrayLike[_float64_co], + discont: float | None = None, + axis: int = -1, + *, + period: float = ..., # = τ +) -> NDArray[np.float64]: ... +@overload # fallback +def unwrap( + p: _ArrayLikeFloat_co | _ArrayLikeObject_co, + discont: float | None = None, + axis: int = -1, + *, + period: float = ..., # = τ +) -> np.ndarray: ... -@overload -def extract(condition: ArrayLike, arr: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... -@overload -def extract(condition: ArrayLike, arr: ArrayLike) -> NDArray[Any]: ... +# +@overload +def sort_complex(a: _ArrayComplexT) -> _ArrayComplexT: ... +@overload # complex64, shape known +def sort_complex(a: _Array[_ShapeT, np.int8 | np.uint8 | np.int16 | np.uint16]) -> _Array[_ShapeT, np.complex64]: ... +@overload # complex64, shape unknown +def sort_complex(a: _ArrayLike[np.int8 | np.uint8 | np.int16 | np.uint16]) -> NDArray[np.complex64]: ... +@overload # complex128, shape known +def sort_complex(a: _Array[_ShapeT, _SortsToComplex128]) -> _Array[_ShapeT, np.complex128]: ... +@overload # complex128, shape unknown +def sort_complex(a: _ArrayLike[_SortsToComplex128]) -> NDArray[np.complex128]: ... +@overload # clongdouble, shape known +def sort_complex(a: _Array[_ShapeT, np.longdouble]) -> _Array[_ShapeT, np.clongdouble]: ... +@overload # clongdouble, shape unknown +def sort_complex(a: _ArrayLike[np.longdouble]) -> NDArray[np.clongdouble]: ... -def place(arr: NDArray[Any], mask: ArrayLike, vals: Any) -> None: ... +# +def trim_zeros(filt: _TrimZerosSequence[_T], trim: L["f", "b", "fb", "bf"] = "fb", axis: _ShapeLike | None = None) -> _T: ... -@overload +# NOTE: keep in sync with `corrcoef` +@overload # ?d, known inexact scalar-type >=64 precision, y=. def cov( - m: _ArrayLikeFloat_co, - y: _ArrayLikeFloat_co | None = None, + m: _ArrayLike[_AnyDoubleT], + y: _ArrayLike[_AnyDoubleT], rowvar: bool = True, bias: bool = False, ddof: SupportsIndex | SupportsInt | None = None, - fweights: ArrayLike | None = None, - aweights: ArrayLike | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, *, dtype: None = None, -) -> NDArray[floating]: ... -@overload +) -> _Array2D[_AnyDoubleT]: ... +@overload # ?d, known inexact scalar-type >=64 precision, y=None -> 0d or 2d +def cov( + m: _ArrayNoD[_AnyDoubleT], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[_AnyDoubleT] | None = None, +) -> NDArray[_AnyDoubleT]: ... 
+@overload # 1d, known inexact scalar-type >=64 precision, y=None +def cov( + m: _Array1D[_AnyDoubleT], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[_AnyDoubleT] | None = None, +) -> _Array0D[_AnyDoubleT]: ... +@overload # nd, known inexact scalar-type >=64 precision, y=None -> 0d or 2d +def cov( + m: _ArrayLike[_AnyDoubleT], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[_AnyDoubleT] | None = None, +) -> NDArray[_AnyDoubleT]: ... +@overload # nd, casts to float64, y= +def cov( + m: NDArray[np.float32 | np.float16 | _integer_co] | _Seq1D[float] | _Seq2D[float], + y: NDArray[np.float32 | np.float16 | _integer_co] | _Seq1D[float] | _Seq2D[float], + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> _Array2D[np.float64]: ... +@overload # ?d or 2d, casts to float64, y=None -> 0d or 2d +def cov( + m: _ArrayNoD[np.float32 | np.float16 | _integer_co] | _Seq2D[float], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> NDArray[np.float64]: ... +@overload # 1d, casts to float64, y=None +def cov( + m: _Array1D[np.float32 | np.float16 | _integer_co] | _Seq1D[float], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> _Array0D[np.float64]: ... +@overload # nd, casts to float64, y=None -> 0d or 2d +def cov( + m: _ArrayLike[np.float32 | np.float16 | _integer_co], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> NDArray[np.float64]: ... +@overload # 1d complex, y= (`list` avoids overlap with float overloads) +def cov( + m: list[complex] | _Seq1D[list[complex]], + y: list[complex] | _Seq1D[list[complex]], + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[np.complex128] | None = None, +) -> _Array2D[np.complex128]: ... +@overload # 1d complex, y=None +def cov( + m: list[complex], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[np.complex128] | None = None, +) -> _Array0D[np.complex128]: ... 
+@overload # 2d complex, y=None -> 0d or 2d +def cov( + m: _Seq1D[list[complex]], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[np.complex128] | None = None, +) -> NDArray[np.complex128]: ... +@overload # 1d complex-like, y=None, dtype= +def cov( + m: _Seq1D[_ComplexLike_co], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[_ScalarT], +) -> _Array0D[_ScalarT]: ... +@overload # nd complex-like, y=, dtype= def cov( m: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co | None = None, + y: _ArrayLikeComplex_co, rowvar: bool = True, bias: bool = False, ddof: SupportsIndex | SupportsInt | None = None, - fweights: ArrayLike | None = None, - aweights: ArrayLike | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, *, - dtype: None = None, -) -> NDArray[complexfloating]: ... -@overload + dtype: _DTypeLike[_ScalarT], +) -> _Array2D[_ScalarT]: ... +@overload # nd complex-like, y=None, dtype= -> 0d or 2d def cov( m: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co | None = None, + y: None = None, rowvar: bool = True, bias: bool = False, ddof: SupportsIndex | SupportsInt | None = None, - fweights: ArrayLike | None = None, - aweights: ArrayLike | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, *, dtype: _DTypeLike[_ScalarT], ) -> NDArray[_ScalarT]: ... -@overload +@overload # nd complex-like, y=, dtype=? +def cov( + m: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: DTypeLike | None = None, +) -> _Array2D[Incomplete]: ... +@overload # 1d complex-like, y=None, dtype=? +def cov( + m: _Seq1D[_ComplexLike_co], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: DTypeLike | None = None, +) -> _Array0D[Incomplete]: ... +@overload # nd complex-like, dtype=? def cov( m: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co | None = None, rowvar: bool = True, bias: bool = False, ddof: SupportsIndex | SupportsInt | None = None, - fweights: ArrayLike | None = None, - aweights: ArrayLike | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, *, - dtype: DTypeLike | None, -) -> NDArray[Any]: ... + dtype: DTypeLike | None = None, +) -> NDArray[Incomplete]: ... -# NOTE `bias` and `ddof` are deprecated and ignored -@overload +# NOTE: If only `x` is given and the resulting array has shape (1,1), a bare scalar +# is returned instead of a 2D array. When y is given, a 2D array is always returned. +# This differs from `cov`, which returns 0-D arrays instead of scalars in such cases. +# NOTE: keep in sync with `cov` +@overload # ?d, known inexact scalar-type >=64 precision, y=. 
def corrcoef( - x: _ArrayLikeFloat_co, - y: _ArrayLikeFloat_co | None = None, + x: _ArrayLike[_AnyDoubleT], + y: _ArrayLike[_AnyDoubleT], rowvar: bool = True, *, - dtype: None = None, -) -> NDArray[floating]: ... -@overload + dtype: _DTypeLike[_AnyDoubleT] | None = None, +) -> _Array2D[_AnyDoubleT]: ... +@overload # ?d, known inexact scalar-type >=64 precision, y=None +def corrcoef( + x: _ArrayNoD[_AnyDoubleT], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[_AnyDoubleT] | None = None, +) -> _Array2D[_AnyDoubleT] | _AnyDoubleT: ... +@overload # 1d, known inexact scalar-type >=64 precision, y=None +def corrcoef( + x: _Array1D[_AnyDoubleT], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[_AnyDoubleT] | None = None, +) -> _AnyDoubleT: ... +@overload # nd, known inexact scalar-type >=64 precision, y=None +def corrcoef( + x: _ArrayLike[_AnyDoubleT], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[_AnyDoubleT] | None = None, +) -> _Array2D[_AnyDoubleT] | _AnyDoubleT: ... +@overload # nd, casts to float64, y= +def corrcoef( + x: NDArray[np.float32 | np.float16 | _integer_co] | _Seq1D[float] | _Seq2D[float], + y: NDArray[np.float32 | np.float16 | _integer_co] | _Seq1D[float] | _Seq2D[float], + rowvar: bool = True, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> _Array2D[np.float64]: ... +@overload # ?d or 2d, casts to float64, y=None +def corrcoef( + x: _ArrayNoD[np.float32 | np.float16 | _integer_co] | _Seq2D[float], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> _Array2D[np.float64] | np.float64: ... +@overload # 1d, casts to float64, y=None +def corrcoef( + x: _Array1D[np.float32 | np.float16 | _integer_co] | _Seq1D[float], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> np.float64: ... +@overload # nd, casts to float64, y=None +def corrcoef( + x: _ArrayLike[np.float32 | np.float16 | _integer_co], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> _Array2D[np.float64] | np.float64: ... +@overload # 1d complex, y= (`list` avoids overlap with float overloads) +def corrcoef( + x: list[complex] | _Seq1D[list[complex]], + y: list[complex] | _Seq1D[list[complex]], + rowvar: bool = True, + *, + dtype: _DTypeLike[np.complex128] | None = None, +) -> _Array2D[np.complex128]: ... +@overload # 1d complex, y=None +def corrcoef( + x: list[complex], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[np.complex128] | None = None, +) -> np.complex128: ... +@overload # 2d complex, y=None +def corrcoef( + x: _Seq1D[list[complex]], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[np.complex128] | None = None, +) -> _Array2D[np.complex128] | np.complex128: ... +@overload # 1d complex-like, y=None, dtype= +def corrcoef( + x: _Seq1D[_ComplexLike_co], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[_ScalarT], +) -> _ScalarT: ... +@overload # nd complex-like, y=, dtype= def corrcoef( x: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co | None = None, + y: _ArrayLikeComplex_co, rowvar: bool = True, *, - dtype: None = None, -) -> NDArray[complexfloating]: ... -@overload + dtype: _DTypeLike[_ScalarT], +) -> _Array2D[_ScalarT]: ... +@overload # nd complex-like, y=None, dtype= def corrcoef( x: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co | None = None, + y: None = None, rowvar: bool = True, *, dtype: _DTypeLike[_ScalarT], -) -> NDArray[_ScalarT]: ... 
-@overload +) -> _Array2D[_ScalarT] | _ScalarT: ... +@overload # nd complex-like, y=, dtype=? +def corrcoef( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + rowvar: bool = True, + *, + dtype: DTypeLike | None = None, +) -> _Array2D[Incomplete]: ... +@overload # 1d complex-like, y=None, dtype=? +def corrcoef( + x: _Seq1D[_ComplexLike_co], + y: None = None, + rowvar: bool = True, + *, + dtype: DTypeLike | None = None, +) -> Incomplete: ... +@overload # nd complex-like, dtype=? def corrcoef( x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co | None = None, rowvar: bool = True, *, dtype: DTypeLike | None = None, -) -> NDArray[Any]: ... +) -> _Array2D[Incomplete] | Incomplete: ... -def blackman(M: _FloatLike_co) -> NDArray[floating]: ... - -def bartlett(M: _FloatLike_co) -> NDArray[floating]: ... - -def hanning(M: _FloatLike_co) -> NDArray[floating]: ... - -def hamming(M: _FloatLike_co) -> NDArray[floating]: ... - -def i0(x: _ArrayLikeFloat_co) -> NDArray[floating]: ... - -def kaiser( - M: _FloatLike_co, - beta: _FloatLike_co, -) -> NDArray[floating]: ... +# note that floating `M` are accepted, but their fractional part is ignored +def blackman(M: _FloatLike_co) -> _Array1D[np.float64]: ... +def bartlett(M: _FloatLike_co) -> _Array1D[np.float64]: ... +def hanning(M: _FloatLike_co) -> _Array1D[np.float64]: ... +def hamming(M: _FloatLike_co) -> _Array1D[np.float64]: ... +def kaiser(M: _FloatLike_co, beta: _FloatLike_co) -> _Array1D[np.float64]: ... +# @overload -def sinc(x: _FloatLike_co) -> floating: ... +def i0(x: _Array[_ShapeT, np.floating | np.integer]) -> _Array[_ShapeT, np.float64]: ... @overload -def sinc(x: _ComplexLike_co) -> complexfloating: ... +def i0(x: _FloatLike_co) -> _Array0D[np.float64]: ... @overload -def sinc(x: _ArrayLikeFloat_co) -> NDArray[floating]: ... +def i0(x: _Seq1D[_FloatLike_co]) -> _Array1D[np.float64]: ... @overload -def sinc(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... +def i0(x: _Seq2D[_FloatLike_co]) -> _Array2D[np.float64]: ... +@overload +def i0(x: _Seq3D[_FloatLike_co]) -> _Array3D[np.float64]: ... +@overload +def i0(x: _ArrayLikeFloat_co) -> NDArray[np.float64]: ... +# +@overload +def sinc(x: _InexactT) -> _InexactT: ... +@overload +def sinc(x: float | _float64_co) -> np.float64: ... +@overload +def sinc(x: complex) -> np.complex128 | Any: ... @overload +def sinc(x: _ArrayInexactT) -> _ArrayInexactT: ... +@overload +def sinc(x: _Array[_ShapeT, _integer_co]) -> _Array[_ShapeT, np.float64]: ... +@overload +def sinc(x: _Seq1D[float]) -> _Array1D[np.float64]: ... +@overload +def sinc(x: _Seq2D[float]) -> _Array2D[np.float64]: ... +@overload +def sinc(x: _Seq3D[float]) -> _Array3D[np.float64]: ... +@overload +def sinc(x: _SeqND[float]) -> NDArray[np.float64]: ... +@overload +def sinc(x: list[complex]) -> _Array1D[np.complex128]: ... +@overload +def sinc(x: _Seq1D[list[complex]]) -> _Array2D[np.complex128]: ... +@overload +def sinc(x: _Seq2D[list[complex]]) -> _Array3D[np.complex128]: ... +@overload +def sinc(x: _ArrayLikeComplex_co) -> np.ndarray | Any: ... + +# NOTE: We assume that `axis` is only provided for >=1-D arrays because for <1-D arrays +# it has no effect, and would complicate the overloads significantly. +@overload # known scalar-type, keepdims=False (default) def median( - a: _ArrayLikeFloat_co, + a: _ArrayLike[_InexactTimeT], axis: None = None, out: None = None, overwrite_input: bool = False, keepdims: L[False] = False, -) -> floating: ... -@overload +) -> _InexactTimeT: ... 
+@overload # float array-like, keepdims=False (default) def median( - a: _ArrayLikeComplex_co, + a: _ArrayLikeInt_co | _SeqND[float] | float, axis: None = None, out: None = None, overwrite_input: bool = False, keepdims: L[False] = False, -) -> complexfloating: ... -@overload +) -> np.float64: ... +@overload # complex array-like, keepdims=False (default) def median( - a: _ArrayLikeTD64_co, + a: _ListSeqND[complex], axis: None = None, out: None = None, overwrite_input: bool = False, keepdims: L[False] = False, -) -> timedelta64: ... -@overload +) -> np.complex128: ... +@overload # complex scalar, keepdims=False (default) def median( - a: _ArrayLikeObject_co, + a: complex, axis: None = None, out: None = None, overwrite_input: bool = False, keepdims: L[False] = False, -) -> Any: ... -@overload +) -> np.complex128 | Any: ... +@overload # known array-type, keepdims=True def median( - a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + a: _ArrayNumericT, axis: _ShapeLike | None = None, out: None = None, overwrite_input: bool = False, + *, + keepdims: L[True], +) -> _ArrayNumericT: ... +@overload # known scalar-type, keepdims=True +def median( + a: _ArrayLike[_ScalarNumericT], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> NDArray[_ScalarNumericT]: ... +@overload # known scalar-type, axis= +def median( + a: _ArrayLike[_ScalarNumericT], + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, keepdims: bool = False, -) -> Any: ... -@overload +) -> NDArray[_ScalarNumericT]: ... +@overload # float array-like, keepdims=True def median( - a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - axis: _ShapeLike | None, - out: _ArrayT, + a: _SeqND[float], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> NDArray[np.float64]: ... +@overload # float array-like, axis= +def median( + a: _SeqND[float], + axis: _ShapeLike, + out: None = None, overwrite_input: bool = False, keepdims: bool = False, -) -> _ArrayT: ... -@overload +) -> NDArray[np.float64]: ... +@overload # complex array-like, keepdims=True def median( - a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + a: _ListSeqND[complex], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> NDArray[np.complex128]: ... +@overload # complex array-like, axis= +def median( + a: _ListSeqND[complex], + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, +) -> NDArray[np.complex128]: ... +@overload # out= (keyword) +def median( + a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], axis: _ShapeLike | None = None, *, out: _ArrayT, overwrite_input: bool = False, keepdims: bool = False, ) -> _ArrayT: ... - -_MethodKind = L[ - "inverted_cdf", - "averaged_inverted_cdf", - "closest_observation", - "interpolated_inverted_cdf", - "hazen", - "weibull", - "linear", - "median_unbiased", - "normal_unbiased", - "lower", - "higher", - "midpoint", - "nearest", -] +@overload # out= (positional) +def median( + a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], + axis: _ShapeLike | None, + out: _ArrayT, + overwrite_input: bool = False, + keepdims: bool = False, +) -> _ArrayT: ... 
+@overload # fallback +def median( + a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, +) -> Incomplete: ... # NOTE: keep in sync with `quantile` -@overload +@overload # inexact, scalar, axis=None +def percentile( + a: _ArrayLike[_InexactDateTimeT], + q: _FloatLike_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _InexactDateTimeT: ... +@overload # inexact, scalar, axis= +def percentile( + a: _ArrayLike[_InexactDateTimeT], + q: _FloatLike_co, + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[_InexactDateTimeT]: ... +@overload # inexact, scalar, keepdims=True +def percentile( + a: _ArrayLike[_InexactDateTimeT], + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[_InexactDateTimeT]: ... +@overload # inexact, array, axis=None +def percentile( + a: _ArrayLike[_InexactDateTimeT], + q: _Array[_ShapeT, _floating_co], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _Array[_ShapeT, _InexactDateTimeT]: ... +@overload # inexact, array-like +def percentile( + a: _ArrayLike[_InexactDateTimeT], + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[_InexactDateTimeT]: ... +@overload # float, scalar, axis=None +def percentile( + a: _SeqND[float] | _ArrayLikeInt_co, + q: _FloatLike_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> np.float64: ... +@overload # float, scalar, axis= def percentile( - a: _ArrayLikeFloat_co, + a: _SeqND[float] | _ArrayLikeInt_co, q: _FloatLike_co, - axis: None = None, + axis: _ShapeLike, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> floating: ... -@overload +) -> NDArray[np.float64]: ... +@overload # float, scalar, keepdims=True def percentile( - a: _ArrayLikeComplex_co, + a: _SeqND[float] | _ArrayLikeInt_co, q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.float64]: ... 
+@overload # float, array, axis=None +def percentile( + a: _SeqND[float] | _ArrayLikeInt_co, + q: _Array[_ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> complexfloating: ... -@overload +) -> _Array[_ShapeT, np.float64]: ... +@overload # float, array-like +def percentile( + a: _SeqND[float] | _ArrayLikeInt_co, + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.float64]: ... +@overload # complex, scalar, axis=None def percentile( - a: _ArrayLikeTD64_co, + a: _ListSeqND[complex], q: _FloatLike_co, axis: None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> timedelta64: ... -@overload +) -> np.complex128: ... +@overload # complex, scalar, axis= def percentile( - a: _ArrayLikeDT64_co, + a: _ListSeqND[complex], q: _FloatLike_co, - axis: None = None, + axis: _ShapeLike, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> datetime64: ... -@overload +) -> NDArray[np.complex128]: ... +@overload # complex, scalar, keepdims=True def percentile( - a: _ArrayLikeObject_co, + a: _ListSeqND[complex], q: _FloatLike_co, - axis: None = None, + axis: _ShapeLike | None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", - keepdims: L[False] = False, + method: _InterpolationMethod = "linear", *, + keepdims: L[True], weights: _ArrayLikeFloat_co | None = None, -) -> Any: ... -@overload +) -> NDArray[np.complex128]: ... +@overload # complex, array, axis=None def percentile( - a: _ArrayLikeFloat_co, - q: _ArrayLikeFloat_co, + a: _ListSeqND[complex], + q: _Array[_ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[floating]: ... -@overload +) -> _Array[_ShapeT, np.complex128]: ... +@overload # complex, array-like def percentile( - a: _ArrayLikeComplex_co, - q: _ArrayLikeFloat_co, - axis: None = None, + a: _ListSeqND[complex], + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", - keepdims: L[False] = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[complexfloating]: ... -@overload +) -> NDArray[np.complex128]: ... +@overload # object_, scalar, axis=None def percentile( - a: _ArrayLikeTD64_co, - q: _ArrayLikeFloat_co, + a: _ArrayLikeObject_co, + q: _FloatLike_co, axis: None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[timedelta64]: ... -@overload +) -> Any: ... 
+@overload # object_, scalar, axis= def percentile( - a: _ArrayLikeDT64_co, - q: _ArrayLikeFloat_co, - axis: None = None, + a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: _ShapeLike, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[datetime64]: ... -@overload +) -> NDArray[np.object_]: ... +@overload # object_, scalar, keepdims=True def percentile( a: _ArrayLikeObject_co, - q: _ArrayLikeFloat_co, + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.object_]: ... +@overload # object_, array, axis=None +def percentile( + a: _ArrayLikeObject_co, + q: _Array[_ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[object_]: ... -@overload +) -> _Array[_ShapeT, np.object_]: ... +@overload # object_, array-like def percentile( - a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, - q: _ArrayLikeFloat_co, + a: _ArrayLikeObject_co, + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], axis: _ShapeLike | None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> Any: ... -@overload +) -> NDArray[np.object_]: ... +@overload # out= (keyword) def percentile( - a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, + a: ArrayLike, q: _ArrayLikeFloat_co, axis: _ShapeLike | None, out: _ArrayT, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, ) -> _ArrayT: ... -@overload +@overload # out= (positional) def percentile( - a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, + a: ArrayLike, q: _ArrayLikeFloat_co, axis: _ShapeLike | None = None, *, out: _ArrayT, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: bool = False, weights: _ArrayLikeFloat_co | None = None, ) -> _ArrayT: ... +@overload # fallback +def percentile( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> Incomplete: ... # NOTE: keep in sync with `percentile` -@overload +@overload # inexact, scalar, axis=None def quantile( - a: _ArrayLikeFloat_co, + a: _ArrayLike[_InexactDateTimeT], q: _FloatLike_co, axis: None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> floating: ... -@overload +) -> _InexactDateTimeT: ... 
+@overload # inexact, scalar, axis= def quantile( - a: _ArrayLikeComplex_co, + a: _ArrayLike[_InexactDateTimeT], q: _FloatLike_co, - axis: None = None, + axis: _ShapeLike, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> complexfloating: ... -@overload +) -> NDArray[_InexactDateTimeT]: ... +@overload # inexact, scalar, keepdims=True def quantile( - a: _ArrayLikeTD64_co, + a: _ArrayLike[_InexactDateTimeT], q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[_InexactDateTimeT]: ... +@overload # inexact, array, axis=None +def quantile( + a: _ArrayLike[_InexactDateTimeT], + q: _Array[_ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> timedelta64: ... -@overload +) -> _Array[_ShapeT, _InexactDateTimeT]: ... +@overload # inexact, array-like +def quantile( + a: _ArrayLike[_InexactDateTimeT], + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[_InexactDateTimeT]: ... +@overload # float, scalar, axis=None def quantile( - a: _ArrayLikeDT64_co, + a: _SeqND[float] | _ArrayLikeInt_co, q: _FloatLike_co, axis: None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> datetime64: ... -@overload +) -> np.float64: ... +@overload # float, scalar, axis= def quantile( - a: _ArrayLikeObject_co, + a: _SeqND[float] | _ArrayLikeInt_co, q: _FloatLike_co, - axis: None = None, + axis: _ShapeLike, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> Any: ... -@overload +) -> NDArray[np.float64]: ... +@overload # float, scalar, keepdims=True def quantile( - a: _ArrayLikeFloat_co, - q: _ArrayLikeFloat_co, + a: _SeqND[float] | _ArrayLikeInt_co, + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.float64]: ... +@overload # float, array, axis=None +def quantile( + a: _SeqND[float] | _ArrayLikeInt_co, + q: _Array[_ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[floating]: ... -@overload +) -> _Array[_ShapeT, np.float64]: ... 
+@overload # float, array-like def quantile( - a: _ArrayLikeComplex_co, - q: _ArrayLikeFloat_co, + a: _SeqND[float] | _ArrayLikeInt_co, + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.float64]: ... +@overload # complex, scalar, axis=None +def quantile( + a: _ListSeqND[complex], + q: _FloatLike_co, axis: None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[complexfloating]: ... -@overload +) -> np.complex128: ... +@overload # complex, scalar, axis= def quantile( - a: _ArrayLikeTD64_co, - q: _ArrayLikeFloat_co, + a: _ListSeqND[complex], + q: _FloatLike_co, + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.complex128]: ... +@overload # complex, scalar, keepdims=True +def quantile( + a: _ListSeqND[complex], + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.complex128]: ... +@overload # complex, array, axis=None +def quantile( + a: _ListSeqND[complex], + q: _Array[_ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[timedelta64]: ... -@overload +) -> _Array[_ShapeT, np.complex128]: ... +@overload # complex, array-like def quantile( - a: _ArrayLikeDT64_co, - q: _ArrayLikeFloat_co, + a: _ListSeqND[complex], + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.complex128]: ... +@overload # object_, scalar, axis=None +def quantile( + a: _ArrayLikeObject_co, + q: _FloatLike_co, axis: None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[datetime64]: ... -@overload +) -> Any: ... +@overload # object_, scalar, axis= def quantile( a: _ArrayLikeObject_co, - q: _ArrayLikeFloat_co, + q: _FloatLike_co, + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.object_]: ... +@overload # object_, scalar, keepdims=True +def quantile( + a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.object_]: ... 
+@overload # object_, array, axis=None +def quantile( + a: _ArrayLikeObject_co, + q: _Array[_ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[object_]: ... -@overload +) -> _Array[_ShapeT, np.object_]: ... +@overload # object_, array-like def quantile( - a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, - q: _ArrayLikeFloat_co, + a: _ArrayLikeObject_co, + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], axis: _ShapeLike | None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> Any: ... -@overload +) -> NDArray[np.object_]: ... +@overload # out= (keyword) def quantile( - a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, + a: ArrayLike, q: _ArrayLikeFloat_co, axis: _ShapeLike | None, out: _ArrayT, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, ) -> _ArrayT: ... -@overload +@overload # out= (positional) def quantile( - a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, + a: ArrayLike, q: _ArrayLikeFloat_co, axis: _ShapeLike | None = None, *, out: _ArrayT, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: bool = False, weights: _ArrayLikeFloat_co | None = None, ) -> _ArrayT: ... +@overload # fallback +def quantile( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> Incomplete: ... -_ScalarT_fm = TypeVar( - "_ScalarT_fm", - bound=floating | complexfloating | timedelta64, -) - -class _SupportsRMulFloat(Protocol[_T_co]): - def __rmul__(self, other: float, /) -> _T_co: ... - -@overload -def trapezoid( # type: ignore[overload-overlap] - y: Sequence[_FloatLike_co], - x: Sequence[_FloatLike_co] | None = None, +# +@overload # ?d, known inexact/timedelta64 scalar-type +def trapezoid( + y: _ArrayNoD[_InexactTimeT], + x: _ArrayLike[_InexactTimeT] | _ArrayLikeFloat_co | None = None, dx: float = 1.0, axis: SupportsIndex = -1, -) -> float64: ... -@overload +) -> NDArray[_InexactTimeT] | _InexactTimeT: ... +@overload # ?d, casts to float64 def trapezoid( - y: Sequence[_ComplexLike_co], - x: Sequence[_ComplexLike_co] | None = None, + y: _ArrayNoD[_integer_co], + x: _ArrayLikeFloat_co | None = None, dx: float = 1.0, axis: SupportsIndex = -1, -) -> complex128: ... -@overload +) -> NDArray[np.float64] | np.float64: ... +@overload # strict 1d, known inexact/timedelta64 scalar-type +def trapezoid( + y: _Array1D[_InexactTimeT], + x: _Array1D[_InexactTimeT] | _Seq1D[float] | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> _InexactTimeT: ... +@overload # strict 1d, casts to float64 +def trapezoid( + y: _Array1D[_float64_co] | _Seq1D[float], + x: _Array1D[_float64_co] | _Seq1D[float] | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> np.float64: ... 
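+# Sketch of the 1-d `trapezoid` overloads above (illustrative only):
+#   np.trapezoid([1.0, 2.0, 3.0])           -> np.float64
+#   np.trapezoid([1.0, 2.0], x=[0.0, 0.5])  -> np.float64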
+@overload # strict 1d, casts to complex128 (`list` prevents overlapping overloads) +def trapezoid( + y: list[complex], + x: _Seq1D[complex] | None = None, + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> np.complex128: ... +@overload # strict 1d, casts to complex128 +def trapezoid( + y: _Seq1D[complex], + x: list[complex], + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> np.complex128: ... +@overload # strict 2d, known inexact/timedelta64 scalar-type +def trapezoid( + y: _Array2D[_InexactTimeT], + x: _ArrayMax2D[_InexactTimeT] | _Seq2D[float] | _Seq1D[float] | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> _InexactTimeT: ... +@overload # strict 2d, casts to float64 def trapezoid( - y: _ArrayLike[bool_ | integer], - x: _ArrayLike[bool_ | integer] | None = None, + y: _Array2D[_float64_co] | _Seq2D[float], + x: _ArrayMax2D[_float64_co] | _Seq2D[float] | _Seq1D[float] | None = None, dx: float = 1.0, axis: SupportsIndex = -1, -) -> float64 | NDArray[float64]: ... +) -> np.float64: ... +@overload # strict 2d, casts to complex128 (`list` prevents overlapping overloads) +def trapezoid( + y: _Seq1D[list[complex]], + x: _Seq2D[complex] | _Seq1D[complex] | None = None, + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> np.complex128: ... +@overload # strict 2d, casts to complex128 +def trapezoid( + y: _Seq2D[complex] | _Seq1D[complex], + x: _Seq1D[list[complex]], + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> np.complex128: ... @overload -def trapezoid( # type: ignore[overload-overlap] - y: _ArrayLikeObject_co, - x: _ArrayLikeFloat_co | _ArrayLikeObject_co | None = None, +def trapezoid( + y: _ArrayLike[_InexactTimeT], + x: _ArrayLike[_InexactTimeT] | _ArrayLikeInt_co | None = None, + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> NDArray[_InexactTimeT] | _InexactTimeT: ... +@overload +def trapezoid( + y: _ArrayLike[_float64_co], + x: _ArrayLikeFloat_co | None = None, dx: float = 1.0, axis: SupportsIndex = -1, -) -> float | NDArray[object_]: ... +) -> NDArray[np.float64] | np.float64: ... @overload def trapezoid( - y: _ArrayLike[_ScalarT_fm], - x: _ArrayLike[_ScalarT_fm] | _ArrayLikeInt_co | None = None, + y: _ArrayLike[np.complex128], + x: _ArrayLikeComplex_co | None = None, dx: float = 1.0, axis: SupportsIndex = -1, -) -> _ScalarT_fm | NDArray[_ScalarT_fm]: ... +) -> NDArray[np.complex128] | np.complex128: ... @overload def trapezoid( - y: Sequence[_SupportsRMulFloat[_T]], - x: Sequence[_SupportsRMulFloat[_T] | _T] | None = None, + y: _ArrayLikeComplex_co, + x: _ArrayLike[np.complex128], dx: float = 1.0, axis: SupportsIndex = -1, -) -> _T: ... +) -> NDArray[np.complex128] | np.complex128: ... @overload def trapezoid( - y: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - x: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co | None = None, + y: _ArrayLikeObject_co, + x: _ArrayLikeObject_co | _ArrayLikeFloat_co | None = None, dx: float = 1.0, axis: SupportsIndex = -1, -) -> ( - floating | complexfloating | timedelta64 - | NDArray[floating | complexfloating | timedelta64 | object_] -): ... +) -> NDArray[np.object_] | Any: ... @overload -def meshgrid( - *, - copy: bool = True, - sparse: bool = False, - indexing: _MeshgridIdx = "xy", -) -> tuple[()]: ... +def trapezoid( + y: _Seq1D[_SupportsRMulFloat[_T]], + x: _Seq1D[_SupportsRMulFloat[_T] | _T] | None = None, + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> _T: ... 
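+# The `_SupportsRMulFloat` overload above admits duck-typed scalars: e.g. a
+# 1-d list of `fractions.Fraction` should resolve to `float`, since
+# `Fraction.__rmul__` with a `float` operand returns `float` (sketch only).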
@overload +def trapezoid( + y: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], + x: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_] | None = None, + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> Incomplete: ... + +# +@overload # 0d +def meshgrid(*, copy: bool = True, sparse: bool = False, indexing: _Indexing = "xy") -> tuple[()]: ... +@overload # 1d, known scalar-type def meshgrid( x1: _ArrayLike[_ScalarT], /, *, copy: bool = True, sparse: bool = False, - indexing: _MeshgridIdx = "xy", -) -> tuple[NDArray[_ScalarT]]: ... -@overload + indexing: _Indexing = "xy", +) -> _Mesh1[_ScalarT]: ... +@overload # 1d, unknown scalar-type def meshgrid( x1: ArrayLike, /, *, copy: bool = True, sparse: bool = False, - indexing: _MeshgridIdx = "xy", -) -> tuple[NDArray[Any]]: ... -@overload + indexing: _Indexing = "xy", +) -> _Mesh1[Any]: ... +@overload # 2d, known scalar-types def meshgrid( - x1: _ArrayLike[_ScalarT1], - x2: _ArrayLike[_ScalarT2], + x1: _ArrayLike[_ScalarT], + x2: _ArrayLike[_ScalarT1], /, *, copy: bool = True, sparse: bool = False, - indexing: _MeshgridIdx = "xy", -) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... -@overload + indexing: _Indexing = "xy", +) -> _Mesh2[_ScalarT, _ScalarT1]: ... +@overload # 2d, known/unknown scalar-types def meshgrid( - x1: ArrayLike, - x2: _ArrayLike[_ScalarT], + x1: _ArrayLike[_ScalarT], + x2: ArrayLike, /, *, copy: bool = True, sparse: bool = False, - indexing: _MeshgridIdx = "xy", -) -> tuple[NDArray[Any], NDArray[_ScalarT]]: ... -@overload + indexing: _Indexing = "xy", +) -> _Mesh2[_ScalarT, Any]: ... +@overload # 2d, unknown/known scalar-types def meshgrid( - x1: _ArrayLike[_ScalarT], - x2: ArrayLike, + x1: ArrayLike, + x2: _ArrayLike[_ScalarT], /, *, copy: bool = True, sparse: bool = False, - indexing: _MeshgridIdx = "xy", -) -> tuple[NDArray[_ScalarT], NDArray[Any]]: ... -@overload + indexing: _Indexing = "xy", +) -> _Mesh2[Any, _ScalarT]: ... +@overload # 2d, unknown scalar-types def meshgrid( x1: ArrayLike, x2: ArrayLike, @@ -1045,82 +2202,102 @@ def meshgrid( *, copy: bool = True, sparse: bool = False, - indexing: _MeshgridIdx = "xy", -) -> tuple[NDArray[Any], NDArray[Any]]: ... -@overload + indexing: _Indexing = "xy", +) -> _Mesh2[Any, Any]: ... +@overload # 3d, known scalar-types def meshgrid( - x1: ArrayLike, - x2: ArrayLike, - x3: ArrayLike, + x1: _ArrayLike[_ScalarT], + x2: _ArrayLike[_ScalarT1], + x3: _ArrayLike[_ScalarT2], /, *, copy: bool = True, sparse: bool = False, - indexing: _MeshgridIdx = "xy", -) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any]]: ... -@overload + indexing: _Indexing = "xy", +) -> _Mesh3[_ScalarT, _ScalarT1, _ScalarT2]: ... +@overload # 3d, unknown scalar-types def meshgrid( x1: ArrayLike, x2: ArrayLike, x3: ArrayLike, - x4: ArrayLike, /, *, copy: bool = True, sparse: bool = False, - indexing: _MeshgridIdx = "xy", -) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any], NDArray[Any]]: ... -@overload + indexing: _Indexing = "xy", +) -> _Mesh3[Any, Any, Any]: ... +@overload # ?d, known scalar-types +def meshgrid( + *xi: _ArrayLike[_ScalarT], + copy: bool = True, + sparse: bool = False, + indexing: _Indexing = "xy", +) -> tuple[NDArray[_ScalarT], ...]: ... +@overload # ?d, unknown scalar-types def meshgrid( *xi: ArrayLike, copy: bool = True, sparse: bool = False, - indexing: _MeshgridIdx = "xy", + indexing: _Indexing = "xy", ) -> tuple[NDArray[Any], ...]: ... 
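+# Usage sketch (illustrative): given a 1-d int64 array `xs` and a 1-d float64
+# array `ys`, `np.meshgrid(xs, ys)` is expected to resolve to a pair of 2-d
+# arrays typed int64 and float64 respectively.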
-@overload -def delete( - arr: _ArrayLike[_ScalarT], - obj: slice | _ArrayLikeInt_co, - axis: SupportsIndex | None = None, -) -> NDArray[_ScalarT]: ... -@overload -def delete( - arr: ArrayLike, - obj: slice | _ArrayLikeInt_co, - axis: SupportsIndex | None = None, -) -> NDArray[Any]: ... +# +def place(arr: np.ndarray, mask: ConvertibleToInt | Sequence[ConvertibleToInt], vals: ArrayLike) -> None: ... -@overload -def insert( - arr: _ArrayLike[_ScalarT], - obj: slice | _ArrayLikeInt_co, - values: ArrayLike, - axis: SupportsIndex | None = None, -) -> NDArray[_ScalarT]: ... -@overload -def insert( - arr: ArrayLike, - obj: slice | _ArrayLikeInt_co, - values: ArrayLike, - axis: SupportsIndex | None = None, -) -> NDArray[Any]: ... +# keep in sync with `insert` +@overload # known scalar-type, axis=None (default) +def delete(arr: _ArrayLike[_ScalarT], obj: _IndexLike, axis: None = None) -> _Array1D[_ScalarT]: ... +@overload # known array-type, axis specified +def delete(arr: _ArrayT, obj: _IndexLike, axis: SupportsIndex) -> _ArrayT: ... +@overload # known scalar-type, axis specified +def delete(arr: _ArrayLike[_ScalarT], obj: _IndexLike, axis: SupportsIndex) -> NDArray[_ScalarT]: ... +@overload # known scalar-type, axis=None (default) +def delete(arr: ArrayLike, obj: _IndexLike, axis: None = None) -> _Array1D[Any]: ... +@overload # unknown scalar-type, axis specified +def delete(arr: ArrayLike, obj: _IndexLike, axis: SupportsIndex) -> NDArray[Any]: ... -def append( - arr: ArrayLike, - values: ArrayLike, - axis: SupportsIndex | None = None, -) -> NDArray[Any]: ... +# keep in sync with `delete` +@overload # known scalar-type, axis=None (default) +def insert(arr: _ArrayLike[_ScalarT], obj: _IndexLike, values: ArrayLike, axis: None = None) -> _Array1D[_ScalarT]: ... +@overload # known array-type, axis specified +def insert(arr: _ArrayT, obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> _ArrayT: ... +@overload # known scalar-type, axis specified +def insert(arr: _ArrayLike[_ScalarT], obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> NDArray[_ScalarT]: ... +@overload # known scalar-type, axis=None (default) +def insert(arr: ArrayLike, obj: _IndexLike, values: ArrayLike, axis: None = None) -> _Array1D[Any]: ... +@overload # unknown scalar-type, axis specified +def insert(arr: ArrayLike, obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> NDArray[Any]: ... +# +@overload # known array type, axis specified +def append(arr: _ArrayT, values: _ArrayT, axis: SupportsIndex) -> _ArrayT: ... +@overload # 1d, known scalar type, axis specified +def append(arr: _Seq1D[_ScalarT], values: _Seq1D[_ScalarT], axis: SupportsIndex) -> _Array1D[_ScalarT]: ... +@overload # 2d, known scalar type, axis specified +def append(arr: _Seq2D[_ScalarT], values: _Seq2D[_ScalarT], axis: SupportsIndex) -> _Array2D[_ScalarT]: ... +@overload # 3d, known scalar type, axis specified +def append(arr: _Seq3D[_ScalarT], values: _Seq3D[_ScalarT], axis: SupportsIndex) -> _Array3D[_ScalarT]: ... +@overload # ?d, known scalar type, axis specified +def append(arr: _SeqND[_ScalarT], values: _SeqND[_ScalarT], axis: SupportsIndex) -> NDArray[_ScalarT]: ... +@overload # ?d, unknown scalar type, axis specified +def append(arr: np.ndarray | _SeqND[_ScalarLike_co], values: _SeqND[_ScalarLike_co], axis: SupportsIndex) -> np.ndarray: ... +@overload # known scalar type, axis=None +def append(arr: _ArrayLike[_ScalarT], values: _ArrayLike[_ScalarT], axis: None = None) -> _Array1D[_ScalarT]: ... 
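+# Sketch: with the default axis=None, `delete` and `insert` operate on the
+# flattened input, hence the 1-d (`_Array1D`) return types above, e.g.
+#   np.delete(np.ones((2, 2)), np.s_[:1])  -> a 1-d float64 array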
+@overload # unknown scalar type, axis=None +def append(arr: ArrayLike, values: ArrayLike, axis: None = None) -> _Array1D[Any]: ... + +# @overload def digitize( - x: _FloatLike_co, - bins: _ArrayLikeFloat_co, - right: bool = False, -) -> intp: ... + x: _Array[_ShapeT, np.floating | np.integer], bins: _ArrayLikeFloat_co, right: bool = False +) -> _Array[_ShapeT, np.int_]: ... @overload -def digitize( - x: _ArrayLikeFloat_co, - bins: _ArrayLikeFloat_co, - right: bool = False, -) -> NDArray[intp]: ... +def digitize(x: _FloatLike_co, bins: _ArrayLikeFloat_co, right: bool = False) -> np.int_: ... +@overload +def digitize(x: _Seq1D[_FloatLike_co], bins: _ArrayLikeFloat_co, right: bool = False) -> _Array1D[np.int_]: ... +@overload +def digitize(x: _Seq2D[_FloatLike_co], bins: _ArrayLikeFloat_co, right: bool = False) -> _Array2D[np.int_]: ... +@overload +def digitize(x: _Seq3D[_FloatLike_co], bins: _ArrayLikeFloat_co, right: bool = False) -> _Array3D[np.int_]: ... +@overload +def digitize(x: _ArrayLikeFloat_co, bins: _ArrayLikeFloat_co, right: bool = False) -> NDArray[np.int_] | Any: ... diff --git a/numpy/typing/tests/data/fail/lib_function_base.pyi b/numpy/typing/tests/data/fail/lib_function_base.pyi index f0bf6347691d..d7be993bf04c 100644 --- a/numpy/typing/tests/data/fail/lib_function_base.pyi +++ b/numpy/typing/tests/data/fail/lib_function_base.pyi @@ -13,34 +13,32 @@ AR_b_list: list[npt.NDArray[np.bool]] def fn_none_i(a: None, /) -> npt.NDArray[Any]: ... def fn_ar_i(a: npt.NDArray[np.float64], posarg: int, /) -> npt.NDArray[Any]: ... -np.average(AR_m) # type: ignore[arg-type] -np.select(1, [AR_f8]) # type: ignore[arg-type] -np.angle(AR_m) # type: ignore[arg-type] -np.unwrap(AR_m) # type: ignore[arg-type] -np.unwrap(AR_c16) # type: ignore[arg-type] +np.average(AR_m) # type: ignore[type-var] +np.select(1, [AR_f8]) # type: ignore[call-overload] +np.angle(AR_m) # type: ignore[type-var] +np.unwrap(AR_m) # type: ignore[type-var] +np.unwrap(AR_c16) # type: ignore[type-var] np.trim_zeros(1) # type: ignore[arg-type] np.place(1, [True], 1.5) # type: ignore[arg-type] np.vectorize(1) # type: ignore[arg-type] np.place(AR_f8, slice(None), 5) # type: ignore[arg-type] -np.piecewise(AR_f8, True, [fn_ar_i], 42) # type: ignore[call-overload] -# TODO: enable these once mypy actually supports ParamSpec (released in 2021) -# NOTE: pyright correctly reports errors for these (`reportCallIssue`) -# np.piecewise(AR_f8, AR_b_list, [fn_none_i]) # type: ignore[call-overload]s -# np.piecewise(AR_f8, AR_b_list, [fn_ar_i]) # type: ignore[call-overload] -# np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 3.14) # type: ignore[call-overload] -# np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 42, None) # type: ignore[call-overload] -# np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 42, _=None) # type: ignore[call-overload] +np.piecewise(AR_f8, True, [fn_ar_i], "wrong") # type: ignore[call-overload] +np.piecewise(AR_f8, AR_b_list, [fn_none_i]) # type: ignore[call-overload] +np.piecewise(AR_f8, AR_b_list, [fn_ar_i]) # type: ignore[call-overload] +np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 3.14) # type: ignore[call-overload] +np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 42, None) # type: ignore[call-overload] +np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 42, _=None) # type: ignore[list-item] np.interp(AR_f8, AR_c16, AR_f8) # type: ignore[arg-type] np.interp(AR_c16, AR_f8, AR_f8) # type: ignore[arg-type] np.interp(AR_f8, AR_f8, AR_f8, period=AR_c16) # type: ignore[call-overload] np.interp(AR_f8, AR_f8, AR_O) # type: ignore[arg-type] -np.cov(AR_m) # type: 
ignore[arg-type] -np.cov(AR_O) # type: ignore[arg-type] -np.corrcoef(AR_m) # type: ignore[arg-type] -np.corrcoef(AR_O) # type: ignore[arg-type] +np.cov(AR_m) # type: ignore[type-var] +np.cov(AR_O) # type: ignore[type-var] +np.corrcoef(AR_m) # type: ignore[type-var] +np.corrcoef(AR_O) # type: ignore[type-var] np.corrcoef(AR_f8, bias=True) # type: ignore[call-overload] np.corrcoef(AR_f8, ddof=2) # type: ignore[call-overload] np.blackman(1j) # type: ignore[arg-type] @@ -49,8 +47,8 @@ np.hanning(1j) # type: ignore[arg-type] np.hamming(1j) # type: ignore[arg-type] np.hamming(AR_c16) # type: ignore[arg-type] np.kaiser(1j, 1) # type: ignore[arg-type] -np.sinc(AR_O) # type: ignore[arg-type] -np.median(AR_M) # type: ignore[arg-type] +np.sinc(AR_O) # type: ignore[type-var] +np.median(AR_M) # type: ignore[type-var] np.percentile(AR_f8, 50j) # type: ignore[call-overload] np.percentile(AR_f8, 50, interpolation="bob") # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/reveal/lib_function_base.pyi b/numpy/typing/tests/data/reveal/lib_function_base.pyi index 06096f5a7749..345635d06327 100644 --- a/numpy/typing/tests/data/reveal/lib_function_base.pyi +++ b/numpy/typing/tests/data/reveal/lib_function_base.pyi @@ -1,20 +1,26 @@ from collections.abc import Callable from fractions import Fraction -from typing import Any, assert_type +from typing import Any, LiteralString, assert_type, type_check_only import numpy as np import numpy.typing as npt -vectorized_func: np.vectorize - f8: np.float64 +AR_LIKE_b: list[bool] +AR_LIKE_i8: list[int] AR_LIKE_f8: list[float] AR_LIKE_c16: list[complex] AR_LIKE_O: list[Fraction] +AR_u1: npt.NDArray[np.uint8] AR_i8: npt.NDArray[np.int64] +AR_f2: npt.NDArray[np.float16] +AR_f4: npt.NDArray[np.float32] AR_f8: npt.NDArray[np.float64] +AR_f10: npt.NDArray[np.longdouble] +AR_c8: npt.NDArray[np.complex64] AR_c16: npt.NDArray[np.complex128] +AR_c20: npt.NDArray[np.clongdouble] AR_m: npt.NDArray[np.timedelta64] AR_M: npt.NDArray[np.datetime64] AR_O: npt.NDArray[np.object_] @@ -22,21 +28,26 @@ AR_b: npt.NDArray[np.bool] AR_U: npt.NDArray[np.str_] CHAR_AR_U: np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] +AR_f8_1d: np.ndarray[tuple[int], np.dtype[np.float64]] +AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] +AR_f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] +AR_c16_1d: np.ndarray[tuple[int], np.dtype[np.complex128]] + AR_b_list: list[npt.NDArray[np.bool]] -def func( - a: npt.NDArray[Any], - posarg: bool = ..., - /, - arg: int = ..., - *, - kwarg: str = ..., -) -> npt.NDArray[Any]: ... +@type_check_only +def func(a: np.ndarray, posarg: bool = ..., /, arg: int = ..., *, kwarg: str = ...) -> np.ndarray: ... +@type_check_only +def func_f8(a: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: ... 
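+# NOTE: `@type_check_only` marks these helpers as type-checking-time only;
+# they are not meant to exist at runtime.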
+### + +# vectorize +vectorized_func: np.vectorize assert_type(vectorized_func.pyfunc, Callable[..., Any]) assert_type(vectorized_func.cache, bool) -assert_type(vectorized_func.signature, str | None) -assert_type(vectorized_func.otypes, str | None) +assert_type(vectorized_func.signature, LiteralString | None) +assert_type(vectorized_func.otypes, LiteralString | None) assert_type(vectorized_func.excluded, set[int | str]) assert_type(vectorized_func.__doc__, str | None) assert_type(vectorized_func([1]), Any) @@ -46,137 +57,270 @@ assert_type( np.vectorize, ) +# rot90 +assert_type(np.rot90(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.rot90(AR_f8, k=2), npt.NDArray[np.float64]) -assert_type(np.rot90(AR_LIKE_f8, axes=(0, 1)), npt.NDArray[Any]) +assert_type(np.rot90(AR_LIKE_f8, axes=(0, 1)), np.ndarray) -assert_type(np.flip(f8), np.float64) -assert_type(np.flip(1.0), Any) +# flip +assert_type(np.flip(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.flip(AR_f8, axis=(0, 1)), npt.NDArray[np.float64]) -assert_type(np.flip(AR_LIKE_f8, axis=0), npt.NDArray[Any]) +assert_type(np.flip(AR_LIKE_f8, axis=0), np.ndarray) +# iterable assert_type(np.iterable(1), bool) assert_type(np.iterable([1]), bool) -assert_type(np.average(AR_f8), np.floating) -assert_type(np.average(AR_f8, weights=AR_c16), np.complexfloating) +# average +assert_type(np.average(AR_f8_2d), np.float64) +assert_type(np.average(AR_f8_2d, axis=1), npt.NDArray[np.float64]) +assert_type(np.average(AR_f8_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.average(AR_f8), np.float64) +assert_type(np.average(AR_f8, axis=1), npt.NDArray[np.float64]) +assert_type(np.average(AR_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.average(AR_f8, returned=True), tuple[np.float64, np.float64]) +assert_type(np.average(AR_f8, axis=1, returned=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.average(AR_f8, keepdims=True, returned=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.average(AR_LIKE_f8), np.float64) +assert_type(np.average(AR_LIKE_f8, weights=AR_f8), np.float64) +assert_type(np.average(AR_LIKE_f8, axis=1), npt.NDArray[np.float64]) +assert_type(np.average(AR_LIKE_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.average(AR_LIKE_f8, returned=True), tuple[np.float64, np.float64]) +assert_type(np.average(AR_LIKE_f8, axis=1, returned=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.average(AR_LIKE_f8, keepdims=True, returned=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) assert_type(np.average(AR_O), Any) -assert_type(np.average(AR_f8, returned=True), tuple[np.floating, np.floating]) -assert_type(np.average(AR_f8, weights=AR_c16, returned=True), tuple[np.complexfloating, np.complexfloating]) +assert_type(np.average(AR_O, axis=1), np.ndarray) +assert_type(np.average(AR_O, keepdims=True), np.ndarray) assert_type(np.average(AR_O, returned=True), tuple[Any, Any]) -assert_type(np.average(AR_f8, axis=0), Any) -assert_type(np.average(AR_f8, axis=0, returned=True), tuple[Any, Any]) +assert_type(np.average(AR_O, axis=1, returned=True), tuple[np.ndarray, np.ndarray]) +assert_type(np.average(AR_O, keepdims=True, returned=True), tuple[np.ndarray, np.ndarray]) +# asarray_chkfinite +assert_type(np.asarray_chkfinite(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.asarray_chkfinite(AR_f8), npt.NDArray[np.float64]) 
-assert_type(np.asarray_chkfinite(AR_LIKE_f8), npt.NDArray[Any]) +assert_type(np.asarray_chkfinite(AR_LIKE_f8), np.ndarray) assert_type(np.asarray_chkfinite(AR_f8, dtype=np.float64), npt.NDArray[np.float64]) -assert_type(np.asarray_chkfinite(AR_f8, dtype=float), npt.NDArray[Any]) +assert_type(np.asarray_chkfinite(AR_f8, dtype=float), np.ndarray) +# piecewise +assert_type(np.piecewise(AR_f8_1d, AR_b, [func]), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.piecewise(AR_f8, AR_b, [func]), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_f8, AR_b, [func_f8]), npt.NDArray[np.float64]) assert_type(np.piecewise(AR_f8, AR_b_list, [func]), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_f8, AR_b_list, [func_f8]), npt.NDArray[np.float64]) assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, -1, kwarg=""), npt.NDArray[np.float64]) assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, arg=-1, kwarg=""), npt.NDArray[np.float64]) -assert_type(np.piecewise(AR_LIKE_f8, AR_b_list, [func]), npt.NDArray[Any]) +assert_type(np.piecewise(AR_LIKE_f8, AR_b_list, [func]), np.ndarray) +assert_type(np.piecewise(AR_LIKE_f8, AR_b_list, [func_f8]), npt.NDArray[np.float64]) -assert_type(np.select([AR_f8], [AR_f8]), npt.NDArray[Any]) +# extract +assert_type(np.extract(AR_i8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.extract(AR_i8, AR_LIKE_b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.extract(AR_i8, AR_LIKE_i8), np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.extract(AR_i8, AR_LIKE_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.extract(AR_i8, AR_LIKE_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) -assert_type(np.copy(AR_LIKE_f8), npt.NDArray[Any]) +# select +assert_type(np.select([AR_b], [AR_f8_1d]), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.select([AR_b], [AR_f8]), npt.NDArray[np.float64]) + +# places +assert_type(np.place(AR_f8, mask=AR_i8, vals=5.0), None) + +# copy +assert_type(np.copy(AR_LIKE_f8), np.ndarray) assert_type(np.copy(AR_U), npt.NDArray[np.str_]) -assert_type(np.copy(CHAR_AR_U), np.ndarray[Any, Any]) # pyright correctly infers `NDArray[str_]` assert_type(np.copy(CHAR_AR_U, "K", subok=True), np.char.chararray[tuple[Any, ...], np.dtype[np.str_]]) assert_type(np.copy(CHAR_AR_U, subok=True), np.char.chararray[tuple[Any, ...], np.dtype[np.str_]]) +# pyright correctly infers `NDArray[str_]` here +assert_type(np.copy(CHAR_AR_U), np.ndarray[Any, Any]) # pyright: ignore[reportAssertTypeFailure] -assert_type(np.gradient(AR_f8, axis=None), Any) -assert_type(np.gradient(AR_LIKE_f8, edge_order=2), Any) - -assert_type(np.diff("bob", n=0), str) -assert_type(np.diff(AR_f8, axis=0), npt.NDArray[Any]) -assert_type(np.diff(AR_LIKE_f8, prepend=1.5), npt.NDArray[Any]) - +# gradient +assert_type(np.gradient(AR_f8_1d, 1), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type( + np.gradient(AR_f8_2d, [1, 2], [2, 3.5, 4]), + tuple[ + np.ndarray[tuple[int, int], np.dtype[np.float64]], + np.ndarray[tuple[int, int], np.dtype[np.float64]], + ], +) +assert_type( + np.gradient(AR_f8_3d), + tuple[ + np.ndarray[tuple[int, int, int], np.dtype[np.float64]], + np.ndarray[tuple[int, int, int], np.dtype[np.float64]], + np.ndarray[tuple[int, int, int], np.dtype[np.float64]], + ], +) +assert_type(np.gradient(AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]] | Any) +assert_type(np.gradient(AR_LIKE_f8, edge_order=2), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.gradient(AR_LIKE_c16, 
axis=0), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# diff +assert_type(np.diff("git", n=0), str) +assert_type(np.diff(AR_f8), npt.NDArray[np.float64]) +assert_type(np.diff(AR_f8_1d, axis=0), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.diff(AR_f8_2d, axis=0), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.diff(AR_LIKE_f8, prepend=1.5), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.diff(AR_c16), npt.NDArray[np.complex128]) +assert_type(np.diff(AR_c16_1d), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.diff(AR_LIKE_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# interp assert_type(np.interp(1, [1], AR_f8), np.float64) assert_type(np.interp(1, [1], [1]), np.float64) assert_type(np.interp(1, [1], AR_c16), np.complex128) -assert_type(np.interp(1, [1], [1j]), np.complex128) # pyright correctly infers `complex128 | float64` -assert_type(np.interp([1], [1], AR_f8), npt.NDArray[np.float64]) -assert_type(np.interp([1], [1], [1]), npt.NDArray[np.float64]) -assert_type(np.interp([1], [1], AR_c16), npt.NDArray[np.complex128]) -assert_type(np.interp([1], [1], [1j]), npt.NDArray[np.complex128]) # pyright correctly infers `NDArray[complex128 | float64]` - -assert_type(np.angle(f8), np.floating) -assert_type(np.angle(AR_f8), npt.NDArray[np.floating]) -assert_type(np.angle(AR_c16, deg=True), npt.NDArray[np.floating]) -assert_type(np.angle(AR_O), npt.NDArray[np.object_]) - -assert_type(np.unwrap(AR_f8), npt.NDArray[np.floating]) +assert_type(np.interp(1, [1], [1j]), np.complex128) +assert_type(np.interp([1], [1], AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.interp([1], [1], [1]), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.interp([1], [1], AR_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.interp([1], [1], [1j]), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# angle +assert_type(np.angle(1), np.float64) +assert_type(np.angle(1, deg=True), np.float64) +assert_type(np.angle(1j), np.float64) +assert_type(np.angle(f8), np.float64) +assert_type(np.angle(AR_b), npt.NDArray[np.float64]) +assert_type(np.angle(AR_u1), npt.NDArray[np.float64]) +assert_type(np.angle(AR_i8), npt.NDArray[np.float64]) +assert_type(np.angle(AR_f2), npt.NDArray[np.float16]) +assert_type(np.angle(AR_f4), npt.NDArray[np.float32]) +assert_type(np.angle(AR_c8), npt.NDArray[np.float32]) +assert_type(np.angle(AR_f8), npt.NDArray[np.float64]) +assert_type(np.angle(AR_c16), npt.NDArray[np.float64]) +assert_type(np.angle(AR_f10), npt.NDArray[np.longdouble]) +assert_type(np.angle(AR_c20), npt.NDArray[np.longdouble]) +assert_type(np.angle(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_c16_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_LIKE_b), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_LIKE_i8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_LIKE_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_LIKE_c16), np.ndarray[tuple[int], np.dtype[np.float64]]) + +# unwrap +assert_type(np.unwrap(AR_f2), npt.NDArray[np.float16]) +assert_type(np.unwrap(AR_f8), npt.NDArray[np.float64]) +assert_type(np.unwrap(AR_f10), npt.NDArray[np.longdouble]) assert_type(np.unwrap(AR_O), npt.NDArray[np.object_]) - -assert_type(np.sort_complex(AR_f8), npt.NDArray[np.complexfloating]) - +assert_type(np.unwrap(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) 
+assert_type(np.unwrap(AR_f8_2d), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_f8_3d), np.ndarray[tuple[int, int, int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_LIKE_b), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_LIKE_i8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_LIKE_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) + +# sort_complex +assert_type(np.sort_complex(AR_u1), npt.NDArray[np.complex64]) +assert_type(np.sort_complex(AR_f8), npt.NDArray[np.complex128]) +assert_type(np.sort_complex(AR_f10), npt.NDArray[np.clongdouble]) +assert_type(np.sort_complex(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.sort_complex(AR_c16_1d), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# trim_zeros assert_type(np.trim_zeros(AR_f8), npt.NDArray[np.float64]) assert_type(np.trim_zeros(AR_LIKE_f8), list[float]) -assert_type(np.extract(AR_i8, AR_f8), npt.NDArray[np.float64]) -assert_type(np.extract(AR_i8, AR_LIKE_f8), npt.NDArray[Any]) - -assert_type(np.place(AR_f8, mask=AR_i8, vals=5.0), None) - -assert_type(np.cov(AR_f8, bias=True), npt.NDArray[np.floating]) -assert_type(np.cov(AR_f8, AR_c16, ddof=1), npt.NDArray[np.complexfloating]) -assert_type(np.cov(AR_f8, aweights=AR_f8, dtype=np.float32), npt.NDArray[np.float32]) -assert_type(np.cov(AR_f8, fweights=AR_f8, dtype=float), npt.NDArray[Any]) - -assert_type(np.corrcoef(AR_f8, rowvar=True), npt.NDArray[np.floating]) -assert_type(np.corrcoef(AR_f8, AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.corrcoef(AR_f8, dtype=np.float32), npt.NDArray[np.float32]) -assert_type(np.corrcoef(AR_f8, dtype=float), npt.NDArray[Any]) - -assert_type(np.blackman(5), npt.NDArray[np.floating]) -assert_type(np.bartlett(6), npt.NDArray[np.floating]) -assert_type(np.hanning(4.5), npt.NDArray[np.floating]) -assert_type(np.hamming(0), npt.NDArray[np.floating]) -assert_type(np.i0(AR_i8), npt.NDArray[np.floating]) -assert_type(np.kaiser(4, 5.9), npt.NDArray[np.floating]) - -assert_type(np.sinc(1.0), np.floating) -assert_type(np.sinc(1j), np.complexfloating) -assert_type(np.sinc(AR_f8), npt.NDArray[np.floating]) -assert_type(np.sinc(AR_c16), npt.NDArray[np.complexfloating]) - -assert_type(np.median(AR_f8, keepdims=False), np.floating) -assert_type(np.median(AR_c16, overwrite_input=True), np.complexfloating) +# cov +assert_type(np.cov(AR_f8_1d), np.ndarray[tuple[()], np.dtype[np.float64]]) +assert_type(np.cov(AR_f8_2d), npt.NDArray[np.float64]) +assert_type(np.cov(AR_f8), npt.NDArray[np.float64]) +assert_type(np.cov(AR_f8, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.cov(AR_c16, AR_c16), np.ndarray[tuple[int, int], np.dtype[np.complex128]]) +assert_type(np.cov(AR_LIKE_f8), np.ndarray[tuple[()], np.dtype[np.float64]]) +assert_type(np.cov(AR_LIKE_f8, AR_LIKE_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.cov(AR_LIKE_f8, dtype=np.float16), np.ndarray[tuple[()], np.dtype[np.float16]]) +assert_type(np.cov(AR_LIKE_f8, AR_LIKE_f8, dtype=np.float32), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.cov(AR_f8, AR_f8, dtype=float), np.ndarray[tuple[int, int]]) +assert_type(np.cov(AR_LIKE_f8, dtype=float), np.ndarray[tuple[()]]) +assert_type(np.cov(AR_LIKE_f8, AR_LIKE_f8, dtype=float), np.ndarray[tuple[int, int]]) + +# corrcoef +assert_type(np.corrcoef(AR_f8_1d), np.float64) +assert_type(np.corrcoef(AR_f8_2d), np.ndarray[tuple[int, int], np.dtype[np.float64]] | np.float64) 
+assert_type(np.corrcoef(AR_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]] | np.float64) +assert_type(np.corrcoef(AR_f8, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.corrcoef(AR_c16, AR_c16), np.ndarray[tuple[int, int], np.dtype[np.complex128]]) +assert_type(np.corrcoef(AR_LIKE_f8), np.float64) +assert_type(np.corrcoef(AR_LIKE_f8, AR_LIKE_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.corrcoef(AR_LIKE_f8, dtype=np.float16), np.float16) +assert_type(np.corrcoef(AR_LIKE_f8, AR_LIKE_f8, dtype=np.float32), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.corrcoef(AR_f8, AR_f8, dtype=float), np.ndarray[tuple[int, int]]) +assert_type(np.corrcoef(AR_LIKE_f8, dtype=float), Any) +assert_type(np.corrcoef(AR_LIKE_f8, AR_LIKE_f8, dtype=float), np.ndarray[tuple[int, int]]) + +# window functions +assert_type(np.blackman(5), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.bartlett(6), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.hanning(4.5), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.hamming(0), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.kaiser(4, 5.9), np.ndarray[tuple[int], np.dtype[np.float64]]) + +# i0 (bessel function) +assert_type(np.i0(AR_i8), npt.NDArray[np.float64]) + +# sinc (cardinal sine function) +assert_type(np.sinc(1.0), np.float64) +assert_type(np.sinc(1j), np.complex128 | Any) +assert_type(np.sinc(AR_f8), npt.NDArray[np.float64]) +assert_type(np.sinc(AR_c16), npt.NDArray[np.complex128]) +assert_type(np.sinc(AR_LIKE_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.sinc(AR_LIKE_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# median +assert_type(np.median(AR_f8, keepdims=False), np.float64) +assert_type(np.median(AR_c16, overwrite_input=True), np.complex128) assert_type(np.median(AR_m), np.timedelta64) assert_type(np.median(AR_O), Any) -assert_type(np.median(AR_f8, keepdims=True), Any) -assert_type(np.median(AR_c16, axis=0), Any) +assert_type(np.median(AR_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.median(AR_f8, axis=0), npt.NDArray[np.float64]) +assert_type(np.median(AR_c16, keepdims=True), npt.NDArray[np.complex128]) +assert_type(np.median(AR_c16, axis=0), npt.NDArray[np.complex128]) +assert_type(np.median(AR_LIKE_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.median(AR_LIKE_c16, keepdims=True), npt.NDArray[np.complex128]) assert_type(np.median(AR_LIKE_f8, out=AR_c16), npt.NDArray[np.complex128]) -assert_type(np.percentile(AR_f8, 50), np.floating) -assert_type(np.percentile(AR_c16, 50), np.complexfloating) +# percentile +assert_type(np.percentile(AR_f8, 50), np.float64) +assert_type(np.percentile(AR_f8, 50, axis=1), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, 50, axis=(1, 0)), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, 50, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, 50, axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_c16, 50), np.complex128) assert_type(np.percentile(AR_m, 50), np.timedelta64) assert_type(np.percentile(AR_M, 50, overwrite_input=True), np.datetime64) assert_type(np.percentile(AR_O, 50), Any) -assert_type(np.percentile(AR_f8, [50]), npt.NDArray[np.floating]) -assert_type(np.percentile(AR_c16, [50]), npt.NDArray[np.complexfloating]) +assert_type(np.percentile(AR_f8, [50]), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, [50], axis=1), npt.NDArray[np.float64]) 
+assert_type(np.percentile(AR_f8, [50], keepdims=True), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_c16, [50]), npt.NDArray[np.complex128]) assert_type(np.percentile(AR_m, [50]), npt.NDArray[np.timedelta64]) assert_type(np.percentile(AR_M, [50], method="nearest"), npt.NDArray[np.datetime64]) assert_type(np.percentile(AR_O, [50]), npt.NDArray[np.object_]) -assert_type(np.percentile(AR_f8, [50], keepdims=True), Any) -assert_type(np.percentile(AR_f8, [50], axis=[1]), Any) +assert_type(np.percentile(AR_f8, [50], keepdims=True), npt.NDArray[np.float64]) assert_type(np.percentile(AR_f8, [50], out=AR_c16), npt.NDArray[np.complex128]) -assert_type(np.quantile(AR_f8, 0.5), np.floating) -assert_type(np.quantile(AR_c16, 0.5), np.complexfloating) -assert_type(np.quantile(AR_m, 0.5), np.timedelta64) -assert_type(np.quantile(AR_M, 0.5, overwrite_input=True), np.datetime64) -assert_type(np.quantile(AR_O, 0.5), Any) -assert_type(np.quantile(AR_f8, [0.5]), npt.NDArray[np.floating]) -assert_type(np.quantile(AR_c16, [0.5]), npt.NDArray[np.complexfloating]) -assert_type(np.quantile(AR_m, [0.5]), npt.NDArray[np.timedelta64]) -assert_type(np.quantile(AR_M, [0.5], method="nearest"), npt.NDArray[np.datetime64]) -assert_type(np.quantile(AR_O, [0.5]), npt.NDArray[np.object_]) -assert_type(np.quantile(AR_f8, [0.5], keepdims=True), Any) -assert_type(np.quantile(AR_f8, [0.5], axis=[1]), Any) -assert_type(np.quantile(AR_f8, [0.5], out=AR_c16), npt.NDArray[np.complex128]) - +# quantile +assert_type(np.quantile(AR_f8, 0.50), np.float64) +assert_type(np.quantile(AR_f8, 0.50, axis=1), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, 0.50, axis=(1, 0)), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, 0.50, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, 0.50, axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_c16, 0.50), np.complex128) +assert_type(np.quantile(AR_m, 0.50), np.timedelta64) +assert_type(np.quantile(AR_M, 0.50, overwrite_input=True), np.datetime64) +assert_type(np.quantile(AR_O, 0.50), Any) +assert_type(np.quantile(AR_f8, [0.50]), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, [0.50], axis=1), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, [0.50], keepdims=True), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_c16, [0.50]), npt.NDArray[np.complex128]) +assert_type(np.quantile(AR_m, [0.50]), npt.NDArray[np.timedelta64]) +assert_type(np.quantile(AR_M, [0.50], method="nearest"), npt.NDArray[np.datetime64]) +assert_type(np.quantile(AR_O, [0.50]), npt.NDArray[np.object_]) +assert_type(np.quantile(AR_f8, [0.50], keepdims=True), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, [0.50], out=AR_c16), npt.NDArray[np.complex128]) + +# trapezoid assert_type(np.trapezoid(AR_LIKE_f8), np.float64) assert_type(np.trapezoid(AR_LIKE_f8, AR_LIKE_f8), np.float64) assert_type(np.trapezoid(AR_LIKE_c16), np.complex128) @@ -189,25 +333,77 @@ assert_type(np.trapezoid(AR_f8, AR_f8), np.float64 | npt.NDArray[np.float64]) assert_type(np.trapezoid(AR_c16), np.complex128 | npt.NDArray[np.complex128]) assert_type(np.trapezoid(AR_c16, AR_c16), np.complex128 | npt.NDArray[np.complex128]) assert_type(np.trapezoid(AR_m), np.timedelta64 | npt.NDArray[np.timedelta64]) -assert_type(np.trapezoid(AR_O), float | npt.NDArray[np.object_]) -assert_type(np.trapezoid(AR_O, AR_LIKE_f8), float | npt.NDArray[np.object_]) +assert_type(np.trapezoid(AR_O), npt.NDArray[np.object_] | Any) +assert_type(np.trapezoid(AR_O, AR_LIKE_f8), 
npt.NDArray[np.object_] | Any)

+# meshgrid
 assert_type(np.meshgrid(), tuple[()])
-assert_type(np.meshgrid(AR_c16, indexing="ij"), tuple[npt.NDArray[np.complex128]])
-assert_type(np.meshgrid(AR_i8, AR_f8, copy=False), tuple[npt.NDArray[np.int64], npt.NDArray[np.float64]])
-assert_type(np.meshgrid(AR_LIKE_f8, AR_f8), tuple[npt.NDArray[Any], npt.NDArray[np.float64]])
-assert_type(np.meshgrid(AR_LIKE_f8, AR_i8, AR_c16), tuple[npt.NDArray[Any], npt.NDArray[Any], npt.NDArray[Any]])
-assert_type(np.meshgrid(AR_f8, AR_f8, AR_f8, AR_f8), tuple[npt.NDArray[Any], npt.NDArray[Any], npt.NDArray[Any], npt.NDArray[Any]])
-assert_type(np.meshgrid(*AR_LIKE_f8), tuple[npt.NDArray[Any], ...])
+assert_type(
+    np.meshgrid(AR_f8),
+    tuple[
+        np.ndarray[tuple[int], np.dtype[np.float64]],
+    ],
+)
+assert_type(
+    np.meshgrid(AR_c16, indexing="ij"),
+    tuple[
+        np.ndarray[tuple[int], np.dtype[np.complex128]],
+    ],
+)
+assert_type(
+    np.meshgrid(AR_i8, AR_f8, copy=False),
+    tuple[
+        np.ndarray[tuple[int, int], np.dtype[np.int64]],
+        np.ndarray[tuple[int, int], np.dtype[np.float64]],
+    ],
+)
+assert_type(
+    np.meshgrid(AR_LIKE_f8, AR_f8),
+    tuple[
+        np.ndarray[tuple[int, int]],
+        np.ndarray[tuple[int, int], np.dtype[np.float64]],
+    ],
+)
+assert_type(
+    np.meshgrid(AR_f8, AR_LIKE_f8),
+    tuple[
+        np.ndarray[tuple[int, int], np.dtype[np.float64]],
+        np.ndarray[tuple[int, int]],
+    ],
+)
+assert_type(
+    np.meshgrid(AR_LIKE_f8, AR_LIKE_f8),
+    tuple[
+        np.ndarray[tuple[int, int]],
+        np.ndarray[tuple[int, int]],
+    ],
+)
+assert_type(
+    np.meshgrid(AR_f8, AR_i8, AR_c16),
+    tuple[
+        np.ndarray[tuple[int, int, int], np.dtype[np.float64]],
+        np.ndarray[tuple[int, int, int], np.dtype[np.int64]],
+        np.ndarray[tuple[int, int, int], np.dtype[np.complex128]],
+    ],
+)
+assert_type(np.meshgrid(AR_f8, AR_f8, AR_f8, AR_f8), tuple[npt.NDArray[np.float64], ...])
+assert_type(np.meshgrid(AR_f8, AR_f8, AR_f8, AR_LIKE_f8), tuple[np.ndarray, ...])
+assert_type(np.meshgrid(*AR_LIKE_f8), tuple[np.ndarray, ...])

-assert_type(np.delete(AR_f8, np.s_[:5]), npt.NDArray[np.float64])
-assert_type(np.delete(AR_LIKE_f8, [0, 4, 9], axis=0), npt.NDArray[Any])
+# delete
+assert_type(np.delete(AR_f8, np.s_[:5]), np.ndarray[tuple[int], np.dtype[np.float64]])
+assert_type(np.delete(AR_LIKE_f8, [0, 4, 9], axis=0), np.ndarray)

-assert_type(np.insert(AR_f8, np.s_[:5], 5), npt.NDArray[np.float64])
-assert_type(np.insert(AR_LIKE_f8, [0, 4, 9], [0.5, 9.2, 7], axis=0), npt.NDArray[Any])
+# insert
+assert_type(np.insert(AR_f8, np.s_[:5], 5), np.ndarray[tuple[int], np.dtype[np.float64]])
+assert_type(np.insert(AR_LIKE_f8, [0, 4, 9], [0.5, 9.2, 7], axis=0), np.ndarray)

-assert_type(np.append(AR_f8, 5), npt.NDArray[Any])
-assert_type(np.append(AR_LIKE_f8, 1j, axis=0), npt.NDArray[Any])
+# append
+assert_type(np.append(f8, f8), np.ndarray[tuple[int], np.dtype[np.float64]])
+assert_type(np.append(AR_f8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]])
+assert_type(np.append(AR_LIKE_f8, AR_LIKE_c16, axis=0), np.ndarray)
+assert_type(np.append(AR_f8, AR_LIKE_f8, axis=0), np.ndarray)

+# digitize
 assert_type(np.digitize(4.5, [1]), np.intp)
 assert_type(np.digitize(AR_f8, [1, 2, 3]), npt.NDArray[np.intp])

From 3169a79ea90ccd40989dbbbcdeced337620da7b3 Mon Sep 17 00:00:00 2001
From: Joren Hammudoglu
Date: Mon, 24 Nov 2025 17:34:16 +0100
Subject: [PATCH 0902/1018] TYP: fix shape-type of structured array fields
 (#30261)

* TYP: fix shape-type of structured array fields

* TYP: structured array field shape-type test
---
 numpy/__init__.pyi                                       | 6 +++---
numpy/typing/tests/data/reveal/ndarray_assignability.pyi | 7 ++++++- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 7d91e3d384de..b607c54aa425 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2197,10 +2197,10 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __getitem__(self, key: SupportsIndex | tuple[SupportsIndex, ...], /) -> Any: ... @overload def __getitem__(self, key: _ToIndices, /) -> ndarray[_AnyShape, _DTypeT_co]: ... + @overload # can be of any shape + def __getitem__(self: NDArray[void], key: str, /) -> ndarray[_ShapeT_co | _AnyShape]: ... @overload - def __getitem__(self: NDArray[void], key: str, /) -> ndarray[_ShapeT_co, np.dtype]: ... - @overload - def __getitem__(self: NDArray[void], key: list[str], /) -> ndarray[_ShapeT_co, _dtype[void]]: ... + def __getitem__(self: NDArray[void], key: list[str], /) -> ndarray[_ShapeT_co | _AnyShape, dtype[void]]: ... @overload # flexible | object_ | bool def __setitem__( diff --git a/numpy/typing/tests/data/reveal/ndarray_assignability.pyi b/numpy/typing/tests/data/reveal/ndarray_assignability.pyi index d754a94003d3..feaccf28f578 100644 --- a/numpy/typing/tests/data/reveal/ndarray_assignability.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_assignability.pyi @@ -1,4 +1,4 @@ -from typing import Protocol, TypeAlias, TypeVar, assert_type +from typing import Any, Protocol, TypeAlias, TypeVar, assert_type import numpy as np from numpy._typing import _64Bit @@ -33,6 +33,7 @@ _LongDouble_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.longdouble]] _Complex64_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex64]] _Complex128_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex128]] _CLongDouble_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.clongdouble]] +_Void_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.void]] b1_1d: _Bool_1d u1_1d: _UInt8_1d @@ -44,6 +45,7 @@ g_1d: _LongDouble_1d c8_1d: _Complex64_1d c16_1d: _Complex128_1d G_1d: _CLongDouble_1d +V_1d: _Void_1d assert_type(do_abs(b1_1d), _Bool_1d) assert_type(do_abs(u1_1d), _UInt8_1d) @@ -75,3 +77,6 @@ assert_type(do_pos(i2_1d), _Int16_1d) assert_type(do_pos(q_1d), _LongLong_1d) assert_type(do_pos(f4_1d), _Float32_1d) assert_type(do_pos(c16_1d), _Complex128_1d) + +# this shape is effectively equivalent to `tuple[int, *tuple[Any, ...]]`, i.e. ndim >= 1 +assert_type(V_1d["field"], np.ndarray[tuple[int] | tuple[Any, ...]]) From cd352ddc8a00a869df136d8def51c3cb630b5d09 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 24 Nov 2025 17:37:35 +0100 Subject: [PATCH 0903/1018] DEP, TYP: ``ndarray.shape`` setter pending deprecation (#30282) * DEP, TYP: ``ndarray.shape`` setter pending deprecation * DOC: release note for the pending deprecation of `ndarray.shape` --- doc/release/upcoming_changes/30282.deprecation.rst | 5 +++++ numpy/__init__.pyi | 7 +++++++ 2 files changed, 12 insertions(+) create mode 100644 doc/release/upcoming_changes/30282.deprecation.rst diff --git a/doc/release/upcoming_changes/30282.deprecation.rst b/doc/release/upcoming_changes/30282.deprecation.rst new file mode 100644 index 000000000000..e9aac9ae17d5 --- /dev/null +++ b/doc/release/upcoming_changes/30282.deprecation.rst @@ -0,0 +1,5 @@ +in-place modification of ``ndarray.shape`` is pending deprecation +----------------------------------------------------------------- +Setting the `ndarray.shape` attribute directly will be deprecated in a future release. 
+Instead of modifying the shape in place, it is recommended to use the `numpy.reshape` function. +Static type checkers might already report a warning for assignments to `ndarray.shape`. diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index b607c54aa425..977ad37d80af 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2251,15 +2251,22 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @property def ctypes(self) -> _ctypes[int]: ... + + # @property def shape(self) -> _ShapeT_co: ... @shape.setter + @deprecated("In-place shape modification will be deprecated in NumPy 2.5.", category=PendingDeprecationWarning) def shape(self, value: _ShapeLike) -> None: ... + + # @property def strides(self) -> _Shape: ... @strides.setter @deprecated("Setting the strides on a NumPy array has been deprecated in NumPy 2.4") def strides(self, value: _ShapeLike) -> None: ... + + # def byteswap(self, inplace: builtins.bool = ...) -> Self: ... @property def flat(self) -> flatiter[Self]: ... From 41163fd996084ad9fde73db50865101698944e4b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Nov 2025 18:07:05 +0000 Subject: [PATCH 0904/1018] MAINT: Bump github/codeql-action from 4.31.4 to 4.31.5 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 4.31.4 to 4.31.5. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/e12f0178983d466f2f6028f5cc7a6d786fd97f4b...fdbfb4d2750291e159f0156def62b853c2798ca2) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.31.5 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index ae31d4f33040..c758d05e43ab 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@e12f0178983d466f2f6028f5cc7a6d786fd97f4b # v4.31.4 + uses: github/codeql-action/init@fdbfb4d2750291e159f0156def62b853c2798ca2 # v4.31.5 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@e12f0178983d466f2f6028f5cc7a6d786fd97f4b # v4.31.4 + uses: github/codeql-action/autobuild@fdbfb4d2750291e159f0156def62b853c2798ca2 # v4.31.5 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@e12f0178983d466f2f6028f5cc7a6d786fd97f4b # v4.31.4 + uses: github/codeql-action/analyze@fdbfb4d2750291e159f0156def62b853c2798ca2 # v4.31.5 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 5b5a78c59029..d8daf8779d92 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@e12f0178983d466f2f6028f5cc7a6d786fd97f4b # v2.1.27 + uses: github/codeql-action/upload-sarif@fdbfb4d2750291e159f0156def62b853c2798ca2 # v2.1.27 with: sarif_file: results.sarif From c293cee5dd52a087824dc61aa02c97c70d4f7d2c Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 24 Nov 2025 20:37:59 +0100 Subject: [PATCH 0905/1018] TYP: ``_core.overrides.set_module`` implicit re-export (#30289) This fixes mypy errors in e.g. `numpy/fft/_helper.py`, which imports `set_module` from `numpy._core.overrides`. --- numpy/_core/overrides.pyi | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/_core/overrides.pyi b/numpy/_core/overrides.pyi index 91d624203e81..6ef52566d782 100644 --- a/numpy/_core/overrides.pyi +++ b/numpy/_core/overrides.pyi @@ -1,6 +1,8 @@ from collections.abc import Callable, Iterable from typing import Any, Final, NamedTuple, ParamSpec, TypeAlias, TypeVar +from numpy._utils import set_module as set_module + _T = TypeVar("_T") _Tss = ParamSpec("_Tss") _FuncLikeT = TypeVar("_FuncLikeT", bound=type | Callable[..., object]) From 4a24fc4eb8681e69f2910f0bfc1e929be0453ad0 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 24 Nov 2025 20:39:18 +0100 Subject: [PATCH 0906/1018] TYP: move the ``normalize_axis_*`` function definitions from ``lib`` to ``_core`` (#30290) --- numpy/_core/multiarray.pyi | 4 +++- numpy/_core/numeric.pyi | 9 +++++++-- numpy/lib/_array_utils_impl.pyi | 22 +++------------------- 3 files changed, 13 insertions(+), 22 deletions(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index ee8a9c4f67d8..0293d193cbc4 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -90,7 +90,6 @@ from numpy._typing._ufunc import ( _PyFunc_Nin2_Nout1, _PyFunc_Nin3P_Nout1, ) -from numpy.lib._array_utils_impl import normalize_axis_index __all__ = [ "_ARRAY_API", @@ -538,6 +537,9 @@ def unravel_index(indices: _IntLike_co, shape: _ShapeLike, order: _OrderCF = "C" @overload def unravel_index(indices: _ArrayLikeInt_co, shape: _ShapeLike, order: _OrderCF = "C") -> tuple[NDArray[intp], ...]: ... +# +def normalize_axis_index(axis: int, ndim: int, msg_prefix: str | None = None) -> int: ... 
+ # NOTE: Allow any sequence of array-like objects @overload def concatenate( diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index 9be6000f0a8a..4ad2881cf17e 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -1,6 +1,6 @@ from _typeshed import Incomplete from builtins import bool as py_bool -from collections.abc import Callable, Sequence +from collections.abc import Callable, Iterable, Sequence from typing import ( Any, Final, @@ -53,7 +53,6 @@ from numpy._typing import ( _SupportsArrayFunc, _SupportsDType, ) -from numpy.lib._array_utils_impl import normalize_axis_tuple as normalize_axis_tuple from ._asarray import require from ._ufunc_config import ( @@ -1087,6 +1086,12 @@ def roll(a: ArrayLike, shift: _ShapeLike, axis: _ShapeLike | None = None) -> NDA # def rollaxis(a: _ArrayT, axis: int, start: int = 0) -> _ArrayT: ... def moveaxis(a: _ArrayT, source: _ShapeLike, destination: _ShapeLike) -> _ArrayT: ... +def normalize_axis_tuple( + axis: int | Iterable[int], + ndim: int, + argname: str | None = None, + allow_duplicate: py_bool | None = False, +) -> tuple[int, ...]: ... # @overload # 0d, dtype=int (default), sparse=False (default) diff --git a/numpy/lib/_array_utils_impl.pyi b/numpy/lib/_array_utils_impl.pyi index 1b290829caf4..e33507a127c9 100644 --- a/numpy/lib/_array_utils_impl.pyi +++ b/numpy/lib/_array_utils_impl.pyi @@ -1,8 +1,5 @@ -from collections.abc import Iterable -from typing import Any - -from numpy import generic -from numpy.typing import NDArray +import numpy as np +from numpy._core.numeric import normalize_axis_index, normalize_axis_tuple __all__ = ["byte_bounds", "normalize_axis_tuple", "normalize_axis_index"] @@ -10,17 +7,4 @@ __all__ = ["byte_bounds", "normalize_axis_tuple", "normalize_axis_index"] # implementing the `__array_interface__` protocol. The caveat is # that certain keys, marked as optional in the spec, must be present for # `byte_bounds`. This concerns `"strides"` and `"data"`. -def byte_bounds(a: generic | NDArray[Any]) -> tuple[int, int]: ... - -def normalize_axis_tuple( - axis: int | Iterable[int], - ndim: int, - argname: str | None = None, - allow_duplicate: bool | None = False, -) -> tuple[int, ...]: ... - -def normalize_axis_index( - axis: int = ..., - ndim: int = ..., - msg_prefix: str | None = ..., -) -> int: ... +def byte_bounds(a: np.generic | np.ndarray) -> tuple[int, int]: ... 
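
The two ``normalize_axis_*`` stubs above only moved; the runtime behaviour they
describe is unchanged. For orientation, a minimal sketch of that behaviour (not
part of this patch; the public import path is ``numpy.lib.array_utils``):

    from numpy.lib.array_utils import normalize_axis_index, normalize_axis_tuple

    # negative axes are wrapped into range(ndim); out-of-range axes raise AxisError
    assert normalize_axis_index(-1, 3) == 2
    # tuples are normalized element-wise; duplicates raise unless allow_duplicate=True
    assert normalize_axis_tuple((0, -1), 3) == (0, 2)
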
From 4329d425d9caa88d9eb00f2c0e07abf30041d871 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 25 Nov 2025 00:39:38 +0100 Subject: [PATCH 0907/1018] TYP: ``lib._function_base_impl._quantile_ureduce_func`` inline annotation fix (#30291) --- numpy/lib/_function_base_impl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index f37f69bcb5dd..09ab5e91a33c 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -4627,7 +4627,7 @@ def _inverted_cdf(n, quantiles): def _quantile_ureduce_func( a: np.ndarray, q: np.ndarray, - weights: np.ndarray, + weights: np.ndarray | None, axis: int | None = None, out: np.ndarray | None = None, overwrite_input: bool = False, From 786f92cc7297d55f6d5d365c734f747d98d6897c Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 25 Nov 2025 00:41:13 +0100 Subject: [PATCH 0908/1018] TYP: move ``vectorize`` stubs to ``lib._function_base_impl`` (#30293) --- numpy/__init__.pyi | 22 +--------------------- numpy/lib/_function_base_impl.pyi | 23 ++++++++++++++++++++++- 2 files changed, 23 insertions(+), 22 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 977ad37d80af..e19b130d73b6 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -468,6 +468,7 @@ from numpy.lib._function_base_impl import ( # type: ignore[deprecated] append, interp, quantile, + vectorize, ) from numpy.lib._histograms_impl import ( @@ -6104,27 +6105,6 @@ class memmap(ndarray[_ShapeT_co, _DTypeT_co]): ) -> Any: ... def flush(self) -> None: ... -# TODO: Add a mypy plugin for managing functions whose output type is dependent -# on the literal value of some sort of signature (e.g. `einsum` and `vectorize`) -class vectorize: - pyfunc: Callable[..., Any] - cache: builtins.bool - signature: LiteralString | None - otypes: LiteralString | None - excluded: set[int | str] - __doc__: str | None - def __init__( - self, - /, - pyfunc: Callable[..., Any] | _NoValueType = ..., # = _NoValue - otypes: str | Iterable[DTypeLike] | None = None, - doc: str | None = None, - excluded: Iterable[int | str] | None = None, - cache: builtins.bool = False, - signature: str | None = None, - ) -> None: ... - def __call__(self, *args: Any, **kwargs: Any) -> Any: ... - class poly1d: @property def variable(self) -> LiteralString: ... diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 48d3743983b3..d68918560b69 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -16,7 +16,7 @@ from typing import ( from typing_extensions import TypeIs, TypeVar import numpy as np -from numpy import _OrderKACF, vectorize +from numpy import _OrderKACF from numpy._core.multiarray import bincount from numpy._globals import _NoValueType from numpy._typing import ( @@ -190,6 +190,27 @@ class _SizedIterable(Protocol[_T_co]): ### +class vectorize: + __doc__: str | None + __module__: L["numpy"] = "numpy" + pyfunc: Callable[..., Incomplete] + cache: bool + signature: str | None + otypes: str | None + excluded: set[int | str] + + def __init__( + self, + /, + pyfunc: Callable[..., Incomplete] | _NoValueType = ..., # = _NoValue + otypes: str | Iterable[DTypeLike] | None = None, + doc: str | None = None, + excluded: Iterable[int | str] | None = None, + cache: bool = False, + signature: str | None = None, + ) -> None: ... + def __call__(self, /, *args: Incomplete, **kwargs: Incomplete) -> Incomplete: ... 
+ @overload def rot90(m: _ArrayT, k: int = 1, axes: tuple[int, int] = (0, 1)) -> _ArrayT: ... @overload From 863d93a5195344005a5b9a9bdf8f609c3fcf7eb6 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 25 Nov 2025 00:44:34 +0100 Subject: [PATCH 0909/1018] TYP: `_core.*`: stubs for some private functions and constants (#30294) --- numpy/_core/shape_base.pyi | 4 ++++ numpy/_core/umath.pyi | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/numpy/_core/shape_base.pyi b/numpy/_core/shape_base.pyi index a0fbefce0b3c..b87cae8a5f0f 100644 --- a/numpy/_core/shape_base.pyi +++ b/numpy/_core/shape_base.pyi @@ -15,6 +15,7 @@ __all__ = [ "vstack", ] +_T = TypeVar("_T") _ScalarT = TypeVar("_ScalarT", bound=generic) _ScalarT1 = TypeVar("_ScalarT1", bound=generic) _ScalarT2 = TypeVar("_ScalarT2", bound=generic) @@ -64,6 +65,9 @@ def atleast_3d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[A @overload def atleast_3d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... +# used by numpy.lib._shape_base_impl +def _arrays_for_stack_dispatcher(arrays: Sequence[_T]) -> tuple[_T, ...]: ... + # keep in sync with `numpy.ma.extras.vstack` @overload def vstack( diff --git a/numpy/_core/umath.pyi b/numpy/_core/umath.pyi index d9f0d384cf6d..d3b662d79d66 100644 --- a/numpy/_core/umath.pyi +++ b/numpy/_core/umath.pyi @@ -1,3 +1,9 @@ +import contextvars +from _typeshed import SupportsWrite +from collections.abc import Callable +from typing import Any, Final, Literal, TypeAlias, TypedDict, Unpack, type_check_only +from typing_extensions import CapsuleType + from numpy import ( absolute, add, @@ -195,3 +201,30 @@ __all__ = [ "vecdot", "vecmat", ] + +### + +_ErrKind: TypeAlias = Literal["ignore", "warn", "raise", "call", "print", "log"] +_ErrCall: TypeAlias = Callable[[str, int], Any] | SupportsWrite[str] + +@type_check_only +class _ExtOjbDict(TypedDict, total=False): + divide: _ErrKind + over: _ErrKind + under: _ErrKind + invalid: _ErrKind + call: _ErrCall | None + bufsize: int + +# re-exports from `_core._multiarray_umath` that are used by `_core._ufunc_config` + +NAN: Final[float] = float("nan") +PINF: Final[float] = float("+inf") +NINF: Final[float] = float("-inf") +PZERO: Final[float] = +0.0 +NZERO: Final[float] = -0.0 +_UFUNC_API: Final[CapsuleType] = ... +_extobj_contextvar: Final[contextvars.ContextVar[CapsuleType]] = ... + +def _get_extobj_dict() -> _ExtOjbDict: ... +def _make_extobj(*, all: _ErrKind = ..., **kwargs: Unpack[_ExtOjbDict]) -> CapsuleType: ... 
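
The ``_ErrKind``/``_make_extobj`` machinery stubbed above is what the public
floating-point error-state API is built on. A minimal sketch of that public
surface (not part of this patch; only documented ``errstate``/``geterr``
behaviour is used):

    import numpy as np

    # geterr() reports the same divide/over/under/invalid modes that the
    # _ErrKind literal in the stub enumerates
    with np.errstate(divide="ignore", over="raise"):
        modes = np.geterr()
        assert modes["divide"] == "ignore"
        assert modes["over"] == "raise"
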
From ee10fbb6138caf3b724e65f570c64807c2120e4b Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 25 Nov 2025 02:16:05 +0100 Subject: [PATCH 0910/1018] MAINT: remove ``lib._shape_base_impl._replace_zero_by_x_arrays`` --- numpy/lib/_shape_base_impl.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/numpy/lib/_shape_base_impl.py b/numpy/lib/_shape_base_impl.py index d1e55a48d711..c9e0fd316e04 100644 --- a/numpy/lib/_shape_base_impl.py +++ b/numpy/lib/_shape_base_impl.py @@ -730,15 +730,6 @@ def dstack(tup): return _nx.concatenate(arrs, 2) -def _replace_zero_by_x_arrays(sub_arys): - for i in range(len(sub_arys)): - if _nx.ndim(sub_arys[i]) == 0: - sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype) - elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)): - sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype) - return sub_arys - - def _array_split_dispatcher(ary, indices_or_sections, axis=None): return (ary, indices_or_sections) From 3747387359b127814ca43cede453478d195d9625 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 25 Nov 2025 02:43:48 +0100 Subject: [PATCH 0911/1018] TYP: ``lib.*``: stubs for some private functions used by ``_function_base_impl`` --- numpy/lib/_stride_tricks_impl.pyi | 7 +++---- numpy/lib/_utils_impl.pyi | 6 ++++++ 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/numpy/lib/_stride_tricks_impl.pyi b/numpy/lib/_stride_tricks_impl.pyi index 94651dff36ed..008f2d544414 100644 --- a/numpy/lib/_stride_tricks_impl.pyi +++ b/numpy/lib/_stride_tricks_impl.pyi @@ -67,8 +67,7 @@ def broadcast_to( ) -> NDArray[Any]: ... def broadcast_shapes(*args: _ShapeLike) -> _AnyShape: ... +def broadcast_arrays(*args: ArrayLike, subok: bool = False) -> tuple[NDArray[Any], ...]: ... -def broadcast_arrays( - *args: ArrayLike, - subok: bool = False, -) -> tuple[NDArray[Any], ...]: ... +# used internally by `lib._function_base_impl._parse_input_dimensions` +def _broadcast_shape(*args: ArrayLike) -> _AnyShape: ... diff --git a/numpy/lib/_utils_impl.pyi b/numpy/lib/_utils_impl.pyi index 7a34f273c423..e73ba659a31c 100644 --- a/numpy/lib/_utils_impl.pyi +++ b/numpy/lib/_utils_impl.pyi @@ -6,11 +6,17 @@ import numpy as np __all__ = ["get_include", "info", "show_runtime"] +_ScalarOrArrayT = TypeVar("_ScalarOrArrayT", bound=np.generic | np.ndarray) _DTypeT = TypeVar("_DTypeT", bound=np.dtype) +### + def get_include() -> LiteralString: ... def show_runtime() -> None: ... def info( object: object = None, maxwidth: int = 76, output: SupportsWrite[str] | None = None, toplevel: str = "numpy" ) -> None: ... def drop_metadata(dtype: _DTypeT, /) -> _DTypeT: ... + +# used internally by `lib._function_base_impl._median` +def _median_nancheck(data: np.ndarray, result: _ScalarOrArrayT, axis: int) -> _ScalarOrArrayT: ... 
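
``_broadcast_shape`` is the private helper behind the public
``np.broadcast_shapes``; the next patch bumps its presumed argument limit from
32 to 64. A minimal sketch of the public behaviour (not part of this patch):

    import numpy as np

    # broadcasting rules: size-1 axes stretch, missing axes are prepended
    assert np.broadcast_shapes((2, 1, 3), (4, 3)) == (2, 4, 3)
    # np.broadcast only accepts a limited number of arguments at once, so
    # the helper folds extra shapes in chunks -- which is why many shapes work:
    assert np.broadcast_shapes(*[(1, 2)] * 100) == (1, 2)
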
From e4235ad415636c0ed986d57e0055f15fe7b942dc Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 25 Nov 2025 02:53:13 +0100 Subject: [PATCH 0912/1018] MAINT: ``broadcast_shapes``: update presumed ``NPY_MAXARGS`` from 32 to 64 --- numpy/lib/_stride_tricks_impl.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/numpy/lib/_stride_tricks_impl.py b/numpy/lib/_stride_tricks_impl.py index f6f3b8370a72..98a79b325f66 100644 --- a/numpy/lib/_stride_tricks_impl.py +++ b/numpy/lib/_stride_tricks_impl.py @@ -449,14 +449,14 @@ def _broadcast_shape(*args): """ # use the old-iterator because np.nditer does not handle size 0 arrays # consistently - b = np.broadcast(*args[:32]) - # unfortunately, it cannot handle 32 or more arguments directly - for pos in range(32, len(args), 31): + b = np.broadcast(*args[:64]) + # unfortunately, it cannot handle 64 or more arguments directly + for pos in range(64, len(args), 63): # ironically, np.broadcast does not properly handle np.broadcast # objects (it treats them as scalars) # use broadcasting to avoid allocating the full array b = broadcast_to(0, b.shape) - b = np.broadcast(b, *args[pos:(pos + 31)]) + b = np.broadcast(b, *args[pos:(pos + 63)]) return b.shape @@ -567,7 +567,7 @@ def broadcast_arrays(*args, subok=False): [5, 5, 5]])] """ - # nditer is not used here to avoid the limit of 32 arrays. + # nditer is not used here to avoid the limit of 64 arrays. # Otherwise, something like the following one-liner would suffice: # return np.nditer(args, flags=['multi_index', 'zerosize_ok'], # order='C').itviews From 304f5f15d899dd26f3f47d6348e5a9c9fb6db31a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 25 Nov 2025 12:00:46 -0700 Subject: [PATCH 0913/1018] MAINT: Bump actions/setup-python from 6.0.0 to 6.1.0 (#30303) Bumps [actions/setup-python](https://github.com/actions/setup-python) from 6.0.0 to 6.1.0. - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/e797f83bcb11b83ae66e0230d6156d7c80228e7c...83679a892e2d95755f2dac6acb0bfd1e9ac5d548) --- updated-dependencies: - dependency-name: actions/setup-python dependency-version: 6.1.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/linux.yml | 16 ++++++++-------- .github/workflows/linux_blas.yml | 12 ++++++------ .github/workflows/linux_simd.yml | 12 ++++++------ .github/workflows/macos.yml | 2 +- .github/workflows/mypy_primer.yml | 2 +- .github/workflows/windows.yml | 6 +++--- 6 files changed, 25 insertions(+), 25 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 1d8cb119705b..9d1c3ac20a45 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -42,7 +42,7 @@ jobs: submodules: recursive fetch-depth: 0 persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' - name: Install linter requirements @@ -73,7 +73,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: ${{ matrix.version }} - uses: ./.github/meson_actions @@ -88,7 +88,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: 'pypy3.11-v7.3.20' - name: Setup using scipy-openblas @@ -137,7 +137,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' - name: Install build and test dependencies from PyPI @@ -222,7 +222,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' - name: Install build and benchmarking dependencies @@ -261,7 +261,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' - name: Install gfortran and setup OpenBLAS (sdist build) @@ -310,7 +310,7 @@ jobs: path: 'array-api-tests' persist-credentials: false - name: Set up Python - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' - name: Install build and test dependencies from PyPI @@ -339,7 +339,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index 8dba9281443b..77cb9aaf91fe 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -74,7 +74,7 @@ jobs: submodules: recursive 
fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' @@ -197,7 +197,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' @@ -225,7 +225,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' @@ -288,7 +288,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' @@ -352,7 +352,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' @@ -389,7 +389,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index 6078e2f15378..dcc483eaf6df 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -63,7 +63,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' - uses: ./.github/meson_actions @@ -81,7 +81,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' @@ -127,7 +127,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' - name: Install dependencies @@ -175,7 +175,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "${{ matrix.BUILD_PROP[2] }}" - uses: ./.github/meson_actions @@ -190,7 +190,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' @@ -240,7 +240,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: 
actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 76d87a5e5022..2787a7c81d3a 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -127,7 +127,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: ${{ matrix.version }} diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml index dcdad4ff36ba..03b471b10c63 100644 --- a/.github/workflows/mypy_primer.yml +++ b/.github/workflows/mypy_primer.yml @@ -32,7 +32,7 @@ jobs: with: path: numpy_to_test fetch-depth: 0 - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.12" - name: Install dependencies diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 20584f60219f..a0604fa87aa9 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -32,7 +32,7 @@ jobs: persist-credentials: false - name: Setup Python - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.14t" @@ -90,7 +90,7 @@ jobs: persist-credentials: false - name: Setup Python - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' architecture: ${{ matrix.architecture }} @@ -146,7 +146,7 @@ jobs: persist-credentials: false - name: Setup Python - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: ${{ matrix.pyver }} From 1930db7b29d6a696fe72e2a6c0eb302607717340 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 25 Nov 2025 20:28:59 +0100 Subject: [PATCH 0914/1018] MAINT: add ``matmul`` to ``_core.umath.__all__`` (#30286) --- numpy/_core/umath.py | 4 ++-- numpy/_core/umath.pyi | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/numpy/_core/umath.py b/numpy/_core/umath.py index 94f97c059187..cc4b8a1238f0 100644 --- a/numpy/_core/umath.py +++ b/numpy/_core/umath.py @@ -53,8 +53,8 @@ 'heaviside', 'hypot', 'invert', 'isfinite', 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp', 'left_shift', 'less', 'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', - 'logical_or', 'logical_xor', 'matvec', 'maximum', 'minimum', 'mod', 'modf', - 'multiply', 'negative', 'nextafter', 'not_equal', 'pi', 'positive', + 'logical_or', 'logical_xor', 'matmul', 'matvec', 'maximum', 'minimum', 'mod', + 'modf', 'multiply', 'negative', 'nextafter', 'not_equal', 'pi', 'positive', 'power', 'rad2deg', 'radians', 'reciprocal', 'remainder', 'right_shift', 'rint', 'sign', 'signbit', 'sin', 'sinh', 'spacing', 'sqrt', 'square', 'subtract', 'tan', 'tanh', 'true_divide', 'trunc', 'vecdot', 'vecmat'] diff --git a/numpy/_core/umath.pyi b/numpy/_core/umath.pyi index d3b662d79d66..faf6fb545d5f 100644 --- a/numpy/_core/umath.pyi +++ b/numpy/_core/umath.pyi @@ -69,6 +69,7 @@ from numpy import ( logical_not, logical_or, logical_xor, + matmul, matvec, maximum, 
minimum, @@ -168,6 +169,7 @@ __all__ = [ "logical_not", "logical_or", "logical_xor", + "matmul", "matvec", "maximum", "minimum", From 2c2a0d3e905a2b97543988a2d90794cfa49ad583 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 25 Nov 2025 23:12:20 +0100 Subject: [PATCH 0915/1018] DEP: deprecate ``numpy.lib.user_array.container`` (#30284) * DEP: deprecate ``numpy.lib.user_array.container`` * DEP: add deprecation notice for ``numpy.lib.user_array.container`` * DEP: add deprecation comment with date to ``lib.user_array.container`` Co-authored-by: Charles Harris --------- Co-authored-by: Charles Harris --- doc/release/upcoming_changes/30284.deprecation.rst | 3 +++ numpy/lib/_user_array_impl.py | 11 +++++++++++ numpy/lib/_user_array_impl.pyi | 4 +++- numpy/typing/tests/data/pass/lib_user_array.py | 2 +- 4 files changed, 18 insertions(+), 2 deletions(-) create mode 100644 doc/release/upcoming_changes/30284.deprecation.rst diff --git a/doc/release/upcoming_changes/30284.deprecation.rst b/doc/release/upcoming_changes/30284.deprecation.rst new file mode 100644 index 000000000000..8803f3225cd3 --- /dev/null +++ b/doc/release/upcoming_changes/30284.deprecation.rst @@ -0,0 +1,3 @@ +Deprecation of ``numpy.lib.user_array.container`` +------------------------------------------------- +The ``numpy.lib.user_array.container`` class is deprecated and will be removed in a future version. diff --git a/numpy/lib/_user_array_impl.py b/numpy/lib/_user_array_impl.py index f3a6c0f518be..2465f5f70b99 100644 --- a/numpy/lib/_user_array_impl.py +++ b/numpy/lib/_user_array_impl.py @@ -53,6 +53,17 @@ class container: astype """ + def __init_subclass__(cls) -> None: + # Deprecated in NumPy 2.4, 2025-11-24 + import warnings + + warnings.warn( + "The numpy.lib.user_array.container class is deprecated and will be " + "removed in a future version.", + DeprecationWarning, + stacklevel=2, + ) + def __init__(self, data, dtype=None, copy=True): self.array = array(data, dtype, copy=copy) diff --git a/numpy/lib/_user_array_impl.pyi b/numpy/lib/_user_array_impl.pyi index 0aeec42129af..7f364b495450 100644 --- a/numpy/lib/_user_array_impl.pyi +++ b/numpy/lib/_user_array_impl.pyi @@ -1,7 +1,7 @@ from _typeshed import Incomplete from types import EllipsisType from typing import Any, Generic, Self, SupportsIndex, TypeAlias, overload -from typing_extensions import TypeVar, override +from typing_extensions import TypeVar, deprecated, override import numpy as np import numpy.typing as npt @@ -37,7 +37,9 @@ _ToIndex: TypeAlias = SupportsIndex | _ToIndexSlice _ToIndices: TypeAlias = _ToIndex | tuple[_ToIndex, ...] 
### +# pyright: reportDeprecated = false +@deprecated("The numpy.lib.user_array.container class is deprecated and will be removed in a future version.") class container(Generic[_ShapeT_co, _DTypeT_co]): array: np.ndarray[_ShapeT_co, _DTypeT_co] diff --git a/numpy/typing/tests/data/pass/lib_user_array.py b/numpy/typing/tests/data/pass/lib_user_array.py index 62b7e85d7ff1..f79dc38af508 100644 --- a/numpy/typing/tests/data/pass/lib_user_array.py +++ b/numpy/typing/tests/data/pass/lib_user_array.py @@ -3,7 +3,7 @@ from __future__ import annotations import numpy as np -from numpy.lib.user_array import container +from numpy.lib.user_array import container # type: ignore[deprecated] N = 10_000 W = H = int(N**0.5) From 4e6251ab4df4ca8bf6599a7fce02cbadb9945342 Mon Sep 17 00:00:00 2001 From: Rupesh <206439536+Rupeshhsharma@users.noreply.github.com> Date: Wed, 26 Nov 2025 21:56:42 +0530 Subject: [PATCH 0916/1018] BUG: Fix RecursionError and raise ValueError for unmatched parentheses (#30300) * Fix RecursionError and raise ValueError for unmatched parentheses * Fix RecursionError and raise ValueError for unmatched parentheses --- numpy/f2py/symbolic.py | 2 ++ numpy/f2py/tests/test_symbolic.py | 5 +++++ 2 files changed, 7 insertions(+) diff --git a/numpy/f2py/symbolic.py b/numpy/f2py/symbolic.py index 409b84da531c..c768b3c470ed 100644 --- a/numpy/f2py/symbolic.py +++ b/numpy/f2py/symbolic.py @@ -1236,6 +1236,8 @@ def replace_parenthesis(s): i = mn_i j = s.find(right, i) + if j == -1: + raise ValueError(f'Mismatch of {left + right} parenthesis in {s!r}') while s.count(left, i + 1, j) != s.count(right, i + 1, j): j = s.find(right, j + 1) diff --git a/numpy/f2py/tests/test_symbolic.py b/numpy/f2py/tests/test_symbolic.py index ec23f522128b..fbf5abd9aa18 100644 --- a/numpy/f2py/tests/test_symbolic.py +++ b/numpy/f2py/tests/test_symbolic.py @@ -493,3 +493,8 @@ def test_polynomial_atoms(self): assert (y(x) + x).polynomial_atoms() == {y(x), x} assert (y(x) * x[y]).polynomial_atoms() == {y(x), x[y]} assert (y(x)**x).polynomial_atoms() == {y(x)} + + def test_unmatched_parenthesis_gh30268(self): + #gh - 30268 + with pytest.raises(ValueError, match=r"Mismatch of \(\) parenthesis"): + Expr.parse("DATA (A, I=1, N", language=Language.Fortran) From 5306844284b1a4d365510b7fb04175dbd66e00ce Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 26 Nov 2025 10:42:38 -0700 Subject: [PATCH 0917/1018] MAINT: avoid unused variable warnings in dtype tests (#30310) --- numpy/_core/src/umath/_scaled_float_dtype.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/numpy/_core/src/umath/_scaled_float_dtype.c b/numpy/_core/src/umath/_scaled_float_dtype.c index da842cd8c55d..020e903b5fc8 100644 --- a/numpy/_core/src/umath/_scaled_float_dtype.c +++ b/numpy/_core/src/umath/_scaled_float_dtype.c @@ -787,8 +787,7 @@ sfloat_stable_sort_loop( { assert(data[0] == data[1]); assert(strides[0] == sizeof(npy_float64) && strides[1] == sizeof(npy_float64)); - PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; - assert(parameters->flags == NPY_SORT_STABLE); + assert(((PyArrayMethod_SortParameters *)context->parameters)->flags == NPY_SORT_STABLE); npy_intp N = dimensions[0]; char *in = data[0]; @@ -807,8 +806,7 @@ sfloat_default_sort_loop( { assert(data[0] == data[1]); assert(strides[0] == sizeof(npy_float64) && strides[1] == sizeof(npy_float64)); - PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; - assert(parameters->flags 
== NPY_SORT_DEFAULT); + assert(((PyArrayMethod_SortParameters *)context->parameters)->flags == NPY_SORT_DEFAULT); npy_intp N = dimensions[0]; char *in = data[0]; @@ -874,8 +872,7 @@ sfloat_stable_argsort_loop( const npy_intp *strides, NpyAuxData *NPY_UNUSED(auxdata)) { - PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; - assert(parameters->flags == NPY_SORT_STABLE); + assert(((PyArrayMethod_SortParameters *)context->parameters)->flags == NPY_SORT_STABLE); assert(strides[0] == sizeof(npy_float64)); assert(strides[1] == sizeof(npy_intp)); @@ -895,8 +892,7 @@ sfloat_default_argsort_loop( const npy_intp *strides, NpyAuxData *NPY_UNUSED(auxdata)) { - PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; - assert(parameters->flags == NPY_SORT_DEFAULT); + assert(((PyArrayMethod_SortParameters *)context->parameters)->flags == NPY_SORT_DEFAULT); assert(strides[0] == sizeof(npy_float64)); assert(strides[1] == sizeof(npy_intp)); From 2b6c35db8712217158185fc58bf7d83892b1d967 Mon Sep 17 00:00:00 2001 From: Maanas Arora Date: Wed, 26 Nov 2025 13:06:27 -0500 Subject: [PATCH 0918/1018] ENH: New-style sorting for StringDType (#30266) * ENH: New-style sorting for StringDType * BUG: fix descriptor handling in stringdtype_sort_resolve_descriptors * REF: Cleanup braces and function order * REF: rename fill_sort_data_with_array to get_sort_data_from_array for clarity * MAINT: Move sort type selection one level down, reducing duplication. * MAINT: Combine two levels of indirection * REF: remove unnecessary view_offset assignment in stringdtype_sort_resolve_descriptors * REF: change function visibility to static for stringdtype sort functions --------- Co-authored-by: Marten Henric van Kerkwijk --- numpy/_core/src/common/npy_sort.h.src | 23 +++ numpy/_core/src/multiarray/item_selection.c | 18 +- .../_core/src/multiarray/stringdtype/dtype.c | 186 ++++++++++++++++++ numpy/_core/src/npysort/mergesort.cpp | 36 +++- numpy/_core/src/npysort/npysort_common.h | 14 ++ numpy/_core/src/npysort/quicksort.cpp | 30 ++- 6 files changed, 284 insertions(+), 23 deletions(-) diff --git a/numpy/_core/src/common/npy_sort.h.src b/numpy/_core/src/common/npy_sort.h.src index d6e4357225a8..1f82b07659f4 100644 --- a/numpy/_core/src/common/npy_sort.h.src +++ b/numpy/_core/src/common/npy_sort.h.src @@ -107,6 +107,29 @@ NPY_NO_EXPORT int npy_aheapsort(void *vec, npy_intp *ind, npy_intp cnt, void *ar NPY_NO_EXPORT int npy_amergesort(void *vec, npy_intp *ind, npy_intp cnt, void *arr); NPY_NO_EXPORT int npy_atimsort(void *vec, npy_intp *ind, npy_intp cnt, void *arr); + +/* + ***************************************************************************** + ** GENERIC SORT IMPLEMENTATIONS ** + ***************************************************************************** + */ + +typedef int (PyArray_SortImpl)(void *start, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp); +typedef int (PyArray_ArgSortImpl)(void *vv, npy_intp *tosort, npy_intp n, + void *varr, npy_intp elsize, + PyArray_CompareFunc *cmp); + +NPY_NO_EXPORT int npy_quicksort_impl(void *start, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp); +NPY_NO_EXPORT int npy_mergesort_impl(void *start, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp); +NPY_NO_EXPORT int npy_aquicksort_impl(void *vv, npy_intp *tosort, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp); +NPY_NO_EXPORT int npy_amergesort_impl(void *v, npy_intp 
*tosort, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp); + + #ifdef __cplusplus } #endif diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index a50b8c49c3fa..f9d753f9e7ba 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -1226,8 +1226,8 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, if (N <= 1 || PyArray_SIZE(op) == 0) { return 0; } - - if (method_flags != NULL) { + + if (strided_loop != NULL) { needs_api = *method_flags & NPY_METH_REQUIRES_PYAPI; } else { @@ -1441,7 +1441,7 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, rstride = PyArray_STRIDE(rop, axis); needidxbuffer = rstride != sizeof(npy_intp); - if (method_flags != NULL) { + if (strided_loop != NULL) { needs_api = *method_flags & NPY_METH_REQUIRES_PYAPI; } else { @@ -3142,7 +3142,7 @@ PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND flags) PyArrayMethod_Context context = {0}; PyArray_Descr *loop_descrs[2]; NpyAuxData *auxdata = NULL; - NPY_ARRAYMETHOD_FLAGS *method_flags = NULL; + NPY_ARRAYMETHOD_FLAGS method_flags = 0; PyArray_SortFunc **sort_table = NULL; PyArray_SortFunc *sort = NULL; @@ -3184,7 +3184,7 @@ PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND flags) npy_intp strides[2] = {loop_descrs[0]->elsize, loop_descrs[1]->elsize}; if (sort_method->get_strided_loop( - &context, 1, 0, strides, &strided_loop, &auxdata, method_flags) < 0) { + &context, 1, 0, strides, &strided_loop, &auxdata, &method_flags) < 0) { ret = -1; goto fail; } @@ -3229,7 +3229,7 @@ PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND flags) } ret = _new_sortlike(op, axis, sort, strided_loop, - &context, auxdata, method_flags, NULL, NULL, 0); + &context, auxdata, &method_flags, NULL, NULL, 0); fail: if (sort_method != NULL) { @@ -3259,7 +3259,7 @@ PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND flags) PyArrayMethod_Context context = {0}; PyArray_Descr *loop_descrs[2]; NpyAuxData *auxdata = NULL; - NPY_ARRAYMETHOD_FLAGS *method_flags = NULL; + NPY_ARRAYMETHOD_FLAGS method_flags = 0; PyArray_ArgSortFunc **argsort_table = NULL; PyArray_ArgSortFunc *argsort = NULL; @@ -3295,7 +3295,7 @@ PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND flags) npy_intp strides[2] = {loop_descrs[0]->elsize, loop_descrs[1]->elsize}; if (argsort_method->get_strided_loop( - &context, 1, 0, strides, &strided_loop, &auxdata, method_flags) < 0) { + &context, 1, 0, strides, &strided_loop, &auxdata, &method_flags) < 0) { ret = NULL; goto fail; } @@ -3346,7 +3346,7 @@ PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND flags) } ret = _new_argsortlike(op2, axis, argsort, strided_loop, - &context, auxdata, method_flags, NULL, NULL, 0); + &context, auxdata, &method_flags, NULL, NULL, 0); Py_DECREF(op2); fail: diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index a06e7a1ed1b6..85514ef15df6 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -18,6 +18,7 @@ #include "conversion_utils.h" #include "npy_import.h" #include "multiarraymodule.h" +#include "npy_sort.h" /* * Internal helper to create new instances @@ -459,6 +460,7 @@ compare(void *a, void *b, void *arr) return ret; } +// We assume the allocator mutex is already held. 
int _compare(void *a, void *b, PyArray_StringDTypeObject *descr_a, PyArray_StringDTypeObject *descr_b) @@ -516,6 +518,17 @@ _compare(void *a, void *b, PyArray_StringDTypeObject *descr_a, return NpyString_cmp(&s_a, &s_b); } +int +_sort_compare(const void *a, const void *b, void *context) +{ + PyArrayMethod_Context *sort_context = (PyArrayMethod_Context *)context; + PyArray_StringDTypeObject *sdescr = + (PyArray_StringDTypeObject *)sort_context->descriptors[0]; + + int ret = _compare((void *)a, (void *)b, sdescr, sdescr); + return ret; +} + // PyArray_ArgFunc // The max element is the one with the highest unicode code point. int @@ -667,6 +680,121 @@ static PyType_Slot PyArray_StringDType_Slots[] = { {0, NULL}}; +/* + * Wrap the sort loop to acquire/release the string allocator, + * and pick the correct internal implementation. + */ +static int +stringdtype_wrap_sort_loop( + PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata) +{ + PyArray_StringDTypeObject *sdescr = + (PyArray_StringDTypeObject *)context->descriptors[0]; + PyArray_SortImpl *sort_loop = + ((PyArrayMethod_SortParameters *)context->parameters)->flags + == NPY_SORT_STABLE ? &npy_mergesort_impl : &npy_quicksort_impl; + + npy_string_allocator *allocator = NpyString_acquire_allocator(sdescr); + int ret = sort_loop( + data[0], dimensions[0], context, + context->descriptors[0]->elsize, &_sort_compare); + NpyString_release_allocator(allocator); + return ret; +} + +/* + * This is currently required even though the default implementation would work, + * because the output, though enforced to be equal to the input, is parametric. + */ +static NPY_CASTING +stringdtype_sort_resolve_descriptors( + PyArrayMethodObject *method, + PyArray_DTypeMeta *const *dtypes, + PyArray_Descr *const *input_descrs, + PyArray_Descr **output_descrs, + npy_intp *view_offset) +{ + output_descrs[0] = NPY_DT_CALL_ensure_canonical(input_descrs[0]); + if (NPY_UNLIKELY(output_descrs[0] == NULL)) { + return -1; + } + output_descrs[1] = NPY_DT_CALL_ensure_canonical(input_descrs[1]); + if (NPY_UNLIKELY(output_descrs[1] == NULL)) { + Py_XDECREF(output_descrs[0]); + return -1; + } + + return method->casting; +} + +static int +stringdtype_wrap_argsort_loop( + PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata) +{ + PyArray_StringDTypeObject *sdescr = + (PyArray_StringDTypeObject *)context->descriptors[0]; + PyArray_ArgSortImpl *argsort_loop = + ((PyArrayMethod_SortParameters *)context->parameters) + ->flags == NPY_SORT_STABLE ? 
&npy_amergesort_impl : &npy_aquicksort_impl; + + npy_string_allocator *allocator = NpyString_acquire_allocator(sdescr); + int ret = argsort_loop( + data[0], (npy_intp *)data[1], dimensions[0], context, + context->descriptors[0]->elsize, &_sort_compare); + NpyString_release_allocator(allocator); + return ret; +} + +static int +stringdtype_get_sort_loop( + PyArrayMethod_Context *context, + int aligned, int move_references, + const npy_intp *strides, + PyArrayMethod_StridedLoop **out_loop, + NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags) +{ + PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; + *flags |= NPY_METH_NO_FLOATINGPOINT_ERRORS; + + if ((parameters->flags == NPY_SORT_STABLE) + || parameters->flags == NPY_SORT_DEFAULT) { + *out_loop = (PyArrayMethod_StridedLoop *)stringdtype_wrap_sort_loop; + } + else { + PyErr_SetString(PyExc_RuntimeError, "unsupported sort kind"); + return -1; + } + return 0; +} + +static int +stringdtype_get_argsort_loop( + PyArrayMethod_Context *context, + int aligned, int move_references, + const npy_intp *strides, + PyArrayMethod_StridedLoop **out_loop, + NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags) +{ + PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; + *flags |= NPY_METH_NO_FLOATINGPOINT_ERRORS; + + if (parameters->flags == NPY_SORT_STABLE + || parameters->flags == NPY_SORT_DEFAULT) { + *out_loop = (PyArrayMethod_StridedLoop *)stringdtype_wrap_argsort_loop; + } + else { + PyErr_SetString(PyExc_RuntimeError, "unsupported sort kind"); + return -1; + } + return 0; +} + static PyObject * stringdtype_new(PyTypeObject *NPY_UNUSED(cls), PyObject *args, PyObject *kwds) { @@ -831,6 +959,60 @@ PyArray_DTypeMeta PyArray_StringDType = { /* rest, filled in during DTypeMeta initialization */ }; +NPY_NO_EXPORT int +init_stringdtype_sorts(void) +{ + PyArray_DTypeMeta *stringdtype = &PyArray_StringDType; + + PyArray_DTypeMeta *sort_dtypes[2] = {stringdtype, stringdtype}; + PyType_Slot sort_slots[3] = { + {NPY_METH_resolve_descriptors, &stringdtype_sort_resolve_descriptors}, + {NPY_METH_get_loop, &stringdtype_get_sort_loop}, + {0, NULL} + }; + PyArrayMethod_Spec sort_spec = { + .name = "stringdtype_sort", + .nin = 1, + .nout = 1, + .dtypes = sort_dtypes, + .slots = sort_slots, + .flags = NPY_METH_NO_FLOATINGPOINT_ERRORS, + }; + + PyBoundArrayMethodObject *sort_method = PyArrayMethod_FromSpec_int( + &sort_spec, 1); + if (sort_method == NULL) { + return -1; + } + NPY_DT_SLOTS(stringdtype)->sort_meth = sort_method->method; + Py_INCREF(sort_method->method); + Py_DECREF(sort_method); + + PyArray_DTypeMeta *argsort_dtypes[2] = {stringdtype, &PyArray_IntpDType}; + PyType_Slot argsort_slots[2] = { + {NPY_METH_get_loop, &stringdtype_get_argsort_loop}, + {0, NULL} + }; + PyArrayMethod_Spec argsort_spec = { + .name = "stringdtype_argsort", + .nin = 1, + .nout = 1, + .dtypes = argsort_dtypes, + .slots = argsort_slots, + .flags = NPY_METH_NO_FLOATINGPOINT_ERRORS, + }; + + PyBoundArrayMethodObject *argsort_method = PyArrayMethod_FromSpec_int( + &argsort_spec, 1); + if (argsort_method == NULL) { + return -1; + } + NPY_DT_SLOTS(stringdtype)->argsort_meth = argsort_method->method; + Py_INCREF(argsort_method->method); + Py_DECREF(argsort_method); + return 0; +} + NPY_NO_EXPORT int init_string_dtype(void) { @@ -876,6 +1058,10 @@ init_string_dtype(void) PyMem_Free(PyArray_StringDType_casts); + if (init_stringdtype_sorts() < 0) { + return -1; + } + return 0; } diff --git 
a/numpy/_core/src/npysort/mergesort.cpp b/numpy/_core/src/npysort/mergesort.cpp index 2fac0ccfafcd..1cfe04b1d266 100644 --- a/numpy/_core/src/npysort/mergesort.cpp +++ b/numpy/_core/src/npysort/mergesort.cpp @@ -337,7 +337,7 @@ string_amergesort_(type *v, npy_intp *tosort, npy_intp num, void *varr) static void npy_mergesort0(char *pl, char *pr, char *pw, char *vp, npy_intp elsize, - PyArray_CompareFunc *cmp, PyArrayObject *arr) + PyArray_CompareFunc *cmp, void *arr) { char *pi, *pj, *pk, *pm; @@ -383,9 +383,19 @@ npy_mergesort0(char *pl, char *pr, char *pw, char *vp, npy_intp elsize, NPY_NO_EXPORT int npy_mergesort(void *start, npy_intp num, void *varr) { - PyArrayObject *arr = (PyArrayObject *)varr; - npy_intp elsize = PyArray_ITEMSIZE(arr); - PyArray_CompareFunc *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; + void *arr = varr; + npy_intp elsize; + PyArray_CompareFunc *cmp; + get_sort_data_from_array(arr, &elsize, &cmp); + + return npy_mergesort_impl(start, num, varr, elsize, cmp); +} + +NPY_NO_EXPORT int +npy_mergesort_impl(void *start, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp) +{ + void *arr = varr; char *pl = (char *)start; char *pr = pl + num * elsize; char *pw; @@ -413,7 +423,7 @@ npy_mergesort(void *start, npy_intp num, void *varr) static void npy_amergesort0(npy_intp *pl, npy_intp *pr, char *v, npy_intp *pw, - npy_intp elsize, PyArray_CompareFunc *cmp, PyArrayObject *arr) + npy_intp elsize, PyArray_CompareFunc *cmp, void *arr) { char *vp; npy_intp vi, *pi, *pj, *pk, *pm; @@ -459,9 +469,19 @@ npy_amergesort0(npy_intp *pl, npy_intp *pr, char *v, npy_intp *pw, NPY_NO_EXPORT int npy_amergesort(void *v, npy_intp *tosort, npy_intp num, void *varr) { - PyArrayObject *arr = (PyArrayObject *)varr; - npy_intp elsize = PyArray_ITEMSIZE(arr); - PyArray_CompareFunc *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; + void *arr = varr; + npy_intp elsize; + PyArray_CompareFunc *cmp; + get_sort_data_from_array(arr, &elsize, &cmp); + + return npy_amergesort_impl(v, tosort, num, varr, elsize, cmp); +} + +NPY_NO_EXPORT int +npy_amergesort_impl(void *v, npy_intp *tosort, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp) +{ + void *arr = varr; npy_intp *pl, *pr, *pw; /* Items that have zero size don't make sense to sort */ diff --git a/numpy/_core/src/npysort/npysort_common.h b/numpy/_core/src/npysort/npysort_common.h index 8b7e0ef43f88..f2b99e3b7f66 100644 --- a/numpy/_core/src/npysort/npysort_common.h +++ b/numpy/_core/src/npysort/npysort_common.h @@ -40,6 +40,20 @@ extern "C" { /* Need this for the argsort functions */ #define INTP_SWAP(a,b) {npy_intp tmp = (b); (b)=(a); (a) = tmp;} +/* + ****************************************************************************** + ** SORTING WRAPPERS ** + ****************************************************************************** + */ + +static inline void +get_sort_data_from_array(void *varr, npy_intp *elsize, PyArray_CompareFunc **cmp) +{ + PyArrayObject *arr = (PyArrayObject *)varr; + *elsize = PyArray_ITEMSIZE(arr); + *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; +} + /* ***************************************************************************** ** COMPARISON FUNCTIONS ** diff --git a/numpy/_core/src/npysort/quicksort.cpp b/numpy/_core/src/npysort/quicksort.cpp index ddf4fce0c28b..2f5adde17b64 100644 --- a/numpy/_core/src/npysort/quicksort.cpp +++ b/numpy/_core/src/npysort/quicksort.cpp @@ -508,9 +508,18 @@ string_aquicksort_(type *vv, npy_intp *tosort, npy_intp num, 
void *varr) NPY_NO_EXPORT int npy_quicksort(void *start, npy_intp num, void *varr) { - PyArrayObject *arr = (PyArrayObject *)varr; - npy_intp elsize = PyArray_ITEMSIZE(arr); - PyArray_CompareFunc *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; + npy_intp elsize; + PyArray_CompareFunc *cmp; + get_sort_data_from_array(varr, &elsize, &cmp); + + return npy_quicksort_impl(start, num, varr, elsize, cmp); +} + +NPY_NO_EXPORT int +npy_quicksort_impl(void *start, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp) +{ + void *arr = varr; char *vp; char *pl = (char *)start; char *pr = pl + (num - 1) * elsize; @@ -612,10 +621,19 @@ npy_quicksort(void *start, npy_intp num, void *varr) NPY_NO_EXPORT int npy_aquicksort(void *vv, npy_intp *tosort, npy_intp num, void *varr) { + npy_intp elsize; + PyArray_CompareFunc *cmp; + get_sort_data_from_array(varr, &elsize, &cmp); + + return npy_aquicksort_impl(vv, tosort, num, varr, elsize, cmp); +} + +NPY_NO_EXPORT int +npy_aquicksort_impl(void *vv, npy_intp *tosort, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp) +{ + void *arr = varr; char *v = (char *)vv; - PyArrayObject *arr = (PyArrayObject *)varr; - npy_intp elsize = PyArray_ITEMSIZE(arr); - PyArray_CompareFunc *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; char *vp; npy_intp *pl = tosort; npy_intp *pr = tosort + num - 1; From b8f76dd6b0adae1506db4e07e675413e74de1b7f Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Wed, 26 Nov 2025 19:26:52 -0800 Subject: [PATCH 0919/1018] DOC: record a data -> record a data point [skip azp][skip cirrus][skip actions] (#30313) --- doc/source/reference/maskedarray.generic.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/reference/maskedarray.generic.rst b/doc/source/reference/maskedarray.generic.rst index 3fbe25d5b03c..4f53c6146b53 100644 --- a/doc/source/reference/maskedarray.generic.rst +++ b/doc/source/reference/maskedarray.generic.rst @@ -20,8 +20,8 @@ What is a masked array? ----------------------- In many circumstances, datasets can be incomplete or tainted by the presence -of invalid data. For example, a sensor may have failed to record a data, or -recorded an invalid value. The :mod:`numpy.ma` module provides a convenient +of invalid data. For example, a sensor may have failed to record a data point, +or recorded an invalid value. The :mod:`numpy.ma` module provides a convenient way to address this issue, by introducing masked arrays. A masked array is the combination of a standard :class:`numpy.ndarray` and a From e16fc01051e598807ad87c9df4683b7e9f42364e Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 27 Nov 2025 17:26:16 +0100 Subject: [PATCH 0920/1018] MAINT,API: Introduce __numpy_dtype__ and fix dtype attribute recursion (#30179) * MAINT,API: Introduce __numpy_dtype__ and fix dtype attribute recursion This is a lot more complex then I expected. We, correctly, deprecated `.dtype` attribute lookup recursion, however... The code still had a `try/except` that still recursed and that try except carried meaning at least in a weird path of: ``` class mydtype(np.void): dtype = ... ``` Now does that make sense? Maybe not... we also fall back to object in some paths which should have been broken when a dtype attribute existed on a type but it is a property/descriptor. So, this removes the recursion, but adds a check for `__get__` to filter out those cases, this is something we successfully did for other protocols `__array__`, `__array_interface__`, etc. 
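
A minimal sketch of the new protocol from the Python side (the class name is
illustrative; per the C change below, the attribute is looked up rather than
called, and its value must already be a dtype instance):

    import numpy as np

    class MyDTypeLike:
        # np.dtype(...) reads this attribute; note that np.float64 itself
        # is a scalar type, not a dtype instance
        __numpy_dtype__ = np.dtype(np.float64)

    assert np.dtype(MyDTypeLike()) == np.float64
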
Signed-off-by: Sebastian Berg * DOC: Add release note snippet * DOC: Make release note more precise, array-likes don't need the new attr --------- Signed-off-by: Sebastian Berg --- .../upcoming_changes/30179.new_feature.rst | 13 +++ doc/source/user/basics.interoperability.rst | 12 +++ numpy/_core/src/multiarray/descriptor.c | 82 +++++++++---------- numpy/_core/src/multiarray/descriptor.h | 3 - numpy/_core/src/multiarray/npy_static_data.c | 1 + numpy/_core/src/multiarray/npy_static_data.h | 1 + numpy/_core/src/multiarray/scalarapi.c | 36 +++++--- numpy/_core/tests/test_dtype.py | 57 ++++++++++--- 8 files changed, 137 insertions(+), 68 deletions(-) create mode 100644 doc/release/upcoming_changes/30179.new_feature.rst diff --git a/doc/release/upcoming_changes/30179.new_feature.rst b/doc/release/upcoming_changes/30179.new_feature.rst new file mode 100644 index 000000000000..e19815289351 --- /dev/null +++ b/doc/release/upcoming_changes/30179.new_feature.rst @@ -0,0 +1,13 @@ +New ``__numpy_dtype__`` protocol +-------------------------------- +NumPy now has a new ``__numpy_dtype__`` protocol. NumPy will check +for this attribute when converting to a NumPy dtype via ``np.dtype(obj)`` +or any ``dtype=`` argument. + +Downstream projects are encouraged to implement this for all dtype-like +objects which may previously have used a ``.dtype`` attribute that returned +a NumPy dtype. +We expect to deprecate ``.dtype`` in the future to prevent interpreting +array-like objects with a ``.dtype`` attribute as a dtype. +If you wish, you can implement ``__numpy_dtype__`` to ensure an earlier +warning or error (``.dtype`` is ignored if this is found). diff --git a/doc/source/user/basics.interoperability.rst b/doc/source/user/basics.interoperability.rst index ca0c39d7081f..b1c115ff1de0 100644 --- a/doc/source/user/basics.interoperability.rst +++ b/doc/source/user/basics.interoperability.rst @@ -65,6 +65,18 @@ and outputs a NumPy ndarray (which is generally a view of the input object's dat buffer). The :ref:`dlpack:python-spec` page explains the ``__dlpack__`` protocol in detail. +``dtype`` interoperability +~~~~~~~~~~~~~~~~~~~~~~~~~~ +Similar to ``__array__()`` for array objects, defining ``__numpy_dtype__`` +allows a custom dtype object to be interoperable with NumPy. +The ``__numpy_dtype__`` attribute must return a NumPy dtype instance (note that +``np.float64`` is not a dtype instance, ``np.dtype(np.float64)`` is). + +.. versionadded:: 2.4 + Before NumPy 2.4 a ``.dtype`` attribute was treated similarly. As of NumPy 2.4 + both are accepted, and implementing ``__numpy_dtype__`` prevents ``.dtype`` + from being checked. + The array interface protocol ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index 852a2c768948..37776f6a60cc 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -15,6 +15,7 @@ #include "npy_config.h" #include "npy_ctypes.h" #include "npy_import.h" +#include "npy_pycompat.h" // PyObject_GetOptionalAttr #include "_datetime.h" @@ -84,63 +85,54 @@ _try_convert_from_ctypes_type(PyTypeObject *type) } /* - * This function creates a dtype object when the object has a "dtype" attribute, - * and it can be converted to a dtype object. + * This function creates a dtype object when the object has a "__numpy_dtype__" + * or "dtype" attribute, which must be a valid NumPy dtype instance. * * Returns `Py_NotImplemented` if this is not possible.
- * Currently the only failure mode for a NULL return is a RecursionError. */ static PyArray_Descr * _try_convert_from_dtype_attr(PyObject *obj) { + int used_dtype_attr = 0; /* For arbitrary objects that have a "dtype" attribute */ - PyObject *dtypedescr = PyObject_GetAttrString(obj, "dtype"); - if (dtypedescr == NULL) { + PyObject *attr; + int res = PyObject_GetOptionalAttr(obj, npy_interned_str.numpy_dtype, &attr); + if (res < 0) { + return NULL; + } + else if (res == 0) { /* - * This can be reached due to recursion limit being hit while fetching - * the attribute (tested for py3.7). This removes the custom message. + * When "__numpy_dtype__" does not exist, also check "dtype". This should + * be removed in the future. + * We do however support a weird `class myclass(np.void): dtype = ...` + * syntax. */ - goto fail; - } - - if (PyArray_DescrCheck(dtypedescr)) { - /* The dtype attribute is already a valid descriptor */ - return (PyArray_Descr *)dtypedescr; + used_dtype_attr = 1; + int res = PyObject_GetOptionalAttr(obj, npy_interned_str.dtype, &attr); + if (res < 0) { + return NULL; + } + else if (res == 0) { + Py_INCREF(Py_NotImplemented); + return (PyArray_Descr *)Py_NotImplemented; + } } - - if (Py_EnterRecursiveCall( - " while trying to convert the given data type from its " - "`.dtype` attribute.") != 0) { - Py_DECREF(dtypedescr); + if (!PyArray_DescrCheck(attr)) { + if (PyType_Check(obj) && PyObject_HasAttrString(attr, "__get__")) { + /* If the object has a __get__, assume this is a class property. */ + Py_DECREF(attr); + Py_INCREF(Py_NotImplemented); + return (PyArray_Descr *)Py_NotImplemented; + } + PyErr_Format(PyExc_ValueError, + "Could not convert %R to a NumPy dtype (via `.%S` value %R).", obj, + used_dtype_attr ? npy_interned_str.dtype : npy_interned_str.numpy_dtype, + attr); + Py_DECREF(attr); return NULL; } - - PyArray_Descr *newdescr = _convert_from_any(dtypedescr, 0); - Py_DECREF(dtypedescr); - Py_LeaveRecursiveCall(); - if (newdescr == NULL) { - goto fail; - } - - Py_DECREF(newdescr); - PyErr_SetString(PyExc_ValueError, "dtype attribute is not a valid dtype instance"); - return NULL; - - fail: - /* Ignore all but recursion errors, to give ctypes a full try. 
*/ - if (!PyErr_ExceptionMatches(PyExc_RecursionError)) { PyErr_Clear(); Py_INCREF(Py_NotImplemented); return (PyArray_Descr *)Py_NotImplemented; } return NULL; -} - -/* Expose to another file with a prefixed name */ -NPY_NO_EXPORT PyArray_Descr * -_arraydescr_try_convert_from_dtype_attr(PyObject *obj) -{ - return _try_convert_from_dtype_attr(obj); + /* The dtype attribute is already a valid descriptor */ + return (PyArray_Descr *)attr; } /* diff --git a/numpy/_core/src/multiarray/descriptor.h b/numpy/_core/src/multiarray/descriptor.h index 820e53f0c3e8..284afabe96fc 100644 --- a/numpy/_core/src/multiarray/descriptor.h +++ b/numpy/_core/src/multiarray/descriptor.h @@ -44,9 +44,6 @@ NPY_NO_EXPORT PyObject *arraydescr_protocol_descr_get( NPY_NO_EXPORT PyObject * array_set_typeDict(PyObject *NPY_UNUSED(ignored), PyObject *args); -NPY_NO_EXPORT PyArray_Descr * -_arraydescr_try_convert_from_dtype_attr(PyObject *obj); - NPY_NO_EXPORT int is_dtype_struct_simple_unaligned_layout(PyArray_Descr *dtype); diff --git a/numpy/_core/src/multiarray/npy_static_data.c b/numpy/_core/src/multiarray/npy_static_data.c index c7f4c51c6675..997c798c665d 100644 --- a/numpy/_core/src/multiarray/npy_static_data.c +++ b/numpy/_core/src/multiarray/npy_static_data.c @@ -38,6 +38,7 @@ intern_strings(void) INTERN_STRING(array_ufunc, "__array_ufunc__"); INTERN_STRING(array_wrap, "__array_wrap__"); INTERN_STRING(array_finalize, "__array_finalize__"); + INTERN_STRING(numpy_dtype, "__numpy_dtype__"); INTERN_STRING(implementation, "_implementation"); INTERN_STRING(axis1, "axis1"); INTERN_STRING(axis2, "axis2"); diff --git a/numpy/_core/src/multiarray/npy_static_data.h b/numpy/_core/src/multiarray/npy_static_data.h index 365673ffb0b4..f3d1135ec044 100644 --- a/numpy/_core/src/multiarray/npy_static_data.h +++ b/numpy/_core/src/multiarray/npy_static_data.h @@ -24,6 +24,7 @@ typedef struct npy_interned_str_struct { PyObject *array_wrap; PyObject *array_finalize; PyObject *array_ufunc; + PyObject *numpy_dtype; PyObject *implementation; PyObject *axis1; PyObject *axis2; diff --git a/numpy/_core/src/multiarray/scalarapi.c b/numpy/_core/src/multiarray/scalarapi.c index 7d65d972998d..a602e312727b 100644 --- a/numpy/_core/src/multiarray/scalarapi.c +++ b/numpy/_core/src/multiarray/scalarapi.c @@ -11,8 +11,7 @@ #include "numpy/npy_math.h" #include "npy_config.h" - - +#include "npy_pycompat.h" // PyObject_GetOptionalAttr #include "array_coercion.h" #include "ctors.h" @@ -343,17 +342,34 @@ PyArray_DescrFromTypeObject(PyObject *type) /* Do special thing for VOID sub-types */ if (PyType_IsSubtype((PyTypeObject *)type, &PyVoidArrType_Type)) { + PyObject *attr; + _PyArray_LegacyDescr *conv = NULL; + int res = PyObject_GetOptionalAttr(type, npy_interned_str.dtype, &attr); + if (res < 0) { + return NULL; // Should be a rather critical error, so just fail. + } + if (res == 1) { + if (!PyArray_DescrCheck(attr)) { + if (PyObject_HasAttrString(attr, "__get__")) { + /* If the object has a __get__, assume this is a class property.
*/ + Py_DECREF(attr); + conv = NULL; + } + else { + PyErr_Format(PyExc_ValueError, + "`.dtype` attribute %R is not a valid dtype instance", + attr); + Py_DECREF(attr); + return NULL; + } + } + } + _PyArray_LegacyDescr *new = (_PyArray_LegacyDescr *)PyArray_DescrNewFromType(NPY_VOID); if (new == NULL) { return NULL; } - _PyArray_LegacyDescr *conv = (_PyArray_LegacyDescr *)( - _arraydescr_try_convert_from_dtype_attr(type)); - if (conv == NULL) { - Py_DECREF(new); - return NULL; - } - if ((PyObject *)conv != Py_NotImplemented && PyDataType_ISLEGACY(conv)) { + if (conv != NULL && PyDataType_ISLEGACY(conv)) { new->fields = conv->fields; Py_XINCREF(new->fields); new->names = conv->names; @@ -362,7 +378,7 @@ PyArray_DescrFromTypeObject(PyObject *type) new->subarray = conv->subarray; conv->subarray = NULL; } - Py_DECREF(conv); + Py_XDECREF(conv); Py_XDECREF(new->typeobj); new->typeobj = (PyTypeObject *)type; Py_INCREF(type); diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 974e8765be47..616e1fec02e3 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -1582,19 +1582,19 @@ class dt: assert np.dtype(dt) == np.float64 assert np.dtype(dt()) == np.float64 - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") - @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") - def test_recursion(self): + def test_recursive(self): + # This used to recurse. It now doesn't, we enforce the + # dtype attribute to be a dtype (and will not recurse). class dt: pass dt.dtype = dt - with pytest.raises(RecursionError): + with pytest.raises(ValueError): np.dtype(dt) dt_instance = dt() dt_instance.dtype = dt - with pytest.raises(RecursionError): + with pytest.raises(ValueError): np.dtype(dt_instance) def test_void_subtype(self): @@ -1607,21 +1607,58 @@ class dt(np.void): np.dtype(dt) np.dtype(dt(1)) - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") - @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") - def test_void_subtype_recursion(self): + def test_void_subtype_recursive(self): + # Used to recurse, but dtype is now enforced to be a dtype instance + # so that we do not recurse. class vdt(np.void): pass vdt.dtype = vdt - with pytest.raises(RecursionError): + with pytest.raises(ValueError): np.dtype(vdt) - with pytest.raises(RecursionError): + with pytest.raises(ValueError): np.dtype(vdt(1)) +class TestFromDTypeProtocol: + def test_simple(self): + class A: + dtype = np.dtype("f8") + + assert np.dtype(A()) == np.dtype(np.float64) + + def test_not_a_dtype(self): + # This also prevents coercion as a trivial path, although + # a custom error may be nicer. + class ArrayLike: + __numpy_dtype__ = None + dtype = np.dtype("f8") + + with pytest.raises(ValueError, match=".*__numpy_dtype__.*"): + np.dtype(ArrayLike()) + + def test_prevent_dtype_explicit(self): + class ArrayLike: + @property + def __numpy_dtype__(self): + raise RuntimeError("my error!") + + with pytest.raises(RuntimeError, match="my error!"): + np.dtype(ArrayLike()) + + def test_type_object(self): + class TypeWithProperty: + @property + def __numpy_dtype__(self): + raise RuntimeError("not reached") + + # Arbitrary types go to object currently, and the + # protocol doesn't prevent that. 
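+        # (Editor's hedged aside, not from the original test: this lenient
+        # fallback applies to the *type* object only; on an instance the
+        # property would actually run, as test_prevent_dtype_explicit shows.)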
+ assert np.dtype(TypeWithProperty) == object + + class TestDTypeClasses: @pytest.mark.parametrize("dtype", list(np.typecodes['All']) + [rational]) def test_basic_dtypes_subclass_properties(self, dtype): From 91f0451b2353b18cf981e2dd842ef673318dd8c6 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Fri, 28 Nov 2025 10:13:38 -0500 Subject: [PATCH 0921/1018] MAINT: Implement some RAII classes and use them in stringdtype/casts.cpp (#30312) * MAINT: Implement some RAII classes and use them in stringdtype/casts.cpp * MAINT: Delete copy, move, etc. to avoid potential double-release by the RAII classes. * MAINT: Fix include guard name in raii_utils.hpp * MAINT: Fix a typo in raii_utils.hpp --- numpy/_core/src/common/raii_utils.hpp | 166 ++++++++++++++++++ .../src/multiarray/stringdtype/casts.cpp | 27 ++- 2 files changed, 176 insertions(+), 17 deletions(-) create mode 100644 numpy/_core/src/common/raii_utils.hpp diff --git a/numpy/_core/src/common/raii_utils.hpp b/numpy/_core/src/common/raii_utils.hpp new file mode 100644 index 000000000000..e92d0eae9269 --- /dev/null +++ b/numpy/_core/src/common/raii_utils.hpp @@ -0,0 +1,166 @@ +#ifndef NUMPY_CORE_SRC_COMMON_RAII_UTILS_HPP_ +#define NUMPY_CORE_SRC_COMMON_RAII_UTILS_HPP_ + +// +// Utilities for RAII management of resources. +// +// Another (and arguably clearer) name for this resource management pattern +// is "Scope-Bound Resource Management", but RAII is much more common, so we +// use the familiar acronym. +// + +#include + +// For npy_string_allocator, PyArray_StringDTypeObject, NPY_NO_EXPORT: +#include "numpy/ndarraytypes.h" + +// Forward declarations not currently in a header. +// XXX Where should these be moved? +NPY_NO_EXPORT npy_string_allocator * +NpyString_acquire_allocator(const PyArray_StringDTypeObject *descr); +NPY_NO_EXPORT void +NpyString_release_allocator(npy_string_allocator *allocator); + + +namespace np { namespace raii { + +// +// RAII for PyGILState_* API. +// +// In C++ code, use this at the beginning of a scope, e.g. +// +// { +// np::raii::EnsureGIL ensure_gil{}; +// [code that uses the Python C API here] +// } +// +// instead of +// +// PyGILState_STATE gil_state = PyGILState_Ensure(); +// [code that uses the Python C API here] +// PyGILState_Release(gil_state); +// +// or +// NPY_ALLOW_C_API_DEF +// NPY_ALLOW_C_API +// [code that uses the Python C API here] +// NPY_DISABLE_C_API +// +// This ensures that PyGILState_Release(gil_state) is called, even if the +// wrapped code throws an exception or executes a return or a goto. +// +class EnsureGIL +{ + PyGILState_STATE gil_state; + +public: + + EnsureGIL() { + gil_state = PyGILState_Ensure(); + } + + ~EnsureGIL() { + PyGILState_Release(gil_state); + } + + EnsureGIL(const EnsureGIL&) = delete; + EnsureGIL(EnsureGIL&& other) = delete; + EnsureGIL& operator=(const EnsureGIL&) = delete; + EnsureGIL& operator=(EnsureGIL&&) = delete; +}; + + +// +// RAII for Python thread state. +// +// In C++ code, use this at the beginning of a scope, e.g. +// +// { +// np::raii::SaveThreadState save_thread_state{}; +// [code...] +// } +// +// instead of +// +// PyThreadState *thread_state = PyEval_SaveThread(); +// [code...] +// PyEval_RestoreThread(thread_state); +// +// or +// Py_BEGIN_ALLOW_THREADS +// [code...] +// Py_END_ALLOW_THREADS +// +// or +// NPY_BEGIN_THREADS_DEF +// NPY_BEGIN_THREADS +// [code...] +// NPY_END_THREADS +// +// This ensures that PyEval_RestoreThread(thread_state) is called, even +// if the wrapped code throws an exception or executes a return or a goto. 
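+// As a hedged sketch (illustration only; ``do_heavy_work`` and ``compute``
+// are made-up names), the destructor fires even on an early error return:
+//
+//     int do_heavy_work() {
+//         np::raii::SaveThreadState save_thread_state{};
+//         if (compute() < 0) {
+//             return -1;  // thread state restored here automatically
+//         }
+//         return 0;       // ...and here
+//     }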
+// +class SaveThreadState +{ + PyThreadState *thread_state; + +public: + + SaveThreadState() { + thread_state = PyEval_SaveThread(); + } + + ~SaveThreadState() { + PyEval_RestoreThread(thread_state); + } + + SaveThreadState(const SaveThreadState&) = delete; + SaveThreadState(SaveThreadState&& other) = delete; + SaveThreadState& operator=(const SaveThreadState&) = delete; + SaveThreadState& operator=(SaveThreadState&&) = delete; +}; + + +// +// RAII for npy_string_allocator. +// +// Instead of +// +// npy_string_allocator *allocator = NpyString_acquire_allocator(descr); +// [code that uses allocator] +// NpyString_release_allocator(allocator); +// +// use +// +// { +// np::raii::NpyStringAcquireAllocator alloc(descr); +// [code that uses alloc.allocator()] +// } +// +class NpyStringAcquireAllocator +{ + npy_string_allocator *_allocator; + +public: + + NpyStringAcquireAllocator(PyArray_StringDTypeObject *descr) { + _allocator = NpyString_acquire_allocator(descr); + } + + ~NpyStringAcquireAllocator() { + NpyString_release_allocator(_allocator); + } + + NpyStringAcquireAllocator(const NpyStringAcquireAllocator&) = delete; + NpyStringAcquireAllocator(NpyStringAcquireAllocator&& other) = delete; + NpyStringAcquireAllocator& operator=(const NpyStringAcquireAllocator&) = delete; + NpyStringAcquireAllocator& operator=(NpyStringAcquireAllocator&&) = delete; + + npy_string_allocator *allocator() { + return _allocator; + } +}; + +}} // namespace np { namespace raii { + +#endif diff --git a/numpy/_core/src/multiarray/stringdtype/casts.cpp b/numpy/_core/src/multiarray/stringdtype/casts.cpp index 3632e359c9a9..20ef3013bf86 100644 --- a/numpy/_core/src/multiarray/stringdtype/casts.cpp +++ b/numpy/_core/src/multiarray/stringdtype/casts.cpp @@ -18,6 +18,7 @@ #include "numpyos.h" #include "umathmodule.h" #include "gil_utils.h" +#include "raii_utils.hpp" #include "static_string.h" #include "dtypemeta.h" #include "dtype.h" @@ -1910,7 +1911,8 @@ string_to_bytes(PyArrayMethod_Context *context, char *const data[], NpyAuxData *NPY_UNUSED(auxdata)) { PyArray_StringDTypeObject *descr = (PyArray_StringDTypeObject *)context->descriptors[0]; - npy_string_allocator *allocator = NpyString_acquire_allocator(descr); + np::raii::NpyStringAcquireAllocator alloc(descr); + int has_null = descr->na_object != NULL; int has_string_na = descr->has_string_na; const npy_static_string *default_string = &descr->default_string; @@ -1926,22 +1928,22 @@ string_to_bytes(PyArrayMethod_Context *context, char *const data[], const npy_packed_static_string *ps = (npy_packed_static_string *)in; npy_static_string s = {0, NULL}; if (load_nullable_string(ps, &s, has_null, has_string_na, - default_string, na_name, allocator, + default_string, na_name, alloc.allocator(), "in string to bytes cast") == -1) { - goto fail; + return -1; } for (size_t i=0; i 127) { - NPY_ALLOW_C_API_DEF; - NPY_ALLOW_C_API; + np::raii::EnsureGIL ensure_gil{}; + PyObject *str = PyUnicode_FromStringAndSize(s.buf, s.size); if (str == NULL) { PyErr_SetString( PyExc_UnicodeEncodeError, "Invalid character encountered during unicode encoding." 
); - goto fail; + return -1; } PyObject *exc = PyObject_CallFunction( @@ -1956,14 +1958,13 @@ string_to_bytes(PyArrayMethod_Context *context, char *const data[], if (exc == NULL) { Py_DECREF(str); - goto fail; + return -1; } PyErr_SetObject(PyExceptionInstance_Class(exc), exc); Py_DECREF(exc); Py_DECREF(str); - NPY_DISABLE_C_API; - goto fail; + return -1; } } @@ -1976,15 +1977,7 @@ string_to_bytes(PyArrayMethod_Context *context, char *const data[], out += out_stride; } - NpyString_release_allocator(allocator); - return 0; - -fail: - - NpyString_release_allocator(allocator); - - return -1; } static PyType_Slot s2bytes_slots[] = { From 51f39f3110713b0ae93d4799f3108cb7d11bf155 Mon Sep 17 00:00:00 2001 From: Faizan-Ul Huda <61704685+faizanhuda12@users.noreply.github.com> Date: Fri, 28 Nov 2025 11:12:46 -0500 Subject: [PATCH 0922/1018] fixed documentation issue (#30318) fixed an issue I found related to documentation (a duplicated ``import pytest`` in the testing guide, and a missing ``testing`` entry in the reference index) --- doc/TESTS.rst | 2 -- doc/source/reference/index.rst | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/doc/TESTS.rst b/doc/TESTS.rst index e57cf7711da8..803625e727ae 100644 --- a/doc/TESTS.rst +++ b/doc/TESTS.rst @@ -143,8 +143,6 @@ module called ``test_yyy.py``. If you only need to test one aspect of More often, we need to group a number of tests together, so we create a test class:: - import pytest - # import xxx symbols from numpy.xxx.yyy import zzz import pytest diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst index 02e3248953fb..aa6c692d6b2b 100644 --- a/doc/source/reference/index.rst +++ b/doc/source/reference/index.rst @@ -61,6 +61,7 @@ Other topics thread_safety global_state security + testing distutils_status_migration distutils_guide swig From 9f0519e83247b8a1b46c12fb583d4d575d992bd7 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 28 Nov 2025 17:18:33 +0100 Subject: [PATCH 0923/1018] BUG: Fix descriptor-change-related build/parse value issues and skip test on 32bit (#30314) * TST: Skip test that requires 64bit system more explicitly This test wasn't always skipped reliably in Debian builds.
* BUG: Fix large-pickle round-trip and hash * Use int64 for parsing, but not uint64 (backward-compat) I guess this means we can't use the top flag for a bit longer ;) --- numpy/_core/src/multiarray/descriptor.c | 26 ++++++++++++------------- numpy/_core/src/multiarray/hashdescr.c | 2 +- numpy/_core/tests/test_dtype.py | 12 +++++++++++- numpy/_core/tests/test_nditer.py | 2 ++ 4 files changed, 27 insertions(+), 15 deletions(-) diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index 37776f6a60cc..1fc5b76d1f00 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -2866,13 +2866,13 @@ _descr_find_object(PyArray_Descr *self) static PyObject * arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) { - int elsize = -1, alignment = -1; + Py_ssize_t elsize = -1, alignment = -1; int version = 4; char endian; PyObject *endian_obj; PyObject *subarray, *fields, *names = NULL, *metadata=NULL; int incref_names = 1; - int int_dtypeflags = 0; + npy_int64 signed_dtypeflags = 0; npy_uint64 dtypeflags; if (!PyDataType_ISLEGACY(self)) { @@ -2891,24 +2891,24 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) } switch (PyTuple_GET_SIZE(PyTuple_GET_ITEM(args,0))) { case 9: - if (!PyArg_ParseTuple(args, "(iOOOOiiiO):__setstate__", + if (!PyArg_ParseTuple(args, "(iOOOOnnkO):__setstate__", &version, &endian_obj, &subarray, &names, &fields, &elsize, - &alignment, &int_dtypeflags, &metadata)) { + &alignment, &signed_dtypeflags, &metadata)) { PyErr_Clear(); return NULL; } break; case 8: - if (!PyArg_ParseTuple(args, "(iOOOOiii):__setstate__", + if (!PyArg_ParseTuple(args, "(iOOOOnnk):__setstate__", &version, &endian_obj, &subarray, &names, &fields, &elsize, - &alignment, &int_dtypeflags)) { + &alignment, &signed_dtypeflags)) { return NULL; } break; case 7: - if (!PyArg_ParseTuple(args, "(iOOOOii):__setstate__", + if (!PyArg_ParseTuple(args, "(iOOOOnn):__setstate__", &version, &endian_obj, &subarray, &names, &fields, &elsize, &alignment)) { @@ -2916,7 +2916,7 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) } break; case 6: - if (!PyArg_ParseTuple(args, "(iOOOii):__setstate__", + if (!PyArg_ParseTuple(args, "(iOOOnn):__setstate__", &version, &endian_obj, &subarray, &fields, &elsize, &alignment)) { @@ -2925,7 +2925,7 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) break; case 5: version = 0; - if (!PyArg_ParseTuple(args, "(OOOii):__setstate__", + if (!PyArg_ParseTuple(args, "(OOOnn):__setstate__", &endian_obj, &subarray, &fields, &elsize, &alignment)) { return NULL; @@ -3173,12 +3173,12 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) * flags as an int even though it actually was a char in the PyArray_Descr * structure */ - if (int_dtypeflags < 0 && int_dtypeflags >= -128) { + if (signed_dtypeflags < 0 && signed_dtypeflags >= -128) { /* NumPy used to use a char. So normalize if signed. 
*/ - int_dtypeflags += 128; + signed_dtypeflags += 128; } - dtypeflags = int_dtypeflags; - if (dtypeflags != int_dtypeflags) { + dtypeflags = (npy_uint64)signed_dtypeflags; + if (dtypeflags != signed_dtypeflags) { PyErr_Format(PyExc_ValueError, "incorrect value for flags variable (overflow)"); return NULL; diff --git a/numpy/_core/src/multiarray/hashdescr.c b/numpy/_core/src/multiarray/hashdescr.c index 0117ef218a81..853e247e0b74 100644 --- a/numpy/_core/src/multiarray/hashdescr.c +++ b/numpy/_core/src/multiarray/hashdescr.c @@ -79,7 +79,7 @@ static int _array_descr_builtin(PyArray_Descr* descr, PyObject *l) * For builtin type, hash relies on : kind + byteorder + flags + * type_num + elsize + alignment */ - t = Py_BuildValue("(cccii)", descr->kind, nbyteorder, + t = Py_BuildValue("(ccKnn)", descr->kind, nbyteorder, descr->flags, descr->elsize, descr->alignment); for(i = 0; i < PyTuple_Size(t); ++i) { diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 616e1fec02e3..8819262c4e21 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -18,6 +18,7 @@ from numpy._core._rational_tests import rational from numpy.testing import ( HAS_REFCOUNT, + IS_64BIT, IS_PYPY, IS_PYSTON, IS_WASM, @@ -1376,6 +1377,15 @@ def check_pickling(self, dtype): assert_equal(x, y) assert_equal(x[0], y[0]) + @pytest.mark.skipif(not IS_64BIT, reason="test requires 64-bit system") + @pytest.mark.xfail(reason="dtype conversion doesn't allow this yet.") + def test_pickling_large(self): + # The actual itemsize is larger than a c-integer here. + dtype = np.dtype(f"({2**31},)i") + self.check_pickling(dtype) + dtype = np.dtype(f"({2**31},)i", metadata={"a": "b"}) + self.check_pickling(dtype) + @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object, bool]) def test_builtin(self, t): @@ -1440,7 +1450,7 @@ def test_pickle_dtype(self, dt): for proto in range(pickle.HIGHEST_PROTOCOL + 1): roundtrip_dt = pickle.loads(pickle.dumps(dt, proto)) assert roundtrip_dt == dt - assert hash(dt) == pre_pickle_hash + assert hash(roundtrip_dt) == pre_pickle_hash class TestPromotion: diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index 84a88d54548e..943c25cdaa13 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -12,6 +12,7 @@ from numpy import all, arange, array, nditer from numpy.testing import ( HAS_REFCOUNT, + IS_64BIT, IS_PYPY, IS_WASM, assert_, @@ -3404,6 +3405,7 @@ def test_arbitrary_number_of_ops_nested(): @pytest.mark.slow +@pytest.mark.skipif(not IS_64BIT, reason="test requires 64-bit system") @requires_memory(9 * np.iinfo(np.intc).max) @pytest.mark.thread_unsafe(reason="crashes with low memory") def test_arbitrary_number_of_ops_error(): From 3a811fb9af105372da2d07d83e77ae085d51f54e Mon Sep 17 00:00:00 2001 From: Steven Hur <138725592+Jongwan93@users.noreply.github.com> Date: Fri, 28 Nov 2025 12:20:13 -0500 Subject: [PATCH 0924/1018] BUG: Fix misleading ValueError in convolve on empty inputs due to variable swapping (#30277) * BUG: Fix misleading ValueError in convolve on empty inputs Previously, numpy.convolve swapped arguments for optimization if the second argument was longer than the first. This occurred before checking for empty arrays. Consequently, if the first argument was empty, the swap would cause the error message to incorrectly report that the second argument was empty (e.g., "v cannot be empty"). 
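A minimal illustration of the fixed behavior (the session below is a sketch assuming this patch is applied):

    >>> import numpy as np
    >>> np.convolve(np.array([]), np.array([1, 2]))
    Traceback (most recent call last):
        ...
    ValueError: a cannot be empty

Before this change the same call reported "v cannot be empty", because the length-based swap had already exchanged the operands when the emptiness checks ran.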
--- numpy/_core/numeric.py | 4 ++-- numpy/_core/tests/test_numeric.py | 10 ++++++++++ 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index b050b81834c6..11527c9de442 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -893,12 +893,12 @@ def convolve(a, v, mode='full'): """ a, v = array(a, copy=None, ndmin=1), array(v, copy=None, ndmin=1) - if (len(v) > len(a)): - a, v = v, a if len(a) == 0: raise ValueError('a cannot be empty') if len(v) == 0: raise ValueError('v cannot be empty') + if len(v) > len(a): + a, v = v, a return multiarray.correlate(a, v[::-1], mode) diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 2c5535a19c77..9e71b7c6b1b8 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -3686,6 +3686,16 @@ def test_mode(self): with assert_raises(TypeError): np.convolve(d, k, mode=None) + def test_convolve_empty_input_error_message(self): + """ + Test that convolve raises the correct error message when inputs are empty. + Regression test for gh-30272 (variable swapping bug). + """ + with pytest.raises(ValueError, match="a cannot be empty"): + np.convolve(np.array([]), np.array([1, 2])) + + with pytest.raises(ValueError, match="v cannot be empty"): + np.convolve(np.array([1, 2]), np.array([])) class TestArgwhere: From 0a5840644cfb0a1bbd4ce1dceddb8dc594e805e6 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sat, 29 Nov 2025 01:18:53 +0100 Subject: [PATCH 0925/1018] TYP: ``__numpy_dtype__`` (#30321) * TYP: ``__numpy_dtype__`` * TYP: type-test for ``dtype`` construction with ``__numpy_dtype__`` --- numpy/_typing/__init__.py | 1 + numpy/_typing/_dtype_like.py | 15 +++++++++++---- numpy/lib/_index_tricks_impl.pyi | 4 ++-- numpy/typing/tests/data/reveal/dtype.pyi | 5 +++++ numpy/typing/tests/test_runtime.py | 5 ----- 5 files changed, 19 insertions(+), 11 deletions(-) diff --git a/numpy/_typing/__init__.py b/numpy/_typing/__init__.py index 11ed6e62f1be..22eb6e3f30bb 100644 --- a/numpy/_typing/__init__.py +++ b/numpy/_typing/__init__.py @@ -98,6 +98,7 @@ _DTypeLikeTD64 as _DTypeLikeTD64, _DTypeLikeUInt as _DTypeLikeUInt, _DTypeLikeVoid as _DTypeLikeVoid, + _HasDType as _HasDType, _SupportsDType as _SupportsDType, _VoidDTypeLike as _VoidDTypeLike, ) diff --git a/numpy/_typing/_dtype_like.py b/numpy/_typing/_dtype_like.py index b44dd06e0d7f..34c4bd44f519 100644 --- a/numpy/_typing/_dtype_like.py +++ b/numpy/_typing/_dtype_like.py @@ -1,5 +1,5 @@ from collections.abc import Sequence # noqa: F811 -from typing import Any, Protocol, TypeAlias, TypedDict, TypeVar, runtime_checkable +from typing import Any, Protocol, TypeAlias, TypedDict, TypeVar import numpy as np @@ -19,6 +19,7 @@ ) _ScalarT = TypeVar("_ScalarT", bound=np.generic) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype) _DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, covariant=True) _DTypeLikeNested: TypeAlias = Any # TODO: wait for support for recursive types @@ -40,13 +41,19 @@ class _DTypeDict(_DTypeDictBase, total=False): aligned: bool -# A protocol for anything with the dtype attribute -@runtime_checkable -class _SupportsDType(Protocol[_DTypeT_co]): +class _HasDType(Protocol[_DTypeT_co]): @property def dtype(self) -> _DTypeT_co: ... +class _HasNumPyDType(Protocol[_DTypeT_co]): + @property + def __numpy_dtype__(self, /) -> _DTypeT_co: ... + + +_SupportsDType: TypeAlias = _HasDType[_DTypeT] | _HasNumPyDType[_DTypeT] + + # A subset of `npt.DTypeLike` that can be parametrized w.r.t. 
`np.generic` _DTypeLike: TypeAlias = type[_ScalarT] | np.dtype[_ScalarT] | _SupportsDType[np.dtype[_ScalarT]] diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index 570688fe8d62..aad3124445a3 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -24,9 +24,9 @@ from numpy._typing import ( _ArrayLike, _DTypeLike, _FiniteNestedSequence, + _HasDType, _NestedSequence, _SupportsArray, - _SupportsDType, ) __all__ = [ # noqa: RUF022 @@ -235,7 +235,7 @@ class IndexExpression(Generic[_BoolT_co]): def __getitem__(self: IndexExpression[L[False]], item: _T) -> _T: ... @overload -def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DTypeT]]) -> tuple[np.ndarray[_AnyShape, _DTypeT], ...]: ... +def ix_(*args: _FiniteNestedSequence[_HasDType[_DTypeT]]) -> tuple[np.ndarray[_AnyShape, _DTypeT], ...]: ... @overload def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[np.str_], ...]: ... @overload diff --git a/numpy/typing/tests/data/reveal/dtype.pyi b/numpy/typing/tests/data/reveal/dtype.pyi index 1794c944b3ae..7f670b3be51c 100644 --- a/numpy/typing/tests/data/reveal/dtype.pyi +++ b/numpy/typing/tests/data/reveal/dtype.pyi @@ -125,3 +125,8 @@ assert_type(dtype_V["f0"], np.dtype) assert_type(dtype_V[0], np.dtype) assert_type(dtype_V[["f0", "f1"]], np.dtype[np.void]) assert_type(dtype_V[["f0"]], np.dtype[np.void]) + +class _D: + __numpy_dtype__: np.dtype[np.int8] + +assert_type(np.dtype(_D()), np.dtype[np.int8]) diff --git a/numpy/typing/tests/test_runtime.py b/numpy/typing/tests/test_runtime.py index 8e49fda8185c..462fe4eabdc0 100644 --- a/numpy/typing/tests/test_runtime.py +++ b/numpy/typing/tests/test_runtime.py @@ -93,7 +93,6 @@ def test_keys() -> None: PROTOCOLS: dict[str, tuple[type[Any], object]] = { - "_SupportsDType": (_npt._SupportsDType, np.int64(1)), "_SupportsArray": (_npt._SupportsArray, np.arange(10)), "_SupportsArrayFunc": (_npt._SupportsArrayFunc, np.arange(10)), "_NestedSequence": (_npt._NestedSequence, [1]), @@ -107,9 +106,5 @@ def test_isinstance(self, cls: type[Any], obj: object) -> None: assert not isinstance(None, cls) def test_issubclass(self, cls: type[Any], obj: object) -> None: - if cls is _npt._SupportsDType: - pytest.xfail( - "Protocols with non-method members don't support issubclass()" - ) assert issubclass(type(obj), cls) assert not issubclass(type(None), cls) From 82a7312eea1e72b73b6a22b8828d9d2851b7d171 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sat, 29 Nov 2025 01:30:17 +0100 Subject: [PATCH 0926/1018] TYP: ``ndenumerate`` generic type parameter default (#30324) --- numpy/lib/_index_tricks_impl.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index aad3124445a3..ff316f566993 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -53,7 +53,7 @@ _TupleT = TypeVar("_TupleT", bound=tuple[Any, ...]) _ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) _DTypeT = TypeVar("_DTypeT", bound=np.dtype) _ScalarT = TypeVar("_ScalarT", bound=np.generic) -_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) +_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, default=Any, covariant=True) _BoolT_co = TypeVar("_BoolT_co", bound=bool, default=bool, covariant=True) _AxisT_co = TypeVar("_AxisT_co", bound=int, default=L[0], covariant=True) From 355a1ac4e39f431d13266c99549e72df34683419 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sat, 29 Nov 2025 01:32:48 +0100 Subject: 
[PATCH 0927/1018] DOC, TYP: document the ``numpy.typing`` deprecations from 2.3 (#30325) --- numpy/typing/__init__.py | 34 +++++++++++++++++++++++++++++++++- numpy/typing/mypy_plugin.py | 5 +++++ 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index 163655bd7662..ef4c0885257b 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -104,13 +104,45 @@ >>> import numpy.typing as npt >>> T = TypeVar("T", bound=npt.NBitBase) - >>> def func(a: "np.floating[T]", b: "np.floating[T]") -> "np.floating[T]": + >>> def func(a: np.floating[T], b: np.floating[T]) -> np.floating[T]: ... ... Consequently, the likes of `~numpy.float16`, `~numpy.float32` and `~numpy.float64` are still sub-types of `~numpy.floating`, but, contrary to runtime, they're not necessarily considered as sub-classes. +.. deprecated:: 2.3 + The :class:`~numpy.typing.NBitBase` helper is deprecated and will be + removed in a future release. Prefer expressing precision relationships via + ``typing.overload`` or ``TypeVar`` definitions bounded by concrete scalar + classes. For example: + + .. code-block:: python + + from typing import TypeVar + import numpy as np + + S = TypeVar("S", bound=np.floating) + + def func(a: S, b: S) -> S: + ... + + or in the case of different input types mapping to different output types: + + .. code-block:: python + + from typing import overload + import numpy as np + + @overload + def phase(x: np.complex64) -> np.float32: ... + @overload + def phase(x: np.complex128) -> np.float64: ... + @overload + def phase(x: np.clongdouble) -> np.longdouble: ... + def phase(x: np.complexfloating) -> np.floating: + ... + Timedelta64 ~~~~~~~~~~~ diff --git a/numpy/typing/mypy_plugin.py b/numpy/typing/mypy_plugin.py index dc1e2564fc32..fb78eb077c44 100644 --- a/numpy/typing/mypy_plugin.py +++ b/numpy/typing/mypy_plugin.py @@ -18,6 +18,11 @@ .. versionadded:: 1.22 .. deprecated:: 2.3 + The :mod:`numpy.typing.mypy_plugin` entry-point is deprecated in favor of + platform-agnostic static type inference. Remove + ``numpy.typing.mypy_plugin`` from the ``plugins`` section of your mypy + configuration; if that surfaces new errors, please open an issue with a + minimal reproducer. Examples -------- From d0a303ea4d60bdb2edd028d86757867085f83250 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sat, 29 Nov 2025 01:34:19 +0100 Subject: [PATCH 0928/1018] TYP: ``ma.mrecords.MaskedRecords`` generic type parameter defaults (#30326) --- numpy/ma/mrecords.pyi | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/numpy/ma/mrecords.pyi b/numpy/ma/mrecords.pyi index d5cdf097bb84..737a34ebdb70 100644 --- a/numpy/ma/mrecords.pyi +++ b/numpy/ma/mrecords.pyi @@ -1,8 +1,10 @@ -from typing import Any, TypeVar +from typing import Any, Generic +from typing_extensions import TypeVar -from numpy import dtype +import numpy as np +from numpy._typing import _AnyShape -from . 
import MaskedArray +from .core import MaskedArray __all__ = [ "MaskedRecords", @@ -13,10 +15,10 @@ __all__ = [ "addfield", ] -_ShapeT_co = TypeVar("_ShapeT_co", covariant=True, bound=tuple[int, ...]) -_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, covariant=True) +_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], default=_AnyShape, covariant=True) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) -class MaskedRecords(MaskedArray[_ShapeT_co, _DTypeT_co]): +class MaskedRecords(MaskedArray[_ShapeT_co, _DTypeT_co], Generic[_ShapeT_co, _DTypeT_co]): def __new__( cls, shape, From 250285f3cefd950e3a41e049a8733a3f6a9b920c Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sat, 29 Nov 2025 01:36:49 +0100 Subject: [PATCH 0929/1018] TYP: ``_core._umath_tests`` module stubs (#30327) --- numpy/_core/_umath_tests.pyi | 47 ++++++++++++++++++++++++++++++++++++ numpy/_core/meson.build | 5 ++-- tools/stubtest/allowlist.txt | 13 ++++++++-- 3 files changed, 61 insertions(+), 4 deletions(-) create mode 100644 numpy/_core/_umath_tests.pyi diff --git a/numpy/_core/_umath_tests.pyi b/numpy/_core/_umath_tests.pyi new file mode 100644 index 000000000000..696cec3b755e --- /dev/null +++ b/numpy/_core/_umath_tests.pyi @@ -0,0 +1,47 @@ +# undocumented internal testing module for ufunc features, defined in +# numpy/_core/src/umath/_umath_tests.c.src + +from typing import Final, Literal as L, TypedDict, type_check_only + +import numpy as np +from numpy._typing import _GUFunc_Nin2_Nout1, _UFunc_Nin1_Nout1, _UFunc_Nin2_Nout1 + +@type_check_only +class _TestDispatchResult(TypedDict): + func: str # e.g. 'func_AVX2' + var: str # e.g. 'var_AVX2' + func_xb: str # e.g. 'func_AVX2' + var_xb: str # e.g. 'var_AVX2' + all: list[str] # e.g. ['func_AVX2', 'func_SSE41', 'func'] + +### + +# undocumented +def test_signature( + nin: int, nout: int, signature: str, / +) -> tuple[ + L[0, 1], # core_enabled (0 for scalar ufunc; 1 for generalized ufunc) + tuple[int, ...] | None, # core_num_dims + tuple[int, ...] | None, # core_dim_ixs + tuple[int, ...] | None, # core_dim_flags + tuple[int, ...] | None, # core_dim_sizes +]: ... + +# undocumented +def test_dispatch() -> _TestDispatchResult: ... + +# undocumented ufuncs and gufuncs +always_error: Final[_UFunc_Nin2_Nout1[L["always_error"], L[1], None]] = ... +always_error_unary: Final[_UFunc_Nin1_Nout1[L["always_error_unary"], L[1], None]] = ... +always_error_gufunc: Final[_GUFunc_Nin2_Nout1[L["always_error_gufunc"], L[1], None, L["(i),()->()"]]] = ... +inner1d: Final[_GUFunc_Nin2_Nout1[L["inner1d"], L[2], None, L["(i),(i)->()"]]] = ... +innerwt: Final[np.ufunc] = ... # we have no specialized type for 3->1 gufuncs +matrix_multiply: Final[_GUFunc_Nin2_Nout1[L["matrix_multiply"], L[3], None, L["(m,n),(n,p)->(m,p)"]]] = ... +matmul: Final[_GUFunc_Nin2_Nout1[L["matmul"], L[3], None, L["(m?,n),(n,p?)->(m?,p?)"]]] = ... +euclidean_pdist: Final[_GUFunc_Nin2_Nout1[L["euclidean_pdist"], L[2], None, L["(n,d)->(p)"]]] = ... +cumsum: Final[np.ufunc] = ... # we have no specialized type for 1->1 gufuncs +inner1d_no_doc: Final[_GUFunc_Nin2_Nout1[L["inner1d_no_doc"], L[2], None, L["(i),(i)->()"]]] = ... +cross1d: Final[_GUFunc_Nin2_Nout1[L["cross1d"], L[2], None, L["(3),(3)->(3)"]]] = ... +_pickleable_module_global_ufunc: Final[np.ufunc] = ... # 0->0 ufunc; segfaults if called +indexed_negative: Final[_UFunc_Nin1_Nout1[L["indexed_negative"], L[0], L[0]]] = ... 
# ntypes=0; can't be called +conv1d_full: Final[_GUFunc_Nin2_Nout1[L["conv1d_full"], L[1], None, L["(m),(n)->(p)"]]] = ... diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 487a5a684f97..3a0b52c3b079 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -927,7 +927,7 @@ foreach gen_mtargets : [ 'loops_arithm_fp.dispatch.h', src_file.process('src/umath/loops_arithm_fp.dispatch.c.src'), [ - X86_V3, X86_V2, + X86_V3, X86_V2, ASIMD, NEON, VSX3, VSX2, VXE, VX, @@ -960,7 +960,7 @@ foreach gen_mtargets : [ 'loops_exponent_log.dispatch.h', src_file.process('src/umath/loops_exponent_log.dispatch.c.src'), [ - X86_V4, X86_V3, + X86_V4, X86_V3, ] ], [ @@ -1403,6 +1403,7 @@ python_sources = [ '_type_aliases.pyi', '_ufunc_config.py', '_ufunc_config.pyi', + '_umath_tests.pyi', 'arrayprint.py', 'arrayprint.pyi', 'cversions.py', diff --git a/tools/stubtest/allowlist.txt b/tools/stubtest/allowlist.txt index 2cfa30d3c0b1..30f4cd120cc9 100644 --- a/tools/stubtest/allowlist.txt +++ b/tools/stubtest/allowlist.txt @@ -59,7 +59,7 @@ numpy\.core\.shape_base.* numpy\.core\.umath.* numpy\.typing\.mypy_plugin -# ufuncs +# ufuncs, see https://github.com/python/mypy/issues/20223 numpy\.(\w+\.)*abs numpy\.(\w+\.)*absolute numpy\.(\w+\.)*acos @@ -177,7 +177,16 @@ numpy\.(\w+\.)*istitle numpy\.(\w+\.)*isupper numpy\.(\w+\.)*str_len numpy\._core\._methods\.umr_bitwise_count -numpy\._core\._methods\.umr_bitwise_count +numpy\._core\._umath_tests\.always_error +numpy\._core\._umath_tests\.always_error_gufunc +numpy\._core\._umath_tests\.always_error_unary +numpy\._core\._umath_tests\.conv1d_full +numpy\._core\._umath_tests\.cross1d +numpy\._core\._umath_tests\.euclidean_pdist +numpy\._core\._umath_tests\.indexed_negative +numpy\._core\._umath_tests\.inner1d +numpy\._core\._umath_tests\.inner1d_no_doc +numpy\._core\._umath_tests\.matrix_multiply numpy\.linalg\._umath_linalg\.qr_complete numpy\.linalg\._umath_linalg\.qr_reduced numpy\.linalg\._umath_linalg\.solve From afd1ccee30edda4526bbe9c323cc9280b5922fc0 Mon Sep 17 00:00:00 2001 From: Tontonio3 <81591435+Tontonio3@users.noreply.github.com> Date: Sun, 30 Nov 2025 10:57:08 -0500 Subject: [PATCH 0930/1018] BUG: quantile should error when weights are all zeros (#28595) * added err messages and tests * Modified tests and added release note * Fixed tests * Fixed bug to handle object dtypes * Fixed bug to handle object dtypes * Streamlined testing, improved error handling capabilities * Changed infinite error message * Bug fix * Fixed lint test * Improved testing * Changed error handling, made it faster, removed dtype=object special cases * More comprehensive testing * More comprehensive testing * Fixed lint * Fixed tests * Fixed CircleCI test * streamlined checks * lint fix * lint fix * Fix and simplify things (but don't bother with being overly specific) * Appease linter gods and also use one valid entry for other test * rename release note fragment --------- Co-authored-by: Sebastian Berg --- .../upcoming_changes/28595.improvement.rst | 7 ++++ numpy/lib/_function_base_impl.py | 3 ++ numpy/lib/tests/test_function_base.py | 32 +++++++++++++++++++ 3 files changed, 42 insertions(+) create mode 100644 doc/release/upcoming_changes/28595.improvement.rst diff --git a/doc/release/upcoming_changes/28595.improvement.rst b/doc/release/upcoming_changes/28595.improvement.rst new file mode 100644 index 000000000000..aea833f5179c --- /dev/null +++ b/doc/release/upcoming_changes/28595.improvement.rst @@ -0,0 +1,7 @@ +Improved error handling in `np.quantile` 
+---------------------------------------- +`np.quantile` now raises errors if: + +* All weights are zero +* At least one weight is `np.nan` +* At least one weight is `np.inf` \ No newline at end of file diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 09ab5e91a33c..87a834850fda 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -4827,6 +4827,9 @@ def _quantile( # distribution function cdf cdf = weights.cumsum(axis=0, dtype=np.float64) cdf /= cdf[-1, ...] # normalization to 1 + if np.isnan(cdf[-1]).any(): + # Above calculations should normally warn for the zero/inf case. + raise ValueError("Weights included NaN, inf or were all zero.") # Search index i such that # sum(weights[j], j=0..i-1) < quantile <= sum(weights[j], j=0..i) # is then equivalent to diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 1aaca3bae1d6..5b8b0adbdd0d 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -4277,6 +4277,37 @@ def test_closest_observation(self): assert_equal(4, np.quantile(arr[0:9], q, method=m)) assert_equal(5, np.quantile(arr, q, method=m)) + @pytest.mark.parametrize("weights", + [[1, np.inf, 1, 1], [1, np.inf, 1, np.inf], [0, 0, 0, 0], + [np.finfo("float64").max] * 4]) + @pytest.mark.parametrize("dty", ["f8", "O"]) + def test_inf_zeroes_err(self, weights, dty): + m = "inverted_cdf" + q = 0.5 + arr = np.array([[1, 2, 3, 4]] * 2) + # Make one entry have bad weights and another good ones. + wgts = np.array([weights, [0.5] * 4], dtype=dty) + with pytest.raises(ValueError, + match=r"Weights included NaN, inf or were all zero"): + # We (currently) don't bother to check ahead so 0/0 or + # overflow to `inf` while summing weights, or `inf / inf` + # will all warn before the error is raised. + with np.errstate(all="ignore"): + a = np.quantile(arr, q, weights=wgts, method=m, axis=1) + + @pytest.mark.parametrize("weights", + [[1, np.nan, 1, 1], [1, np.nan, np.nan, 1]]) + @pytest.mark.parametrize(["err", "dty"], + [(ValueError, "f8"), ((RuntimeWarning, ValueError), "O")]) + def test_nan_err(self, err, dty, weights): + m = "inverted_cdf" + q = 0.5 + arr = np.array([[1, 2, 3, 4]] * 2) + # Make one entry have bad weights and another good ones. 
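+        # (Hedged editorial note: the "O" dtype case accepts either error in
+        # the parametrized ``err`` tuple, since object arrays may surface a
+        # RuntimeWarning from Python-level arithmetic before the ValueError.)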
+ wgts = np.array([weights, [0.5] * 4], dtype=dty) + with pytest.raises(err): + a = np.quantile(arr, q, weights=wgts, method=m) + def test_quantile_gh_29003_Fraction(self): r = np.quantile([1, 2], q=Fraction(1)) assert r == Fraction(2) @@ -4293,6 +4324,7 @@ def test_float16_gh_29003(self): assert value == q * 50_000 assert value.dtype == np.float16 + class TestLerp: @hypothesis.given(t0=st.floats(allow_nan=False, allow_infinity=False, min_value=0, max_value=1), From 2de293aed2d8307db261faf4f5071f79376ccc85 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 30 Nov 2025 09:32:25 -0700 Subject: [PATCH 0931/1018] BEG, MAINT: Begin NumPy 2.5 development (#30333) - Create 2.5.0-notes.rst - Update release.rst - Update pyproject.toml - Update cversions.txt - Update numpyconfig.h - Update numpy/_core/meson.build - Delete release fragments [skip cirrus] [skip actions] --- .../upcoming_changes/28590.improvement.rst | 33 ----------- .../upcoming_changes/28595.improvement.rst | 7 --- doc/release/upcoming_changes/28767.change.rst | 10 ---- .../upcoming_changes/28767.performance.rst | 10 ---- doc/release/upcoming_changes/28896.change.rst | 56 ------------------- .../upcoming_changes/28925.deprecation.rst | 9 --- .../upcoming_changes/29030.compatibility.rst | 6 -- .../upcoming_changes/29052.deprecation.rst | 10 ---- doc/release/upcoming_changes/29060.change.rst | 3 - .../upcoming_changes/29094.compatibility.rst | 7 --- doc/release/upcoming_changes/29105.change.rst | 1 - .../upcoming_changes/29112.improvement.rst | 5 -- .../upcoming_changes/29129.enhancement.rst | 7 --- .../upcoming_changes/29137.compatibility.rst | 3 - .../upcoming_changes/29165.performance.rst | 7 --- doc/release/upcoming_changes/29179.change.rst | 4 -- .../upcoming_changes/29240.new_feature.rst | 1 - .../upcoming_changes/29273.new_feature.rst | 1 - .../upcoming_changes/29301.deprecation.rst | 7 --- doc/release/upcoming_changes/29338.change.rst | 9 --- .../upcoming_changes/29396.improvement.rst | 5 -- .../upcoming_changes/29423.new_feature.rst | 7 --- doc/release/upcoming_changes/29537.change.rst | 7 --- .../upcoming_changes/29537.performance.rst | 9 --- .../upcoming_changes/29550.deprecation.rst | 6 -- .../upcoming_changes/29569.new_feature.rst | 27 --------- doc/release/upcoming_changes/29642.c_api.rst | 13 ----- doc/release/upcoming_changes/29642.change.rst | 7 --- doc/release/upcoming_changes/29739.change.rst | 15 ----- doc/release/upcoming_changes/29750.change.rst | 5 -- .../upcoming_changes/29813.new_feature.rst | 6 -- .../upcoming_changes/29819.improvement.rst | 6 -- doc/release/upcoming_changes/29836.c_api.rst | 15 ----- .../upcoming_changes/29836.improvement.rst | 26 --------- .../upcoming_changes/29841.expired.rst | 6 -- doc/release/upcoming_changes/29900.c_api.rst | 5 -- .../upcoming_changes/29900.new_feature.rst | 9 --- .../upcoming_changes/29909.expired.rst | 10 ---- .../upcoming_changes/29947.improvement.rst | 7 --- .../upcoming_changes/29973.expired.rst | 12 ---- .../upcoming_changes/29978.expired.rst | 4 -- .../upcoming_changes/29980.expired.rst | 5 -- .../upcoming_changes/29984.expired.rst | 5 -- .../upcoming_changes/29986.expired.rst | 10 ---- .../upcoming_changes/29994.expired.rst | 6 -- .../upcoming_changes/29997.expired.rst | 9 --- .../upcoming_changes/30021.expired.rst | 5 -- .../upcoming_changes/30068.expired.rst | 12 ---- .../upcoming_changes/30147.compatibility.rst | 4 -- .../upcoming_changes/30168.deprecation.rst | 5 -- .../upcoming_changes/30179.new_feature.rst | 13 ----- 
.../upcoming_changes/30208.highlight.rst | 2 - .../upcoming_changes/30208.improvement.rst | 12 ---- .../upcoming_changes/30282.deprecation.rst | 5 -- .../upcoming_changes/30284.deprecation.rst | 3 - doc/source/release.rst | 1 + doc/source/release/2.5.0-notes.rst | 19 +++++++ numpy/_core/code_generators/cversions.txt | 1 + numpy/_core/include/numpy/numpyconfig.h | 16 ++++-- numpy/_core/meson.build | 1 + pyproject.toml | 3 +- 61 files changed, 33 insertions(+), 507 deletions(-) delete mode 100644 doc/release/upcoming_changes/28590.improvement.rst delete mode 100644 doc/release/upcoming_changes/28595.improvement.rst delete mode 100644 doc/release/upcoming_changes/28767.change.rst delete mode 100644 doc/release/upcoming_changes/28767.performance.rst delete mode 100644 doc/release/upcoming_changes/28896.change.rst delete mode 100644 doc/release/upcoming_changes/28925.deprecation.rst delete mode 100644 doc/release/upcoming_changes/29030.compatibility.rst delete mode 100644 doc/release/upcoming_changes/29052.deprecation.rst delete mode 100644 doc/release/upcoming_changes/29060.change.rst delete mode 100644 doc/release/upcoming_changes/29094.compatibility.rst delete mode 100644 doc/release/upcoming_changes/29105.change.rst delete mode 100644 doc/release/upcoming_changes/29112.improvement.rst delete mode 100644 doc/release/upcoming_changes/29129.enhancement.rst delete mode 100644 doc/release/upcoming_changes/29137.compatibility.rst delete mode 100644 doc/release/upcoming_changes/29165.performance.rst delete mode 100644 doc/release/upcoming_changes/29179.change.rst delete mode 100644 doc/release/upcoming_changes/29240.new_feature.rst delete mode 100644 doc/release/upcoming_changes/29273.new_feature.rst delete mode 100644 doc/release/upcoming_changes/29301.deprecation.rst delete mode 100644 doc/release/upcoming_changes/29338.change.rst delete mode 100644 doc/release/upcoming_changes/29396.improvement.rst delete mode 100644 doc/release/upcoming_changes/29423.new_feature.rst delete mode 100644 doc/release/upcoming_changes/29537.change.rst delete mode 100644 doc/release/upcoming_changes/29537.performance.rst delete mode 100644 doc/release/upcoming_changes/29550.deprecation.rst delete mode 100644 doc/release/upcoming_changes/29569.new_feature.rst delete mode 100644 doc/release/upcoming_changes/29642.c_api.rst delete mode 100644 doc/release/upcoming_changes/29642.change.rst delete mode 100644 doc/release/upcoming_changes/29739.change.rst delete mode 100644 doc/release/upcoming_changes/29750.change.rst delete mode 100644 doc/release/upcoming_changes/29813.new_feature.rst delete mode 100644 doc/release/upcoming_changes/29819.improvement.rst delete mode 100644 doc/release/upcoming_changes/29836.c_api.rst delete mode 100644 doc/release/upcoming_changes/29836.improvement.rst delete mode 100644 doc/release/upcoming_changes/29841.expired.rst delete mode 100644 doc/release/upcoming_changes/29900.c_api.rst delete mode 100644 doc/release/upcoming_changes/29900.new_feature.rst delete mode 100644 doc/release/upcoming_changes/29909.expired.rst delete mode 100644 doc/release/upcoming_changes/29947.improvement.rst delete mode 100644 doc/release/upcoming_changes/29973.expired.rst delete mode 100644 doc/release/upcoming_changes/29978.expired.rst delete mode 100644 doc/release/upcoming_changes/29980.expired.rst delete mode 100644 doc/release/upcoming_changes/29984.expired.rst delete mode 100644 doc/release/upcoming_changes/29986.expired.rst delete mode 100644 doc/release/upcoming_changes/29994.expired.rst delete mode 
100644 doc/release/upcoming_changes/29997.expired.rst delete mode 100644 doc/release/upcoming_changes/30021.expired.rst delete mode 100644 doc/release/upcoming_changes/30068.expired.rst delete mode 100644 doc/release/upcoming_changes/30147.compatibility.rst delete mode 100644 doc/release/upcoming_changes/30168.deprecation.rst delete mode 100644 doc/release/upcoming_changes/30179.new_feature.rst delete mode 100644 doc/release/upcoming_changes/30208.highlight.rst delete mode 100644 doc/release/upcoming_changes/30208.improvement.rst delete mode 100644 doc/release/upcoming_changes/30282.deprecation.rst delete mode 100644 doc/release/upcoming_changes/30284.deprecation.rst create mode 100644 doc/source/release/2.5.0-notes.rst diff --git a/doc/release/upcoming_changes/28590.improvement.rst b/doc/release/upcoming_changes/28590.improvement.rst deleted file mode 100644 index 35f5cb3c2ad2..000000000000 --- a/doc/release/upcoming_changes/28590.improvement.rst +++ /dev/null @@ -1,33 +0,0 @@ -Fix ``flatiter`` indexing edge cases ------------------------------------- - -The ``flatiter`` object now shares the same index preparation logic as -``ndarray``, ensuring consistent behavior and fixing several issues where -invalid indices were previously accepted or misinterpreted. - -Key fixes and improvements: - -* Stricter index validation - - - Boolean non-array indices like ``arr.flat[[True, True]]`` were - incorrectly treated as ``arr.flat[np.array([1, 1], dtype=int)]``. - They now raise an index error. Note that indices that match the - iterator's shape are expected to not raise in the future and be - handled as regular boolean indices. Use ``np.asarray()`` if - you want to match that behavior. - - Float non-array indices were also cast to integer and incorrectly - treated as ``arr.flat[np.array([1.0, 1.0], dtype=int)]``. This is now - deprecated and will be removed in a future version. - - 0-dimensional boolean indices like ``arr.flat[True]`` are also - deprecated and will be removed in a future version. - -* Consistent error types: - - Certain invalid `flatiter` indices that previously raised `ValueError` - now correctly raise `IndexError`, aligning with `ndarray` behavior. - -* Improved error messages: - - The error message for unsupported index operations now provides more - specific details, including explicitly listing the valid index types, - instead of the generic ``IndexError: unsupported index operation``. diff --git a/doc/release/upcoming_changes/28595.improvement.rst b/doc/release/upcoming_changes/28595.improvement.rst deleted file mode 100644 index aea833f5179c..000000000000 --- a/doc/release/upcoming_changes/28595.improvement.rst +++ /dev/null @@ -1,7 +0,0 @@ -Improved error handling in `np.quantile` ----------------------------------------- -`np.quantile` now raises errors if: - -* All weights are zero -* At least one weight is `np.nan` -* At least one weight is `np.inf` \ No newline at end of file diff --git a/doc/release/upcoming_changes/28767.change.rst b/doc/release/upcoming_changes/28767.change.rst deleted file mode 100644 index ec173c3672b0..000000000000 --- a/doc/release/upcoming_changes/28767.change.rst +++ /dev/null @@ -1,10 +0,0 @@ -``unique_values`` for string dtypes may return unsorted data ------------------------------------------------------------- -np.unique now supports hash‐based duplicate removal for string dtypes. -This enhancement extends the hash-table algorithm to byte strings ('S'), -Unicode strings ('U'), and the experimental string dtype ('T', StringDType). 
-As a result, calling np.unique() on an array of strings will use -the faster hash-based method to obtain unique values. -Note that this hash-based method does not guarantee that the returned unique values will be sorted. -This also works for StringDType arrays containing None (missing values) -when using equal_nan=True (treating missing values as equal). diff --git a/doc/release/upcoming_changes/28767.performance.rst b/doc/release/upcoming_changes/28767.performance.rst deleted file mode 100644 index ef8ac1c3a45d..000000000000 --- a/doc/release/upcoming_changes/28767.performance.rst +++ /dev/null @@ -1,10 +0,0 @@ -Performance improvements to ``np.unique`` for string dtypes ------------------------------------------------------------ -The hash-based algorithm for unique extraction provides -an order-of-magnitude speedup on large string arrays. -In an internal benchmark with about 1 billion string elements, -the hash-based np.unique completed in roughly 33.5 seconds, -compared to 498 seconds with the sort-based method -– about 15× faster for unsorted unique operations on strings. -This improvement greatly reduces the time to find unique values -in very large string datasets. diff --git a/doc/release/upcoming_changes/28896.change.rst b/doc/release/upcoming_changes/28896.change.rst deleted file mode 100644 index 47538b7b22b2..000000000000 --- a/doc/release/upcoming_changes/28896.change.rst +++ /dev/null @@ -1,56 +0,0 @@ -Modulate dispatched x86 CPU features ------------------------------------- - -**IMPORTANT**: The default setting for ``cpu-baseline`` on x86 has been raised to the `x86-64-v2` microarchitecture. -This can be changed to ``none`` at build time to support older CPUs, -though SIMD optimizations for pre-2009 processors are no longer maintained. - -NumPy has reorganized x86 CPU features into microarchitecture-based groups instead of individual features, -aligning with Linux distribution standards and Google Highway requirements. - -Key changes: -* Replaced individual x86 features with microarchitecture levels: ``X86_V2``, ``X86_V3``, and ``X86_V4`` -* Raised the baseline to ``X86_V2`` -* Improved ``-`` operator behavior to properly exclude successor features that imply the excluded feature -* Added meson redirections for removed feature names to maintain backward compatibility -* Removed compiler compatibility workarounds for partial feature support (e.g., AVX512 without mask operations) -* Removed legacy AMD features (XOP, FMA4) and discontinued Intel Xeon Phi support - -New Feature Group Hierarchy: - -.. list-table:: - :header-rows: 1 - :align: left - - * - Name - - Implies - - Includes - * - ``X86_V2`` - - - - ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE4_1`` ``SSE4_2`` ``POPCNT`` ``CX16`` ``LAHF`` - * - ``X86_V3`` - - ``X86_V2`` - - ``AVX`` ``AVX2`` ``FMA3`` ``BMI`` ``BMI2`` ``LZCNT`` ``F16C`` ``MOVBE`` - * - ``X86_V4`` - - ``X86_V3`` - - ``AVX512F`` ``AVX512CD`` ``AVX512VL`` ``AVX512BW`` ``AVX512DQ`` - * - ``AVX512_ICL`` - - ``X86_V4`` - - ``AVX512VBMI`` ``AVX512VBMI2`` ``AVX512VNNI`` ``AVX512BITALG`` ``AVX512VPOPCNTDQ`` ``AVX512IFMA`` ``VAES`` ``GFNI`` ``VPCLMULQDQ`` - * - ``AVX512_SPR`` - - ``AVX512_ICL`` - - ``AVX512FP16`` - - -These groups correspond to CPU generations: - -- ``X86_V2``: x86-64-v2 microarchitectures (CPUs since 2009) -- ``X86_V3``: x86-64-v3 microarchitectures (CPUs since 2015) -- ``X86_V4``: x86-64-v4 microarchitectures (AVX-512 capable CPUs) -- ``AVX512_ICL``: Intel Ice Lake and similar CPUs -- ``AVX512_SPR``: Intel Sapphire Rapids and newer CPUs - -..
note:: - On 32-bit x86, ``cx16`` is excluded from ``X86_V2``. - -Documentation has been updated with details on using these new feature groups with the current meson build system. diff --git a/doc/release/upcoming_changes/28925.deprecation.rst b/doc/release/upcoming_changes/28925.deprecation.rst deleted file mode 100644 index a421839394fa..000000000000 --- a/doc/release/upcoming_changes/28925.deprecation.rst +++ /dev/null @@ -1,9 +0,0 @@ -Setting the ``strides`` attribute is deprecated ------------------------------------------------ -Setting the strides attribute is now deprecated since mutating -an array is unsafe if the array is shared, especially by multiple -threads. As an alternative, you can create a new view (no copy) via: -* `np.lib.stride_tricks.sliding_window_view` if applicable, -* `np.lib.stride_tricks.as_strided` for the general case, -* or the `np.ndarray` constructor (``buffer`` is the original array) for a light-weight version. - diff --git a/doc/release/upcoming_changes/29030.compatibility.rst b/doc/release/upcoming_changes/29030.compatibility.rst deleted file mode 100644 index cf08551e28ee..000000000000 --- a/doc/release/upcoming_changes/29030.compatibility.rst +++ /dev/null @@ -1,6 +0,0 @@ -* NumPy's C extension modules have begun to use multi-phase initialisation, - as defined by :pep:`489`. As part of this, a new explicit check has been added - that each such module is only imported once per Python process. This comes with - the side-effect that deleting ``numpy`` from ``sys.modules`` and re-importing - it will now fail with an ``ImportError``. This has always been unsafe, with - unexpected side-effects, though did not previously raise an error. diff --git a/doc/release/upcoming_changes/29052.deprecation.rst b/doc/release/upcoming_changes/29052.deprecation.rst deleted file mode 100644 index e302907abfba..000000000000 --- a/doc/release/upcoming_changes/29052.deprecation.rst +++ /dev/null @@ -1,10 +0,0 @@ -Positional ``out`` argument to `np.maximum`, `np.minimum` is deprecated ------------------------------------------------------------------------ -Passing the output array ``out`` positionally to `numpy.maximum` and -`numpy.minimum` is deprecated. For example, ``np.maximum(a, b, c)`` will -emit a deprecation warning, since ``c`` is treated as the output buffer -rather than a third input. - -Always pass the output with the keyword form, e.g. -``np.maximum(a, b, out=c)``. This makes intent clear and simplifies -type annotations. diff --git a/doc/release/upcoming_changes/29060.change.rst b/doc/release/upcoming_changes/29060.change.rst deleted file mode 100644 index 1561da7bf94e..000000000000 --- a/doc/release/upcoming_changes/29060.change.rst +++ /dev/null @@ -1,3 +0,0 @@ -* Multiplication between a string and an integer now raises OverflowError instead - of MemoryError if the result of the multiplication would create a string that - is too large to be represented. This follows Python's behavior. diff --git a/doc/release/upcoming_changes/29094.compatibility.rst b/doc/release/upcoming_changes/29094.compatibility.rst deleted file mode 100644 index 961ee6504dae..000000000000 --- a/doc/release/upcoming_changes/29094.compatibility.rst +++ /dev/null @@ -1,7 +0,0 @@ -The Macro NPY_ALIGNMENT_REQUIRED has been removed -------------------------------------------------- -The macro was defined in the `npy_cpu.h` file, so might be regarded as -semipublic. As it turns out, with modern compilers and hardware it is almost -always the case that alignment is required, so numpy no longer uses the macro. -It is unlikely anyone uses it, but you might want to compile with the `-Wundef` -flag or equivalent to be sure. diff --git a/doc/release/upcoming_changes/29105.change.rst b/doc/release/upcoming_changes/29105.change.rst deleted file mode 100644 index b5d4a9838f30..000000000000 --- a/doc/release/upcoming_changes/29105.change.rst +++ /dev/null @@ -1 +0,0 @@ -* The accuracy of ``np.quantile`` and ``np.percentile`` for 16- and 32-bit floating point input data has been improved. \ No newline at end of file diff --git a/doc/release/upcoming_changes/29112.improvement.rst b/doc/release/upcoming_changes/29112.improvement.rst deleted file mode 100644 index 01baa668b9fe..000000000000 --- a/doc/release/upcoming_changes/29112.improvement.rst +++ /dev/null @@ -1,5 +0,0 @@ -Improved error message for `assert_array_compare` -------------------------------------------------- -The error message generated by `assert_array_compare`, which is used by functions -such as `assert_allclose` and `assert_array_less`, now also includes information -about the indices at which the assertion fails. \ No newline at end of file diff --git a/doc/release/upcoming_changes/29129.enhancement.rst b/doc/release/upcoming_changes/29129.enhancement.rst deleted file mode 100644 index 9a14f13c1f4a..000000000000 --- a/doc/release/upcoming_changes/29129.enhancement.rst +++ /dev/null @@ -1,7 +0,0 @@ -``'same_value'`` for casting by value -------------------------------------- -The ``casting`` kwarg now has a ``'same_value'`` option that checks that the actual -values can be round-trip cast without changing value. Currently it is only -implemented in `ndarray.astype`. This will raise a ``ValueError`` if any of the -values in the array would change as a result of the cast, including rounding of -floats or overflowing of ints. diff --git a/doc/release/upcoming_changes/29137.compatibility.rst b/doc/release/upcoming_changes/29137.compatibility.rst deleted file mode 100644 index 3ac9da2a4c48..000000000000 --- a/doc/release/upcoming_changes/29137.compatibility.rst +++ /dev/null @@ -1,3 +0,0 @@ -* `numpy.round` now always returns a copy. Previously, it returned a view - for integer inputs for ``decimals >= 0`` and a copy in all other cases. - This change brings ``round`` in line with ``ceil``, ``floor`` and ``trunc``. diff --git a/doc/release/upcoming_changes/29165.performance.rst b/doc/release/upcoming_changes/29165.performance.rst deleted file mode 100644 index 4e1a9a4ecdbc..000000000000 --- a/doc/release/upcoming_changes/29165.performance.rst +++ /dev/null @@ -1,7 +0,0 @@ -Rewrite of `np.ndindex` using `itertools.product` -------------------------------------------------- -The `numpy.ndindex` function now uses `itertools.product` internally, -providing significant improvements in performance for large iteration spaces, -while maintaining the original behavior and interface. -For example, for an array of shape (50, 60, 90) the NumPy `ndindex` -benchmark improves performance by a factor of 5.2.
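A minimal sketch of the behavioral equivalence the ``ndindex`` note describes; this only illustrates the documented behavior, not NumPy's actual internals:

.. code-block:: python

    import itertools
    import numpy as np

    shape = (2, 3)
    # ndindex yields index tuples in C order, matching itertools.product
    # over the per-axis ranges.
    assert list(np.ndindex(*shape)) == list(
        itertools.product(*(range(n) for n in shape))
    )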
\ No newline at end of file diff --git a/doc/release/upcoming_changes/29179.change.rst b/doc/release/upcoming_changes/29179.change.rst deleted file mode 100644 index 12eb6804d3dd..000000000000 --- a/doc/release/upcoming_changes/29179.change.rst +++ /dev/null @@ -1,4 +0,0 @@ -Fix bug in ``matmul`` for non-contiguous out kwarg parameter ------------------------------------------------------------- -In some cases, if ``out`` was non-contiguous, ``np.matmul`` would cause -memory corruption or a C-level assert. This was new to v2.3.0 and fixed in v2.3.1. diff --git a/doc/release/upcoming_changes/29240.new_feature.rst b/doc/release/upcoming_changes/29240.new_feature.rst deleted file mode 100644 index 02d43364b200..000000000000 --- a/doc/release/upcoming_changes/29240.new_feature.rst +++ /dev/null @@ -1 +0,0 @@ -* Let ``np.size`` accept multiple axes. diff --git a/doc/release/upcoming_changes/29273.new_feature.rst b/doc/release/upcoming_changes/29273.new_feature.rst deleted file mode 100644 index 3e380ca0dbe6..000000000000 --- a/doc/release/upcoming_changes/29273.new_feature.rst +++ /dev/null @@ -1 +0,0 @@ -Extend ``numpy.pad`` to accept a dictionary for the ``pad_width`` argument. diff --git a/doc/release/upcoming_changes/29301.deprecation.rst b/doc/release/upcoming_changes/29301.deprecation.rst deleted file mode 100644 index e520b692458d..000000000000 --- a/doc/release/upcoming_changes/29301.deprecation.rst +++ /dev/null @@ -1,7 +0,0 @@ -``align=`` must be passed as boolean to ``np.dtype()`` ------------------------------------------------------- -When creating a new ``dtype``, a ``VisibleDeprecationWarning`` will be -given if ``align=`` is not a boolean. -This is mainly to prevent accidentally passing a subarray align flag where it -has no effect, such as ``np.dtype("f8", 3)`` instead of ``np.dtype(("f8", 3))``. -We strongly suggest always passing ``align=`` as a keyword argument. diff --git a/doc/release/upcoming_changes/29338.change.rst b/doc/release/upcoming_changes/29338.change.rst deleted file mode 100644 index 64bf188009c8..000000000000 --- a/doc/release/upcoming_changes/29338.change.rst +++ /dev/null @@ -1,9 +0,0 @@ -``__array_interface__`` with NULL pointer changed -------------------------------------------------- -The array interface now accepts NULL pointers (NumPy will do -its own dummy allocation, though). -Previously, these incorrectly triggered an undocumented -scalar path. -In the unlikely event that the scalar path was actually desired, -you can (for now) achieve the previous behavior via the correct -scalar path by not providing a ``data`` field at all. diff --git a/doc/release/upcoming_changes/29396.improvement.rst b/doc/release/upcoming_changes/29396.improvement.rst deleted file mode 100644 index 2cd3d81ad9d8..000000000000 --- a/doc/release/upcoming_changes/29396.improvement.rst +++ /dev/null @@ -1,5 +0,0 @@ -Show unit information in ``__repr__`` for ``datetime64("NaT")`` ------------------------------------------------------------------- -When a `datetime64` object is "Not a Time" (NaT), its ``__repr__`` method now -includes the time unit of the datetime64 type. This makes it consistent with -the behavior of a `timedelta64` object.
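A small illustration of the ``repr`` change described above; the exact output text is an assumption based on the note, mirroring the existing `timedelta64` formatting:

.. code-block:: python

    >>> import numpy as np
    >>> np.timedelta64("NaT", "s")   # the unit has always been shown here
    numpy.timedelta64('NaT','s')
    >>> np.datetime64("NaT", "s")    # now expected to show the unit as well
    numpy.datetime64('NaT','s')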
diff --git a/doc/release/upcoming_changes/29423.new_feature.rst b/doc/release/upcoming_changes/29423.new_feature.rst deleted file mode 100644 index 7e83604b0049..000000000000 --- a/doc/release/upcoming_changes/29423.new_feature.rst +++ /dev/null @@ -1,7 +0,0 @@ -``StringDType`` fill_value support in `numpy.ma.MaskedArray` ------------------------------------------------------------- -Masked arrays now accept and preserve a Python ``str`` as their ``fill_value`` when -using the variable‑width ``StringDType`` (kind ``'T'``), including through slicing -and views. The default is ``'N/A'`` and may be overridden by any valid string. -This fixes issue `gh‑29421 `__ and was -implemented in pull request `gh‑29423 `__. diff --git a/doc/release/upcoming_changes/29537.change.rst b/doc/release/upcoming_changes/29537.change.rst deleted file mode 100644 index 63abbbb5a347..000000000000 --- a/doc/release/upcoming_changes/29537.change.rst +++ /dev/null @@ -1,7 +0,0 @@ -``unique_values`` for complex dtypes may return unsorted data -------------------------------------------------------------- -np.unique now supports hash‐based duplicate removal for complex dtypes. -This enhancement extends the hash‐table algorithm -to all complex types ('c'), and their extended precision variants. -The hash‐based method provides faster extraction of unique values -but does not guarantee that the result will be sorted. diff --git a/doc/release/upcoming_changes/29537.performance.rst b/doc/release/upcoming_changes/29537.performance.rst deleted file mode 100644 index 8c78dc202a2e..000000000000 --- a/doc/release/upcoming_changes/29537.performance.rst +++ /dev/null @@ -1,9 +0,0 @@ -Performance improvements to ``np.unique`` for complex dtypes ------------------------------------------------------------- -The hash-based algorithm for unique extraction now also supports -complex dtypes, offering noticeable performance gains. - -In our benchmarks on complex128 arrays with 200,000 elements, -the hash-based approach was about 1.4–1.5× faster -than the sort-based baseline when there were 20% of unique values, -and about 5× faster when there were 0.2% of unique values. diff --git a/doc/release/upcoming_changes/29550.deprecation.rst b/doc/release/upcoming_changes/29550.deprecation.rst deleted file mode 100644 index ce35477c5010..000000000000 --- a/doc/release/upcoming_changes/29550.deprecation.rst +++ /dev/null @@ -1,6 +0,0 @@ -Assertion and warning control utilities are deprecated ------------------------------------------------------- - -`np.testing.assert_warns` and `np.testing.suppress_warnings` are deprecated. -Use `warnings.catch_warnings`, `warnings.filterwarnings`, ``pytest.warns``, or -``pytest.filterwarnings`` instead. diff --git a/doc/release/upcoming_changes/29569.new_feature.rst b/doc/release/upcoming_changes/29569.new_feature.rst deleted file mode 100644 index ac014c07c7a0..000000000000 --- a/doc/release/upcoming_changes/29569.new_feature.rst +++ /dev/null @@ -1,27 +0,0 @@ -``ndmax`` option for `numpy.array` ----------------------------------------------------- -The ``ndmax`` option is now available for `numpy.array`. -It explicitly limits the maximum number of dimensions created from nested sequences. - -This is particularly useful when creating arrays of list-like objects with ``dtype=object``. -By default, NumPy recurses through all nesting levels to create the highest possible -dimensional array, but this behavior may not be desired when the intent is to preserve -nested structures as objects. 
The ``ndmax`` parameter provides explicit control over -this recursion depth. - -.. code-block:: python - - # Default behavior: Creates a 2D array - >>> a = np.array([[1, 2], [3, 4]], dtype=object) - >>> a - array([[1, 2], - [3, 4]], dtype=object) - >>> a.shape - (2, 2) - - # With ndmax=1: Creates a 1D array - >>> b = np.array([[1, 2], [3, 4]], dtype=object, ndmax=1) - >>> b - array([list([1, 2]), list([3, 4])], dtype=object) - >>> b.shape - (2,) diff --git a/doc/release/upcoming_changes/29642.c_api.rst b/doc/release/upcoming_changes/29642.c_api.rst deleted file mode 100644 index 65c804ef829b..000000000000 --- a/doc/release/upcoming_changes/29642.c_api.rst +++ /dev/null @@ -1,13 +0,0 @@ -The NPY_SORTKIND enum has been enhanced with new values ----------------------------------------------------------- -This is of interest if you are using ``PyArray_Sort`` or ``PyArray_ArgSort``. -We have changed the semantics of the old names in the NPY_SORTKIND enum and -added new ones. The changes are backward compatible, and no recompilation is -needed. The new names of interest are: - -* NPY_SORT_DEFAULT -- default sort (same value as NPY_QUICKSORT) -* NPY_SORT_STABLE -- the sort must be stable (same value as NPY_MERGESORT) -* NPY_SORT_DESCENDING -- the sort must be descending - -The semantic change is that NPY_HEAPSORT is mapped to NPY_QUICKSORT when used. -Note that NPY_SORT_DESCENDING is not yet implemented. diff --git a/doc/release/upcoming_changes/29642.change.rst b/doc/release/upcoming_changes/29642.change.rst deleted file mode 100644 index 4a1706e00bab..000000000000 --- a/doc/release/upcoming_changes/29642.change.rst +++ /dev/null @@ -1,7 +0,0 @@ -Sorting ``kind='heapsort'`` now maps to ``kind='quicksort'`` ------------------------------------------------------------- -It is unlikely that this change will be noticed, but if you do see a change in -execution time or unstable argsort order, that is likely the cause. Please let -us know if there is a performance regression. Congratulate us if it is -improved :) - diff --git a/doc/release/upcoming_changes/29739.change.rst b/doc/release/upcoming_changes/29739.change.rst deleted file mode 100644 index 5d1316a1ba41..000000000000 --- a/doc/release/upcoming_changes/29739.change.rst +++ /dev/null @@ -1,15 +0,0 @@ -``numpy.typing.DTypeLike`` no longer accepts ``None`` ----------------------------------------------------- -The type alias ``numpy.typing.DTypeLike`` no longer accepts ``None``. Instead of - -.. code-block:: python - - dtype: DTypeLike = None - -it should now be - -.. code-block:: python - - dtype: DTypeLike | None = None - diff --git a/doc/release/upcoming_changes/29750.change.rst b/doc/release/upcoming_changes/29750.change.rst deleted file mode 100644 index 2759c08d8349..000000000000 --- a/doc/release/upcoming_changes/29750.change.rst +++ /dev/null @@ -1,5 +0,0 @@ -The ``npymath`` and ``npyrandom`` libraries now have a ``.lib`` rather than a -``.a`` file extension on win-arm64, for compatibility when building with MSVC and -``setuptools``. Please note that using these static libraries is discouraged, -and existing projects that use them should pair them with a matching -compiler toolchain, which is ``clang-cl`` on Windows on Arm.
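A quick sketch of the heapsort remapping described in the notes above; the call is unchanged, only the underlying algorithm differs:

.. code-block:: python

    import numpy as np

    a = np.array([3, 1, 2])
    # 'heapsort' is still accepted but now runs quicksort internally,
    # per the NPY_HEAPSORT -> NPY_QUICKSORT mapping noted above.
    print(np.sort(a, kind="heapsort"))  # [1 2 3]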
diff --git a/doc/release/upcoming_changes/29813.new_feature.rst b/doc/release/upcoming_changes/29813.new_feature.rst deleted file mode 100644 index 690d7ca88799..000000000000 --- a/doc/release/upcoming_changes/29813.new_feature.rst +++ /dev/null @@ -1,6 +0,0 @@ -Warning emitted when using `where` without `out` ------------------------------------------------- -Ufuncs called with a ``where`` mask and without an ``out`` positional or kwarg will -now emit a warning. This usage tends to trip up users who expect some value in -output locations where the mask is ``False`` (the ufunc will not touch those -locations). The warning can be suppressed by using ``out=None``. diff --git a/doc/release/upcoming_changes/29819.improvement.rst b/doc/release/upcoming_changes/29819.improvement.rst deleted file mode 100644 index fa4ac07f2a08..000000000000 --- a/doc/release/upcoming_changes/29819.improvement.rst +++ /dev/null @@ -1,6 +0,0 @@ -Performance increase for scalar calculations -------------------------------------------- -The speed of calculations on scalars has been improved by about a factor of 6 for -ufuncs that take only one input (like ``np.sin(scalar)``), reducing the speed -difference from their ``math`` equivalents from a factor of 19 to 3 (the speed -for arrays is left unchanged). diff --git a/doc/release/upcoming_changes/29836.c_api.rst b/doc/release/upcoming_changes/29836.c_api.rst deleted file mode 100644 index 9ac5478c742a..000000000000 --- a/doc/release/upcoming_changes/29836.c_api.rst +++ /dev/null @@ -1,15 +0,0 @@ -New ``NPY_DT_get_constant`` slot for DType constant retrieval ------------------------------------------------------------- -A new slot ``NPY_DT_get_constant`` has been added to the DType API, allowing -dtype implementations to provide constant values such as machine limits and -special values. The slot function has the signature:: - - int get_constant(PyArray_Descr *descr, int constant_id, void *ptr) - -It returns 1 on success, 0 if the constant is not available, or -1 on error. -The function is always called with the GIL held and may write to unaligned memory. - -Integer constants (marked with the ``1 << 16`` bit) return ``npy_intp`` values, -while floating-point constants return values of the dtype's native type. - -User DTypes can implement this slot to provide `numpy.finfo` values. \ No newline at end of file diff --git a/doc/release/upcoming_changes/29836.improvement.rst b/doc/release/upcoming_changes/29836.improvement.rst deleted file mode 100644 index 0d7df429d125..000000000000 --- a/doc/release/upcoming_changes/29836.improvement.rst +++ /dev/null @@ -1,26 +0,0 @@ -``numpy.finfo`` Refactor ------------------------- -The ``numpy.finfo`` class has been completely refactored to obtain floating-point -constants directly from C compiler macros rather than deriving them at runtime. -This provides better accuracy and platform compatibility, and corrects -several attribute calculations: - -* Constants like ``eps``, ``min``, ``max``, ``smallest_normal``, and - ``smallest_subnormal`` now come directly from standard C macros (``FLT_EPSILON``, - ``DBL_MIN``, etc.), ensuring platform-correct values. - -* The deprecated ``MachAr`` runtime discovery mechanism has been removed. - -* Derived attributes have been corrected to match standard definitions: - ``machep`` and ``negep`` now use ``int(log2(eps))``; ``nexp`` accounts for - all exponent patterns; ``nmant`` excludes the implicit bit; and ``minexp`` - follows the C standard definition.
- -* longdouble constants, specifically ``smallest_normal``, now follow the - C standard definitions for the respective platform. - -* Special handling added for PowerPC's IBM double-double format. - -* New test suite added in ``test_finfo.py`` to validate all - ``finfo`` properties against expected machine arithmetic values for - float16, float32, and float64 types. \ No newline at end of file diff --git a/doc/release/upcoming_changes/29841.expired.rst b/doc/release/upcoming_changes/29841.expired.rst deleted file mode 100644 index 34977cec2f70..000000000000 --- a/doc/release/upcoming_changes/29841.expired.rst +++ /dev/null @@ -1,6 +0,0 @@ -Raise `TypeError` on attempt to convert array with `ndim > 0` to scalar ------------------------------------------------------------------------ -Conversion of an array with `ndim > 0` to a scalar was deprecated in -NumPy 1.25. Now, attempting to do so raises `TypeError`. -Ensure you extract a single element from your array before performing -this operation. diff --git a/doc/release/upcoming_changes/29900.c_api.rst b/doc/release/upcoming_changes/29900.c_api.rst deleted file mode 100644 index b29014ac95fc..000000000000 --- a/doc/release/upcoming_changes/29900.c_api.rst +++ /dev/null @@ -1,5 +0,0 @@ -A new `PyUFunc_AddLoopsFromSpecs` convenience function has been added to the C API. ----------------------------------------------------------------------------------- -This function allows adding multiple ufunc loops from their specs in one call using -a NULL-terminated array of `PyUFunc_LoopSlot` structs. It allows registering -sorting and argsorting loops using the new ArrayMethod API. \ No newline at end of file diff --git a/doc/release/upcoming_changes/29900.new_feature.rst b/doc/release/upcoming_changes/29900.new_feature.rst deleted file mode 100644 index 1799b6043e29..000000000000 --- a/doc/release/upcoming_changes/29900.new_feature.rst +++ /dev/null @@ -1,9 +0,0 @@ -DType sorting and argsorting support the ArrayMethod API ---------------------------------------------------------- -User-defined dtypes can now implement custom sorting and argsorting using -the ArrayMethod API. This mechanism can be used in place of the `PyArray_ArrFuncs` -slots, which may be deprecated in the future. - -The sorting and argsorting methods are registered by passing the arraymethod -specs that implement the operations to the new `PyUFunc_AddLoopsFromSpecs` function. -See the ArrayMethod API documentation for details. \ No newline at end of file diff --git a/doc/release/upcoming_changes/29909.expired.rst b/doc/release/upcoming_changes/29909.expired.rst deleted file mode 100644 index 6a2ee4f53c09..000000000000 --- a/doc/release/upcoming_changes/29909.expired.rst +++ /dev/null @@ -1,10 +0,0 @@ -Remove numpy.linalg.linalg and numpy.fft.helper ------------------------------------------------ - -The following were deprecated in NumPy 2.0 and have been moved to private modules: - -* ``numpy.linalg.linalg`` - Use :mod:`numpy.linalg` instead. - -* ``numpy.fft.helper`` - Use :mod:`numpy.fft` instead.
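A short sketch of querying the constants covered by the ``finfo`` refactor above; the attribute names come from the note, and only the source of the values (C macros) changed:

.. code-block:: python

    import numpy as np

    info = np.finfo(np.float64)
    # These now come directly from C macros such as DBL_EPSILON and DBL_MIN.
    print(info.eps, info.smallest_normal, info.smallest_subnormal, info.max)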
diff --git a/doc/release/upcoming_changes/29947.improvement.rst b/doc/release/upcoming_changes/29947.improvement.rst deleted file mode 100644 index 99c67e598347..000000000000 --- a/doc/release/upcoming_changes/29947.improvement.rst +++ /dev/null @@ -1,7 +0,0 @@ -Multiple axes are now supported in ``numpy.trim_zeros`` -------------------------------------------------------- -The ``axis`` argument of `numpy.trim_zeros` now accepts a sequence; for example -``np.trim_zeros(x, axis=(0, 1))`` will trim the zeros from a multi-dimensional -array ``x`` along axes 0 and 1. This fixes issue -`gh‑29945 `__ and was implemented -in pull request `gh‑29947 `__. diff --git a/doc/release/upcoming_changes/29973.expired.rst b/doc/release/upcoming_changes/29973.expired.rst deleted file mode 100644 index 5b51cb7cf428..000000000000 --- a/doc/release/upcoming_changes/29973.expired.rst +++ /dev/null @@ -1,12 +0,0 @@ -Remove ``interpolation`` parameter from quantile and percentile functions -------------------------------------------------------------------------- - -The ``interpolation`` parameter was deprecated in NumPy 1.22.0 and has been -removed from the following functions: - -* ``numpy.percentile`` -* ``numpy.nanpercentile`` -* ``numpy.quantile`` -* ``numpy.nanquantile`` - -Use the ``method`` parameter instead. diff --git a/doc/release/upcoming_changes/29978.expired.rst b/doc/release/upcoming_changes/29978.expired.rst deleted file mode 100644 index e0f4de1d8715..000000000000 --- a/doc/release/upcoming_changes/29978.expired.rst +++ /dev/null @@ -1,4 +0,0 @@ -Removed ``numpy.in1d`` ----------------------- - -``numpy.in1d`` has been deprecated since NumPy 2.0 and is now removed in favor of ``numpy.isin``. diff --git a/doc/release/upcoming_changes/29980.expired.rst b/doc/release/upcoming_changes/29980.expired.rst deleted file mode 100644 index 563ba8aa6929..000000000000 --- a/doc/release/upcoming_changes/29980.expired.rst +++ /dev/null @@ -1,5 +0,0 @@ -Removed ``numpy.ndindex.ndincr()`` ----------------------------------- - -The ``ndindex.ndincr()`` method has been deprecated since NumPy 1.20 and is now removed; -use ``next(ndindex)`` instead. diff --git a/doc/release/upcoming_changes/29984.expired.rst b/doc/release/upcoming_changes/29984.expired.rst deleted file mode 100644 index bcce0dedd4a7..000000000000 --- a/doc/release/upcoming_changes/29984.expired.rst +++ /dev/null @@ -1,5 +0,0 @@ -Removed ``fix_imports`` parameter from ``numpy.save`` ------------------------------------------------------ - -The ``fix_imports`` parameter was deprecated in NumPy 2.1.0 and is now removed. -This flag has been ignored since NumPy 1.17 and was only needed to support loading files in Python 2 that were written in Python 3. diff --git a/doc/release/upcoming_changes/29986.expired.rst b/doc/release/upcoming_changes/29986.expired.rst deleted file mode 100644 index 2a6b44380dd4..000000000000 --- a/doc/release/upcoming_changes/29986.expired.rst +++ /dev/null @@ -1,10 +0,0 @@ -Removal of four undocumented ``ndarray.ctypes`` methods -------------------------------------------------------- -Four undocumented methods of the ``ndarray.ctypes`` object have been removed: - -* ``_ctypes.get_data()`` (use ``_ctypes.data`` instead) -* ``_ctypes.get_shape()`` (use ``_ctypes.shape`` instead) -* ``_ctypes.get_strides()`` (use ``_ctypes.strides`` instead) -* ``_ctypes.get_as_parameter()`` (use ``_ctypes._as_parameter_`` instead) - -These methods have been deprecated since NumPy 1.21. 
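A minimal migration sketch for the removed ``ndarray.ctypes`` methods listed above, using the attribute forms the note recommends:

.. code-block:: python

    import numpy as np

    a = np.zeros((2, 3))
    ptr = a.ctypes.data              # was a.ctypes.get_data()
    shape = a.ctypes.shape           # was a.ctypes.get_shape()
    strides = a.ctypes.strides       # was a.ctypes.get_strides()
    param = a.ctypes._as_parameter_  # was a.ctypes.get_as_parameter()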
diff --git a/doc/release/upcoming_changes/29994.expired.rst b/doc/release/upcoming_changes/29994.expired.rst deleted file mode 100644 index 11331da6e810..000000000000 --- a/doc/release/upcoming_changes/29994.expired.rst +++ /dev/null @@ -1,6 +0,0 @@ -Remove ``newshape`` parameter from ``numpy.reshape`` ----------------------------------------------------- -The ``newshape`` parameter was deprecated in NumPy 2.1.0 and has been -removed from ``numpy.reshape``. Pass the shape positionally or use the -``shape=`` keyword instead. diff --git a/doc/release/upcoming_changes/29997.expired.rst b/doc/release/upcoming_changes/29997.expired.rst deleted file mode 100644 index 6bdfa792e4e6..000000000000 --- a/doc/release/upcoming_changes/29997.expired.rst +++ /dev/null @@ -1,9 +0,0 @@ -Removal of deprecated functions and arguments ---------------------------------------------- - -The following long-deprecated APIs have been removed: - -* ``numpy.trapz`` — deprecated since NumPy 2.0 (2023-08-18). Use ``numpy.trapezoid`` or - ``scipy.integrate`` functions instead. -* ``disp`` function — deprecated since the 2.0 release and no longer functional. Use your own printing function instead. -* ``bias`` and ``ddof`` arguments in ``numpy.corrcoef`` — these had no effect since NumPy 1.10. diff --git a/doc/release/upcoming_changes/30021.expired.rst b/doc/release/upcoming_changes/30021.expired.rst deleted file mode 100644 index 31ca300ce35f..000000000000 --- a/doc/release/upcoming_changes/30021.expired.rst +++ /dev/null @@ -1,5 +0,0 @@ -Remove ``delimitor`` parameter from ``numpy.ma.mrecords.fromtextfile()`` ------------------------------------------------------------------------- -The ``delimitor`` parameter was deprecated in NumPy 1.22.0 and has been -removed from ``numpy.ma.mrecords.fromtextfile()``. Use ``delimiter`` instead. diff --git a/doc/release/upcoming_changes/30068.expired.rst b/doc/release/upcoming_changes/30068.expired.rst deleted file mode 100644 index 5d41c98b3260..000000000000 --- a/doc/release/upcoming_changes/30068.expired.rst +++ /dev/null @@ -1,12 +0,0 @@ -``numpy.array2string`` and ``numpy.sum`` deprecations finalized ---------------------------------------------------------------- - -The following long-deprecated APIs have been removed or converted to errors: - -* The ``style`` parameter has been removed from ``numpy.array2string``. - This argument had no effect since NumPy 1.14.0. Any arguments following - it, such as ``formatter``, have now been made keyword-only. - -* Calling ``np.sum(generator)`` directly on a generator object now raises a `TypeError`. - This behavior was deprecated in NumPy 1.15.0. Use ``np.sum(np.fromiter(generator))`` - or the Python ``sum`` builtin instead. diff --git a/doc/release/upcoming_changes/30147.compatibility.rst b/doc/release/upcoming_changes/30147.compatibility.rst deleted file mode 100644 index c5d13323fe6e..000000000000 --- a/doc/release/upcoming_changes/30147.compatibility.rst +++ /dev/null @@ -1,4 +0,0 @@ -* Type-checkers will no longer accept calls to `numpy.arange` with - ``start`` as a keyword argument. This was done for compatibility with - the Array API standard. At runtime it is still possible to use - `numpy.arange` with ``start`` as a keyword argument.
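A minimal sketch of the replacement patterns named in the removal notes above (``trapezoid`` for ``trapz``, ``fromiter`` for summing generators, and positional ``start`` for ``arange``):

.. code-block:: python

    import numpy as np

    x = np.linspace(0.0, 1.0, 5)
    area = np.trapezoid(x**2, x)  # replaces the removed np.trapz

    gen = (i * i for i in range(10))
    total = np.sum(np.fromiter(gen, dtype=np.int64))  # np.sum(generator) now errors

    idx = np.arange(0, 10)  # pass ``start`` positionally for type-checker compatibility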
diff --git a/doc/release/upcoming_changes/30168.deprecation.rst b/doc/release/upcoming_changes/30168.deprecation.rst deleted file mode 100644 index 81673397590d..000000000000 --- a/doc/release/upcoming_changes/30168.deprecation.rst +++ /dev/null @@ -1,5 +0,0 @@ -``np.fix`` is pending deprecation ---------------------------------- -The `numpy.fix` function will be deprecated in a future release. It is recommended to use -`numpy.trunc` instead, as it provides the same functionality of truncating decimal values to their -integer parts. Static type checkers might already report a warning for the use of `numpy.fix`. diff --git a/doc/release/upcoming_changes/30179.new_feature.rst b/doc/release/upcoming_changes/30179.new_feature.rst deleted file mode 100644 index e19815289351..000000000000 --- a/doc/release/upcoming_changes/30179.new_feature.rst +++ /dev/null @@ -1,13 +0,0 @@ -New ``__numpy_dtype__`` protocol --------------------------------- -NumPy now has a new ``__numpy_dtype__`` protocol. NumPy will check -for this attribute when converting to a NumPy dtype via ``np.dtype(obj)`` -or any ``dtype=`` argument. - -Downstream projects are encouraged to implement this for all dtype-like -objects which may previously have used a ``.dtype`` attribute that returned -a NumPy dtype. -We expect to deprecate ``.dtype`` in the future to prevent interpreting -array-like objects with a ``.dtype`` attribute as a dtype. -If you wish, you can implement ``__numpy_dtype__`` to ensure an earlier -warning or error (``.dtype`` is ignored if this is found). diff --git a/doc/release/upcoming_changes/30208.highlight.rst b/doc/release/upcoming_changes/30208.highlight.rst deleted file mode 100644 index c13c46c056cc..000000000000 --- a/doc/release/upcoming_changes/30208.highlight.rst +++ /dev/null @@ -1,2 +0,0 @@ -* Runtime signature introspection support has been significantly improved. See the - corresponding improvement note for details. diff --git a/doc/release/upcoming_changes/30208.improvement.rst b/doc/release/upcoming_changes/30208.improvement.rst deleted file mode 100644 index ad9faaedfb6b..000000000000 --- a/doc/release/upcoming_changes/30208.improvement.rst +++ /dev/null @@ -1,12 +0,0 @@ -Runtime signature introspection support has been significantly improved ------------------------------------------------------------------------ -Many NumPy functions, classes, and methods that previously raised ``ValueError`` when passed -to ``inspect.signature()`` now return meaningful signatures. This improves support for runtime type -checking, IDE autocomplete, documentation generation, and runtime introspection capabilities across -the NumPy API. - -Over three hundred classes and functions have been updated in total, including, but not limited to, -core classes such as `ndarray`, `generic`, `dtype`, `ufunc`, `broadcast`, `nditer`, etc., -most methods of `ndarray` and scalar types, array constructor functions (`array`, `empty`, -`arange`, `fromiter`, etc.), all :ref:`ufuncs`, and many other commonly used functions, including -`dot`, `concat`, `where`, `bincount`, `can_cast`, and numerous others.
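A brief sketch of the introspection improvement described above; the exact signature text varies per callable and is not guaranteed by the note:

.. code-block:: python

    import inspect
    import numpy as np

    # Previously many NumPy callables raised ValueError here; ufuncs such
    # as np.add now return a meaningful Signature object.
    print(inspect.signature(np.add))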
diff --git a/doc/release/upcoming_changes/30282.deprecation.rst b/doc/release/upcoming_changes/30282.deprecation.rst deleted file mode 100644 index e9aac9ae17d5..000000000000 --- a/doc/release/upcoming_changes/30282.deprecation.rst +++ /dev/null @@ -1,5 +0,0 @@ -in-place modification of ``ndarray.shape`` is pending deprecation ------------------------------------------------------------------ -Setting the `ndarray.shape` attribute directly will be deprecated in a future release. -Instead of modifying the shape in place, it is recommended to use the `numpy.reshape` function. -Static type checkers might already report a warning for assignments to `ndarray.shape`. diff --git a/doc/release/upcoming_changes/30284.deprecation.rst b/doc/release/upcoming_changes/30284.deprecation.rst deleted file mode 100644 index 8803f3225cd3..000000000000 --- a/doc/release/upcoming_changes/30284.deprecation.rst +++ /dev/null @@ -1,3 +0,0 @@ -Deprecation of ``numpy.lib.user_array.container`` -------------------------------------------------- -The ``numpy.lib.user_array.container`` class is deprecated and will be removed in a future version. diff --git a/doc/source/release.rst b/doc/source/release.rst index 190d0512faeb..5842fa9fc61a 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,6 +5,7 @@ Release notes .. toctree:: :maxdepth: 2 + 2.5.0 2.4.0 2.3.5 2.3.4 diff --git a/doc/source/release/2.5.0-notes.rst b/doc/source/release/2.5.0-notes.rst new file mode 100644 index 000000000000..1c07e859a7b9 --- /dev/null +++ b/doc/source/release/2.5.0-notes.rst @@ -0,0 +1,19 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.5.0 Release Notes +========================== + + +Highlights +========== + +*We'll choose highlights for this release near the end of the release cycle.* + + +.. if release snippets have been incorporated already, uncomment the follow + line (leave the `.. include:: directive) + +.. **Content from release note snippets in doc/release/upcoming_changes:** + +.. include:: notes-towncrier.rst diff --git a/numpy/_core/code_generators/cversions.txt b/numpy/_core/code_generators/cversions.txt index 3a480dfd4ab3..b058875d0455 100644 --- a/numpy/_core/code_generators/cversions.txt +++ b/numpy/_core/code_generators/cversions.txt @@ -83,4 +83,5 @@ # Version 21 (NumPy 2.4.0) # Add 'same_value' casting, header additions. # General loop registration for ufuncs, sort, and argsort +# Version 21 (NumPy 2.5.0) No change 0x00000015 = fbd24fc5b2ba4f7cd3606ec6128de7a5 diff --git a/numpy/_core/include/numpy/numpyconfig.h b/numpy/_core/include/numpy/numpyconfig.h index c129a3aceb6d..0bd934e37a8d 100644 --- a/numpy/_core/include/numpy/numpyconfig.h +++ b/numpy/_core/include/numpy/numpyconfig.h @@ -85,6 +85,7 @@ #define NPY_2_2_API_VERSION 0x00000013 #define NPY_2_3_API_VERSION 0x00000014 #define NPY_2_4_API_VERSION 0x00000015 +#define NPY_2_5_API_VERSION 0x00000016 /* @@ -107,10 +108,11 @@ * default, or narrow it down if they wish to use newer API. If you adjust * this, consider the Python version support (example for 1.25.x): * - * NumPy 1.25.x supports Python: 3.9 3.10 3.11 (3.12) - * NumPy 1.19.x supports Python: 3.6 3.7 3.8 3.9 - * NumPy 1.17.x supports Python: 3.5 3.6 3.7 3.8 - * NumPy 1.15.x supports Python: ... 3.6 3.7 + * NumPy 1.26.x supports Python: 3.9 3.10 3.11 3.12 + * NumPy 1.25.x supports Python: 3.9 3.10 3.11 + * NumPy 1.19.x supports Python: 3.6 3.7 3.8 3.9 + * NumPy 1.17.x supports Python: 3.5 3.6 3.7 3.8 + * NumPy 1.15.x supports Python: ... 
3.6 3.7 * * Users of the stable ABI may wish to target the last Python that is not * end of life. This would be 3.8 at NumPy 1.25 release time. @@ -124,8 +126,8 @@ /* user provided a target version, use it */ #define NPY_FEATURE_VERSION NPY_TARGET_VERSION #else - /* Use the default (increase when dropping Python 3.11 support) */ - #define NPY_FEATURE_VERSION NPY_1_23_API_VERSION + /* Use the default (increase when dropping Python 3.12 support) */ + #define NPY_FEATURE_VERSION NPY_1_25_API_VERSION #endif /* Sanity check the (requested) feature version */ @@ -177,6 +179,8 @@ #define NPY_FEATURE_VERSION_STRING "2.3" #elif NPY_FEATURE_VERSION == NPY_2_4_API_VERSION #define NPY_FEATURE_VERSION_STRING "2.4" +#elif NPY_FEATURE_VERSION == NPY_2_5_API_VERSION + #define NPY_FEATURE_VERSION_STRING "2.5" #else #error "Missing version string define for new NumPy version." #endif diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 3a0b52c3b079..b5695c8f3cde 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -51,6 +51,7 @@ C_ABI_VERSION = '0x02000000' # 0x00000013 - 2.2.x # 0x00000014 - 2.3.x # 0x00000015 - 2.4.x +# 0x00000015 - 2.5.x C_API_VERSION = '0x00000015' # Check whether we have a mismatch between the set C API VERSION and the diff --git a/pyproject.toml b/pyproject.toml index 0dba95b04e84..e7830c5248a1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ requires = [ [project] name = "numpy" -version = "2.4.0.dev0" +version = "2.5.0.dev0" description = "Fundamental package for array computing in Python" authors = [{name = "Travis E. Oliphant et al."}] maintainers = [ @@ -22,7 +22,6 @@ classifiers = [ 'Programming Language :: C', 'Programming Language :: Python', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', 'Programming Language :: Python :: 3.13', 'Programming Language :: Python :: 3.14', From d6fd351cc8544243cc164cdad265f2df0580fcc6 Mon Sep 17 00:00:00 2001 From: Nathaniel Starkman Date: Sun, 30 Nov 2025 16:43:35 -0500 Subject: [PATCH 0932/1018] TYP: add overloads for `generic.__getitem__` (#30335) Co-authored-by: Joren Hammudoglu --- numpy/__init__.pyi | 46 +++++++++++ numpy/_core/records.pyi | 3 +- numpy/typing/tests/data/pass/scalars.py | 13 ++++ numpy/typing/tests/data/reveal/scalars.pyi | 88 ++++++++++++++++++++++ 4 files changed, 149 insertions(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index e19b130d73b6..975a04857db7 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3678,6 +3678,29 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @overload def __array__(self, dtype: _DTypeT, /) -> ndarray[tuple[()], _DTypeT]: ... + # + @overload + def __getitem__(self, key: tuple[()], /) -> Self: ... + @overload + def __getitem__( + self, key: EllipsisType | tuple[EllipsisType], / + ) -> ndarray[tuple[()], dtype[Self]]: ... + @overload + def __getitem__( + self, key: None | tuple[None], / + ) -> ndarray[tuple[int], dtype[Self]]: ... + @overload + def __getitem__( + self, key: tuple[None, None], / + ) -> ndarray[tuple[int, int], dtype[Self]]: ... + @overload + def __getitem__( + self, key: tuple[None, None, None], / + ) -> ndarray[tuple[int, int, int], dtype[Self]]: ... + @overload # Limited support for (None,) * N > 3 + def __getitem__(self, key: tuple[None, ...], /) -> NDArray[Self]: ... 
+ + # @overload def __array_wrap__( self, @@ -5708,10 +5731,33 @@ class void(flexible[bytes | tuple[Any, ...]]): # type: ignore[misc] @overload def __new__(cls, length_or_data: object, /, dtype: _DTypeLikeVoid) -> Self: ... + # + @overload + def __getitem__(self, key: tuple[()], /) -> Self: ... + @overload + def __getitem__( + self, key: EllipsisType | tuple[EllipsisType], / + ) -> ndarray[tuple[()], dtype[Self]]: ... + @overload + def __getitem__( + self, key: None | tuple[None], / + ) -> ndarray[tuple[int], dtype[Self]]: ... + @overload + def __getitem__( + self, key: tuple[None, None], / + ) -> ndarray[tuple[int, int], dtype[Self]]: ... + @overload + def __getitem__( + self, key: tuple[None, None, None], / + ) -> ndarray[tuple[int, int, int], dtype[Self]]: ... + @overload # Limited support for (None,) * N > 3 + def __getitem__(self, key: tuple[None, ...], /) -> NDArray[Self]: ... @overload def __getitem__(self, key: str | SupportsIndex, /) -> Any: ... @overload def __getitem__(self, key: list[str], /) -> void: ... + + # def __setitem__(self, key: str | list[str] | SupportsIndex, value: ArrayLike, /) -> None: ... def setfield(self, val: ArrayLike, dtype: DTypeLike, offset: int = ...) -> None: ... diff --git a/numpy/_core/records.pyi b/numpy/_core/records.pyi index 511a6a764829..2b133630cf35 100644 --- a/numpy/_core/records.pyi +++ b/numpy/_core/records.pyi @@ -64,7 +64,8 @@ class record(np.void): # type: ignore[misc] def __getattribute__(self, attr: str, /) -> Any: ... def __setattr__(self, attr: str, val: ArrayLike, /) -> None: ... - @overload + # + @overload # type: ignore[override] def __getitem__(self, key: str | SupportsIndex, /) -> Incomplete: ... @overload def __getitem__(self, key: list[str], /) -> record: ... diff --git a/numpy/typing/tests/data/pass/scalars.py b/numpy/typing/tests/data/pass/scalars.py index eeb707b255e1..133c5627e6e5 100644 --- a/numpy/typing/tests/data/pass/scalars.py +++ b/numpy/typing/tests/data/pass/scalars.py @@ -247,3 +247,16 @@ def __float__(self) -> float: c16.reshape(1) U.reshape(1) S.reshape(1) + +# Indexing scalars with any of {None, ..., tuple[()], tuple[None], tuple[...], +# tuple[None, ...]} should be valid +b[None] +i8[None] +u8[None] +f8[None] +c16[None] +c16[...] 
+c16[()] +c16[(None,)] +c16[(...,)] +c16[None, None] diff --git a/numpy/typing/tests/data/reveal/scalars.pyi b/numpy/typing/tests/data/reveal/scalars.pyi index 67444e33dfc3..ab926baa7f15 100644 --- a/numpy/typing/tests/data/reveal/scalars.pyi +++ b/numpy/typing/tests/data/reveal/scalars.pyi @@ -44,6 +44,94 @@ assert_type(c16.imag, np.float64) assert_type(np.str_("foo"), np.str_) +# Indexing +assert_type(b[()], np.bool) +assert_type(i8[()], np.int64) +assert_type(u8[()], np.uint64) +assert_type(f8[()], np.float64) +assert_type(c8[()], np.complex64) +assert_type(c16[()], np.complex128) +assert_type(U[()], np.str_) +assert_type(S[()], np.bytes_) +assert_type(V[()], np.void) + +assert_type(b[...], np.ndarray[tuple[()], np.dtype[np.bool]]) +assert_type(b[(...,)], np.ndarray[tuple[()], np.dtype[np.bool]]) +assert_type(i8[...], np.ndarray[tuple[()], np.dtype[np.int64]]) +assert_type(i8[(...,)], np.ndarray[tuple[()], np.dtype[np.int64]]) +assert_type(u8[...], np.ndarray[tuple[()], np.dtype[np.uint64]]) +assert_type(u8[(...,)], np.ndarray[tuple[()], np.dtype[np.uint64]]) +assert_type(f8[...], np.ndarray[tuple[()], np.dtype[np.float64]]) +assert_type(f8[(...,)], np.ndarray[tuple[()], np.dtype[np.float64]]) +assert_type(c8[...], np.ndarray[tuple[()], np.dtype[np.complex64]]) +assert_type(c8[(...,)], np.ndarray[tuple[()], np.dtype[np.complex64]]) +assert_type(c16[...], np.ndarray[tuple[()], np.dtype[np.complex128]]) +assert_type(c16[(...,)], np.ndarray[tuple[()], np.dtype[np.complex128]]) +assert_type(U[...], np.ndarray[tuple[()], np.dtype[np.str_]]) +assert_type(U[(...,)], np.ndarray[tuple[()], np.dtype[np.str_]]) +assert_type(S[...], np.ndarray[tuple[()], np.dtype[np.bytes_]]) +assert_type(S[(...,)], np.ndarray[tuple[()], np.dtype[np.bytes_]]) +assert_type(V[...], np.ndarray[tuple[()], np.dtype[np.void]]) +assert_type(V[(...,)], np.ndarray[tuple[()], np.dtype[np.void]]) + +None1 = (None,) +None2 = (None, None) +None3 = (None, None, None) +None4 = (None, None, None, None) + +assert_type(b[None], np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(b[None1], np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(b[None2], np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(b[None3], np.ndarray[tuple[int, int, int], np.dtype[np.bool]]) +assert_type(b[None4], np.ndarray[tuple[Any, ...], np.dtype[np.bool]]) + +assert_type(u8[None], np.ndarray[tuple[int], np.dtype[np.uint64]]) +assert_type(u8[None1], np.ndarray[tuple[int], np.dtype[np.uint64]]) +assert_type(u8[None2], np.ndarray[tuple[int, int], np.dtype[np.uint64]]) +assert_type(u8[None3], np.ndarray[tuple[int, int, int], np.dtype[np.uint64]]) +assert_type(u8[None4], np.ndarray[tuple[Any, ...], np.dtype[np.uint64]]) + +assert_type(i8[None], np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(i8[None1], np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(i8[None2], np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(i8[None3], np.ndarray[tuple[int, int, int], np.dtype[np.int64]]) +assert_type(i8[None4], np.ndarray[tuple[Any, ...], np.dtype[np.int64]]) + +assert_type(f8[None], np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(f8[None1], np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(f8[None2], np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(f8[None3], np.ndarray[tuple[int, int, int], np.dtype[np.float64]]) +assert_type(f8[None4], np.ndarray[tuple[Any, ...], np.dtype[np.float64]]) + +assert_type(c8[None], np.ndarray[tuple[int], np.dtype[np.complex64]]) +assert_type(c8[None1], 
np.ndarray[tuple[int], np.dtype[np.complex64]]) +assert_type(c8[None2], np.ndarray[tuple[int, int], np.dtype[np.complex64]]) +assert_type(c8[None3], np.ndarray[tuple[int, int, int], np.dtype[np.complex64]]) +assert_type(c8[None4], np.ndarray[tuple[Any, ...], np.dtype[np.complex64]]) + +assert_type(c16[None], np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(c16[None1], np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(c16[None2], np.ndarray[tuple[int, int], np.dtype[np.complex128]]) +assert_type(c16[None3], np.ndarray[tuple[int, int, int], np.dtype[np.complex128]]) +assert_type(c16[None4], np.ndarray[tuple[Any, ...], np.dtype[np.complex128]]) + +assert_type(U[None], np.ndarray[tuple[int], np.dtype[np.str_]]) +assert_type(U[None1], np.ndarray[tuple[int], np.dtype[np.str_]]) +assert_type(U[None2], np.ndarray[tuple[int, int], np.dtype[np.str_]]) +assert_type(U[None3], np.ndarray[tuple[int, int, int], np.dtype[np.str_]]) +assert_type(U[None4], np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) + +assert_type(S[None], np.ndarray[tuple[int], np.dtype[np.bytes_]]) +assert_type(S[None1], np.ndarray[tuple[int], np.dtype[np.bytes_]]) +assert_type(S[None2], np.ndarray[tuple[int, int], np.dtype[np.bytes_]]) +assert_type(S[None3], np.ndarray[tuple[int, int, int], np.dtype[np.bytes_]]) +assert_type(S[None4], np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) + +assert_type(V[None], np.ndarray[tuple[int], np.dtype[np.void]]) +assert_type(V[None1], np.ndarray[tuple[int], np.dtype[np.void]]) +assert_type(V[None2], np.ndarray[tuple[int, int], np.dtype[np.void]]) +assert_type(V[None3], np.ndarray[tuple[int, int, int], np.dtype[np.void]]) +assert_type(V[None4], np.ndarray[tuple[Any, ...], np.dtype[np.void]]) assert_type(V[0], Any) assert_type(V["field1"], Any) assert_type(V[["field1", "field2"]], np.void) From 7ed9d07c75dcc0f29855b51a844a7ff3a0aa81e8 Mon Sep 17 00:00:00 2001 From: Karan Singh Date: Mon, 1 Dec 2025 05:36:28 +0530 Subject: [PATCH 0933/1018] DOC: Fix typos and NumPy branding in 1.11.0 release notes (#30336) --- doc/source/release/1.11.0-notes.rst | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/doc/source/release/1.11.0-notes.rst b/doc/source/release/1.11.0-notes.rst index f6fe84a4b17f..4700e37203ce 100644 --- a/doc/source/release/1.11.0-notes.rst +++ b/doc/source/release/1.11.0-notes.rst @@ -27,11 +27,11 @@ Details of these improvements can be found below. Build System Changes ==================== -* Numpy now uses ``setuptools`` for its builds instead of plain distutils. +* NumPy now uses ``setuptools`` for its builds instead of plain distutils. This fixes usage of ``install_requires='numpy'`` in the ``setup.py`` files of - projects that depend on Numpy (see gh-6551). It potentially affects the way - that build/install methods for Numpy itself behave though. Please report any - unexpected behavior on the Numpy issue tracker. + projects that depend on NumPy (see gh-6551). It potentially affects the way + that build/install methods for NumPy itself behave though. Please report any + unexpected behavior on the NumPy issue tracker. * Bento build support and related files have been removed. * Single file build support and related files have been removed. @@ -39,7 +39,7 @@ Build System Changes Future Changes ============== -The following changes are scheduled for Numpy 1.12.0. +The following changes are scheduled for NumPy 1.12.0. * Support for Python 2.6, 3.2, and 3.3 will be dropped. * Relaxed stride checking will become the default. 
See the 1.8.0 release @@ -61,7 +61,7 @@ The following changes are scheduled for Numpy 1.12.0. In a future release the following changes will be made. * The ``rand`` function exposed in ``numpy.testing`` will be removed. That - function is left over from early Numpy and was implemented using the + function is left over from early NumPy and was implemented using the Python random module. The random number generators from ``numpy.random`` should be used instead. * The ``ndarray.view`` method will only allow c_contiguous arrays to be @@ -124,7 +124,7 @@ non-integers for degree specification. *np.dot* now raises ``TypeError`` instead of ``ValueError`` ----------------------------------------------------------- -This behaviour mimics that of other functions such as ``np.inner``. If the two +This behavior mimics that of other functions such as ``np.inner``. If the two arguments cannot be cast to a common type, it could have raised a ``TypeError`` or ``ValueError`` depending on their order. Now, ``np.dot`` will now always raise a ``TypeError``. @@ -194,7 +194,7 @@ New Features * ``f2py.compile`` has a new ``extension`` keyword parameter that allows the fortran extension to be specified for generated temp files. For instance, - the files can be specifies to be ``*.f90``. The ``verbose`` argument is + the files can be specified to be ``*.f90``. The ``verbose`` argument is also activated, it was previously ignored. * A ``dtype`` parameter has been added to ``np.random.randint`` @@ -254,7 +254,7 @@ Memory and speed improvements for masked arrays ----------------------------------------------- Creating a masked array with ``mask=True`` (resp. ``mask=False``) now uses ``np.ones`` (resp. ``np.zeros``) to create the mask, which is faster and -avoid a big memory peak. Another optimization was done to avoid a memory +avoids a big memory peak. Another optimization was done to avoid a memory peak and useless computations when printing a masked array. ``ndarray.tofile`` now uses fallocate on linux @@ -304,13 +304,13 @@ Instead, ``np.broadcast`` can be used in all cases. ``np.trace`` now respects array subclasses ------------------------------------------ -This behaviour mimics that of other functions such as ``np.diagonal`` and +This behavior mimics that of other functions such as ``np.diagonal`` and ensures, e.g., that for masked arrays ``np.trace(ma)`` and ``ma.trace()`` give the same result. ``np.dot`` now raises ``TypeError`` instead of ``ValueError`` ------------------------------------------------------------- -This behaviour mimics that of other functions such as ``np.inner``. If the two +This behavior is now consistent with other functions such as ``np.inner``. If the two arguments cannot be cast to a common type, it could have raised a ``TypeError`` or ``ValueError`` depending on their order. Now, ``np.dot`` will now always raise a ``TypeError``. 
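A small sketch of the ``np.dot`` behavior the corrected 1.11.0 note describes, assuming two arrays with no common type:

.. code-block:: python

    import numpy as np

    try:
        np.dot(np.array(["a"]), np.array([1.0]))  # no common type exists
    except TypeError as exc:
        print("TypeError:", exc)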
From 50997e3c3b1f9331d3b03e3867c4896422278e52 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 1 Dec 2025 09:50:21 +0100 Subject: [PATCH 0934/1018] Revert "MAINT: undo change to `fromstring` text signature for 2.4.0" (#30236) --- numpy/_core/_add_newdocs.py | 4 +++- numpy/_core/tests/test_multiarray.py | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 6b937389defe..eded752b2721 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -1482,9 +1482,11 @@ """) -# Signature can be updated for 2.5.0 release, see gh-30235 for details add_newdoc('numpy._core.multiarray', 'fromstring', """ + fromstring(string, dtype=None, count=-1, *, sep, like=None) + -- + fromstring(string, dtype=float, count=-1, *, sep, like=None) A new 1-D array initialized from text data in a string. diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 218fcc79592e..aa2dee8d58bb 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -10993,6 +10993,7 @@ def test_c_func_dispatcher_signature(self, func): (np.fromfile, ("file", "dtype", "count", "sep", "offset", "like")), (np.fromiter, ("iter", "dtype", "count", "like")), (np.frompyfunc, ("func", "nin", "nout", "kwargs")), + (np.fromstring, ("string", "dtype", "count", "sep", "like")), (np.nested_iters, ( "op", "axes", "flags", "op_flags", "op_dtypes", "order", "casting", "buffersize", From 0c6c15b468ca9201b1e74b6a2e84e71139cba181 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Dec 2025 12:32:55 -0700 Subject: [PATCH 0935/1018] MAINT: Bump int128/hide-comment-action from 1.47.0 to 1.48.0 (#30346) Bumps [int128/hide-comment-action](https://github.com/int128/hide-comment-action) from 1.47.0 to 1.48.0. - [Release notes](https://github.com/int128/hide-comment-action/releases) - [Commits](https://github.com/int128/hide-comment-action/compare/3580fff2b9b7c0e16466686530622f0eed93132a...9cdf7fd49089308931b20966baee90f4aadb9f6e) --- updated-dependencies: - dependency-name: int128/hide-comment-action dependency-version: 1.48.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/mypy_primer_comment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mypy_primer_comment.yml b/.github/workflows/mypy_primer_comment.yml index 9f4cda234717..13eda8c230b3 100644 --- a/.github/workflows/mypy_primer_comment.yml +++ b/.github/workflows/mypy_primer_comment.yml @@ -49,7 +49,7 @@ jobs: return parseInt(fs.readFileSync("pr_number.txt", { encoding: "utf8" })) - name: Hide old comments - uses: int128/hide-comment-action@3580fff2b9b7c0e16466686530622f0eed93132a # v1.47.0 + uses: int128/hide-comment-action@9cdf7fd49089308931b20966baee90f4aadb9f6e # v1.48.0 with: token: ${{ secrets.GITHUB_TOKEN }} issue-number: ${{ steps.get-pr-number.outputs.result }} From 6b8ad75b19be63b9619fbf5c2290cfb970051222 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Dec 2025 12:33:26 -0700 Subject: [PATCH 0936/1018] MAINT: Bump github/codeql-action from 4.31.5 to 4.31.6 (#30345) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 4.31.5 to 4.31.6. 
- [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/fdbfb4d2750291e159f0156def62b853c2798ca2...fe4161a26a8629af62121b670040955b330f9af2) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.31.6 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index c758d05e43ab..cbeb3ca07349 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@fdbfb4d2750291e159f0156def62b853c2798ca2 # v4.31.5 + uses: github/codeql-action/init@fe4161a26a8629af62121b670040955b330f9af2 # v4.31.6 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@fdbfb4d2750291e159f0156def62b853c2798ca2 # v4.31.5 + uses: github/codeql-action/autobuild@fe4161a26a8629af62121b670040955b330f9af2 # v4.31.6 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@fdbfb4d2750291e159f0156def62b853c2798ca2 # v4.31.5 + uses: github/codeql-action/analyze@fe4161a26a8629af62121b670040955b330f9af2 # v4.31.6 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index d8daf8779d92..4f9bf10985a5 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@fdbfb4d2750291e159f0156def62b853c2798ca2 # v2.1.27 + uses: github/codeql-action/upload-sarif@fe4161a26a8629af62121b670040955b330f9af2 # v2.1.27 with: sarif_file: results.sarif From fb0578d3973e926eee2e68ce9fd9996459b8534b Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 1 Dec 2025 15:09:40 -0700 Subject: [PATCH 0937/1018] MAINT: Update circleci python to 3.12. (#30348) We could go to 3.14, but I would prefer to let it settle a bit longer. 
[skip actions] [skip cirrus] --- .circleci/config.yml | 4 ++-- doc/source/try_examples.json | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 62c29203dac1..36f0774131a5 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -9,7 +9,7 @@ _defaults: &defaults docker: # CircleCI maintains a library of pre-built images # documented at https://circleci.com/developer/images/image/cimg/python - - image: cimg/python:3.11.13 + - image: cimg/python:3.12.12 working_directory: ~/repo @@ -53,7 +53,7 @@ jobs: - run: name: build NumPy command: | - python3.11 -m venv venv + python3.12 -m venv venv . venv/bin/activate pip install --progress-bar=off -r requirements/test_requirements.txt \ -r requirements/build_requirements.txt \ diff --git a/doc/source/try_examples.json b/doc/source/try_examples.json index 510efcdd2694..305860dced55 100644 --- a/doc/source/try_examples.json +++ b/doc/source/try_examples.json @@ -6,3 +6,4 @@ "numpy.__array_namespace_info__.html*" ] } + From d0a14ac3c601b5d54efa5aa56a5008620ecebee0 Mon Sep 17 00:00:00 2001 From: mattip Date: Mon, 1 Dec 2025 13:01:42 +0200 Subject: [PATCH 0938/1018] remove numpy.distutils --- .github/workflows/linux.yml | 2 +- doc/DISTUTILS.rst | 622 ---- doc/neps/scope.rst | 6 +- doc/source/building/distutils_equivalents.rst | 2 +- doc/source/conf.py | 10 +- doc/source/dev/depending_on_numpy.rst | 3 +- doc/source/f2py/buildtools/distutils.rst | 73 - doc/source/f2py/buildtools/index.rst | 2 +- doc/source/f2py/code/setup_example.py | 16 - doc/source/f2py/f2py.getting-started.rst | 9 +- doc/source/f2py/usage.rst | 77 +- doc/source/f2py/windows/index.rst | 6 - doc/source/reference/c-api/coremath.rst | 14 - doc/source/reference/distutils.rst | 219 -- doc/source/reference/distutils/misc_util.rst | 7 - doc/source/reference/distutils_guide.rst | 13 - .../reference/distutils_status_migration.rst | 38 +- doc/source/reference/index.rst | 2 - doc/source/reference/module_structure.rst | 2 - doc/source/try_examples.json | 1 - doc/source/user/absolute_beginners.rst | 2 +- doc/source/user/c-info.python-as-glue.rst | 8 +- meson_cpu/arm/meson.build | 16 +- meson_cpu/loongarch64/meson.build | 2 +- meson_cpu/ppc64/meson.build | 14 +- meson_cpu/riscv64/meson.build | 2 +- meson_cpu/s390x/meson.build | 6 +- meson_cpu/x86/meson.build | 4 +- numpy/__init__.py | 17 +- numpy/__init__.pyi | 2 - .../conv_template.py | 0 numpy/_build_utils/process_src_template.py | 4 +- numpy/_core/code_generators/genapi.py | 4 +- numpy/_core/code_generators/numpy_api.py | 2 +- .../src/_simd}/checks/cpu_asimd.c | 0 .../src/_simd}/checks/cpu_asimddp.c | 0 .../src/_simd}/checks/cpu_asimdfhm.c | 0 .../src/_simd}/checks/cpu_asimdhp.c | 0 .../src/_simd}/checks/cpu_avx.c | 0 .../src/_simd}/checks/cpu_avx2.c | 0 .../src/_simd}/checks/cpu_avx512_clx.c | 0 .../src/_simd}/checks/cpu_avx512_cnl.c | 0 .../src/_simd}/checks/cpu_avx512_icl.c | 0 .../src/_simd}/checks/cpu_avx512_knl.c | 0 .../src/_simd}/checks/cpu_avx512_knm.c | 0 .../src/_simd}/checks/cpu_avx512_skx.c | 0 .../src/_simd}/checks/cpu_avx512_spr.c | 0 .../src/_simd}/checks/cpu_avx512cd.c | 0 .../src/_simd}/checks/cpu_avx512f.c | 0 .../src/_simd}/checks/cpu_f16c.c | 0 .../src/_simd}/checks/cpu_fma3.c | 0 .../src/_simd}/checks/cpu_fma4.c | 0 .../src/_simd}/checks/cpu_lsx.c | 0 .../src/_simd}/checks/cpu_neon.c | 0 .../src/_simd}/checks/cpu_neon_fp16.c | 0 .../src/_simd}/checks/cpu_neon_vfpv4.c | 0 .../src/_simd}/checks/cpu_popcnt.c | 0 .../src/_simd}/checks/cpu_rvv.c | 0 
.../src/_simd}/checks/cpu_sse.c | 0 .../src/_simd}/checks/cpu_sse2.c | 0 .../src/_simd}/checks/cpu_sse3.c | 0 .../src/_simd}/checks/cpu_sse41.c | 0 .../src/_simd}/checks/cpu_sse42.c | 0 .../src/_simd}/checks/cpu_ssse3.c | 0 .../src/_simd}/checks/cpu_sve.c | 0 .../src/_simd}/checks/cpu_vsx.c | 0 .../src/_simd}/checks/cpu_vsx2.c | 0 .../src/_simd}/checks/cpu_vsx3.c | 0 .../src/_simd}/checks/cpu_vsx4.c | 0 .../src/_simd}/checks/cpu_vx.c | 0 .../src/_simd}/checks/cpu_vxe.c | 0 .../src/_simd}/checks/cpu_vxe2.c | 0 .../src/_simd}/checks/cpu_xop.c | 0 .../src/_simd}/checks/extra_avx512bw_mask.c | 0 .../src/_simd}/checks/extra_avx512dq_mask.c | 0 .../src/_simd}/checks/extra_avx512f_reduce.c | 0 .../_simd}/checks/extra_vsx3_half_double.c | 0 .../src/_simd}/checks/extra_vsx4_mma.c | 0 .../src/_simd}/checks/extra_vsx_asm.c | 0 .../src/_simd}/checks/test_flags.c | 0 numpy/_pyinstaller/hook-numpy.py | 1 - numpy/_pytesttester.py | 8 - numpy/distutils/__init__.py | 64 - numpy/distutils/__init__.pyi | 4 - numpy/distutils/_shell_utils.py | 87 - numpy/distutils/armccompiler.py | 26 - numpy/distutils/ccompiler.py | 826 ----- numpy/distutils/ccompiler_opt.py | 2668 -------------- numpy/distutils/command/__init__.py | 41 - numpy/distutils/command/autodist.py | 148 - numpy/distutils/command/bdist_rpm.py | 22 - numpy/distutils/command/build.py | 62 - numpy/distutils/command/build_clib.py | 469 --- numpy/distutils/command/build_ext.py | 752 ---- numpy/distutils/command/build_py.py | 31 - numpy/distutils/command/build_scripts.py | 49 - numpy/distutils/command/build_src.py | 773 ---- numpy/distutils/command/config.py | 516 --- numpy/distutils/command/config_compiler.py | 126 - numpy/distutils/command/develop.py | 15 - numpy/distutils/command/egg_info.py | 25 - numpy/distutils/command/install.py | 79 - numpy/distutils/command/install_clib.py | 40 - numpy/distutils/command/install_data.py | 24 - numpy/distutils/command/install_headers.py | 25 - numpy/distutils/command/sdist.py | 27 - numpy/distutils/core.py | 215 -- numpy/distutils/cpuinfo.py | 683 ---- numpy/distutils/exec_command.py | 315 -- numpy/distutils/extension.py | 101 - numpy/distutils/fcompiler/__init__.py | 1035 ------ numpy/distutils/fcompiler/absoft.py | 158 - numpy/distutils/fcompiler/arm.py | 71 - numpy/distutils/fcompiler/compaq.py | 120 - numpy/distutils/fcompiler/environment.py | 88 - numpy/distutils/fcompiler/fujitsu.py | 46 - numpy/distutils/fcompiler/g95.py | 42 - numpy/distutils/fcompiler/gnu.py | 555 --- numpy/distutils/fcompiler/hpux.py | 41 - numpy/distutils/fcompiler/ibm.py | 97 - numpy/distutils/fcompiler/intel.py | 211 -- numpy/distutils/fcompiler/lahey.py | 45 - numpy/distutils/fcompiler/mips.py | 54 - numpy/distutils/fcompiler/nag.py | 87 - numpy/distutils/fcompiler/none.py | 28 - numpy/distutils/fcompiler/nv.py | 53 - numpy/distutils/fcompiler/pathf95.py | 33 - numpy/distutils/fcompiler/pg.py | 128 - numpy/distutils/fcompiler/sun.py | 51 - numpy/distutils/fcompiler/vast.py | 52 - numpy/distutils/from_template.py | 261 -- numpy/distutils/fujitsuccompiler.py | 28 - numpy/distutils/intelccompiler.py | 106 - numpy/distutils/lib2def.py | 116 - numpy/distutils/line_endings.py | 77 - numpy/distutils/log.py | 111 - numpy/distutils/mingw/gfortran_vs2003_hack.c | 6 - numpy/distutils/mingw32ccompiler.py | 620 ---- numpy/distutils/misc_util.py | 2484 ------------- numpy/distutils/msvc9compiler.py | 63 - numpy/distutils/msvccompiler.py | 76 - numpy/distutils/npy_pkg_config.py | 441 --- numpy/distutils/numpy_distribution.py | 17 - 
numpy/distutils/pathccompiler.py | 21 - numpy/distutils/system_info.py | 3267 ----------------- numpy/distutils/tests/__init__.py | 0 numpy/distutils/tests/test_build_ext.py | 74 - numpy/distutils/tests/test_ccompiler_opt.py | 808 ---- .../tests/test_ccompiler_opt_conf.py | 176 - numpy/distutils/tests/test_exec_command.py | 217 -- numpy/distutils/tests/test_fcompiler.py | 43 - numpy/distutils/tests/test_fcompiler_gnu.py | 55 - numpy/distutils/tests/test_fcompiler_intel.py | 30 - .../distutils/tests/test_fcompiler_nagfor.py | 22 - numpy/distutils/tests/test_from_template.py | 44 - numpy/distutils/tests/test_log.py | 34 - .../distutils/tests/test_mingw32ccompiler.py | 47 - numpy/distutils/tests/test_misc_util.py | 88 - numpy/distutils/tests/test_npy_pkg_config.py | 84 - numpy/distutils/tests/test_shell_utils.py | 79 - numpy/distutils/tests/test_system_info.py | 334 -- numpy/distutils/tests/utilities.py | 90 - numpy/distutils/unixccompiler.py | 141 - numpy/f2py/_backends/__init__.py | 3 - numpy/f2py/_backends/_distutils.py | 76 - numpy/f2py/diagnose.py | 59 +- numpy/f2py/f2py2e.py | 37 +- numpy/f2py/tests/test_f2py2e.py | 32 - numpy/testing/_private/utils.py | 6 +- numpy/tests/test_public_api.py | 84 +- numpy/typing/tests/test_isfile.py | 2 - pytest.ini | 5 +- ruff.toml | 1 - tools/check_installed_files.py | 5 - tools/stubtest/allowlist_py311.txt | 1 - tools/stubtest/allowlist_py312.txt | 2 - tools/stubtest/mypy.ini | 1 - 177 files changed, 89 insertions(+), 22709 deletions(-) delete mode 100644 doc/DISTUTILS.rst delete mode 100644 doc/source/f2py/code/setup_example.py delete mode 100644 doc/source/reference/distutils.rst delete mode 100644 doc/source/reference/distutils/misc_util.rst delete mode 100644 doc/source/reference/distutils_guide.rst rename numpy/{distutils => _build_utils}/conv_template.py (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_asimd.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_asimddp.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_asimdfhm.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_asimdhp.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_avx.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_avx2.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_avx512_clx.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_avx512_cnl.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_avx512_icl.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_avx512_knl.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_avx512_knm.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_avx512_skx.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_avx512_spr.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_avx512cd.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_avx512f.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_f16c.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_fma3.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_fma4.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_lsx.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_neon.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_neon_fp16.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_neon_vfpv4.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_popcnt.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_rvv.c 
(100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_sse.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_sse2.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_sse3.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_sse41.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_sse42.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_ssse3.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_sve.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_vsx.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_vsx2.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_vsx3.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_vsx4.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_vx.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_vxe.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_vxe2.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_xop.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/extra_avx512bw_mask.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/extra_avx512dq_mask.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/extra_avx512f_reduce.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/extra_vsx3_half_double.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/extra_vsx4_mma.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/extra_vsx_asm.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/test_flags.c (100%) delete mode 100644 numpy/distutils/__init__.py delete mode 100644 numpy/distutils/__init__.pyi delete mode 100644 numpy/distutils/_shell_utils.py delete mode 100644 numpy/distutils/armccompiler.py delete mode 100644 numpy/distutils/ccompiler.py delete mode 100644 numpy/distutils/ccompiler_opt.py delete mode 100644 numpy/distutils/command/__init__.py delete mode 100644 numpy/distutils/command/autodist.py delete mode 100644 numpy/distutils/command/bdist_rpm.py delete mode 100644 numpy/distutils/command/build.py delete mode 100644 numpy/distutils/command/build_clib.py delete mode 100644 numpy/distutils/command/build_ext.py delete mode 100644 numpy/distutils/command/build_py.py delete mode 100644 numpy/distutils/command/build_scripts.py delete mode 100644 numpy/distutils/command/build_src.py delete mode 100644 numpy/distutils/command/config.py delete mode 100644 numpy/distutils/command/config_compiler.py delete mode 100644 numpy/distutils/command/develop.py delete mode 100644 numpy/distutils/command/egg_info.py delete mode 100644 numpy/distutils/command/install.py delete mode 100644 numpy/distutils/command/install_clib.py delete mode 100644 numpy/distutils/command/install_data.py delete mode 100644 numpy/distutils/command/install_headers.py delete mode 100644 numpy/distutils/command/sdist.py delete mode 100644 numpy/distutils/core.py delete mode 100644 numpy/distutils/cpuinfo.py delete mode 100644 numpy/distutils/exec_command.py delete mode 100644 numpy/distutils/extension.py delete mode 100644 numpy/distutils/fcompiler/__init__.py delete mode 100644 numpy/distutils/fcompiler/absoft.py delete mode 100644 numpy/distutils/fcompiler/arm.py delete mode 100644 numpy/distutils/fcompiler/compaq.py delete mode 100644 numpy/distutils/fcompiler/environment.py delete mode 100644 numpy/distutils/fcompiler/fujitsu.py delete mode 100644 numpy/distutils/fcompiler/g95.py delete mode 100644 numpy/distutils/fcompiler/gnu.py delete mode 100644 
numpy/distutils/fcompiler/hpux.py delete mode 100644 numpy/distutils/fcompiler/ibm.py delete mode 100644 numpy/distutils/fcompiler/intel.py delete mode 100644 numpy/distutils/fcompiler/lahey.py delete mode 100644 numpy/distutils/fcompiler/mips.py delete mode 100644 numpy/distutils/fcompiler/nag.py delete mode 100644 numpy/distutils/fcompiler/none.py delete mode 100644 numpy/distutils/fcompiler/nv.py delete mode 100644 numpy/distutils/fcompiler/pathf95.py delete mode 100644 numpy/distutils/fcompiler/pg.py delete mode 100644 numpy/distutils/fcompiler/sun.py delete mode 100644 numpy/distutils/fcompiler/vast.py delete mode 100644 numpy/distutils/from_template.py delete mode 100644 numpy/distutils/fujitsuccompiler.py delete mode 100644 numpy/distutils/intelccompiler.py delete mode 100644 numpy/distutils/lib2def.py delete mode 100644 numpy/distutils/line_endings.py delete mode 100644 numpy/distutils/log.py delete mode 100644 numpy/distutils/mingw/gfortran_vs2003_hack.c delete mode 100644 numpy/distutils/mingw32ccompiler.py delete mode 100644 numpy/distutils/misc_util.py delete mode 100644 numpy/distutils/msvc9compiler.py delete mode 100644 numpy/distutils/msvccompiler.py delete mode 100644 numpy/distutils/npy_pkg_config.py delete mode 100644 numpy/distutils/numpy_distribution.py delete mode 100644 numpy/distutils/pathccompiler.py delete mode 100644 numpy/distutils/system_info.py delete mode 100644 numpy/distutils/tests/__init__.py delete mode 100644 numpy/distutils/tests/test_build_ext.py delete mode 100644 numpy/distutils/tests/test_ccompiler_opt.py delete mode 100644 numpy/distutils/tests/test_ccompiler_opt_conf.py delete mode 100644 numpy/distutils/tests/test_exec_command.py delete mode 100644 numpy/distutils/tests/test_fcompiler.py delete mode 100644 numpy/distutils/tests/test_fcompiler_gnu.py delete mode 100644 numpy/distutils/tests/test_fcompiler_intel.py delete mode 100644 numpy/distutils/tests/test_fcompiler_nagfor.py delete mode 100644 numpy/distutils/tests/test_from_template.py delete mode 100644 numpy/distutils/tests/test_log.py delete mode 100644 numpy/distutils/tests/test_mingw32ccompiler.py delete mode 100644 numpy/distutils/tests/test_misc_util.py delete mode 100644 numpy/distutils/tests/test_npy_pkg_config.py delete mode 100644 numpy/distutils/tests/test_shell_utils.py delete mode 100644 numpy/distutils/tests/test_system_info.py delete mode 100644 numpy/distutils/tests/utilities.py delete mode 100644 numpy/distutils/unixccompiler.py delete mode 100644 numpy/f2py/_backends/_distutils.py diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 9d1c3ac20a45..a23f1f7649d8 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -368,7 +368,7 @@ jobs: - name: Check for unreachable code paths in Python modules run: | # Need the explicit `bash -c` here because `grep` returns exit code 1 for no matches - bash -c "! vulture . --min-confidence 100 --exclude doc/,numpy/distutils/,vendored-meson/ | grep 'unreachable'" + bash -c "! vulture . --min-confidence 100 --exclude doc/,vendored-meson/ | grep 'unreachable'" - name: Check usage of install_tag run: | rm -rf build-install diff --git a/doc/DISTUTILS.rst b/doc/DISTUTILS.rst deleted file mode 100644 index 142c15a7124a..000000000000 --- a/doc/DISTUTILS.rst +++ /dev/null @@ -1,622 +0,0 @@ -.. -*- rest -*- - -NumPy distutils - users guide -============================= - -.. 
contents:: - -SciPy structure -''''''''''''''' - -Currently SciPy project consists of two packages: - -- NumPy --- it provides packages like: - - + numpy.distutils - extension to Python distutils - + numpy.f2py - a tool to bind Fortran/C codes to Python - + numpy._core - future replacement of Numeric and numarray packages - + numpy.lib - extra utility functions - + numpy.testing - numpy-style tools for unit testing - + etc - -- SciPy --- a collection of scientific tools for Python. - -The aim of this document is to describe how to add new tools to SciPy. - - -Requirements for SciPy packages -''''''''''''''''''''''''''''''' - -SciPy consists of Python packages, called SciPy packages, that are -available to Python users via the ``scipy`` namespace. Each SciPy package -may contain other SciPy packages. And so on. Therefore, the SciPy -directory tree is a tree of packages with arbitrary depth and width. -Any SciPy package may depend on NumPy packages but the dependence on other -SciPy packages should be kept minimal or zero. - -A SciPy package contains, in addition to its sources, the following -files and directories: - -+ ``setup.py`` --- building script -+ ``__init__.py`` --- package initializer -+ ``tests/`` --- directory of unittests - -Their contents are described below. - -The ``setup.py`` file -''''''''''''''''''''' - -In order to add a Python package to SciPy, its build script (``setup.py``) -must meet certain requirements. The most important requirement is that the -package define a ``configuration(parent_package='',top_path=None)`` function -which returns a dictionary suitable for passing to -``numpy.distutils.core.setup(..)``. To simplify the construction of -this dictionary, ``numpy.distutils.misc_util`` provides the -``Configuration`` class, described below. - -SciPy pure Python package example ---------------------------------- - -Below is an example of a minimal ``setup.py`` file for a pure SciPy package:: - - #!/usr/bin/env python3 - def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('mypackage',parent_package,top_path) - return config - - if __name__ == "__main__": - from numpy.distutils.core import setup - #setup(**configuration(top_path='').todict()) - setup(configuration=configuration) - -The arguments of the ``configuration`` function specify the name of -parent SciPy package (``parent_package``) and the directory location -of the main ``setup.py`` script (``top_path``). These arguments, -along with the name of the current package, should be passed to the -``Configuration`` constructor. - -The ``Configuration`` constructor has a fourth optional argument, -``package_path``, that can be used when package files are located in -a different location than the directory of the ``setup.py`` file. - -Remaining ``Configuration`` arguments are all keyword arguments that will -be used to initialize attributes of ``Configuration`` -instance. Usually, these keywords are the same as the ones that -``setup(..)`` function would expect, for example, ``packages``, -``ext_modules``, ``data_files``, ``include_dirs``, ``libraries``, -``headers``, ``scripts``, ``package_dir``, etc. However, the direct -specification of these keywords is not recommended as the content of -these keyword arguments will not be processed or checked for the -consistency of SciPy building system. 
- -Finally, ``Configuration`` has ``.todict()`` method that returns all -the configuration data as a dictionary suitable for passing on to the -``setup(..)`` function. - -``Configuration`` instance attributes -------------------------------------- - -In addition to attributes that can be specified via keyword arguments -to ``Configuration`` constructor, ``Configuration`` instance (let us -denote as ``config``) has the following attributes that can be useful -in writing setup scripts: - -+ ``config.name`` - full name of the current package. The names of parent - packages can be extracted as ``config.name.split('.')``. - -+ ``config.local_path`` - path to the location of current ``setup.py`` file. - -+ ``config.top_path`` - path to the location of main ``setup.py`` file. - -``Configuration`` instance methods ----------------------------------- - -+ ``config.todict()`` --- returns configuration dictionary suitable for - passing to ``numpy.distutils.core.setup(..)`` function. - -+ ``config.paths(*paths) --- applies ``glob.glob(..)`` to items of - ``paths`` if necessary. Fixes ``paths`` item that is relative to - ``config.local_path``. - -+ ``config.get_subpackage(subpackage_name,subpackage_path=None)`` --- - returns a list of subpackage configurations. Subpackage is looked in the - current directory under the name ``subpackage_name`` but the path - can be specified also via optional ``subpackage_path`` argument. - If ``subpackage_name`` is specified as ``None`` then the subpackage - name will be taken the basename of ``subpackage_path``. - Any ``*`` used for subpackage names are expanded as wildcards. - -+ ``config.add_subpackage(subpackage_name,subpackage_path=None)`` --- - add SciPy subpackage configuration to the current one. The meaning - and usage of arguments is explained above, see - ``config.get_subpackage()`` method. - -+ ``config.add_data_files(*files)`` --- prepend ``files`` to ``data_files`` - list. If ``files`` item is a tuple then its first element defines - the suffix of where data files are copied relative to package installation - directory and the second element specifies the path to data - files. By default data files are copied under package installation - directory. For example, - - :: - - config.add_data_files('foo.dat', - ('fun',['gun.dat','nun/pun.dat','/tmp/sun.dat']), - 'bar/car.dat'. - '/full/path/to/can.dat', - ) - - will install data files to the following locations - - :: - - / - foo.dat - fun/ - gun.dat - pun.dat - sun.dat - bar/ - car.dat - can.dat - - Path to data files can be a function taking no arguments and - returning path(s) to data files -- this is a useful when data files - are generated while building the package. (XXX: explain the step - when this function are called exactly) - -+ ``config.add_data_dir(data_path)`` --- add directory ``data_path`` - recursively to ``data_files``. The whole directory tree starting at - ``data_path`` will be copied under package installation directory. - If ``data_path`` is a tuple then its first element defines - the suffix of where data files are copied relative to package installation - directory and the second element specifies the path to data directory. - By default, data directory are copied under package installation - directory under the basename of ``data_path``. 
For example, - - :: - - config.add_data_dir('fun') # fun/ contains foo.dat bar/car.dat - config.add_data_dir(('sun','fun')) - config.add_data_dir(('gun','/full/path/to/fun')) - - will install data files to the following locations - - :: - - / - fun/ - foo.dat - bar/ - car.dat - sun/ - foo.dat - bar/ - car.dat - gun/ - foo.dat - bar/ - car.dat - -+ ``config.add_include_dirs(*paths)`` --- prepend ``paths`` to - ``include_dirs`` list. This list will be visible to all extension - modules of the current package. - -+ ``config.add_headers(*files)`` --- prepend ``files`` to ``headers`` - list. By default, headers will be installed under - ``/include/pythonX.X//`` - directory. If ``files`` item is a tuple then it's first argument - specifies the installation suffix relative to - ``/include/pythonX.X/`` path. This is a Python distutils - method; its use is discouraged for NumPy and SciPy in favour of - ``config.add_data_files(*files)``. - -+ ``config.add_scripts(*files)`` --- prepend ``files`` to ``scripts`` - list. Scripts will be installed under ``/bin/`` directory. - -+ ``config.add_extension(name,sources,**kw)`` --- create and add an - ``Extension`` instance to ``ext_modules`` list. The first argument - ``name`` defines the name of the extension module that will be - installed under ``config.name`` package. The second argument is - a list of sources. ``add_extension`` method takes also keyword - arguments that are passed on to the ``Extension`` constructor. - The list of allowed keywords is the following: ``include_dirs``, - ``define_macros``, ``undef_macros``, ``library_dirs``, ``libraries``, - ``runtime_library_dirs``, ``extra_objects``, ``extra_compile_args``, - ``extra_link_args``, ``export_symbols``, ``swig_opts``, ``depends``, - ``language``, ``f2py_options``, ``module_dirs``, ``extra_info``, - ``extra_f77_compile_args``, ``extra_f90_compile_args``. - - Note that ``config.paths`` method is applied to all lists that - may contain paths. ``extra_info`` is a dictionary or a list - of dictionaries that content will be appended to keyword arguments. - The list ``depends`` contains paths to files or directories - that the sources of the extension module depend on. If any path - in the ``depends`` list is newer than the extension module, then - the module will be rebuilt. - - The list of sources may contain functions ('source generators') - with a pattern ``def (ext, build_dir): return - ``. If ``funcname`` returns ``None``, no sources - are generated. And if the ``Extension`` instance has no sources - after processing all source generators, no extension module will - be built. This is the recommended way to conditionally define - extension modules. Source generator functions are called by the - ``build_src`` sub-command of ``numpy.distutils``. - - For example, here is a typical source generator function:: - - def generate_source(ext,build_dir): - import os - from distutils.dep_util import newer - target = os.path.join(build_dir,'somesource.c') - if newer(target,__file__): - # create target file - return target - - The first argument contains the Extension instance that can be - useful to access its attributes like ``depends``, ``sources``, - etc. lists and modify them during the building process. - The second argument gives a path to a build directory that must - be used when creating files to a disk. - -+ ``config.add_library(name, sources, **build_info)`` --- add a - library to ``libraries`` list. 
Allowed keywords arguments are - ``depends``, ``macros``, ``include_dirs``, ``extra_compiler_args``, - ``f2py_options``, ``extra_f77_compile_args``, - ``extra_f90_compile_args``. See ``.add_extension()`` method for - more information on arguments. - -+ ``config.have_f77c()`` --- return True if Fortran 77 compiler is - available (read: a simple Fortran 77 code compiled successfully). - -+ ``config.have_f90c()`` --- return True if Fortran 90 compiler is - available (read: a simple Fortran 90 code compiled successfully). - -+ ``config.get_version()`` --- return version string of the current package, - ``None`` if version information could not be detected. This methods - scans files ``__version__.py``, ``_version.py``, - ``version.py``, ``__svn_version__.py`` for string variables - ``version``, ``__version__``, ``_version``. - -+ ``config.make_svn_version_py()`` --- appends a data function to - ``data_files`` list that will generate ``__svn_version__.py`` file - to the current package directory. The file will be removed from - the source directory when Python exits. - -+ ``config.get_build_temp_dir()`` --- return a path to a temporary - directory. This is the place where one should build temporary - files. - -+ ``config.get_distribution()`` --- return distutils ``Distribution`` - instance. - -+ ``config.get_config_cmd()`` --- returns ``numpy.distutils`` config - command instance. - -+ ``config.get_info(*names)`` --- - - -.. _templating: - -Conversion of ``.src`` files using templates --------------------------------------------- - -NumPy distutils supports automatic conversion of source files named -.src. This facility can be used to maintain very similar -code blocks requiring only simple changes between blocks. During the -build phase of setup, if a template file named .src is -encountered, a new file named is constructed from the -template and placed in the build directory to be used instead. Two -forms of template conversion are supported. The first form occurs for -files named .ext.src where ext is a recognized Fortran -extension (f, f90, f95, f77, for, ftn, pyf). The second form is used -for all other cases. - -.. index:: - single: code generation - -Fortran files -------------- - -This template converter will replicate all **function** and -**subroutine** blocks in the file with names that contain '<...>' -according to the rules in '<...>'. The number of comma-separated words -in '<...>' determines the number of times the block is repeated. What -these words are indicates what that repeat rule, '<...>', should be -replaced with in each block. All of the repeat rules in a block must -contain the same number of comma-separated words indicating the number -of times that block should be repeated. If the word in the repeat rule -needs a comma, leftarrow, or rightarrow, then prepend it with a -backslash ' \'. If a word in the repeat rule matches ' \\' then -it will be replaced with the -th word in the same repeat -specification. There are two forms for the repeat rule: named and -short. - -Named repeat rule -^^^^^^^^^^^^^^^^^ - -A named repeat rule is useful when the same set of repeats must be -used several times in a block. It is specified using , where N is the number of times the block -should be repeated. On each repeat of the block, the entire -expression, '<...>' will be replaced first with item1, and then with -item2, and so forth until N repeats are accomplished. 
Once a named -repeat specification has been introduced, the same repeat rule may be -used **in the current block** by referring only to the name -(i.e. ). - - -Short repeat rule -^^^^^^^^^^^^^^^^^ - -A short repeat rule looks like . The -rule specifies that the entire expression, '<...>' should be replaced -first with item1, and then with item2, and so forth until N repeats -are accomplished. - - -Pre-defined names -^^^^^^^^^^^^^^^^^ - -The following predefined named repeat rules are available: - -- - -- <_c=s,d,c,z> - -- <_t=real, double precision, complex, double complex> - -- - -- - -- - -- - - -Other files ------------- - -Non-Fortran files use a separate syntax for defining template blocks -that should be repeated using a variable expansion similar to the -named repeat rules of the Fortran-specific repeats. - -NumPy Distutils preprocesses C source files (extension: :file:`.c.src`) written -in a custom templating language to generate C code. The ``@`` symbol is -used to wrap macro-style variables to empower a string substitution mechanism -that might describe (for instance) a set of data types. - -The template language blocks are delimited by ``/**begin repeat`` -and ``/**end repeat**/`` lines, which may also be nested using -consecutively numbered delimiting lines such as ``/**begin repeat1`` -and ``/**end repeat1**/``: - -1. ``/**begin repeat`` on a line by itself marks the beginning of - a segment that should be repeated. - -2. Named variable expansions are defined using ``#name=item1, item2, item3, - ..., itemN#`` and placed on successive lines. These variables are - replaced in each repeat block with corresponding word. All named - variables in the same repeat block must define the same number of - words. - -3. In specifying the repeat rule for a named variable, ``item*N`` is short- - hand for ``item, item, ..., item`` repeated N times. In addition, - parenthesis in combination with ``*N`` can be used for grouping several - items that should be repeated. Thus, ``#name=(item1, item2)*4#`` is - equivalent to ``#name=item1, item2, item1, item2, item1, item2, item1, - item2#``. - -4. ``*/`` on a line by itself marks the end of the variable expansion - naming. The next line is the first line that will be repeated using - the named rules. - -5. Inside the block to be repeated, the variables that should be expanded - are specified as ``@name@``. - -6. ``/**end repeat**/`` on a line by itself marks the previous line - as the last line of the block to be repeated. - -7. A loop in the NumPy C source code may have a ``@TYPE@`` variable, targeted - for string substitution, which is preprocessed to a number of otherwise - identical loops with several strings such as ``INT``, ``LONG``, ``UINT``, - ``ULONG``. The ``@TYPE@`` style syntax thus reduces code duplication and - maintenance burden by mimicking languages that have generic type support. - -The above rules may be clearer in the following template source example: - -.. 
code-block:: NumPyC - :linenos: - :emphasize-lines: 3, 13, 29, 31 - - /* TIMEDELTA to non-float types */ - - /**begin repeat - * - * #TOTYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, DATETIME, - * TIMEDELTA# - * #totype = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint, - * npy_long, npy_ulong, npy_longlong, npy_ulonglong, - * npy_datetime, npy_timedelta# - */ - - /**begin repeat1 - * - * #FROMTYPE = TIMEDELTA# - * #fromtype = npy_timedelta# - */ - static void - @FROMTYPE@_to_@TOTYPE@(void *input, void *output, npy_intp n, - void *NPY_UNUSED(aip), void *NPY_UNUSED(aop)) - { - const @fromtype@ *ip = input; - @totype@ *op = output; - - while (n--) { - *op++ = (@totype@)*ip++; - } - } - /**end repeat1**/ - - /**end repeat**/ - -The preprocessing of generically-typed C source files (whether in NumPy -proper or in any third party package using NumPy Distutils) is performed -by `conv_template.py`_. -The type-specific C files generated (extension: ``.c``) -by these modules during the build process are ready to be compiled. This -form of generic typing is also supported for C header files (preprocessed -to produce ``.h`` files). - -.. _conv_template.py: https://github.com/numpy/numpy/blob/main/numpy/distutils/conv_template.py - -Useful functions in ``numpy.distutils.misc_util`` -------------------------------------------------- - -+ ``get_numpy_include_dirs()`` --- return a list of NumPy base - include directories. NumPy base include directories contain - header files such as ``numpy/arrayobject.h``, ``numpy/funcobject.h`` - etc. For installed NumPy the returned list has length 1 - but when building NumPy the list may contain more directories, - for example, a path to ``config.h`` file that - ``numpy/base/setup.py`` file generates and is used by ``numpy`` - header files. - -+ ``append_path(prefix,path)`` --- smart append ``path`` to ``prefix``. - -+ ``gpaths(paths, local_path='')`` --- apply glob to paths and prepend - ``local_path`` if needed. - -+ ``njoin(*path)`` --- join pathname components + convert ``/``-separated path - to ``os.sep``-separated path and resolve ``..``, ``.`` from paths. - Ex. ``njoin('a',['b','./c'],'..','g') -> os.path.join('a','b','g')``. - -+ ``minrelpath(path)`` --- resolves dots in ``path``. - -+ ``rel_path(path, parent_path)`` --- return ``path`` relative to ``parent_path``. - -+ ``def get_cmd(cmdname,_cache={})`` --- returns ``numpy.distutils`` - command instance. - -+ ``all_strings(lst)`` - -+ ``has_f_sources(sources)`` - -+ ``has_cxx_sources(sources)`` - -+ ``filter_sources(sources)`` --- return ``c_sources, cxx_sources, - f_sources, fmodule_sources`` - -+ ``get_dependencies(sources)`` - -+ ``is_local_src_dir(directory)`` - -+ ``get_ext_source_files(ext)`` - -+ ``get_script_files(scripts)`` - -+ ``get_lib_source_files(lib)`` - -+ ``get_data_files(data)`` - -+ ``dot_join(*args)`` --- join non-zero arguments with a dot. - -+ ``get_frame(level=0)`` --- return frame object from call stack with given level. - -+ ``cyg2win32(path)`` - -+ ``mingw32()`` --- return ``True`` when using mingw32 environment. - -+ ``terminal_has_colors()``, ``red_text(s)``, ``green_text(s)``, - ``yellow_text(s)``, ``blue_text(s)``, ``cyan_text(s)`` - -+ ``get_path(mod_name,parent_path=None)`` --- return path of a module - relative to parent_path when given. Handles also ``__main__`` and - ``__builtin__`` modules. - -+ ``allpath(name)`` --- replaces ``/`` with ``os.sep`` in ``name``. 
- -+ ``cxx_ext_match``, ``fortran_ext_match``, ``f90_ext_match``, - ``f90_module_name_match`` - -``numpy.distutils.system_info`` module --------------------------------------- - -+ ``get_info(name,notfound_action=0)`` -+ ``combine_paths(*args,**kws)`` -+ ``show_all()`` - -``numpy.distutils.cpuinfo`` module ----------------------------------- - -+ ``cpuinfo`` - -``numpy.distutils.log`` module ------------------------------- - -+ ``set_verbosity(v)`` - - -``numpy.distutils.exec_command`` module ---------------------------------------- - -+ ``get_pythonexe()`` -+ ``find_executable(exe, path=None)`` -+ ``exec_command( command, execute_in='', use_shell=None, use_tee=None, **env )`` - -The ``__init__.py`` file -'''''''''''''''''''''''' - -The header of a typical SciPy ``__init__.py`` is:: - - """ - Package docstring, typically with a brief description and function listing. - """ - - # import functions into module namespace - from .subpackage import * - ... - - __all__ = [s for s in dir() if not s.startswith('_')] - - from numpy.testing import Tester - test = Tester().test - bench = Tester().bench - -Extra features in NumPy Distutils -''''''''''''''''''''''''''''''''' - -Specifying config_fc options for libraries in setup.py script -------------------------------------------------------------- - -It is possible to specify config_fc options in setup.py scripts. -For example, using:: - - config.add_library('library', - sources=[...], - config_fc={'noopt':(__file__,1)}) - -will compile the ``library`` sources without optimization flags. - -It's recommended to specify only those config_fc options in such a way -that are compiler independent. - -Getting extra Fortran 77 compiler options from source ------------------------------------------------------ - -Some old Fortran codes need special compiler options in order to -work correctly. In order to specify compiler options per source -file, ``numpy.distutils`` Fortran compiler looks for the following -pattern:: - - CF77FLAGS() = - -in the first 20 lines of the source and use the ``f77flags`` for -specified type of the fcompiler (the first character ``C`` is optional). - -TODO: This feature can be easily extended for Fortran 90 codes as -well. Let us know if you would need such a feature. 
diff --git a/doc/neps/scope.rst b/doc/neps/scope.rst index 93887c4b12ff..1d5722700a79 100644 --- a/doc/neps/scope.rst +++ b/doc/neps/scope.rst @@ -36,10 +36,10 @@ Here, we describe aspects of N-d array computation that are within scope for Num - NumPy provides some **infrastructure for other packages in the scientific Python ecosystem**: - - numpy.distutils (build support for C++, Fortran, BLAS/LAPACK, and other - relevant libraries for scientific computing) + - numpy.distutils (deprecated and removed, build support for C++, Fortran, + BLAS/LAPACK, and other relevant libraries for scientific computing) - f2py (generating bindings for Fortran code) - - testing utilities + - testing utilities (mostly deprecated, pytest does a good job) - **Speed**: we take performance concerns seriously and aim to execute operations on large arrays with similar performance as native C diff --git a/doc/source/building/distutils_equivalents.rst b/doc/source/building/distutils_equivalents.rst index 156174d02358..65821bfec9d9 100644 --- a/doc/source/building/distutils_equivalents.rst +++ b/doc/source/building/distutils_equivalents.rst @@ -3,7 +3,7 @@ Meson and ``distutils`` ways of doing things -------------------------------------------- -*Old workflows (numpy.distutils based):* +*Old workflows (numpy.distutils based, no longer relevant):* 1. ``python runtests.py`` 2. ``python setup.py build_ext -i`` + ``export diff --git a/doc/source/conf.py b/doc/source/conf.py index af431db44351..e26719b05cb0 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -154,21 +154,13 @@ class PyTypeObject(ctypes.Structure): nitpick_ignore = [] if sys.version_info[:2] >= (3, 12): - exclude_patterns += [ - "reference/distutils.rst", - "reference/distutils/misc_util.rst", - ] suppress_warnings += [ 'toc.excluded', # Suppress warnings about excluded toctree entries ] nitpicky = True nitpick_ignore += [ - ('ref', 'numpy-distutils-refguide'), - # The first ignore is not catpured without nitpicky = True. - # These three ignores are required once nitpicky = True is set. - ('py:mod', 'numpy.distutils'), + # The first ignore is not captured without nitpicky = True. ('py:class', 'Extension'), - ('py:class', 'numpy.distutils.misc_util.Configuration'), ] # If true, '()' will be appended to :func: etc. cross-reference text. diff --git a/doc/source/dev/depending_on_numpy.rst b/doc/source/dev/depending_on_numpy.rst index 7583dc9af84a..fcdae806fad3 100644 --- a/doc/source/dev/depending_on_numpy.rst +++ b/doc/source/dev/depending_on_numpy.rst @@ -142,8 +142,7 @@ recommend all packages depending on NumPy to follow the recommendations in NEP 29. For *run-time dependencies*, specify version bounds using -``install_requires`` in ``setup.py`` (assuming you use ``numpy.distutils`` or -``setuptools`` to build). +``install_requires`` in ``setup.py`` (assuming you use ``setuptools`` to build). Most libraries that rely on NumPy will not need to set an upper version bound: NumPy is careful to preserve backward-compatibility. diff --git a/doc/source/f2py/buildtools/distutils.rst b/doc/source/f2py/buildtools/distutils.rst index 87e17a811cd0..98416c8cdef9 100644 --- a/doc/source/f2py/buildtools/distutils.rst +++ b/doc/source/f2py/buildtools/distutils.rst @@ -9,76 +9,3 @@ Using via `numpy.distutils` ``distutils`` has been removed in favor of ``meson`` see :ref:`distutils-status-migration`. - -.. 
currentmodule:: numpy.distutils.core - -:mod:`numpy.distutils` is part of NumPy, and extends the standard Python -``distutils`` module to deal with Fortran sources and F2PY signature files, e.g. -compile Fortran sources, call F2PY to construct extension modules, etc. - -.. topic:: Example - - Consider the following ``setup_file.py`` for the ``fib`` and ``scalar`` - examples from :ref:`f2py-getting-started` section: - - .. literalinclude:: ./../code/setup_example.py - :language: python - - Running - - .. code-block:: bash - - python setup_example.py build - - will build two extension modules ``scalar`` and ``fib2`` to the - build directory. - -Extensions to ``distutils`` -=========================== - -:mod:`numpy.distutils` extends ``distutils`` with the following features: - -* :class:`Extension` class argument ``sources`` may contain Fortran source - files. In addition, the list ``sources`` may contain at most one - F2PY signature file, and in this case, the name of an Extension module must - match with the ``<modulename>`` used in signature file. It is - assumed that an F2PY signature file contains exactly one ``python - module`` block. - - If ``sources`` do not contain a signature file, then F2PY is used to scan - Fortran source files to construct wrappers to the Fortran codes. - - Additional options to the F2PY executable can be given using the - :class:`Extension` class argument ``f2py_options``. - -* The following new ``distutils`` commands are defined: - - ``build_src`` - to construct Fortran wrapper extension modules, among many other things. - ``config_fc`` - to change Fortran compiler options. - - Additionally, the ``build_ext`` and ``build_clib`` commands are also enhanced - to support Fortran sources. - - Run - - .. code-block:: bash - - python config_fc build_src build_ext --help - - to see available options for these commands. - -* When building Python packages containing Fortran sources, one - can choose different Fortran compilers by using the ``build_ext`` - command option ``--fcompiler=<Vendor>``. Here ``<Vendor>`` can be one of the - following names (on ``linux`` systems):: - - absoft compaq fujitsu g95 gnu gnu95 intel intele intelem lahey nag nagfor nv pathf95 pg vast - - See ``numpy_distutils/fcompiler.py`` for an up-to-date list of - supported compilers for different platforms, or run - - .. code-block:: bash - - python -m numpy.f2py -c --backend distutils --help-fcompiler diff --git a/doc/source/f2py/buildtools/index.rst b/doc/source/f2py/buildtools/index.rst index 37782e5ca74b..d5f3876e6dd5 100644 --- a/doc/source/f2py/buildtools/index.rst +++ b/doc/source/f2py/buildtools/index.rst @@ -11,7 +11,7 @@ with ``f2py``. The default build system for ``f2py`` has traditionally been through the enhanced ``numpy.distutils`` module. This module is based on ``distutils`` - which is removed in ``Python 3.12.0`` in **October 2023**. Like the rest of + which was removed in ``Python 3.12.0`` in **October 2023**. Like the rest of NumPy and SciPy, ``f2py`` uses ``meson`` now, see :ref:`distutils-status-migration` for some more details.
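With the ``numpy.distutils`` build instructions removed above, the same ``fib`` example is built through the meson backend instead, e.g. ``python -m numpy.f2py -c fib2.pyf fib1.f``. A minimal sketch of exercising the resulting extension module, assuming the canonical ``fib1.f``/``fib2.pyf`` sources from the F2PY "Getting Started" pages and a successful build in the current directory::

    import numpy as np

    import fib2  # extension module produced by the f2py/meson build above

    # fib2.pyf declares the result array as intent(out), so the wrapper
    # allocates the output and returns it, instead of filling a
    # caller-supplied array as the bare Fortran subroutine would.
    a = fib2.fib(8)
    print(a)  # the first 8 Fibonacci numbers, as float64
    assert np.allclose(a, [0.0, 1.0, 1.0, 2.0, 3.0, 5.0, 8.0, 13.0])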
diff --git a/doc/source/f2py/code/setup_example.py b/doc/source/f2py/code/setup_example.py deleted file mode 100644 index ef79ad1ecfb6..000000000000 --- a/doc/source/f2py/code/setup_example.py +++ /dev/null @@ -1,16 +0,0 @@ -from numpy.distutils.core import Extension - -ext1 = Extension(name='scalar', - sources=['scalar.f']) -ext2 = Extension(name='fib2', - sources=['fib2.pyf', 'fib1.f']) - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(name='f2py_example', - description="F2PY Users Guide examples", - author="Pearu Peterson", - author_email="pearu@cens.ioc.ee", - ext_modules=[ext1, ext2] - ) -# End of setup_example.py diff --git a/doc/source/f2py/f2py.getting-started.rst b/doc/source/f2py/f2py.getting-started.rst index e5746c49e94d..b6951b11da8d 100644 --- a/doc/source/f2py/f2py.getting-started.rst +++ b/doc/source/f2py/f2py.getting-started.rst @@ -22,15 +22,12 @@ following steps: * F2PY compiles all sources and builds an extension module containing the wrappers. - * In building the extension modules, F2PY uses ``meson`` and used to use - ``numpy.distutils`` For different build systems, see :ref:`f2py-bldsys`. + * In building the extension modules, F2PY uses ``meson``. For different + build systems, see :ref:`f2py-bldsys`. .. note:: - See :ref:`f2py-meson-distutils` for migration information. - - * Depending on your operating system, you may need to install the Python development headers (which provide the file ``Python.h``) separately. In Linux Debian-based distributions this package should be called ``python3-dev``, @@ -224,7 +221,7 @@ Fortran code, we can apply the wrapping steps one by one. .. literalinclude:: ./code/fib2.pyf :language: fortran -* Finally, we build the extension module with ``numpy.distutils`` by running: +* Finally, we build the extension module by running: :: diff --git a/doc/source/f2py/usage.rst b/doc/source/f2py/usage.rst index efcf2bec5266..ec936bb72e1c 100644 --- a/doc/source/f2py/usage.rst +++ b/doc/source/f2py/usage.rst @@ -101,10 +101,6 @@ Here ```` may also contain signature files. Among other options and ``;`` on Windows. In ``CMake`` this corresponds to using ``$``. -``--help-link []`` - List system resources found by ``numpy_distutils/system_info.py``. For - example, try ``f2py --help-link lapack_opt``. - 3. Building a module ~~~~~~~~~~~~~~~~~~~~ @@ -127,7 +123,7 @@ module is constructed by scanning all Fortran source codes for routine signatures, before proceeding to build the extension module. .. warning:: - From Python 3.12 onwards, ``distutils`` has been removed. Use environment + ``distutils`` has been removed. Use environment variables or native files to interact with ``meson`` instead. See its `FAQ `__ for more information. @@ -135,17 +131,13 @@ Among other options (see below) and options described for previous modes, the fo .. note:: - .. versionchanged:: 1.26.0 - There are now two separate build backends which can be used, ``distutils`` - and ``meson``. Users are **strongly** recommended to switch to ``meson`` - since it is the default above Python ``3.12``. + .. versionchanged:: 2.5.0 + The ``distutils`` backend has been removed. Common build flags: ``--backend `` - Specify the build backend for the compilation process. The supported backends - are ``meson`` and ``distutils``. If not specified, defaults to ``distutils``. - On Python 3.12 or higher, the default is ``meson``. + Legacy option, only ``meson`` is supported. 
``--f77flags=`` Specify F77 compiler flags ``--f90flags=`` @@ -165,39 +157,13 @@ Common build flags: Add directory ```` to the list of directories to be searched for ``-l``. -The ``meson`` specific flags are: - -``--dep `` **meson only** +``--dep `` Specify a meson dependency for the module. This may be passed multiple times for multiple dependencies. Dependencies are stored in a list for further processing. Example: ``--dep lapack --dep scalapack`` This will identify "lapack" and "scalapack" as dependencies and remove them from argv, leaving a dependencies list containing ["lapack", "scalapack"]. -The older ``distutils`` flags are: - -``--help-fcompiler`` **no meson** - List the available Fortran compilers. -``--fcompiler=`` **no meson** - Specify a Fortran compiler type by vendor. -``--f77exec=`` **no meson** - Specify the path to a F77 compiler -``--f90exec=`` **no meson** - Specify the path to a F90 compiler -``--opt=`` **no meson** - Specify optimization flags -``--arch=`` **no meson** - Specify architecture specific optimization flags -``--noopt`` **no meson** - Compile without optimization flags -``--noarch`` **no meson** - Compile without arch-dependent optimization flags -``link-`` **no meson** - Link the extension module with as defined by - ``numpy_distutils/system_info.py``. E.g. to link with optimized LAPACK - libraries (vecLib on MacOSX, ATLAS elsewhere), use ``--link-lapack_opt``. - See also ``--help-link`` switch. - .. note:: The ``f2py -c`` option must be applied either to an existing ``.pyf`` file @@ -295,39 +261,6 @@ When using ``numpy.f2py`` as a module, the following functions can be invoked. .. automodule:: numpy.f2py :members: -Automatic extension module generation -===================================== - -If you want to distribute your f2py extension module, then you only -need to include the .pyf file and the Fortran code. The distutils -extensions in NumPy allow you to define an extension module entirely -in terms of this interface file. A valid ``setup.py`` file allowing -distribution of the ``add.f`` module (as part of the package -``f2py_examples`` so that it would be loaded as ``f2py_examples.add``) is: - -.. code-block:: python - - def configuration(parent_package='', top_path=None) - from numpy.distutils.misc_util import Configuration - config = Configuration('f2py_examples',parent_package, top_path) - config.add_extension('add', sources=['add.pyf','add.f']) - return config - - if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) - -Installation of the new package is easy using:: - - pip install . - -assuming you have the proper permissions to write to the main site- -packages directory for the version of Python you are using. For the -resulting package to work, you need to create a file named ``__init__.py`` -(in the same directory as ``add.pyf``). Notice the extension module is -defined entirely in terms of the ``add.pyf`` and ``add.f`` files. The -conversion of the .pyf file to a .c file is handled by `numpy.distutils`. - Building with Meson (Examples) ============================== diff --git a/doc/source/f2py/windows/index.rst b/doc/source/f2py/windows/index.rst index ea0af7505ce7..aa7851da5dd2 100644 --- a/doc/source/f2py/windows/index.rst +++ b/doc/source/f2py/windows/index.rst @@ -71,12 +71,6 @@ Cygwin (FOSS) Cygwin is meant to compile UNIX software on Windows, instead of building native Windows programs. This means cross compilation is required. 
-The compilation suites described so far are compatible with the `now -deprecated`_ ``np.distutils`` build backend which is exposed by the F2PY CLI. -Additional build system usage (``meson``, ``cmake``) as described in -:ref:`f2py-bldsys` allows for a more flexible set of compiler -backends including: - Intel oneAPI The newer Intel compilers (``ifx``, ``icx``) are based on LLVM and can be used for native compilation. Licensing requirements can be onerous. diff --git a/doc/source/reference/c-api/coremath.rst b/doc/source/reference/c-api/coremath.rst index cc46ba744a49..b2e3af4c0944 100644 --- a/doc/source/reference/c-api/coremath.rst +++ b/doc/source/reference/c-api/coremath.rst @@ -317,20 +317,6 @@ The generic steps to take are: machine. Otherwise you pick up a static library built for the wrong architecture. -When you build with ``numpy.distutils`` (deprecated), then use this in your ``setup.py``: - - .. hidden in a comment so as to be included in refguide but not rendered documentation - >>> import numpy.distutils.misc_util - >>> config = np.distutils.misc_util.Configuration(None, '', '.') - >>> with open('foo.c', 'w') as f: pass - - >>> from numpy.distutils.misc_util import get_info - >>> info = get_info('npymath') - >>> _ = config.add_extension('foo', sources=['foo.c'], extra_info=info) - -In other words, the usage of ``info`` is exactly the same as when using -``blas_info`` and co. - When you are building with `Meson `__, use:: # Note that this will get easier in the future, when Meson has diff --git a/doc/source/reference/distutils.rst b/doc/source/reference/distutils.rst deleted file mode 100644 index 714c8836322e..000000000000 --- a/doc/source/reference/distutils.rst +++ /dev/null @@ -1,219 +0,0 @@ -.. _numpy-distutils-refguide: - -********* -Packaging -********* - -.. module:: numpy.distutils - -.. warning:: - - ``numpy.distutils`` is deprecated, and will be removed for - Python >= 3.12. For more details, see :ref:`distutils-status-migration` - -.. warning:: - - Note that ``setuptools`` does major releases often and those may contain - changes that break :mod:`numpy.distutils`, which will *not* be updated anymore - for new ``setuptools`` versions. It is therefore recommended to set an - upper version bound in your build configuration for the last known version - of ``setuptools`` that works with your build. - -NumPy provides enhanced distutils functionality to make it easier to -build and install sub-packages, auto-generate code, and extension -modules that use Fortran-compiled libraries. A useful :class:`Configuration -` class is also provided in -:mod:`numpy.distutils.misc_util` that can make it easier to construct -keyword arguments to pass to the setup function (by passing the -dictionary obtained from the todict() method of the class). More -information is available in the :ref:`distutils-user-guide`. - -The choice and location of linked libraries such as BLAS and LAPACK as well as -include paths and other such build options can be specified in a ``site.cfg`` -file located in the NumPy root repository or a ``.numpy-site.cfg`` file in your -home directory. See the ``site.cfg.example`` example file included in the NumPy -repository or sdist for documentation. - -.. index:: - single: distutils - - -Modules in :mod:`numpy.distutils` -================================= -.. toctree:: - :maxdepth: 2 - - distutils/misc_util - - -.. currentmodule:: numpy.distutils - -.. 
autosummary:: - :toctree: generated/ - - ccompiler - ccompiler_opt - cpuinfo.cpu - core.Extension - exec_command - log.set_verbosity - system_info.get_info - system_info.get_standard_file - - -Configuration class -=================== - -.. currentmodule:: numpy.distutils.misc_util - -.. class:: Configuration(package_name=None, parent_name=None, top_path=None, package_path=None, **attrs) - - Construct a configuration instance for the given package name. If - *parent_name* is not None, then construct the package as a - sub-package of the *parent_name* package. If *top_path* and - *package_path* are None then they are assumed equal to - the path of the file this instance was created in. The setup.py - files in the numpy distribution are good examples of how to use - the :class:`Configuration` instance. - - .. automethod:: todict - - .. automethod:: get_distribution - - .. automethod:: get_subpackage - - .. automethod:: add_subpackage - - .. automethod:: add_data_files - - .. automethod:: add_data_dir - - .. automethod:: add_include_dirs - - .. automethod:: add_headers - - .. automethod:: add_extension - - .. automethod:: add_library - - .. automethod:: add_scripts - - .. automethod:: add_installed_library - - .. automethod:: add_npy_pkg_config - - .. automethod:: paths - - .. automethod:: get_config_cmd - - .. automethod:: get_build_temp_dir - - .. automethod:: have_f77c - - .. automethod:: have_f90c - - .. automethod:: get_version - - .. automethod:: make_svn_version_py - - .. automethod:: make_config_py - - .. automethod:: get_info - -Building installable C libraries -================================ - -Conventional C libraries (installed through `add_library`) are not installed, and -are just used during the build (they are statically linked). An installable C -library is a pure C library, which does not depend on the python C runtime, and -is installed such that it may be used by third-party packages. To build and -install the C library, you just use the method `add_installed_library` instead of -`add_library`, which takes the same arguments except for an additional -``install_dir`` argument:: - - .. hidden in a comment so as to be included in refguide but not rendered documentation - >>> import numpy.distutils.misc_util - >>> config = np.distutils.misc_util.Configuration(None, '', '.') - >>> with open('foo.c', 'w') as f: pass - - >>> config.add_installed_library('foo', sources=['foo.c'], install_dir='lib') - -npy-pkg-config files --------------------- - -To make the necessary build options available to third parties, you could use -the `npy-pkg-config` mechanism implemented in `numpy.distutils`. This mechanism is -based on a .ini file which contains all the options. A .ini file is very -similar to .pc files as used by the pkg-config unix utility:: - - [meta] - Name: foo - Version: 1.0 - Description: foo library - - [variables] - prefix = /home/user/local - libdir = ${prefix}/lib - includedir = ${prefix}/include - - [default] - cflags = -I${includedir} - libs = -L${libdir} -lfoo - -Generally, the file needs to be generated during the build, since it needs some -information known at build time only (e.g. prefix). This is mostly automatic if -one uses the `Configuration` method `add_npy_pkg_config`. 
Assuming we have a -template file foo.ini.in as follows:: - - [meta] - Name: foo - Version: @version@ - Description: foo library - - [variables] - prefix = @prefix@ - libdir = ${prefix}/lib - includedir = ${prefix}/include - - [default] - cflags = -I${includedir} - libs = -L${libdir} -lfoo - -and the following code in setup.py:: - - >>> config.add_installed_library('foo', sources=['foo.c'], install_dir='lib') - >>> subst = {'version': '1.0'} - >>> config.add_npy_pkg_config('foo.ini.in', 'lib', subst_dict=subst) - -This will install the file foo.ini into the directory package_dir/lib, and the -foo.ini file will be generated from foo.ini.in, where each ``@version@`` will be -replaced by ``subst_dict['version']``. The dictionary has an additional prefix -substitution rule automatically added, which contains the install prefix (since -this is not easy to get from setup.py). - -Reusing a C library from another package ----------------------------------------- - -Info are easily retrieved from the `get_info` function in -`numpy.distutils.misc_util`:: - - >>> info = np.distutils.misc_util.get_info('npymath') - >>> config.add_extension('foo', sources=['foo.c'], extra_info=info) - - - -An additional list of paths to look for .ini files can be given to `get_info`. - -Conversion of ``.src`` files -============================ - -NumPy distutils supports automatic conversion of source files named -.src. This facility can be used to maintain very similar -code blocks requiring only simple changes between blocks. During the -build phase of setup, if a template file named .src is -encountered, a new file named is constructed from the -template and placed in the build directory to be used instead. Two -forms of template conversion are supported. The first form occurs for -files named .ext.src where ext is a recognized Fortran -extension (f, f90, f95, f77, for, ftn, pyf). The second form is used -for all other cases. See :ref:`templating`. diff --git a/doc/source/reference/distutils/misc_util.rst b/doc/source/reference/distutils/misc_util.rst deleted file mode 100644 index bbb83a5ab061..000000000000 --- a/doc/source/reference/distutils/misc_util.rst +++ /dev/null @@ -1,7 +0,0 @@ -distutils.misc_util -=================== - -.. automodule:: numpy.distutils.misc_util - :members: - :undoc-members: - :exclude-members: Configuration diff --git a/doc/source/reference/distutils_guide.rst b/doc/source/reference/distutils_guide.rst deleted file mode 100644 index 0a815797ac30..000000000000 --- a/doc/source/reference/distutils_guide.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. _distutils-user-guide: - -``numpy.distutils`` user guide -============================== - -.. warning:: - - ``numpy.distutils`` is deprecated, and will be removed for - Python >= 3.12. For more details, see :ref:`distutils-status-migration` - - -.. include:: ../../DISTUTILS.rst - :start-line: 6 diff --git a/doc/source/reference/distutils_status_migration.rst b/doc/source/reference/distutils_status_migration.rst index 366b0e67f06a..2fea390a7a8b 100644 --- a/doc/source/reference/distutils_status_migration.rst +++ b/doc/source/reference/distutils_status_migration.rst @@ -3,16 +3,7 @@ Status of ``numpy.distutils`` and migration advice ================================================== -`numpy.distutils` has been deprecated in NumPy ``1.23.0``. It will be removed -for Python 3.12; for Python <= 3.11 it will not be removed until 2 years after -the Python 3.12 release (Oct 2025). - - -.. 
warning:: - - ``numpy.distutils`` is only tested with ``setuptools < 60.0``, newer - versions may break. See :ref:`numpy-setuptools-interaction` for details. - +`numpy.distutils` has been removed in NumPy ``2.5.0``. Migration advice ---------------- @@ -47,7 +38,7 @@ migrating. For more details about the SciPy migration, see: - `RFC: switch to Meson as a build system `__ - `Tracking issue for Meson support `__ -NumPy will migrate to Meson for the 1.26 release. +NumPy migrated to Meson for the 1.26 release. Moving to CMake / scikit-build @@ -73,15 +64,12 @@ present in ``setuptools``: - Support for a few other scientific libraries, like FFTW and UMFPACK - Better MinGW support - Per-compiler build flag customization (e.g. `-O3` and `SSE2` flags are default) -- a simple user build config system, see `site.cfg.example `__ +- a simple user build config system, see `site.cfg.example `__ - SIMD intrinsics support - Support for the NumPy-specific ``.src`` templating format for ``.c``/``.h`` files -The most widely used feature is nested ``setup.py`` files. This feature may -perhaps still be ported to ``setuptools`` in the future (it needs a volunteer -though, see `gh-18588 `__ for -status). Projects only using that feature could move to ``setuptools`` after -that is done. In case a project uses only a couple of ``setup.py`` files, it +The most widely used feature is nested ``setup.py`` files. In case a project +uses only a couple of ``setup.py`` files, it also could make sense to simply aggregate all the content of those files into a single ``setup.py`` file and then move to ``setuptools``. This involves dropping all ``Configuration`` instances, and using ``Extension`` instead. @@ -103,22 +91,10 @@ For more details, see the .. _numpy-setuptools-interaction: -Interaction of ``numpy.distutils`` with ``setuptools`` +Versioning ``setuptools`` ------------------------------------------------------ -It is recommended to use ``setuptools < 60.0``. Newer versions may work, but -are not guaranteed to. The reason for this is that ``setuptools`` 60.0 enabled -a vendored copy of ``distutils``, including backwards incompatible changes that -affect some functionality in ``numpy.distutils``. - -If you are using only simple Cython or C extensions with minimal use of -``numpy.distutils`` functionality beyond nested ``setup.py`` files (its most -popular feature, see :class:`Configuration `), -then latest ``setuptools`` is likely to continue working. In case of problems, -you can also try ``SETUPTOOLS_USE_DISTUTILS=stdlib`` to avoid the backwards -incompatible changes in ``setuptools``. - -Whatever you do, it is recommended to put an upper bound on your ``setuptools`` +It is recommended to put an upper bound on your ``setuptools`` build requirement in ``pyproject.toml`` to avoid future breakage - see :ref:`for-downstream-package-authors`. diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst index aa6c692d6b2b..2a7ac83a96ca 100644 --- a/doc/source/reference/index.rst +++ b/doc/source/reference/index.rst @@ -40,7 +40,6 @@ Python API :maxdepth: 1 typing - distutils C API ===== @@ -63,7 +62,6 @@ Other topics security testing distutils_status_migration - distutils_guide swig diff --git a/doc/source/reference/module_structure.rst b/doc/source/reference/module_structure.rst index 98e3dda54e7b..5c6d8139b055 100644 --- a/doc/source/reference/module_structure.rst +++ b/doc/source/reference/module_structure.rst @@ -44,7 +44,6 @@ Prefer not to use these namespaces for new code. 
There are better alternatives and/or this code is deprecated or isn't reliable. - :ref:`numpy.char ` - legacy string functionality, only for fixed-width strings -- :ref:`numpy.distutils ` (deprecated) - build system support - :ref:`numpy.f2py ` - Fortran binding generation (usually used from the command line only) - :ref:`numpy.ma ` - masked arrays (not very reliable, needs an overhaul) - :ref:`numpy.matlib ` (pending deprecation) - functions supporting ``matrix`` instances @@ -70,7 +69,6 @@ and/or this code is deprecated or isn't reliable. numpy.rec numpy.version numpy.char - numpy.distutils numpy.f2py <../f2py/index> numpy.ma numpy.matlib diff --git a/doc/source/try_examples.json b/doc/source/try_examples.json index 305860dced55..823d4a5d1e82 100644 --- a/doc/source/try_examples.json +++ b/doc/source/try_examples.json @@ -1,7 +1,6 @@ { "global_min_height": "400px", "ignore_patterns": [ - "distutils.html*", "reference\/typing.html*", "numpy.__array_namespace_info__.html*" ] diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst index 5f620fa36cef..6909db8cb7e2 100644 --- a/doc/source/user/absolute_beginners.rst +++ b/doc/source/user/absolute_beginners.rst @@ -1348,7 +1348,7 @@ For example:: With a single iterable argument, return its biggest item. The default keyword-only argument specifies an object to return if the provided iterable is empty. - With two or more arguments, return the largest argument. + With two or more ...arguments, return the largest argument. diff --git a/doc/source/user/c-info.python-as-glue.rst b/doc/source/user/c-info.python-as-glue.rst index c699760fdebd..20d3f1bb5937 100644 --- a/doc/source/user/c-info.python-as-glue.rst +++ b/doc/source/user/c-info.python-as-glue.rst @@ -178,13 +178,7 @@ write in a ``setup.py`` file: Adding the NumPy include directory is, of course, only necessary if you are using NumPy arrays in the extension module (which is what we -assume you are using Cython for). The distutils extensions in NumPy -also include support for automatically producing the extension-module -and linking it from a ``.pyx`` file. It works so that if the user does -not have Cython installed, then it looks for a file with the same -file-name but a ``.c`` extension which it then uses instead of trying -to produce the ``.c`` file again. - +assume you are using Cython for). If you just use Cython to compile a standard Python module, then you will get a C extension module that typically runs a bit faster than the equivalent Python module. 
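For reference, a complete ``setup.py`` along the lines described above might look as
follows. This is a sketch assuming plain ``setuptools`` plus ``Cython.Build.cythonize``
(not ``numpy.distutils``); the module and file names ``mymod`` and ``mymod.pyx`` are
hypothetical.

.. code-block:: python

    import numpy
    from setuptools import Extension, setup
    from Cython.Build import cythonize

    setup(
        name="mymod",
        ext_modules=cythonize(
            Extension(
                "mymod",
                sources=["mymod.pyx"],
                # Only needed when the extension uses NumPy arrays:
                include_dirs=[numpy.get_include()],
            )
        ),
    )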
Further speed increases can be gained by using diff --git a/meson_cpu/arm/meson.build b/meson_cpu/arm/meson.build index 5478e52cdcea..92d241883795 100644 --- a/meson_cpu/arm/meson.build +++ b/meson_cpu/arm/meson.build @@ -2,21 +2,21 @@ source_root = meson.project_source_root() mod_features = import('features') NEON = mod_features.new( 'NEON', 1, - test_code: files(source_root + '/numpy/distutils/checks/cpu_neon.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_neon.c')[0] ) NEON_FP16 = mod_features.new( 'NEON_FP16', 2, implies: NEON, - test_code: files(source_root + '/numpy/distutils/checks/cpu_neon_fp16.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_neon_fp16.c')[0] ) # FMA NEON_VFPV4 = mod_features.new( 'NEON_VFPV4', 3, implies: NEON_FP16, - test_code: files(source_root + '/numpy/distutils/checks/cpu_neon_vfpv4.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_neon_vfpv4.c')[0] ) # Advanced SIMD ASIMD = mod_features.new( 'ASIMD', 4, implies: NEON_VFPV4, detect: {'val': 'ASIMD', 'match': 'NEON.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_asimd.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_asimd.c')[0] ) cpu_family = host_machine.cpu_family() if cpu_family == 'aarch64' @@ -37,25 +37,25 @@ endif ASIMDHP = mod_features.new( 'ASIMDHP', 5, implies: ASIMD, args: {'val': '-march=armv8.2-a+fp16', 'match': '-march=.*', 'mfilter': '\+.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_asimdhp.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_asimdhp.c')[0] ) ## ARMv8.2 dot product ASIMDDP = mod_features.new( 'ASIMDDP', 6, implies: ASIMD, args: {'val': '-march=armv8.2-a+dotprod', 'match': '-march=.*', 'mfilter': '\+.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_asimddp.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_asimddp.c')[0] ) ## ARMv8.2 Single & half-precision Multiply ASIMDFHM = mod_features.new( 'ASIMDFHM', 7, implies: ASIMDHP, args: {'val': '-march=armv8.2-a+fp16fml', 'match': '-march=.*', 'mfilter': '\+.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_asimdfhm.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_asimdfhm.c')[0] ) ## Scalable Vector Extensions (SVE) SVE = mod_features.new( 'SVE', 8, implies: ASIMDHP, args: {'val': '-march=armv8.2-a+sve', 'match': '-march=.*', 'mfilter': '\+.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_sve.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_sve.c')[0] ) # TODO: Add support for MSVC ARM_FEATURES = { diff --git a/meson_cpu/loongarch64/meson.build b/meson_cpu/loongarch64/meson.build index 570e3bfcda01..d59b5682d646 100644 --- a/meson_cpu/loongarch64/meson.build +++ b/meson_cpu/loongarch64/meson.build @@ -3,6 +3,6 @@ mod_features = import('features') LSX = mod_features.new( 'LSX', 1, args: ['-mlsx'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_lsx.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_lsx.c')[0] ) LOONGARCH64_FEATURES = {'LSX': LSX} diff --git a/meson_cpu/ppc64/meson.build b/meson_cpu/ppc64/meson.build index 57fe47140429..8f2e8373c77c 100644 --- a/meson_cpu/ppc64/meson.build +++ b/meson_cpu/ppc64/meson.build @@ -4,9 +4,9 @@ compiler_id = meson.get_compiler('c').get_id() VSX = mod_features.new( 'VSX', 1, args: '-mvsx', - test_code: files(source_root + '/numpy/distutils/checks/cpu_vsx.c')[0], + 
test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vsx.c')[0], extra_tests: { - 'VSX_ASM': files(source_root + '/numpy/distutils/checks/extra_vsx_asm.c')[0] + 'VSX_ASM': files(source_root + '/numpy/_core/src/_simd/checks/extra_vsx_asm.c')[0] } ) if compiler_id == 'clang' @@ -15,7 +15,7 @@ endif VSX2 = mod_features.new( 'VSX2', 2, implies: VSX, args: {'val': '-mcpu=power8', 'match': '.*vsx'}, detect: {'val': 'VSX2', 'match': 'VSX'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_vsx2.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vsx2.c')[0], ) # VSX2 is hardware baseline feature on ppc64le since the first little-endian # support was part of Power8 @@ -25,17 +25,17 @@ endif VSX3 = mod_features.new( 'VSX3', 3, implies: VSX2, args: {'val': '-mcpu=power9', 'match': '.*(?:mcpu=|vsx).*'}, detect: {'val': 'VSX3', 'match': 'VSX.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_vsx3.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vsx3.c')[0], extra_tests: { - 'VSX3_HALF_DOUBLE': files(source_root + '/numpy/distutils/checks/extra_vsx3_half_double.c')[0] + 'VSX3_HALF_DOUBLE': files(source_root + '/numpy/_core/src/_simd/checks/extra_vsx3_half_double.c')[0] } ) VSX4 = mod_features.new( 'VSX4', 4, implies: VSX3, args: {'val': '-mcpu=power10', 'match': '.*(?:mcpu=|vsx).*'}, detect: {'val': 'VSX4', 'match': 'VSX.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_vsx4.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vsx4.c')[0], extra_tests: { - 'VSX4_MMA': files(source_root + '/numpy/distutils/checks/extra_vsx4_mma.c')[0] + 'VSX4_MMA': files(source_root + '/numpy/_core/src/_simd/checks/extra_vsx4_mma.c')[0] } ) PPC64_FEATURES = {'VSX': VSX, 'VSX2': VSX2, 'VSX3': VSX3, 'VSX4': VSX4} diff --git a/meson_cpu/riscv64/meson.build b/meson_cpu/riscv64/meson.build index 3f930f39e27e..fdab67d246d6 100644 --- a/meson_cpu/riscv64/meson.build +++ b/meson_cpu/riscv64/meson.build @@ -3,6 +3,6 @@ mod_features = import('features') RVV = mod_features.new( 'RVV', 1, args: ['-march=rv64gcv'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_rvv.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_rvv.c')[0], ) RV64_FEATURES = {'RVV': RVV} diff --git a/meson_cpu/s390x/meson.build b/meson_cpu/s390x/meson.build index b7a420c27f0d..282ec056e78e 100644 --- a/meson_cpu/s390x/meson.build +++ b/meson_cpu/s390x/meson.build @@ -3,16 +3,16 @@ mod_features = import('features') VX = mod_features.new( 'VX', 1, args: ['-mzvector', '-march=arch11'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_vx.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vx.c')[0], ) VXE = mod_features.new( 'VXE', 2, implies: VX, args: {'val': '-march=arch12', 'match': '-march=.*'}, detect: {'val': 'VXE', 'match': '\\bvxe\\b'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_vxe.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vxe.c')[0], ) VXE2 = mod_features.new( 'VXE2', 3, implies: VXE, args: {'val': '-march=arch13', 'match': '-march=.*'}, detect: {'val': 'VXE2', 'match': '\\bvxe2\\b'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_vxe2.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vxe2.c')[0], ) S390X_FEATURES = {'VX': VX, 'VXE': VXE, 'VXE2': VXE2} diff --git a/meson_cpu/x86/meson.build b/meson_cpu/x86/meson.build index add073376d98..412803e5ddbb 100644 
--- a/meson_cpu/x86/meson.build +++ b/meson_cpu/x86/meson.build @@ -43,14 +43,14 @@ AVX512_ICL = mod_features.new( group: ['AVX512VBMI', 'AVX512VBMI2', 'AVX512VNNI', 'AVX512BITALG', 'AVX512VPOPCNTDQ', 'AVX512IFMA', 'VAES', 'GFNI', 'VPCLMULQDQ'], detect: 'AVX512_ICL', - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_icl.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_avx512_icl.c')[0] ) AVX512_SPR = mod_features.new( 'AVX512_SPR', 35, implies: AVX512_ICL, args: ['-mavx512fp16', '-mavx512bf16'], group: ['AVX512FP16', 'AVX512BF16'], detect: 'AVX512_SPR', - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_spr.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_avx512_spr.c')[0] ) # Specializations for non unix-like compilers diff --git a/numpy/__init__.py b/numpy/__init__.py index ef7c1ed7678a..42a2e8896d68 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -52,9 +52,6 @@ Polynomial tools testing NumPy testing tools -distutils - Enhancements to distutils with support for - Fortran compilers support and more (for Python <= 3.11) Utilities --------- @@ -624,8 +621,8 @@ from .matrixlib import asmatrix, bmat, matrix # public submodules are imported lazily, therefore are accessible from - # __getattr__. Note that `distutils` (deprecated) and `array_api` - # (experimental label) are not added here, because `from numpy import *` + # __getattr__. Note that `array_api` + # (experimental label) is not added here, because `from numpy import *` # must not raise any warnings - that's too disruptive. __numpy_submodules__ = { "linalg", "fft", "dtypes", "random", "polynomial", "ma", @@ -757,12 +754,8 @@ def __getattr__(attr): import numpy.strings as strings return strings elif attr == "distutils": - if 'distutils' in __numpy_submodules__: - import numpy.distutils as distutils - return distutils - else: - raise AttributeError("`numpy.distutils` is not available from " - "Python 3.12 onwards", name=None) + raise AttributeError("`numpy.distutils` is not available from " + "numpy 2.5 onwards", name=None) if attr in __future_scalars__: # And future warnings for those that will change, but also give @@ -797,7 +790,7 @@ def __dir__(): ) public_symbols -= { "matrixlib", "matlib", "tests", "conftest", "version", - "distutils", "array_api" + "array_api" } return list(public_symbols) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 975a04857db7..3c459952d6b4 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -217,8 +217,6 @@ from numpy import ( matrixlib as matrixlib, version as version, ) -if sys.version_info < (3, 12): - from numpy import distutils as distutils from numpy._core.records import ( record, diff --git a/numpy/distutils/conv_template.py b/numpy/_build_utils/conv_template.py similarity index 100% rename from numpy/distutils/conv_template.py rename to numpy/_build_utils/conv_template.py diff --git a/numpy/_build_utils/process_src_template.py b/numpy/_build_utils/process_src_template.py index 8bd1ea872a42..f934c222e838 100644 --- a/numpy/_build_utils/process_src_template.py +++ b/numpy/_build_utils/process_src_template.py @@ -5,11 +5,11 @@ def get_processor(): - # Convoluted because we can't import from numpy.distutils + # Convoluted because we can't import from numpy # (numpy is not yet built) conv_template_path = os.path.join( os.path.dirname(__file__), - '..', 'distutils', 'conv_template.py' + 'conv_template.py' ) spec = importlib.util.spec_from_file_location( 'conv_template', conv_template_path 
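The ``get_processor`` bootstrap above relies on a standard-library pattern: loading a
module from an explicit file path, so the code generators can run before numpy itself
is importable. A self-contained sketch of that pattern (the path matches the rename in
this patch, but treat it as illustrative):

.. code-block:: python

    import importlib.util

    spec = importlib.util.spec_from_file_location(
        "conv_template", "numpy/_build_utils/conv_template.py")
    conv_template = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(conv_template)
    # conv_template is now usable without importing the
    # (not-yet-built) numpy package.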
diff --git a/numpy/_core/code_generators/genapi.py b/numpy/_core/code_generators/genapi.py index e97177e46153..1087d176816b 100644 --- a/numpy/_core/code_generators/genapi.py +++ b/numpy/_core/code_generators/genapi.py @@ -17,11 +17,11 @@ def get_processor(): - # Convoluted because we can't import from numpy.distutils + # Convoluted because we can't import from numpy # (numpy is not yet built) conv_template_path = os.path.join( os.path.dirname(__file__), - '..', '..', 'distutils', 'conv_template.py' + '..', '..', '_build_utils', 'conv_template.py' ) spec = importlib.util.spec_from_file_location( 'conv_template', conv_template_path diff --git a/numpy/_core/code_generators/numpy_api.py b/numpy/_core/code_generators/numpy_api.py index ac108aa20370..c2b471c71757 100644 --- a/numpy/_core/code_generators/numpy_api.py +++ b/numpy/_core/code_generators/numpy_api.py @@ -19,7 +19,7 @@ def get_annotations(): - # Convoluted because we can't import from numpy.distutils + # Convoluted because we can't import numpy # (numpy is not yet built) genapi_py = os.path.join(os.path.dirname(__file__), 'genapi.py') spec = importlib.util.spec_from_file_location('conv_template', genapi_py) diff --git a/numpy/distutils/checks/cpu_asimd.c b/numpy/_core/src/_simd/checks/cpu_asimd.c similarity index 100% rename from numpy/distutils/checks/cpu_asimd.c rename to numpy/_core/src/_simd/checks/cpu_asimd.c diff --git a/numpy/distutils/checks/cpu_asimddp.c b/numpy/_core/src/_simd/checks/cpu_asimddp.c similarity index 100% rename from numpy/distutils/checks/cpu_asimddp.c rename to numpy/_core/src/_simd/checks/cpu_asimddp.c diff --git a/numpy/distutils/checks/cpu_asimdfhm.c b/numpy/_core/src/_simd/checks/cpu_asimdfhm.c similarity index 100% rename from numpy/distutils/checks/cpu_asimdfhm.c rename to numpy/_core/src/_simd/checks/cpu_asimdfhm.c diff --git a/numpy/distutils/checks/cpu_asimdhp.c b/numpy/_core/src/_simd/checks/cpu_asimdhp.c similarity index 100% rename from numpy/distutils/checks/cpu_asimdhp.c rename to numpy/_core/src/_simd/checks/cpu_asimdhp.c diff --git a/numpy/distutils/checks/cpu_avx.c b/numpy/_core/src/_simd/checks/cpu_avx.c similarity index 100% rename from numpy/distutils/checks/cpu_avx.c rename to numpy/_core/src/_simd/checks/cpu_avx.c diff --git a/numpy/distutils/checks/cpu_avx2.c b/numpy/_core/src/_simd/checks/cpu_avx2.c similarity index 100% rename from numpy/distutils/checks/cpu_avx2.c rename to numpy/_core/src/_simd/checks/cpu_avx2.c diff --git a/numpy/distutils/checks/cpu_avx512_clx.c b/numpy/_core/src/_simd/checks/cpu_avx512_clx.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_clx.c rename to numpy/_core/src/_simd/checks/cpu_avx512_clx.c diff --git a/numpy/distutils/checks/cpu_avx512_cnl.c b/numpy/_core/src/_simd/checks/cpu_avx512_cnl.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_cnl.c rename to numpy/_core/src/_simd/checks/cpu_avx512_cnl.c diff --git a/numpy/distutils/checks/cpu_avx512_icl.c b/numpy/_core/src/_simd/checks/cpu_avx512_icl.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_icl.c rename to numpy/_core/src/_simd/checks/cpu_avx512_icl.c diff --git a/numpy/distutils/checks/cpu_avx512_knl.c b/numpy/_core/src/_simd/checks/cpu_avx512_knl.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_knl.c rename to numpy/_core/src/_simd/checks/cpu_avx512_knl.c diff --git a/numpy/distutils/checks/cpu_avx512_knm.c b/numpy/_core/src/_simd/checks/cpu_avx512_knm.c similarity index 100% rename from 
numpy/distutils/checks/cpu_avx512_knm.c rename to numpy/_core/src/_simd/checks/cpu_avx512_knm.c diff --git a/numpy/distutils/checks/cpu_avx512_skx.c b/numpy/_core/src/_simd/checks/cpu_avx512_skx.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_skx.c rename to numpy/_core/src/_simd/checks/cpu_avx512_skx.c diff --git a/numpy/distutils/checks/cpu_avx512_spr.c b/numpy/_core/src/_simd/checks/cpu_avx512_spr.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_spr.c rename to numpy/_core/src/_simd/checks/cpu_avx512_spr.c diff --git a/numpy/distutils/checks/cpu_avx512cd.c b/numpy/_core/src/_simd/checks/cpu_avx512cd.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512cd.c rename to numpy/_core/src/_simd/checks/cpu_avx512cd.c diff --git a/numpy/distutils/checks/cpu_avx512f.c b/numpy/_core/src/_simd/checks/cpu_avx512f.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512f.c rename to numpy/_core/src/_simd/checks/cpu_avx512f.c diff --git a/numpy/distutils/checks/cpu_f16c.c b/numpy/_core/src/_simd/checks/cpu_f16c.c similarity index 100% rename from numpy/distutils/checks/cpu_f16c.c rename to numpy/_core/src/_simd/checks/cpu_f16c.c diff --git a/numpy/distutils/checks/cpu_fma3.c b/numpy/_core/src/_simd/checks/cpu_fma3.c similarity index 100% rename from numpy/distutils/checks/cpu_fma3.c rename to numpy/_core/src/_simd/checks/cpu_fma3.c diff --git a/numpy/distutils/checks/cpu_fma4.c b/numpy/_core/src/_simd/checks/cpu_fma4.c similarity index 100% rename from numpy/distutils/checks/cpu_fma4.c rename to numpy/_core/src/_simd/checks/cpu_fma4.c diff --git a/numpy/distutils/checks/cpu_lsx.c b/numpy/_core/src/_simd/checks/cpu_lsx.c similarity index 100% rename from numpy/distutils/checks/cpu_lsx.c rename to numpy/_core/src/_simd/checks/cpu_lsx.c diff --git a/numpy/distutils/checks/cpu_neon.c b/numpy/_core/src/_simd/checks/cpu_neon.c similarity index 100% rename from numpy/distutils/checks/cpu_neon.c rename to numpy/_core/src/_simd/checks/cpu_neon.c diff --git a/numpy/distutils/checks/cpu_neon_fp16.c b/numpy/_core/src/_simd/checks/cpu_neon_fp16.c similarity index 100% rename from numpy/distutils/checks/cpu_neon_fp16.c rename to numpy/_core/src/_simd/checks/cpu_neon_fp16.c diff --git a/numpy/distutils/checks/cpu_neon_vfpv4.c b/numpy/_core/src/_simd/checks/cpu_neon_vfpv4.c similarity index 100% rename from numpy/distutils/checks/cpu_neon_vfpv4.c rename to numpy/_core/src/_simd/checks/cpu_neon_vfpv4.c diff --git a/numpy/distutils/checks/cpu_popcnt.c b/numpy/_core/src/_simd/checks/cpu_popcnt.c similarity index 100% rename from numpy/distutils/checks/cpu_popcnt.c rename to numpy/_core/src/_simd/checks/cpu_popcnt.c diff --git a/numpy/distutils/checks/cpu_rvv.c b/numpy/_core/src/_simd/checks/cpu_rvv.c similarity index 100% rename from numpy/distutils/checks/cpu_rvv.c rename to numpy/_core/src/_simd/checks/cpu_rvv.c diff --git a/numpy/distutils/checks/cpu_sse.c b/numpy/_core/src/_simd/checks/cpu_sse.c similarity index 100% rename from numpy/distutils/checks/cpu_sse.c rename to numpy/_core/src/_simd/checks/cpu_sse.c diff --git a/numpy/distutils/checks/cpu_sse2.c b/numpy/_core/src/_simd/checks/cpu_sse2.c similarity index 100% rename from numpy/distutils/checks/cpu_sse2.c rename to numpy/_core/src/_simd/checks/cpu_sse2.c diff --git a/numpy/distutils/checks/cpu_sse3.c b/numpy/_core/src/_simd/checks/cpu_sse3.c similarity index 100% rename from numpy/distutils/checks/cpu_sse3.c rename to numpy/_core/src/_simd/checks/cpu_sse3.c diff --git 
a/numpy/distutils/checks/cpu_sse41.c b/numpy/_core/src/_simd/checks/cpu_sse41.c similarity index 100% rename from numpy/distutils/checks/cpu_sse41.c rename to numpy/_core/src/_simd/checks/cpu_sse41.c diff --git a/numpy/distutils/checks/cpu_sse42.c b/numpy/_core/src/_simd/checks/cpu_sse42.c similarity index 100% rename from numpy/distutils/checks/cpu_sse42.c rename to numpy/_core/src/_simd/checks/cpu_sse42.c diff --git a/numpy/distutils/checks/cpu_ssse3.c b/numpy/_core/src/_simd/checks/cpu_ssse3.c similarity index 100% rename from numpy/distutils/checks/cpu_ssse3.c rename to numpy/_core/src/_simd/checks/cpu_ssse3.c diff --git a/numpy/distutils/checks/cpu_sve.c b/numpy/_core/src/_simd/checks/cpu_sve.c similarity index 100% rename from numpy/distutils/checks/cpu_sve.c rename to numpy/_core/src/_simd/checks/cpu_sve.c diff --git a/numpy/distutils/checks/cpu_vsx.c b/numpy/_core/src/_simd/checks/cpu_vsx.c similarity index 100% rename from numpy/distutils/checks/cpu_vsx.c rename to numpy/_core/src/_simd/checks/cpu_vsx.c diff --git a/numpy/distutils/checks/cpu_vsx2.c b/numpy/_core/src/_simd/checks/cpu_vsx2.c similarity index 100% rename from numpy/distutils/checks/cpu_vsx2.c rename to numpy/_core/src/_simd/checks/cpu_vsx2.c diff --git a/numpy/distutils/checks/cpu_vsx3.c b/numpy/_core/src/_simd/checks/cpu_vsx3.c similarity index 100% rename from numpy/distutils/checks/cpu_vsx3.c rename to numpy/_core/src/_simd/checks/cpu_vsx3.c diff --git a/numpy/distutils/checks/cpu_vsx4.c b/numpy/_core/src/_simd/checks/cpu_vsx4.c similarity index 100% rename from numpy/distutils/checks/cpu_vsx4.c rename to numpy/_core/src/_simd/checks/cpu_vsx4.c diff --git a/numpy/distutils/checks/cpu_vx.c b/numpy/_core/src/_simd/checks/cpu_vx.c similarity index 100% rename from numpy/distutils/checks/cpu_vx.c rename to numpy/_core/src/_simd/checks/cpu_vx.c diff --git a/numpy/distutils/checks/cpu_vxe.c b/numpy/_core/src/_simd/checks/cpu_vxe.c similarity index 100% rename from numpy/distutils/checks/cpu_vxe.c rename to numpy/_core/src/_simd/checks/cpu_vxe.c diff --git a/numpy/distutils/checks/cpu_vxe2.c b/numpy/_core/src/_simd/checks/cpu_vxe2.c similarity index 100% rename from numpy/distutils/checks/cpu_vxe2.c rename to numpy/_core/src/_simd/checks/cpu_vxe2.c diff --git a/numpy/distutils/checks/cpu_xop.c b/numpy/_core/src/_simd/checks/cpu_xop.c similarity index 100% rename from numpy/distutils/checks/cpu_xop.c rename to numpy/_core/src/_simd/checks/cpu_xop.c diff --git a/numpy/distutils/checks/extra_avx512bw_mask.c b/numpy/_core/src/_simd/checks/extra_avx512bw_mask.c similarity index 100% rename from numpy/distutils/checks/extra_avx512bw_mask.c rename to numpy/_core/src/_simd/checks/extra_avx512bw_mask.c diff --git a/numpy/distutils/checks/extra_avx512dq_mask.c b/numpy/_core/src/_simd/checks/extra_avx512dq_mask.c similarity index 100% rename from numpy/distutils/checks/extra_avx512dq_mask.c rename to numpy/_core/src/_simd/checks/extra_avx512dq_mask.c diff --git a/numpy/distutils/checks/extra_avx512f_reduce.c b/numpy/_core/src/_simd/checks/extra_avx512f_reduce.c similarity index 100% rename from numpy/distutils/checks/extra_avx512f_reduce.c rename to numpy/_core/src/_simd/checks/extra_avx512f_reduce.c diff --git a/numpy/distutils/checks/extra_vsx3_half_double.c b/numpy/_core/src/_simd/checks/extra_vsx3_half_double.c similarity index 100% rename from numpy/distutils/checks/extra_vsx3_half_double.c rename to numpy/_core/src/_simd/checks/extra_vsx3_half_double.c diff --git a/numpy/distutils/checks/extra_vsx4_mma.c 
b/numpy/_core/src/_simd/checks/extra_vsx4_mma.c similarity index 100% rename from numpy/distutils/checks/extra_vsx4_mma.c rename to numpy/_core/src/_simd/checks/extra_vsx4_mma.c diff --git a/numpy/distutils/checks/extra_vsx_asm.c b/numpy/_core/src/_simd/checks/extra_vsx_asm.c similarity index 100% rename from numpy/distutils/checks/extra_vsx_asm.c rename to numpy/_core/src/_simd/checks/extra_vsx_asm.c diff --git a/numpy/distutils/checks/test_flags.c b/numpy/_core/src/_simd/checks/test_flags.c similarity index 100% rename from numpy/distutils/checks/test_flags.c rename to numpy/_core/src/_simd/checks/test_flags.c diff --git a/numpy/_pyinstaller/hook-numpy.py b/numpy/_pyinstaller/hook-numpy.py index 61c224b33810..b6f15e6edde8 100644 --- a/numpy/_pyinstaller/hook-numpy.py +++ b/numpy/_pyinstaller/hook-numpy.py @@ -32,5 +32,4 @@ "f2py", "setuptools", "distutils", - "numpy.distutils", ] diff --git a/numpy/_pytesttester.py b/numpy/_pytesttester.py index 77342e44aea0..ecc6a1cda738 100644 --- a/numpy/_pytesttester.py +++ b/numpy/_pytesttester.py @@ -136,14 +136,6 @@ def __call__(self, label='fast', verbose=1, extra_argv=None, # offset verbosity. The "-q" cancels a "-v". pytest_args += ["-q"] - if sys.version_info < (3, 12): - with warnings.catch_warnings(): - warnings.simplefilter("always") - # Filter out distutils cpu warnings (could be localized to - # distutils tests). ASV has problems with top level import, - # so fetch module for suppression here. - from numpy.distutils import cpuinfo # noqa: F401 - # Filter out annoying import messages. Want these in both develop and # release mode. pytest_args += [ diff --git a/numpy/distutils/__init__.py b/numpy/distutils/__init__.py deleted file mode 100644 index f74ed4d3f6db..000000000000 --- a/numpy/distutils/__init__.py +++ /dev/null @@ -1,64 +0,0 @@ -""" -An enhanced distutils, providing support for Fortran compilers, for BLAS, -LAPACK and other common libraries for numerical computing, and more. - -Public submodules are:: - - misc_util - system_info - cpu_info - log - exec_command - -For details, please see the *Packaging* and *NumPy Distutils User Guide* -sections of the NumPy Reference Guide. - -For configuring the preference for and location of libraries like BLAS and -LAPACK, and for setting include paths and similar build options, please see -``site.cfg.example`` in the root of the NumPy repository or sdist. - -""" - -import warnings - -# Must import local ccompiler ASAP in order to get -# customized CCompiler.spawn effective. -from . import ccompiler -from . import unixccompiler - -from .npy_pkg_config import * - -warnings.warn("\n\n" - " `numpy.distutils` is deprecated since NumPy 1.23.0, as a result\n" - " of the deprecation of `distutils` itself. It will be removed for\n" - " Python >= 3.12. For older Python versions it will remain present.\n" - " It is recommended to use `setuptools < 60.0` for those Python versions.\n" - " For more details, see:\n" - " https://numpy.org/devdocs/reference/distutils_status_migration.html \n\n", - DeprecationWarning, stacklevel=2 -) -del warnings - -# If numpy is installed, add distutils.test() -try: - from . import __config__ - # Normally numpy is installed if the above import works, but an interrupted - # in-place build could also have left a __config__.py. In that case the - # next import may still fail, so keep it inside the try block. 
- from numpy._pytesttester import PytestTester - test = PytestTester(__name__) - del PytestTester -except ImportError: - pass - - -def customized_fcompiler(plat=None, compiler=None): - from numpy.distutils.fcompiler import new_fcompiler - c = new_fcompiler(plat=plat, compiler=compiler) - c.customize() - return c - -def customized_ccompiler(plat=None, compiler=None, verbose=1): - c = ccompiler.new_compiler(plat=plat, compiler=compiler, verbose=verbose) - c.customize('') - return c diff --git a/numpy/distutils/__init__.pyi b/numpy/distutils/__init__.pyi deleted file mode 100644 index 3938d68de14c..000000000000 --- a/numpy/distutils/__init__.pyi +++ /dev/null @@ -1,4 +0,0 @@ -from typing import Any - -# TODO: remove when the full numpy namespace is defined -def __getattr__(name: str) -> Any: ... diff --git a/numpy/distutils/_shell_utils.py b/numpy/distutils/_shell_utils.py deleted file mode 100644 index 9a1c8ce718c9..000000000000 --- a/numpy/distutils/_shell_utils.py +++ /dev/null @@ -1,87 +0,0 @@ -""" -Helper functions for interacting with the shell, and consuming shell-style -parameters provided in config files. -""" -import os -import shlex -import subprocess - -__all__ = ['WindowsParser', 'PosixParser', 'NativeParser'] - - -class CommandLineParser: - """ - An object that knows how to split and join command-line arguments. - - It must be true that ``argv == split(join(argv))`` for all ``argv``. - The reverse neednt be true - `join(split(cmd))` may result in the addition - or removal of unnecessary escaping. - """ - @staticmethod - def join(argv): - """ Join a list of arguments into a command line string """ - raise NotImplementedError - - @staticmethod - def split(cmd): - """ Split a command line string into a list of arguments """ - raise NotImplementedError - - -class WindowsParser: - """ - The parsing behavior used by `subprocess.call("string")` on Windows, which - matches the Microsoft C/C++ runtime. - - Note that this is _not_ the behavior of cmd. - """ - @staticmethod - def join(argv): - # note that list2cmdline is specific to the windows syntax - return subprocess.list2cmdline(argv) - - @staticmethod - def split(cmd): - import ctypes # guarded import for systems without ctypes - try: - ctypes.windll - except AttributeError: - raise NotImplementedError - - # Windows has special parsing rules for the executable (no quotes), - # that we do not care about - insert a dummy element - if not cmd: - return [] - cmd = 'dummy ' + cmd - - CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW - CommandLineToArgvW.restype = ctypes.POINTER(ctypes.c_wchar_p) - CommandLineToArgvW.argtypes = (ctypes.c_wchar_p, ctypes.POINTER(ctypes.c_int)) - - nargs = ctypes.c_int() - lpargs = CommandLineToArgvW(cmd, ctypes.byref(nargs)) - args = [lpargs[i] for i in range(nargs.value)] - assert not ctypes.windll.kernel32.LocalFree(lpargs) - - # strip the element we inserted - assert args[0] == "dummy" - return args[1:] - - -class PosixParser: - """ - The parsing behavior used by `subprocess.call("string", shell=True)` on Posix. 
- """ - @staticmethod - def join(argv): - return ' '.join(shlex.quote(arg) for arg in argv) - - @staticmethod - def split(cmd): - return shlex.split(cmd, posix=True) - - -if os.name == 'nt': - NativeParser = WindowsParser -elif os.name == 'posix': - NativeParser = PosixParser diff --git a/numpy/distutils/armccompiler.py b/numpy/distutils/armccompiler.py deleted file mode 100644 index afba7eb3b352..000000000000 --- a/numpy/distutils/armccompiler.py +++ /dev/null @@ -1,26 +0,0 @@ -from distutils.unixccompiler import UnixCCompiler - -class ArmCCompiler(UnixCCompiler): - - """ - Arm compiler. - """ - - compiler_type = 'arm' - cc_exe = 'armclang' - cxx_exe = 'armclang++' - - def __init__(self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__(self, verbose, dry_run, force) - cc_compiler = self.cc_exe - cxx_compiler = self.cxx_exe - self.set_executables(compiler=cc_compiler + - ' -O3 -fPIC', - compiler_so=cc_compiler + - ' -O3 -fPIC', - compiler_cxx=cxx_compiler + - ' -O3 -fPIC', - linker_exe=cc_compiler + - ' -lamath', - linker_so=cc_compiler + - ' -lamath -shared') diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py deleted file mode 100644 index dee13b1c9e84..000000000000 --- a/numpy/distutils/ccompiler.py +++ /dev/null @@ -1,826 +0,0 @@ -import os -import re -import sys -import platform -import shlex -import time -import subprocess -from copy import copy -from pathlib import Path -from distutils import ccompiler -from distutils.ccompiler import ( - compiler_class, gen_lib_options, get_default_compiler, new_compiler, - CCompiler -) -from distutils.errors import ( - DistutilsExecError, DistutilsModuleError, DistutilsPlatformError, - CompileError, UnknownFileError -) -from distutils.sysconfig import customize_compiler -from distutils.version import LooseVersion - -from numpy.distutils import log -from numpy.distutils.exec_command import ( - filepath_from_subprocess_output, forward_bytes_to_stdout -) -from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \ - get_num_build_jobs, \ - _commandline_dep_string, \ - sanitize_cxx_flags - -# globals for parallel build management -import threading - -_job_semaphore = None -_global_lock = threading.Lock() -_processing_files = set() - - -def _needs_build(obj, cc_args, extra_postargs, pp_opts): - """ - Check if an objects needs to be rebuild based on its dependencies - - Parameters - ---------- - obj : str - object file - - Returns - ------- - bool - """ - # defined in unixcompiler.py - dep_file = obj + '.d' - if not os.path.exists(dep_file): - return True - - # dep_file is a makefile containing 'object: dependencies' - # formatted like posix shell (spaces escaped, \ line continuations) - # the last line contains the compiler commandline arguments as some - # projects may compile an extension multiple times with different - # arguments - with open(dep_file) as f: - lines = f.readlines() - - cmdline =_commandline_dep_string(cc_args, extra_postargs, pp_opts) - last_cmdline = lines[-1] - if last_cmdline != cmdline: - return True - - contents = ''.join(lines[:-1]) - deps = [x for x in shlex.split(contents, posix=True) - if x != "\n" and not x.endswith(":")] - - try: - t_obj = os.stat(obj).st_mtime - - # check if any of the dependencies is newer than the object - # the dependencies includes the source used to create the object - for f in deps: - if os.stat(f).st_mtime > t_obj: - return True - except OSError: - # no object counts as newer (shouldn't happen if dep_file exists) - return True - - return False - - -def 
replace_method(klass, method_name, func): - # Py3k does not have unbound method anymore, MethodType does not work - m = lambda self, *args, **kw: func(self, *args, **kw) - setattr(klass, method_name, m) - - -###################################################################### -## Method that subclasses may redefine. But don't call this method, -## it i private to CCompiler class and may return unexpected -## results if used elsewhere. So, you have been warned.. - -def CCompiler_find_executables(self): - """ - Does nothing here, but is called by the get_version method and can be - overridden by subclasses. In particular it is redefined in the `FCompiler` - class where more documentation can be found. - - """ - pass - - -replace_method(CCompiler, 'find_executables', CCompiler_find_executables) - - -# Using customized CCompiler.spawn. -def CCompiler_spawn(self, cmd, display=None, env=None): - """ - Execute a command in a sub-process. - - Parameters - ---------- - cmd : str - The command to execute. - display : str or sequence of str, optional - The text to add to the log file kept by `numpy.distutils`. - If not given, `display` is equal to `cmd`. - env : a dictionary for environment variables, optional - - Returns - ------- - None - - Raises - ------ - DistutilsExecError - If the command failed, i.e. the exit status was not 0. - - """ - env = env if env is not None else dict(os.environ) - if display is None: - display = cmd - if is_sequence(display): - display = ' '.join(list(display)) - log.info(display) - try: - if self.verbose: - subprocess.check_output(cmd, env=env) - else: - subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env) - except subprocess.CalledProcessError as exc: - o = exc.output - s = exc.returncode - except OSError as e: - # OSError doesn't have the same hooks for the exception - # output, but exec_command() historically would use an - # empty string for EnvironmentError (base class for - # OSError) - # o = b'' - # still that would make the end-user lost in translation! - o = f"\n\n{e}\n\n\n" - try: - o = o.encode(sys.stdout.encoding) - except AttributeError: - o = o.encode('utf8') - # status previously used by exec_command() for parent - # of OSError - s = 127 - else: - # use a convenience return here so that any kind of - # caught exception will execute the default code after the - # try / except block, which handles various exceptions - return None - - if is_sequence(cmd): - cmd = ' '.join(list(cmd)) - - if self.verbose: - forward_bytes_to_stdout(o) - - if re.search(b'Too many open files', o): - msg = '\nTry rerunning setup command until build succeeds.' - else: - msg = '' - raise DistutilsExecError('Command "%s" failed with exit status %d%s' % - (cmd, s, msg)) - -replace_method(CCompiler, 'spawn', CCompiler_spawn) - -def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''): - """ - Return the name of the object files for the given source files. - - Parameters - ---------- - source_filenames : list of str - The list of paths to source files. Paths can be either relative or - absolute, this is handled transparently. - strip_dir : bool, optional - Whether to strip the directory from the returned paths. If True, - the file name prepended by `output_dir` is returned. Default is False. - output_dir : str, optional - If given, this path is prepended to the returned paths to the - object files. - - Returns - ------- - obj_names : list of str - The list of paths to the object files corresponding to the source - files in `source_filenames`. 
- - """ - if output_dir is None: - output_dir = '' - obj_names = [] - for src_name in source_filenames: - base, ext = os.path.splitext(os.path.normpath(src_name)) - base = os.path.splitdrive(base)[1] # Chop off the drive - base = base[os.path.isabs(base):] # If abs, chop off leading / - if base.startswith('..'): - # Resolve starting relative path components, middle ones - # (if any) have been handled by os.path.normpath above. - i = base.rfind('..')+2 - d = base[:i] - d = os.path.basename(os.path.abspath(d)) - base = d + base[i:] - if ext not in self.src_extensions: - raise UnknownFileError("unknown file type '%s' (from '%s')" % (ext, src_name)) - if strip_dir: - base = os.path.basename(base) - obj_name = os.path.join(output_dir, base + self.obj_extension) - obj_names.append(obj_name) - return obj_names - -replace_method(CCompiler, 'object_filenames', CCompiler_object_filenames) - -def CCompiler_compile(self, sources, output_dir=None, macros=None, - include_dirs=None, debug=0, extra_preargs=None, - extra_postargs=None, depends=None): - """ - Compile one or more source files. - - Please refer to the Python distutils API reference for more details. - - Parameters - ---------- - sources : list of str - A list of filenames - output_dir : str, optional - Path to the output directory. - macros : list of tuples - A list of macro definitions. - include_dirs : list of str, optional - The directories to add to the default include file search path for - this compilation only. - debug : bool, optional - Whether or not to output debug symbols in or alongside the object - file(s). - extra_preargs, extra_postargs : ? - Extra pre- and post-arguments. - depends : list of str, optional - A list of file names that all targets depend on. - - Returns - ------- - objects : list of str - A list of object file names, one per source file `sources`. - - Raises - ------ - CompileError - If compilation fails. 
- - """ - global _job_semaphore - - jobs = get_num_build_jobs() - - # setup semaphore to not exceed number of compile jobs when parallelized at - # extension level (python >= 3.5) - with _global_lock: - if _job_semaphore is None: - _job_semaphore = threading.Semaphore(jobs) - - if not sources: - return [] - from numpy.distutils.fcompiler import (FCompiler, - FORTRAN_COMMON_FIXED_EXTENSIONS, - has_f90_header) - if isinstance(self, FCompiler): - display = [] - for fc in ['f77', 'f90', 'fix']: - fcomp = getattr(self, 'compiler_'+fc) - if fcomp is None: - continue - display.append("Fortran %s compiler: %s" % (fc, ' '.join(fcomp))) - display = '\n'.join(display) - else: - ccomp = self.compiler_so - display = "C compiler: %s\n" % (' '.join(ccomp),) - log.info(display) - macros, objects, extra_postargs, pp_opts, build = \ - self._setup_compile(output_dir, macros, include_dirs, sources, - depends, extra_postargs) - cc_args = self._get_cc_args(pp_opts, debug, extra_preargs) - display = "compile options: '%s'" % (' '.join(cc_args)) - if extra_postargs: - display += "\nextra options: '%s'" % (' '.join(extra_postargs)) - log.info(display) - - def single_compile(args): - obj, (src, ext) = args - if not _needs_build(obj, cc_args, extra_postargs, pp_opts): - return - - # check if we are currently already processing the same object - # happens when using the same source in multiple extensions - while True: - # need explicit lock as there is no atomic check and add with GIL - with _global_lock: - # file not being worked on, start working - if obj not in _processing_files: - _processing_files.add(obj) - break - # wait for the processing to end - time.sleep(0.1) - - try: - # retrieve slot from our #job semaphore and build - with _job_semaphore: - self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) - finally: - # register being done processing - with _global_lock: - _processing_files.remove(obj) - - - if isinstance(self, FCompiler): - objects_to_build = list(build.keys()) - f77_objects, other_objects = [], [] - for obj in objects: - if obj in objects_to_build: - src, ext = build[obj] - if self.compiler_type=='absoft': - obj = cyg2win32(obj) - src = cyg2win32(src) - if Path(src).suffix.lower() in FORTRAN_COMMON_FIXED_EXTENSIONS \ - and not has_f90_header(src): - f77_objects.append((obj, (src, ext))) - else: - other_objects.append((obj, (src, ext))) - - # f77 objects can be built in parallel - build_items = f77_objects - # build f90 modules serial, module files are generated during - # compilation and may be used by files later in the list so the - # ordering is important - for o in other_objects: - single_compile(o) - else: - build_items = build.items() - - if len(build) > 1 and jobs > 1: - # build parallel - from concurrent.futures import ThreadPoolExecutor - with ThreadPoolExecutor(jobs) as pool: - res = pool.map(single_compile, build_items) - list(res) # access result to raise errors - else: - # build serial - for o in build_items: - single_compile(o) - - # Return *all* object filenames, not just the ones we just built. - return objects - -replace_method(CCompiler, 'compile', CCompiler_compile) - -def CCompiler_customize_cmd(self, cmd, ignore=()): - """ - Customize compiler using distutils command. - - Parameters - ---------- - cmd : class instance - An instance inheriting from ``distutils.cmd.Command``. - ignore : sequence of str, optional - List of ``distutils.ccompiler.CCompiler`` commands (without ``'set_'``) that should not be - altered. 
Strings that are checked for are: - ``('include_dirs', 'define', 'undef', 'libraries', 'library_dirs', - 'rpath', 'link_objects')``. - - Returns - ------- - None - - """ - log.info('customize %s using %s' % (self.__class__.__name__, - cmd.__class__.__name__)) - - if ( - hasattr(self, 'compiler') and - 'clang' in self.compiler[0] and - not (platform.machine() == 'arm64' and sys.platform == 'darwin') - ): - # clang defaults to a non-strict floating error point model. - # However, '-ftrapping-math' is not currently supported (2023-04-08) - # for macosx_arm64. - # Since NumPy and most Python libs give warnings for these, override: - self.compiler.append('-ftrapping-math') - self.compiler_so.append('-ftrapping-math') - - def allow(attr): - return getattr(cmd, attr, None) is not None and attr not in ignore - - if allow('include_dirs'): - self.set_include_dirs(cmd.include_dirs) - if allow('define'): - for (name, value) in cmd.define: - self.define_macro(name, value) - if allow('undef'): - for macro in cmd.undef: - self.undefine_macro(macro) - if allow('libraries'): - self.set_libraries(self.libraries + cmd.libraries) - if allow('library_dirs'): - self.set_library_dirs(self.library_dirs + cmd.library_dirs) - if allow('rpath'): - self.set_runtime_library_dirs(cmd.rpath) - if allow('link_objects'): - self.set_link_objects(cmd.link_objects) - -replace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd) - -def _compiler_to_string(compiler): - props = [] - mx = 0 - keys = list(compiler.executables.keys()) - for key in ['version', 'libraries', 'library_dirs', - 'object_switch', 'compile_switch', - 'include_dirs', 'define', 'undef', 'rpath', 'link_objects']: - if key not in keys: - keys.append(key) - for key in keys: - if hasattr(compiler, key): - v = getattr(compiler, key) - mx = max(mx, len(key)) - props.append((key, repr(v))) - fmt = '%-' + repr(mx+1) + 's = %s' - lines = [fmt % prop for prop in props] - return '\n'.join(lines) - -def CCompiler_show_customization(self): - """ - Print the compiler customizations to stdout. - - Parameters - ---------- - None - - Returns - ------- - None - - Notes - ----- - Printing is only done if the distutils log threshold is < 2. - - """ - try: - self.get_version() - except Exception: - pass - if log._global_log.threshold<2: - print('*'*80) - print(self.__class__) - print(_compiler_to_string(self)) - print('*'*80) - -replace_method(CCompiler, 'show_customization', CCompiler_show_customization) - -def CCompiler_customize(self, dist, need_cxx=0): - """ - Do any platform-specific customization of a compiler instance. - - This method calls ``distutils.sysconfig.customize_compiler`` for - platform-specific customization, as well as optionally remove a flag - to suppress spurious warnings in case C++ code is being compiled. - - Parameters - ---------- - dist : object - This parameter is not used for anything. - need_cxx : bool, optional - Whether or not C++ has to be compiled. If so (True), the - ``"-Wstrict-prototypes"`` option is removed to prevent spurious - warnings. Default is False. - - Returns - ------- - None - - Notes - ----- - All the default options used by distutils can be extracted with:: - - from distutils import sysconfig - sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS', - 'CCSHARED', 'LDSHARED', 'SO') - - """ - # See FCompiler.customize for suggested usage. 
- log.info('customize %s' % (self.__class__.__name__)) - customize_compiler(self) - if need_cxx: - # In general, distutils uses -Wstrict-prototypes, but this option is - # not valid for C++ code, only for C. Remove it if it's there to - # avoid a spurious warning on every compilation. - try: - self.compiler_so.remove('-Wstrict-prototypes') - except (AttributeError, ValueError): - pass - - if hasattr(self, 'compiler') and 'cc' in self.compiler[0]: - if not self.compiler_cxx: - if self.compiler[0].startswith('gcc'): - a, b = 'gcc', 'g++' - else: - a, b = 'cc', 'c++' - self.compiler_cxx = [self.compiler[0].replace(a, b)]\ - + self.compiler[1:] - else: - if hasattr(self, 'compiler'): - log.warn("#### %s #######" % (self.compiler,)) - if not hasattr(self, 'compiler_cxx'): - log.warn('Missing compiler_cxx fix for ' + self.__class__.__name__) - - - # check if compiler supports gcc style automatic dependencies - # run on every extension so skip for known good compilers - if hasattr(self, 'compiler') and ('gcc' in self.compiler[0] or - 'g++' in self.compiler[0] or - 'clang' in self.compiler[0]): - self._auto_depends = True - elif os.name == 'posix': - import tempfile - import shutil - tmpdir = tempfile.mkdtemp() - try: - fn = os.path.join(tmpdir, "file.c") - with open(fn, "w") as f: - f.write("int a;\n") - self.compile([fn], output_dir=tmpdir, - extra_preargs=['-MMD', '-MF', fn + '.d']) - self._auto_depends = True - except CompileError: - self._auto_depends = False - finally: - shutil.rmtree(tmpdir) - - return - -replace_method(CCompiler, 'customize', CCompiler_customize) - -def simple_version_match(pat=r'[-.\d]+', ignore='', start=''): - """ - Simple matching of version numbers, for use in CCompiler and FCompiler. - - Parameters - ---------- - pat : str, optional - A regular expression matching version numbers. - Default is ``r'[-.\\d]+'``. - ignore : str, optional - A regular expression matching patterns to skip. - Default is ``''``, in which case nothing is skipped. - start : str, optional - A regular expression matching the start of where to start looking - for version numbers. - Default is ``''``, in which case searching is started at the - beginning of the version string given to `matcher`. - - Returns - ------- - matcher : callable - A function that is appropriate to use as the ``.version_match`` - attribute of a ``distutils.ccompiler.CCompiler`` class. `matcher` takes a single parameter, - a version string. - - """ - def matcher(self, version_string): - # version string may appear in the second line, so getting rid - # of new lines: - version_string = version_string.replace('\n', ' ') - pos = 0 - if start: - m = re.match(start, version_string) - if not m: - return None - pos = m.end() - while True: - m = re.search(pat, version_string[pos:]) - if not m: - return None - if ignore and re.match(ignore, m.group(0)): - pos = m.end() - continue - break - return m.group(0) - return matcher - -def CCompiler_get_version(self, force=False, ok_status=[0]): - """ - Return compiler version, or None if compiler is not available. - - Parameters - ---------- - force : bool, optional - If True, force a new determination of the version, even if the - compiler already has a version attribute. Default is False. - ok_status : list of int, optional - The list of status values returned by the version look-up process - for which a version string is returned. If the status value is not - in `ok_status`, None is returned. Default is ``[0]``. 
-
-    Returns
-    -------
-    version : str or None
-        Version string, in the format of ``distutils.version.LooseVersion``.
-
-    """
-    if not force and hasattr(self, 'version'):
-        return self.version
-    self.find_executables()
-    try:
-        version_cmd = self.version_cmd
-    except AttributeError:
-        return None
-    if not version_cmd or not version_cmd[0]:
-        return None
-    try:
-        matcher = self.version_match
-    except AttributeError:
-        try:
-            pat = self.version_pattern
-        except AttributeError:
-            return None
-        def matcher(version_string):
-            m = re.match(pat, version_string)
-            if not m:
-                return None
-            version = m.group('version')
-            return version
-
-    try:
-        output = subprocess.check_output(version_cmd, stderr=subprocess.STDOUT)
-    except subprocess.CalledProcessError as exc:
-        output = exc.output
-        status = exc.returncode
-    except OSError:
-        # match the historical returns for a parent
-        # exception class caught by exec_command()
-        status = 127
-        output = b''
-    else:
-        # output isn't actually a filepath but we do this
-        # for now to match previous distutils behavior
-        output = filepath_from_subprocess_output(output)
-        status = 0
-
-    version = None
-    if status in ok_status:
-        version = matcher(output)
-        if version:
-            version = LooseVersion(version)
-    self.version = version
-    return version
-
-replace_method(CCompiler, 'get_version', CCompiler_get_version)
-
-def CCompiler_cxx_compiler(self):
-    """
-    Return the C++ compiler.
-
-    Parameters
-    ----------
-    None
-
-    Returns
-    -------
-    cxx : class instance
-        The C++ compiler, as a ``distutils.ccompiler.CCompiler`` instance.
-
-    """
-    if self.compiler_type in ('msvc', 'intelw', 'intelemw'):
-        return self
-
-    cxx = copy(self)
-    cxx.compiler_cxx = cxx.compiler_cxx
-    cxx.compiler_so = [cxx.compiler_cxx[0]] + \
-                      sanitize_cxx_flags(cxx.compiler_so[1:])
-    if (sys.platform.startswith(('aix', 'os400')) and
-            'ld_so_aix' in cxx.linker_so[0]):
-        # AIX needs the ld_so_aix script included with Python
-        cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \
-                        + cxx.linker_so[2:]
-        if sys.platform.startswith('os400'):
-            # This is required by IBM i 7.4 and previous for PRId64 in printf() calls.
-            cxx.compiler_so.append('-D__STDC_FORMAT_MACROS')
-            # This works around a bug in gcc 10.3, which fails to handle the TLS init.
- cxx.compiler_so.append('-fno-extern-tls-init') - cxx.linker_so.append('-fno-extern-tls-init') - else: - cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:] - return cxx - -replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler) - -compiler_class['intel'] = ('intelccompiler', 'IntelCCompiler', - "Intel C Compiler for 32-bit applications") -compiler_class['intele'] = ('intelccompiler', 'IntelItaniumCCompiler', - "Intel C Itanium Compiler for Itanium-based applications") -compiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler', - "Intel C Compiler for 64-bit applications") -compiler_class['intelw'] = ('intelccompiler', 'IntelCCompilerW', - "Intel C Compiler for 32-bit applications on Windows") -compiler_class['intelemw'] = ('intelccompiler', 'IntelEM64TCCompilerW', - "Intel C Compiler for 64-bit applications on Windows") -compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler', - "PathScale Compiler for SiCortex-based applications") -compiler_class['arm'] = ('armccompiler', 'ArmCCompiler', - "Arm C Compiler") -compiler_class['fujitsu'] = ('fujitsuccompiler', 'FujitsuCCompiler', - "Fujitsu C Compiler") - -ccompiler._default_compilers += (('linux.*', 'intel'), - ('linux.*', 'intele'), - ('linux.*', 'intelem'), - ('linux.*', 'pathcc'), - ('nt', 'intelw'), - ('nt', 'intelemw')) - -if sys.platform == 'win32': - compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler', - "Mingw32 port of GNU C Compiler for Win32"\ - "(for MSC built Python)") - if mingw32(): - # On windows platforms, we want to default to mingw32 (gcc) - # because msvc can't build blitz stuff. - log.info('Setting mingw32 as default compiler for nt.') - ccompiler._default_compilers = (('nt', 'mingw32'),) \ - + ccompiler._default_compilers - - -_distutils_new_compiler = new_compiler -def new_compiler (plat=None, - compiler=None, - verbose=None, - dry_run=0, - force=0): - # Try first C compilers from numpy.distutils. - if verbose is None: - verbose = log.get_threshold() <= log.INFO - if plat is None: - plat = os.name - try: - if compiler is None: - compiler = get_default_compiler(plat) - (module_name, class_name, long_description) = compiler_class[compiler] - except KeyError: - msg = "don't know how to compile C/C++ code on platform '%s'" % plat - if compiler is not None: - msg = msg + " with '%s' compiler" % compiler - raise DistutilsPlatformError(msg) - module_name = "numpy.distutils." 
+ module_name - try: - __import__ (module_name) - except ImportError as e: - msg = str(e) - log.info('%s in numpy.distutils; trying from distutils', - str(msg)) - module_name = module_name[6:] - try: - __import__(module_name) - except ImportError as e: - msg = str(e) - raise DistutilsModuleError("can't compile C/C++ code: unable to load " - "module '%s'" % module_name) - try: - module = sys.modules[module_name] - klass = vars(module)[class_name] - except KeyError: - raise DistutilsModuleError(("can't compile C/C++ code: unable to find " - "class '%s' in module '%s'") % (class_name, module_name)) - compiler = klass(None, dry_run, force) - compiler.verbose = verbose - log.debug('new_compiler returns %s' % (klass)) - return compiler - -ccompiler.new_compiler = new_compiler - -_distutils_gen_lib_options = gen_lib_options -def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries): - # the version of this function provided by CPython allows the following - # to return lists, which are unpacked automatically: - # - compiler.runtime_library_dir_option - # our version extends the behavior to: - # - compiler.library_dir_option - # - compiler.library_option - # - compiler.find_library_file - r = _distutils_gen_lib_options(compiler, library_dirs, - runtime_library_dirs, libraries) - lib_opts = [] - for i in r: - if is_sequence(i): - lib_opts.extend(list(i)) - else: - lib_opts.append(i) - return lib_opts -ccompiler.gen_lib_options = gen_lib_options - -# Also fix up the various compiler modules, which do -# from distutils.ccompiler import gen_lib_options -# Don't bother with mwerks, as we don't support Classic Mac. -for _cc in ['msvc9', 'msvc', '_msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']: - _m = sys.modules.get('distutils.' + _cc + 'compiler') - if _m is not None: - setattr(_m, 'gen_lib_options', gen_lib_options) - diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py deleted file mode 100644 index 4dea2f9b1da1..000000000000 --- a/numpy/distutils/ccompiler_opt.py +++ /dev/null @@ -1,2668 +0,0 @@ -"""Provides the `CCompilerOpt` class, used for handling the CPU/hardware -optimization, starting from parsing the command arguments, to managing the -relation between the CPU baseline and dispatch-able features, -also generating the required C headers and ending with compiling -the sources with proper compiler's flags. - -`CCompilerOpt` doesn't provide runtime detection for the CPU features, -instead only focuses on the compiler side, but it creates abstract C headers -that can be used later for the final runtime dispatching process.""" - -import atexit -import inspect -import os -import pprint -import re -import subprocess -import textwrap - -class _Config: - """An abstract class holds all configurable attributes of `CCompilerOpt`, - these class attributes can be used to change the default behavior - of `CCompilerOpt` in order to fit other requirements. - - Attributes - ---------- - conf_nocache : bool - Set True to disable memory and file cache. - Default is False. - - conf_noopt : bool - Set True to forces the optimization to be disabled, - in this case `CCompilerOpt` tends to generate all - expected headers in order to 'not' break the build. - Default is False. - - conf_cache_factors : list - Add extra factors to the primary caching factors. The caching factors - are utilized to determine if there are changes had happened that - requires to discard the cache and re-updating it. 
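In the default configuration those factors amount to the following (they are assigned in `_Config.__init__` further down; shown here as a standalone sketch):

    # Discard the cache whenever this module is edited or caching is
    # toggled; _Cache.cache_hash() folds these into a single digest.
    conf_cache_factors = [os.path.getmtime(__file__), conf_nocache]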
-        The primary factors are the arguments of `CCompilerOpt` and
-        `CCompiler`'s properties (type, flags, etc.).
-        Default is a list of two items: the time of last modification
-        of `ccompiler_opt` and the value of attribute "conf_nocache".
-
-    conf_tmp_path : str
-        The path of the temporary directory. Default is an auto-created
-        temporary directory via ``tempfile.mkdtemp()``.
-
-    conf_check_path : str
-        The path of the testing files. Each added CPU feature must have a
-        **C** source file that contains at least one intrinsic or
-        instruction related to this feature, so it can be tested against
-        the compiler. Default is ``./distutils/checks``.
-
-    conf_target_groups : dict
-        Extra tokens that can be reached from dispatch-able sources through
-        the special mark ``@targets``. Default is an empty dictionary.
-
-        **Notes**:
-            - case-insensitive for tokens and group names
-            - the sign '#' must be placed at the beginning of a group name
-              and is only valid within ``@targets``
-
-        **Example**:
-            .. code-block:: console
-
-                $ "@targets #avx_group other_tokens" > group_inside.c
-
-            >>> CCompilerOpt.conf_target_groups["avx_group"] = \\
-            "$werror $maxopt avx2 avx512f avx512_skx"
-            >>> cco = CCompilerOpt(cc_instance)
-            >>> cco.try_dispatch(["group_inside.c"])
-
-    conf_c_prefix : str
-        The prefix of public C definitions. Default is ``"NPY_"``.
-
-    conf_c_prefix_ : str
-        The prefix of internal C definitions. Default is ``"NPY__"``.
-
-    conf_cc_flags : dict
-        Nested dictionaries defining several compiler flags that are
-        tied to some major functions; the main key represents the compiler
-        name and the sub-keys represent flag names. The default already
-        covers all supported **C** compilers.
-
-        Sub-keys explained as follows:
-
-        "native": str or None
-            used by the argument option `native`, to detect the current
-            machine's support via the compiler.
-        "werror": str or None
-            utilized to treat warnings as errors when testing CPU features
-            against the compiler and also for the target policy `$werror`
-            via dispatch-able sources.
-        "maxopt": str or None
-            utilized for the target policy '$maxopt'; the value should
-            contain the maximum acceptable optimization level for the
-            compiler, e.g. ``'-O3'`` for gcc.
-
-        **Notes**:
-            * case-sensitive for compiler names and flags
-            * use a space to separate multiple flags
-            * every flag is tested against the compiler and skipped
-              if it's not applicable.
-
-    conf_min_features : dict
-        A dictionary that defines the CPU features used for the
-        argument option ``'min'``; the key represents the CPU architecture
-        name, e.g. ``'x86'``. The default values provide the best effort
-        across a wide range of user platforms.
-
-        **Note**: case-sensitive for architecture names.
-
-    conf_features : dict
-        Nested dictionaries used for identifying the CPU features.
-        The primary key is a feature name or a group name that gathers
-        several features. The default values cover all supported features,
-        but without the major options like "flags"; these undefined options
-        are handled by the method `conf_features_partial()`.
-        The default value covers almost all CPU features for *X86*,
-        *IBM/Power64* and *ARM 7/8*.
-
-        Sub-keys explained as follows:
-
-        "implies" : str or list, optional
-            List of CPU feature names implied by this feature;
-            each name must be defined within `conf_features`.
-            Default is None.
-
-        "flags": str or list, optional
-            List of compiler flags. Default is None.
-
-        "detect": str or list, optional
-            List of CPU feature names that are required to be detected
-            at runtime. By default, it's the feature name, or the features
-            in "group" if specified.
-
-        "implies_detect": bool, optional
-            If True, all "detect" of implied features will be combined.
-            Default is True. See `feature_detect()`.
-
-        "group": str or list, optional
-            Same as "implies" but doesn't require the feature name to be
-            defined within `conf_features`.
-
-        "interest": int, required
-            a key for sorting CPU features
-
-        "headers": str or list, optional
-            intrinsics C header file
-
-        "disable": str, optional
-            force-disable the feature; the string value should contain the
-            reason for disabling it.
-
-        "autovec": bool or None, optional
-            True or False to declare that the CPU feature can be
-            auto-vectorized by the compiler.
-            By default (None), it is treated as True if the feature has at
-            least one applicable flag. See `feature_can_autovec()`.
-
-        "extra_checks": str or list, optional
-            Extra test case names for the CPU feature that need to be tested
-            against the compiler.
-
-            Each test case must have a C file named ``extra_xxxx.c``, where
-            ``xxxx`` is the case name in lower case, under 'conf_check_path'.
-            It should contain at least one intrinsic or function related
-            to the test case.
-
-            If the compiler is able to successfully compile the C file,
-            then `CCompilerOpt` will add a C ``#define`` for it into the
-            main dispatch header, e.g. ``#define {conf_c_prefix}_XXXX``,
-            where ``XXXX`` is the case name in upper case.
-
-        **NOTES**:
-            * a space can be used as a separator with options that support
-              "str or list"
-            * case-sensitive for all values, and feature names must be
-              in upper-case
-            * if flags aren't applicable, they will be skipped rather than
-              disabling the CPU feature
-            * the CPU feature will be disabled if the compiler fails to
-              compile the test file
-    """
-    conf_nocache = False
-    conf_noopt = False
-    conf_cache_factors = None
-    conf_tmp_path = None
-    conf_check_path = os.path.join(
-        os.path.dirname(os.path.realpath(__file__)), "checks"
-    )
-    conf_target_groups = {}
-    conf_c_prefix = 'NPY_'
-    conf_c_prefix_ = 'NPY__'
-    conf_cc_flags = dict(
-        gcc = dict(
-            # native should always fail on arm and ppc64,
-            # native usually works only with x86
-            native = '-march=native',
-            opt = '-O3',
-            werror = '-Werror',
-        ),
-        clang = dict(
-            native = '-march=native',
-            opt = "-O3",
-            # One of the following flags needs to be applicable for Clang to
-            # guarantee the sanity of the testing process, however in certain
-            # cases `-Werror` gets skipped during the availability test due to
-            # "unused arguments" warnings.
- # see https://github.com/numpy/numpy/issues/19624 - werror = '-Werror=switch -Werror', - ), - icc = dict( - native = '-xHost', - opt = '-O3', - werror = '-Werror', - ), - iccw = dict( - native = '/QxHost', - opt = '/O3', - werror = '/Werror', - ), - msvc = dict( - native = None, - opt = '/O2', - werror = '/WX', - ), - fcc = dict( - native = '-mcpu=a64fx', - opt = None, - werror = None, - ) - ) - conf_min_features = dict( - x86 = "SSE SSE2", - x64 = "SSE SSE2 SSE3", - ppc64 = '', # play it safe - ppc64le = "VSX VSX2", - s390x = '', - armhf = '', # play it safe - aarch64 = "NEON NEON_FP16 NEON_VFPV4 ASIMD" - ) - conf_features = dict( - # X86 - SSE = dict( - interest=1, headers="xmmintrin.h", - # enabling SSE without SSE2 is useless also - # it's non-optional for x86_64 - implies="SSE2" - ), - SSE2 = dict(interest=2, implies="SSE", headers="emmintrin.h"), - SSE3 = dict(interest=3, implies="SSE2", headers="pmmintrin.h"), - SSSE3 = dict(interest=4, implies="SSE3", headers="tmmintrin.h"), - SSE41 = dict(interest=5, implies="SSSE3", headers="smmintrin.h"), - POPCNT = dict(interest=6, implies="SSE41", headers="popcntintrin.h"), - SSE42 = dict(interest=7, implies="POPCNT"), - AVX = dict( - interest=8, implies="SSE42", headers="immintrin.h", - implies_detect=False - ), - XOP = dict(interest=9, implies="AVX", headers="x86intrin.h"), - FMA4 = dict(interest=10, implies="AVX", headers="x86intrin.h"), - F16C = dict(interest=11, implies="AVX"), - FMA3 = dict(interest=12, implies="F16C"), - AVX2 = dict(interest=13, implies="F16C"), - AVX512F = dict( - interest=20, implies="FMA3 AVX2", implies_detect=False, - extra_checks="AVX512F_REDUCE" - ), - AVX512CD = dict(interest=21, implies="AVX512F"), - AVX512_KNL = dict( - interest=40, implies="AVX512CD", group="AVX512ER AVX512PF", - detect="AVX512_KNL", implies_detect=False - ), - AVX512_KNM = dict( - interest=41, implies="AVX512_KNL", - group="AVX5124FMAPS AVX5124VNNIW AVX512VPOPCNTDQ", - detect="AVX512_KNM", implies_detect=False - ), - AVX512_SKX = dict( - interest=42, implies="AVX512CD", group="AVX512VL AVX512BW AVX512DQ", - detect="AVX512_SKX", implies_detect=False, - extra_checks="AVX512BW_MASK AVX512DQ_MASK" - ), - AVX512_CLX = dict( - interest=43, implies="AVX512_SKX", group="AVX512VNNI", - detect="AVX512_CLX" - ), - AVX512_CNL = dict( - interest=44, implies="AVX512_SKX", group="AVX512IFMA AVX512VBMI", - detect="AVX512_CNL", implies_detect=False - ), - AVX512_ICL = dict( - interest=45, implies="AVX512_CLX AVX512_CNL", - group="AVX512VBMI2 AVX512BITALG AVX512VPOPCNTDQ", - detect="AVX512_ICL", implies_detect=False - ), - AVX512_SPR = dict( - interest=46, implies="AVX512_ICL", group="AVX512FP16", - detect="AVX512_SPR", implies_detect=False - ), - # IBM/Power - ## Power7/ISA 2.06 - VSX = dict(interest=1, headers="altivec.h", extra_checks="VSX_ASM"), - ## Power8/ISA 2.07 - VSX2 = dict(interest=2, implies="VSX", implies_detect=False), - ## Power9/ISA 3.00 - VSX3 = dict(interest=3, implies="VSX2", implies_detect=False, - extra_checks="VSX3_HALF_DOUBLE"), - ## Power10/ISA 3.1 - VSX4 = dict(interest=4, implies="VSX3", implies_detect=False, - extra_checks="VSX4_MMA"), - # IBM/Z - ## VX(z13) support - VX = dict(interest=1, headers="vecintrin.h"), - ## Vector-Enhancements Facility - VXE = dict(interest=2, implies="VX", implies_detect=False), - ## Vector-Enhancements Facility 2 - VXE2 = dict(interest=3, implies="VXE", implies_detect=False), - # ARM - NEON = dict(interest=1, headers="arm_neon.h"), - NEON_FP16 = dict(interest=2, implies="NEON"), - ## FMA - NEON_VFPV4 = 
dict(interest=3, implies="NEON_FP16"), - ## Advanced SIMD - ASIMD = dict(interest=4, implies="NEON_FP16 NEON_VFPV4", implies_detect=False), - ## ARMv8.2 half-precision & vector arithm - ASIMDHP = dict(interest=5, implies="ASIMD"), - ## ARMv8.2 dot product - ASIMDDP = dict(interest=6, implies="ASIMD"), - ## ARMv8.2 Single & half-precision Multiply - ASIMDFHM = dict(interest=7, implies="ASIMDHP") - ) - def conf_features_partial(self): - """Return a dictionary of supported CPU features by the platform, - and accumulate the rest of undefined options in `conf_features`, - the returned dict has same rules and notes in - class attribute `conf_features`, also its override - any options that been set in 'conf_features'. - """ - if self.cc_noopt: - # optimization is disabled - return {} - - on_x86 = self.cc_on_x86 or self.cc_on_x64 - is_unix = self.cc_is_gcc or self.cc_is_clang or self.cc_is_fcc - - if on_x86 and is_unix: return dict( - SSE = dict(flags="-msse"), - SSE2 = dict(flags="-msse2"), - SSE3 = dict(flags="-msse3"), - SSSE3 = dict(flags="-mssse3"), - SSE41 = dict(flags="-msse4.1"), - POPCNT = dict(flags="-mpopcnt"), - SSE42 = dict(flags="-msse4.2"), - AVX = dict(flags="-mavx"), - F16C = dict(flags="-mf16c"), - XOP = dict(flags="-mxop"), - FMA4 = dict(flags="-mfma4"), - FMA3 = dict(flags="-mfma"), - AVX2 = dict(flags="-mavx2"), - AVX512F = dict(flags="-mavx512f -mno-mmx"), - AVX512CD = dict(flags="-mavx512cd"), - AVX512_KNL = dict(flags="-mavx512er -mavx512pf"), - AVX512_KNM = dict( - flags="-mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq" - ), - AVX512_SKX = dict(flags="-mavx512vl -mavx512bw -mavx512dq"), - AVX512_CLX = dict(flags="-mavx512vnni"), - AVX512_CNL = dict(flags="-mavx512ifma -mavx512vbmi"), - AVX512_ICL = dict( - flags="-mavx512vbmi2 -mavx512bitalg -mavx512vpopcntdq" - ), - AVX512_SPR = dict(flags="-mavx512fp16"), - ) - if on_x86 and self.cc_is_icc: return dict( - SSE = dict(flags="-msse"), - SSE2 = dict(flags="-msse2"), - SSE3 = dict(flags="-msse3"), - SSSE3 = dict(flags="-mssse3"), - SSE41 = dict(flags="-msse4.1"), - POPCNT = {}, - SSE42 = dict(flags="-msse4.2"), - AVX = dict(flags="-mavx"), - F16C = {}, - XOP = dict(disable="Intel Compiler doesn't support it"), - FMA4 = dict(disable="Intel Compiler doesn't support it"), - # Intel Compiler doesn't support AVX2 or FMA3 independently - FMA3 = dict( - implies="F16C AVX2", flags="-march=core-avx2" - ), - AVX2 = dict(implies="FMA3", flags="-march=core-avx2"), - # Intel Compiler doesn't support AVX512F or AVX512CD independently - AVX512F = dict( - implies="AVX2 AVX512CD", flags="-march=common-avx512" - ), - AVX512CD = dict( - implies="AVX2 AVX512F", flags="-march=common-avx512" - ), - AVX512_KNL = dict(flags="-xKNL"), - AVX512_KNM = dict(flags="-xKNM"), - AVX512_SKX = dict(flags="-xSKYLAKE-AVX512"), - AVX512_CLX = dict(flags="-xCASCADELAKE"), - AVX512_CNL = dict(flags="-xCANNONLAKE"), - AVX512_ICL = dict(flags="-xICELAKE-CLIENT"), - AVX512_SPR = dict(disable="Not supported yet") - ) - if on_x86 and self.cc_is_iccw: return dict( - SSE = dict(flags="/arch:SSE"), - SSE2 = dict(flags="/arch:SSE2"), - SSE3 = dict(flags="/arch:SSE3"), - SSSE3 = dict(flags="/arch:SSSE3"), - SSE41 = dict(flags="/arch:SSE4.1"), - POPCNT = {}, - SSE42 = dict(flags="/arch:SSE4.2"), - AVX = dict(flags="/arch:AVX"), - F16C = {}, - XOP = dict(disable="Intel Compiler doesn't support it"), - FMA4 = dict(disable="Intel Compiler doesn't support it"), - # Intel Compiler doesn't support FMA3 or AVX2 independently - FMA3 = dict( - implies="F16C AVX2", 
flags="/arch:CORE-AVX2" - ), - AVX2 = dict( - implies="FMA3", flags="/arch:CORE-AVX2" - ), - # Intel Compiler doesn't support AVX512F or AVX512CD independently - AVX512F = dict( - implies="AVX2 AVX512CD", flags="/Qx:COMMON-AVX512" - ), - AVX512CD = dict( - implies="AVX2 AVX512F", flags="/Qx:COMMON-AVX512" - ), - AVX512_KNL = dict(flags="/Qx:KNL"), - AVX512_KNM = dict(flags="/Qx:KNM"), - AVX512_SKX = dict(flags="/Qx:SKYLAKE-AVX512"), - AVX512_CLX = dict(flags="/Qx:CASCADELAKE"), - AVX512_CNL = dict(flags="/Qx:CANNONLAKE"), - AVX512_ICL = dict(flags="/Qx:ICELAKE-CLIENT"), - AVX512_SPR = dict(disable="Not supported yet") - ) - if on_x86 and self.cc_is_msvc: return dict( - SSE = dict(flags="/arch:SSE") if self.cc_on_x86 else {}, - SSE2 = dict(flags="/arch:SSE2") if self.cc_on_x86 else {}, - SSE3 = {}, - SSSE3 = {}, - SSE41 = {}, - POPCNT = dict(headers="nmmintrin.h"), - SSE42 = {}, - AVX = dict(flags="/arch:AVX"), - F16C = {}, - XOP = dict(headers="ammintrin.h"), - FMA4 = dict(headers="ammintrin.h"), - # MSVC doesn't support FMA3 or AVX2 independently - FMA3 = dict( - implies="F16C AVX2", flags="/arch:AVX2" - ), - AVX2 = dict( - implies="F16C FMA3", flags="/arch:AVX2" - ), - # MSVC doesn't support AVX512F or AVX512CD independently, - # always generate instructions belong to (VL/VW/DQ) - AVX512F = dict( - implies="AVX2 AVX512CD AVX512_SKX", flags="/arch:AVX512" - ), - AVX512CD = dict( - implies="AVX512F AVX512_SKX", flags="/arch:AVX512" - ), - AVX512_KNL = dict( - disable="MSVC compiler doesn't support it" - ), - AVX512_KNM = dict( - disable="MSVC compiler doesn't support it" - ), - AVX512_SKX = dict(flags="/arch:AVX512"), - AVX512_CLX = {}, - AVX512_CNL = {}, - AVX512_ICL = {}, - AVX512_SPR= dict( - disable="MSVC compiler doesn't support it" - ) - ) - - on_power = self.cc_on_ppc64le or self.cc_on_ppc64 - if on_power: - partial = dict( - VSX = dict( - implies=("VSX2" if self.cc_on_ppc64le else ""), - flags="-mvsx" - ), - VSX2 = dict( - flags="-mcpu=power8", implies_detect=False - ), - VSX3 = dict( - flags="-mcpu=power9 -mtune=power9", implies_detect=False - ), - VSX4 = dict( - flags="-mcpu=power10 -mtune=power10", implies_detect=False - ) - ) - if self.cc_is_clang: - partial["VSX"]["flags"] = "-maltivec -mvsx" - partial["VSX2"]["flags"] = "-mcpu=power8" - partial["VSX3"]["flags"] = "-mcpu=power9" - partial["VSX4"]["flags"] = "-mcpu=power10" - - return partial - - on_zarch = self.cc_on_s390x - if on_zarch: - partial = dict( - VX = dict( - flags="-march=arch11 -mzvector" - ), - VXE = dict( - flags="-march=arch12", implies_detect=False - ), - VXE2 = dict( - flags="-march=arch13", implies_detect=False - ) - ) - - return partial - - - if self.cc_on_aarch64 and is_unix: return dict( - NEON = dict( - implies="NEON_FP16 NEON_VFPV4 ASIMD", autovec=True - ), - NEON_FP16 = dict( - implies="NEON NEON_VFPV4 ASIMD", autovec=True - ), - NEON_VFPV4 = dict( - implies="NEON NEON_FP16 ASIMD", autovec=True - ), - ASIMD = dict( - implies="NEON NEON_FP16 NEON_VFPV4", autovec=True - ), - ASIMDHP = dict( - flags="-march=armv8.2-a+fp16" - ), - ASIMDDP = dict( - flags="-march=armv8.2-a+dotprod" - ), - ASIMDFHM = dict( - flags="-march=armv8.2-a+fp16fml" - ), - ) - if self.cc_on_armhf and is_unix: return dict( - NEON = dict( - flags="-mfpu=neon" - ), - NEON_FP16 = dict( - flags="-mfpu=neon-fp16 -mfp16-format=ieee" - ), - NEON_VFPV4 = dict( - flags="-mfpu=neon-vfpv4", - ), - ASIMD = dict( - flags="-mfpu=neon-fp-armv8 -march=armv8-a+simd", - ), - ASIMDHP = dict( - flags="-march=armv8.2-a+fp16" - ), - ASIMDDP = dict( - 
flags="-march=armv8.2-a+dotprod", - ), - ASIMDFHM = dict( - flags="-march=armv8.2-a+fp16fml" - ) - ) - # TODO: ARM MSVC - return {} - - def __init__(self): - if self.conf_tmp_path is None: - import shutil - import tempfile - tmp = tempfile.mkdtemp() - def rm_temp(): - try: - shutil.rmtree(tmp) - except OSError: - pass - atexit.register(rm_temp) - self.conf_tmp_path = tmp - - if self.conf_cache_factors is None: - self.conf_cache_factors = [ - os.path.getmtime(__file__), - self.conf_nocache - ] - -class _Distutils: - """A helper class that provides a collection of fundamental methods - implemented in a top of Python and NumPy Distutils. - - The idea behind this class is to gather all methods that it may - need to override in case of reuse 'CCompilerOpt' in environment - different than of what NumPy has. - - Parameters - ---------- - ccompiler : `CCompiler` - The generate instance that returned from `distutils.ccompiler.new_compiler()`. - """ - def __init__(self, ccompiler): - self._ccompiler = ccompiler - - def dist_compile(self, sources, flags, ccompiler=None, **kwargs): - """Wrap CCompiler.compile()""" - assert(isinstance(sources, list)) - assert(isinstance(flags, list)) - flags = kwargs.pop("extra_postargs", []) + flags - if not ccompiler: - ccompiler = self._ccompiler - - return ccompiler.compile(sources, extra_postargs=flags, **kwargs) - - def dist_test(self, source, flags, macros=[]): - """Return True if 'CCompiler.compile()' able to compile - a source file with certain flags. - """ - assert(isinstance(source, str)) - from distutils.errors import CompileError - cc = self._ccompiler; - bk_spawn = getattr(cc, 'spawn', None) - if bk_spawn: - cc_type = getattr(self._ccompiler, "compiler_type", "") - if cc_type in ("msvc",): - setattr(cc, 'spawn', self._dist_test_spawn_paths) - else: - setattr(cc, 'spawn', self._dist_test_spawn) - test = False - try: - self.dist_compile( - [source], flags, macros=macros, output_dir=self.conf_tmp_path - ) - test = True - except CompileError as e: - self.dist_log(str(e), stderr=True) - if bk_spawn: - setattr(cc, 'spawn', bk_spawn) - return test - - def dist_info(self): - """ - Return a tuple containing info about (platform, compiler, extra_args), - required by the abstract class '_CCompiler' for discovering the - platform environment. This is also used as a cache factor in order - to detect any changes happening from outside. 
- """ - if hasattr(self, "_dist_info"): - return self._dist_info - - cc_type = getattr(self._ccompiler, "compiler_type", '') - if cc_type in ("intelem", "intelemw"): - platform = "x86_64" - elif cc_type in ("intel", "intelw", "intele"): - platform = "x86" - else: - from distutils.util import get_platform - platform = get_platform() - - cc_info = getattr(self._ccompiler, "compiler", getattr(self._ccompiler, "compiler_so", '')) - if not cc_type or cc_type == "unix": - if hasattr(cc_info, "__iter__"): - compiler = cc_info[0] - else: - compiler = str(cc_info) - else: - compiler = cc_type - - if hasattr(cc_info, "__iter__") and len(cc_info) > 1: - extra_args = ' '.join(cc_info[1:]) - else: - extra_args = os.environ.get("CFLAGS", "") - extra_args += os.environ.get("CPPFLAGS", "") - - self._dist_info = (platform, compiler, extra_args) - return self._dist_info - - @staticmethod - def dist_error(*args): - """Raise a compiler error""" - from distutils.errors import CompileError - raise CompileError(_Distutils._dist_str(*args)) - - @staticmethod - def dist_fatal(*args): - """Raise a distutils error""" - from distutils.errors import DistutilsError - raise DistutilsError(_Distutils._dist_str(*args)) - - @staticmethod - def dist_log(*args, stderr=False): - """Print a console message""" - from numpy.distutils import log - out = _Distutils._dist_str(*args) - if stderr: - log.warn(out) - else: - log.info(out) - - @staticmethod - def dist_load_module(name, path): - """Load a module from file, required by the abstract class '_Cache'.""" - from .misc_util import exec_mod_from_location - try: - return exec_mod_from_location(name, path) - except Exception as e: - _Distutils.dist_log(e, stderr=True) - return None - - @staticmethod - def _dist_str(*args): - """Return a string to print by log and errors.""" - def to_str(arg): - if not isinstance(arg, str) and hasattr(arg, '__iter__'): - ret = [] - for a in arg: - ret.append(to_str(a)) - return '('+ ' '.join(ret) + ')' - return str(arg) - - stack = inspect.stack()[2] - start = "CCompilerOpt.%s[%d] : " % (stack.function, stack.lineno) - out = ' '.join([ - to_str(a) - for a in (*args,) - ]) - return start + out - - def _dist_test_spawn_paths(self, cmd, display=None): - """ - Fix msvc SDK ENV path same as distutils do - without it we get c1: fatal error C1356: unable to find mspdbcore.dll - """ - if not hasattr(self._ccompiler, "_paths"): - self._dist_test_spawn(cmd) - return - old_path = os.getenv("path") - try: - os.environ["path"] = self._ccompiler._paths - self._dist_test_spawn(cmd) - finally: - os.environ["path"] = old_path - - _dist_warn_regex = re.compile( - # intel and msvc compilers don't raise - # fatal errors when flags are wrong or unsupported - ".*(" - "warning D9002|" # msvc, it should be work with any language. 
- "invalid argument for option" # intel - ").*" - ) - @staticmethod - def _dist_test_spawn(cmd, display=None): - try: - o = subprocess.check_output(cmd, stderr=subprocess.STDOUT, - text=True) - if o and re.match(_Distutils._dist_warn_regex, o): - _Distutils.dist_error( - "Flags in command", cmd ,"aren't supported by the compiler" - ", output -> \n%s" % o - ) - except subprocess.CalledProcessError as exc: - o = exc.output - s = exc.returncode - except OSError as e: - o = e - s = 127 - else: - return None - _Distutils.dist_error( - "Command", cmd, "failed with exit status %d output -> \n%s" % ( - s, o - )) - -_share_cache = {} -class _Cache: - """An abstract class handles caching functionality, provides two - levels of caching, in-memory by share instances attributes among - each other and by store attributes into files. - - **Note**: - any attributes that start with ``_`` or ``conf_`` will be ignored. - - Parameters - ---------- - cache_path : str or None - The path of cache file, if None then cache in file will disabled. - - *factors : - The caching factors that need to utilize next to `conf_cache_factors`. - - Attributes - ---------- - cache_private : set - Hold the attributes that need be skipped from "in-memory cache". - - cache_infile : bool - Utilized during initializing this class, to determine if the cache was able - to loaded from the specified cache path in 'cache_path'. - """ - - # skip attributes from cache - _cache_ignore = re.compile("^(_|conf_)") - - def __init__(self, cache_path=None, *factors): - self.cache_me = {} - self.cache_private = set() - self.cache_infile = False - self._cache_path = None - - if self.conf_nocache: - self.dist_log("cache is disabled by `Config`") - return - - self._cache_hash = self.cache_hash(*factors, *self.conf_cache_factors) - self._cache_path = cache_path - if cache_path: - if os.path.exists(cache_path): - self.dist_log("load cache from file ->", cache_path) - cache_mod = self.dist_load_module("cache", cache_path) - if not cache_mod: - self.dist_log( - "unable to load the cache file as a module", - stderr=True - ) - elif not hasattr(cache_mod, "hash") or \ - not hasattr(cache_mod, "data"): - self.dist_log("invalid cache file", stderr=True) - elif self._cache_hash == cache_mod.hash: - self.dist_log("hit the file cache") - for attr, val in cache_mod.data.items(): - setattr(self, attr, val) - self.cache_infile = True - else: - self.dist_log("miss the file cache") - - if not self.cache_infile: - other_cache = _share_cache.get(self._cache_hash) - if other_cache: - self.dist_log("hit the memory cache") - for attr, val in other_cache.__dict__.items(): - if attr in other_cache.cache_private or \ - re.match(self._cache_ignore, attr): - continue - setattr(self, attr, val) - - _share_cache[self._cache_hash] = self - atexit.register(self.cache_flush) - - def __del__(self): - for h, o in _share_cache.items(): - if o == self: - _share_cache.pop(h) - break - - def cache_flush(self): - """ - Force update the cache. 
- """ - if not self._cache_path: - return - # TODO: don't write if the cache doesn't change - self.dist_log("write cache to path ->", self._cache_path) - cdict = self.__dict__.copy() - for attr in self.__dict__.keys(): - if re.match(self._cache_ignore, attr): - cdict.pop(attr) - - d = os.path.dirname(self._cache_path) - if not os.path.exists(d): - os.makedirs(d) - - repr_dict = pprint.pformat(cdict, compact=True) - with open(self._cache_path, "w") as f: - f.write(textwrap.dedent("""\ - # AUTOGENERATED DON'T EDIT - # Please make changes to the code generator \ - (distutils/ccompiler_opt.py) - hash = {} - data = \\ - """).format(self._cache_hash)) - f.write(repr_dict) - - def cache_hash(self, *factors): - # is there a built-in non-crypto hash? - # sdbm - chash = 0 - for f in factors: - for char in str(f): - chash = ord(char) + (chash << 6) + (chash << 16) - chash - chash &= 0xFFFFFFFF - return chash - - @staticmethod - def me(cb): - """ - A static method that can be treated as a decorator to - dynamically cache certain methods. - """ - def cache_wrap_me(self, *args, **kwargs): - # good for normal args - cache_key = str(( - cb.__name__, *args, *kwargs.keys(), *kwargs.values() - )) - if cache_key in self.cache_me: - return self.cache_me[cache_key] - ccb = cb(self, *args, **kwargs) - self.cache_me[cache_key] = ccb - return ccb - return cache_wrap_me - -class _CCompiler: - """A helper class for `CCompilerOpt` containing all utilities that - related to the fundamental compiler's functions. - - Attributes - ---------- - cc_on_x86 : bool - True when the target architecture is 32-bit x86 - cc_on_x64 : bool - True when the target architecture is 64-bit x86 - cc_on_ppc64 : bool - True when the target architecture is 64-bit big-endian powerpc - cc_on_ppc64le : bool - True when the target architecture is 64-bit litle-endian powerpc - cc_on_s390x : bool - True when the target architecture is IBM/ZARCH on linux - cc_on_armhf : bool - True when the target architecture is 32-bit ARMv7+ - cc_on_aarch64 : bool - True when the target architecture is 64-bit Armv8-a+ - cc_on_noarch : bool - True when the target architecture is unknown or not supported - cc_is_gcc : bool - True if the compiler is GNU or - if the compiler is unknown - cc_is_clang : bool - True if the compiler is Clang - cc_is_icc : bool - True if the compiler is Intel compiler (unix like) - cc_is_iccw : bool - True if the compiler is Intel compiler (msvc like) - cc_is_nocc : bool - True if the compiler isn't supported directly, - Note: that cause a fail-back to gcc - cc_has_debug : bool - True if the compiler has debug flags - cc_has_native : bool - True if the compiler has native flags - cc_noopt : bool - True if the compiler has definition 'DISABLE_OPT*', - or 'cc_on_noarch' is True - cc_march : str - The target architecture name, or "unknown" if - the architecture isn't supported - cc_name : str - The compiler name, or "unknown" if the compiler isn't supported - cc_flags : dict - Dictionary containing the initialized flags of `_Config.conf_cc_flags` - """ - def __init__(self): - if hasattr(self, "cc_is_cached"): - return - # attr regex compiler-expression - detect_arch = ( - ("cc_on_x64", ".*(x|x86_|amd)64.*", ""), - ("cc_on_x86", ".*(win32|x86|i386|i686).*", ""), - ("cc_on_ppc64le", ".*(powerpc|ppc)64(el|le).*|.*powerpc.*", - "defined(__powerpc64__) && " - "defined(__LITTLE_ENDIAN__)"), - ("cc_on_ppc64", ".*(powerpc|ppc).*|.*powerpc.*", - "defined(__powerpc64__) && " - "defined(__BIG_ENDIAN__)"), - ("cc_on_aarch64", ".*(aarch64|arm64).*", ""), 
- ("cc_on_armhf", ".*arm.*", "defined(__ARM_ARCH_7__) || " - "defined(__ARM_ARCH_7A__)"), - ("cc_on_s390x", ".*s390x.*", ""), - # undefined platform - ("cc_on_noarch", "", ""), - ) - detect_compiler = ( - ("cc_is_gcc", r".*(gcc|gnu\-g).*", ""), - ("cc_is_clang", ".*clang.*", ""), - # intel msvc like - ("cc_is_iccw", ".*(intelw|intelemw|iccw).*", ""), - ("cc_is_icc", ".*(intel|icc).*", ""), # intel unix like - ("cc_is_msvc", ".*msvc.*", ""), - ("cc_is_fcc", ".*fcc.*", ""), - # undefined compiler will be treat it as gcc - ("cc_is_nocc", "", ""), - ) - detect_args = ( - ("cc_has_debug", ".*(O0|Od|ggdb|coverage|debug:full).*", ""), - ("cc_has_native", - ".*(-march=native|-xHost|/QxHost|-mcpu=a64fx).*", ""), - # in case if the class run with -DNPY_DISABLE_OPTIMIZATION - ("cc_noopt", ".*DISABLE_OPT.*", ""), - ) - - dist_info = self.dist_info() - platform, compiler_info, extra_args = dist_info - # set False to all attrs - for section in (detect_arch, detect_compiler, detect_args): - for attr, rgex, cexpr in section: - setattr(self, attr, False) - - for detect, searchin in ((detect_arch, platform), (detect_compiler, compiler_info)): - for attr, rgex, cexpr in detect: - if rgex and not re.match(rgex, searchin, re.IGNORECASE): - continue - if cexpr and not self.cc_test_cexpr(cexpr): - continue - setattr(self, attr, True) - break - - for attr, rgex, cexpr in detect_args: - if rgex and not re.match(rgex, extra_args, re.IGNORECASE): - continue - if cexpr and not self.cc_test_cexpr(cexpr): - continue - setattr(self, attr, True) - - if self.cc_on_noarch: - self.dist_log( - "unable to detect CPU architecture which lead to disable the optimization. " - f"check dist_info:<<\n{dist_info}\n>>", - stderr=True - ) - self.cc_noopt = True - - if self.conf_noopt: - self.dist_log("Optimization is disabled by the Config", stderr=True) - self.cc_noopt = True - - if self.cc_is_nocc: - """ - mingw can be treated as a gcc, and also xlc even if it based on clang, - but still has the same gcc optimization flags. - """ - self.dist_log( - "unable to detect compiler type which leads to treating it as GCC. " - "this is a normal behavior if you're using gcc-like compiler such as MinGW or IBM/XLC." - f"check dist_info:<<\n{dist_info}\n>>", - stderr=True - ) - self.cc_is_gcc = True - - self.cc_march = "unknown" - for arch in ("x86", "x64", "ppc64", "ppc64le", - "armhf", "aarch64", "s390x"): - if getattr(self, "cc_on_" + arch): - self.cc_march = arch - break - - self.cc_name = "unknown" - for name in ("gcc", "clang", "iccw", "icc", "msvc", "fcc"): - if getattr(self, "cc_is_" + name): - self.cc_name = name - break - - self.cc_flags = {} - compiler_flags = self.conf_cc_flags.get(self.cc_name) - if compiler_flags is None: - self.dist_fatal( - "undefined flag for compiler '%s', " - "leave an empty dict instead" % self.cc_name - ) - for name, flags in compiler_flags.items(): - self.cc_flags[name] = nflags = [] - if flags: - assert(isinstance(flags, str)) - flags = flags.split() - for f in flags: - if self.cc_test_flags([f]): - nflags.append(f) - - self.cc_is_cached = True - - @_Cache.me - def cc_test_flags(self, flags): - """ - Returns True if the compiler supports 'flags'. 
- """ - assert(isinstance(flags, list)) - self.dist_log("testing flags", flags) - test_path = os.path.join(self.conf_check_path, "test_flags.c") - test = self.dist_test(test_path, flags) - if not test: - self.dist_log("testing failed", stderr=True) - return test - - @_Cache.me - def cc_test_cexpr(self, cexpr, flags=[]): - """ - Same as the above but supports compile-time expressions. - """ - self.dist_log("testing compiler expression", cexpr) - test_path = os.path.join(self.conf_tmp_path, "npy_dist_test_cexpr.c") - with open(test_path, "w") as fd: - fd.write(textwrap.dedent(f"""\ - #if !({cexpr}) - #error "unsupported expression" - #endif - int dummy; - """)) - test = self.dist_test(test_path, flags) - if not test: - self.dist_log("testing failed", stderr=True) - return test - - def cc_normalize_flags(self, flags): - """ - Remove the conflicts that caused due gathering implied features flags. - - Parameters - ---------- - 'flags' list, compiler flags - flags should be sorted from the lowest to the highest interest. - - Returns - ------- - list, filtered from any conflicts. - - Examples - -------- - >>> self.cc_normalize_flags(['-march=armv8.2-a+fp16', '-march=armv8.2-a+dotprod']) - ['armv8.2-a+fp16+dotprod'] - - >>> self.cc_normalize_flags( - ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', '-mavx', '-march=core-avx2'] - ) - ['-march=core-avx2'] - """ - assert(isinstance(flags, list)) - if self.cc_is_gcc or self.cc_is_clang or self.cc_is_icc: - return self._cc_normalize_unix(flags) - - if self.cc_is_msvc or self.cc_is_iccw: - return self._cc_normalize_win(flags) - return flags - - _cc_normalize_unix_mrgx = re.compile( - # 1- to check the highest of - r"^(-mcpu=|-march=|-x[A-Z0-9\-])" - ) - _cc_normalize_unix_frgx = re.compile( - # 2- to remove any flags starts with - # -march, -mcpu, -x(INTEL) and '-m' without '=' - r"^(?!(-mcpu=|-march=|-x[A-Z0-9\-]|-m[a-z0-9\-\.]*.$))|" - # exclude: - r"(?:-mzvector)" - ) - _cc_normalize_unix_krgx = re.compile( - # 3- keep only the highest of - r"^(-mfpu|-mtune)" - ) - _cc_normalize_arch_ver = re.compile( - r"[0-9.]" - ) - def _cc_normalize_unix(self, flags): - def ver_flags(f): - # arch ver subflag - # -march=armv8.2-a+fp16fml - tokens = f.split('+') - ver = float('0' + ''.join( - re.findall(self._cc_normalize_arch_ver, tokens[0]) - )) - return ver, tokens[0], tokens[1:] - - if len(flags) <= 1: - return flags - # get the highest matched flag - for i, cur_flag in enumerate(reversed(flags)): - if not re.match(self._cc_normalize_unix_mrgx, cur_flag): - continue - lower_flags = flags[:-(i+1)] - upper_flags = flags[-i:] - filtered = list(filter( - self._cc_normalize_unix_frgx.search, lower_flags - )) - # gather subflags - ver, arch, subflags = ver_flags(cur_flag) - if ver > 0 and len(subflags) > 0: - for xflag in lower_flags: - xver, _, xsubflags = ver_flags(xflag) - if ver == xver: - subflags = xsubflags + subflags - cur_flag = arch + '+' + '+'.join(subflags) - - flags = filtered + [cur_flag] - if i > 0: - flags += upper_flags - break - - # to remove overridable flags - final_flags = [] - matched = set() - for f in reversed(flags): - match = re.match(self._cc_normalize_unix_krgx, f) - if not match: - pass - elif match[0] in matched: - continue - else: - matched.add(match[0]) - final_flags.insert(0, f) - return final_flags - - _cc_normalize_win_frgx = re.compile( - r"^(?!(/arch\:|/Qx\:))" - ) - _cc_normalize_win_mrgx = re.compile( - r"^(/arch|/Qx:)" - ) - def _cc_normalize_win(self, flags): - for i, f in enumerate(reversed(flags)): - if not 
re.match(self._cc_normalize_win_mrgx, f): - continue - i += 1 - return list(filter( - self._cc_normalize_win_frgx.search, flags[:-i] - )) + flags[-i:] - return flags - -class _Feature: - """A helper class for `CCompilerOpt` that managing CPU features. - - Attributes - ---------- - feature_supported : dict - Dictionary containing all CPU features that supported - by the platform, according to the specified values in attribute - `_Config.conf_features` and `_Config.conf_features_partial()` - - feature_min : set - The minimum support of CPU features, according to - the specified values in attribute `_Config.conf_min_features`. - """ - def __init__(self): - if hasattr(self, "feature_is_cached"): - return - self.feature_supported = pfeatures = self.conf_features_partial() - for feature_name in list(pfeatures.keys()): - feature = pfeatures[feature_name] - cfeature = self.conf_features[feature_name] - feature.update({ - k:v for k,v in cfeature.items() if k not in feature - }) - disabled = feature.get("disable") - if disabled is not None: - pfeatures.pop(feature_name) - self.dist_log( - "feature '%s' is disabled," % feature_name, - disabled, stderr=True - ) - continue - # list is used internally for these options - for option in ( - "implies", "group", "detect", "headers", "flags", "extra_checks" - ) : - oval = feature.get(option) - if isinstance(oval, str): - feature[option] = oval.split() - - self.feature_min = set() - min_f = self.conf_min_features.get(self.cc_march, "") - for F in min_f.upper().split(): - if F in self.feature_supported: - self.feature_min.add(F) - - self.feature_is_cached = True - - def feature_names(self, names=None, force_flags=None, macros=[]): - """ - Returns a set of CPU feature names that supported by platform and the **C** compiler. - - Parameters - ---------- - names : sequence or None, optional - Specify certain CPU features to test it against the **C** compiler. - if None(default), it will test all current supported features. - **Note**: feature names must be in upper-case. - - force_flags : list or None, optional - If None(default), default compiler flags for every CPU feature will - be used during the test. - - macros : list of tuples, optional - A list of C macro definitions. - """ - assert( - names is None or ( - not isinstance(names, str) and - hasattr(names, "__iter__") - ) - ) - assert(force_flags is None or isinstance(force_flags, list)) - if names is None: - names = self.feature_supported.keys() - supported_names = set() - for f in names: - if self.feature_is_supported( - f, force_flags=force_flags, macros=macros - ): - supported_names.add(f) - return supported_names - - def feature_is_exist(self, name): - """ - Returns True if a certain feature is exist and covered within - ``_Config.conf_features``. - - Parameters - ---------- - 'name': str - feature name in uppercase. - """ - assert(name.isupper()) - return name in self.conf_features - - def feature_sorted(self, names, reverse=False): - """ - Sort a list of CPU features ordered by the lowest interest. - - Parameters - ---------- - 'names': sequence - sequence of supported feature names in uppercase. - 'reverse': bool, optional - If true, the sorted features is reversed. 
(highest interest) - - Returns - ------- - list, sorted CPU features - """ - def sort_cb(k): - if isinstance(k, str): - return self.feature_supported[k]["interest"] - # multiple features - rank = max([self.feature_supported[f]["interest"] for f in k]) - # FIXME: that's not a safe way to increase the rank for - # multi targets - rank += len(k) -1 - return rank - return sorted(names, reverse=reverse, key=sort_cb) - - def feature_implies(self, names, keep_origins=False): - """ - Return a set of CPU features that implied by 'names' - - Parameters - ---------- - names : str or sequence of str - CPU feature name(s) in uppercase. - - keep_origins : bool - if False(default) then the returned set will not contain any - features from 'names'. This case happens only when two features - imply each other. - - Examples - -------- - >>> self.feature_implies("SSE3") - {'SSE', 'SSE2'} - >>> self.feature_implies("SSE2") - {'SSE'} - >>> self.feature_implies("SSE2", keep_origins=True) - # 'SSE2' found here since 'SSE' and 'SSE2' imply each other - {'SSE', 'SSE2'} - """ - def get_implies(name, _caller=set()): - implies = set() - d = self.feature_supported[name] - for i in d.get("implies", []): - implies.add(i) - if i in _caller: - # infinity recursive guard since - # features can imply each other - continue - _caller.add(name) - implies = implies.union(get_implies(i, _caller)) - return implies - - if isinstance(names, str): - implies = get_implies(names) - names = [names] - else: - assert(hasattr(names, "__iter__")) - implies = set() - for n in names: - implies = implies.union(get_implies(n)) - if not keep_origins: - implies.difference_update(names) - return implies - - def feature_implies_c(self, names): - """same as feature_implies() but combining 'names'""" - if isinstance(names, str): - names = set((names,)) - else: - names = set(names) - return names.union(self.feature_implies(names)) - - def feature_ahead(self, names): - """ - Return list of features in 'names' after remove any - implied features and keep the origins. - - Parameters - ---------- - 'names': sequence - sequence of CPU feature names in uppercase. - - Returns - ------- - list of CPU features sorted as-is 'names' - - Examples - -------- - >>> self.feature_ahead(["SSE2", "SSE3", "SSE41"]) - ["SSE41"] - # assume AVX2 and FMA3 implies each other and AVX2 - # is the highest interest - >>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"]) - ["AVX2"] - # assume AVX2 and FMA3 don't implies each other - >>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"]) - ["AVX2", "FMA3"] - """ - assert( - not isinstance(names, str) - and hasattr(names, '__iter__') - ) - implies = self.feature_implies(names, keep_origins=True) - ahead = [n for n in names if n not in implies] - if len(ahead) == 0: - # return the highest interested feature - # if all features imply each other - ahead = self.feature_sorted(names, reverse=True)[:1] - return ahead - - def feature_untied(self, names): - """ - same as 'feature_ahead()' but if both features implied each other - and keep the highest interest. - - Parameters - ---------- - 'names': sequence - sequence of CPU feature names in uppercase. 
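A worked example of the implication closure, using the x86 table above (SSE41 implies SSSE3, which implies SSE3, then SSE2 and SSE imply each other):

    # feature_implies("SSE41")   -> {'SSSE3', 'SSE3', 'SSE2', 'SSE'}
    # feature_implies_c("SSE41") -> same set plus the origin 'SSE41'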
- - Returns - ------- - list of CPU features sorted as-is 'names' - - Examples - -------- - >>> self.feature_untied(["SSE2", "SSE3", "SSE41"]) - ["SSE2", "SSE3", "SSE41"] - # assume AVX2 and FMA3 implies each other - >>> self.feature_untied(["SSE2", "SSE3", "SSE41", "FMA3", "AVX2"]) - ["SSE2", "SSE3", "SSE41", "AVX2"] - """ - assert( - not isinstance(names, str) - and hasattr(names, '__iter__') - ) - final = [] - for n in names: - implies = self.feature_implies(n) - tied = [ - nn for nn in final - if nn in implies and n in self.feature_implies(nn) - ] - if tied: - tied = self.feature_sorted(tied + [n]) - if n not in tied[1:]: - continue - final.remove(tied[:1][0]) - final.append(n) - return final - - def feature_get_til(self, names, keyisfalse): - """ - same as `feature_implies_c()` but stop collecting implied - features when feature's option that provided through - parameter 'keyisfalse' is False, also sorting the returned - features. - """ - def til(tnames): - # sort from highest to lowest interest then cut if "key" is False - tnames = self.feature_implies_c(tnames) - tnames = self.feature_sorted(tnames, reverse=True) - for i, n in enumerate(tnames): - if not self.feature_supported[n].get(keyisfalse, True): - tnames = tnames[:i+1] - break - return tnames - - if isinstance(names, str) or len(names) <= 1: - names = til(names) - # normalize the sort - names.reverse() - return names - - names = self.feature_ahead(names) - names = {t for n in names for t in til(n)} - return self.feature_sorted(names) - - def feature_detect(self, names): - """ - Return a list of CPU features that required to be detected - sorted from the lowest to highest interest. - """ - names = self.feature_get_til(names, "implies_detect") - detect = [] - for n in names: - d = self.feature_supported[n] - detect += d.get("detect", d.get("group", [n])) - return detect - - @_Cache.me - def feature_flags(self, names): - """ - Return a list of CPU features flags sorted from the lowest - to highest interest. - """ - names = self.feature_sorted(self.feature_implies_c(names)) - flags = [] - for n in names: - d = self.feature_supported[n] - f = d.get("flags", []) - if not f or not self.cc_test_flags(f): - continue - flags += f - return self.cc_normalize_flags(flags) - - @_Cache.me - def feature_test(self, name, force_flags=None, macros=[]): - """ - Test a certain CPU feature against the compiler through its own - check file. - - Parameters - ---------- - name : str - Supported CPU feature name. - - force_flags : list or None, optional - If None(default), the returned flags from `feature_flags()` - will be used. - - macros : list of tuples, optional - A list of C macro definitions. - """ - if force_flags is None: - force_flags = self.feature_flags(name) - - self.dist_log( - "testing feature '%s' with flags (%s)" % ( - name, ' '.join(force_flags) - )) - # Each CPU feature must have C source code contains at - # least one intrinsic or instruction related to this feature. - test_path = os.path.join( - self.conf_check_path, "cpu_%s.c" % name.lower() - ) - if not os.path.exists(test_path): - self.dist_fatal("feature test file is not exist", test_path) - - test = self.dist_test( - test_path, force_flags + self.cc_flags["werror"], macros=macros - ) - if not test: - self.dist_log("testing failed", stderr=True) - return test - - @_Cache.me - def feature_is_supported(self, name, force_flags=None, macros=[]): - """ - Check if a certain CPU feature is supported by the platform and compiler. 
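Putting the pieces above together, probing one feature end to end looks roughly like this (`self` is a `CCompilerOpt`-style instance; the feature name comes from the x86 table):

    flags = self.feature_flags("AVX2")   # implied features folded in, then
                                         # cleaned by cc_normalize_flags()
    ok = self.feature_test("AVX2", force_flags=flags)
    # feature_test() compiles checks/cpu_avx2.c with flags plus
    # cc_flags["werror"]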
-
-        Parameters
-        ----------
-        name : str
-            CPU feature name in uppercase.
-
-        force_flags : list or None, optional
-            If None (default), the default compiler flags for every CPU
-            feature will be used during the test.
-
-        macros : list of tuples, optional
-            A list of C macro definitions.
-        """
-        assert(name.isupper())
-        assert(force_flags is None or isinstance(force_flags, list))
-
-        supported = name in self.feature_supported
-        if supported:
-            for impl in self.feature_implies(name):
-                if not self.feature_test(impl, force_flags, macros=macros):
-                    return False
-            if not self.feature_test(name, force_flags, macros=macros):
-                return False
-        return supported
-
-    @_Cache.me
-    def feature_can_autovec(self, name):
-        """
-        Check whether the feature can be auto-vectorized by the compiler.
-        """
-        assert(isinstance(name, str))
-        d = self.feature_supported[name]
-        can = d.get("autovec", None)
-        if can is None:
-            valid_flags = [
-                self.cc_test_flags([f]) for f in d.get("flags", [])
-            ]
-            can = valid_flags and any(valid_flags)
-        return can
-
-    @_Cache.me
-    def feature_extra_checks(self, name):
-        """
-        Return a list of supported extra checks after testing them against
-        the compiler.
-
-        Parameters
-        ----------
-        name : str
-            CPU feature name in uppercase.
-        """
-        assert isinstance(name, str)
-        d = self.feature_supported[name]
-        extra_checks = d.get("extra_checks", [])
-        if not extra_checks:
-            return []
-
-        self.dist_log("Testing extra checks for feature '%s'" % name, extra_checks)
-        flags = self.feature_flags(name)
-        available = []
-        not_available = []
-        for chk in extra_checks:
-            test_path = os.path.join(
-                self.conf_check_path, "extra_%s.c" % chk.lower()
-            )
-            if not os.path.exists(test_path):
-                self.dist_fatal("extra check file does not exist", test_path)
-
-            is_supported = self.dist_test(test_path, flags + self.cc_flags["werror"])
-            if is_supported:
-                available.append(chk)
-            else:
-                not_available.append(chk)
-
-        if not_available:
-            self.dist_log("testing failed for checks", not_available, stderr=True)
-        return available
-
-    def feature_c_preprocessor(self, feature_name, tabs=0):
-        """
-        Generate C preprocessor definitions and include headers of a CPU feature.
-
-        Parameters
-        ----------
-        'feature_name': str
-            CPU feature name in uppercase.
-        'tabs': int
-            if > 0, align the generated strings to the right depending on
-            the number of tabs.
-
-        Returns
-        -------
-        str, generated C preprocessor
-
-        Examples
-        --------
-        >>> self.feature_c_preprocessor("SSE3")
-        /** SSE3 **/
-        #define NPY_HAVE_SSE3 1
-        #include <pmmintrin.h>
-        """
-        assert(feature_name.isupper())
-        feature = self.feature_supported.get(feature_name)
-        assert(feature is not None)
-
-        prepr = [
-            "/** %s **/" % feature_name,
-            "#define %sHAVE_%s 1" % (self.conf_c_prefix, feature_name)
-        ]
-        prepr += [
-            "#include <%s>" % h for h in feature.get("headers", [])
-        ]
-
-        extra_defs = feature.get("group", [])
-        extra_defs += self.feature_extra_checks(feature_name)
-        for edef in extra_defs:
-            # Guard extra definitions in case of duplicates with
-            # another feature
-            prepr += [
-                "#ifndef %sHAVE_%s" % (self.conf_c_prefix, edef),
-                "\t#define %sHAVE_%s 1" % (self.conf_c_prefix, edef),
-                "#endif",
-            ]
-
-        if tabs > 0:
-            prepr = [('\t'*tabs) + l for l in prepr]
-        return '\n'.join(prepr)
-
-class _Parse:
-    """A helper class that parses the main arguments of `CCompilerOpt`,
-    also parsing configuration statements in dispatch-able sources.
-
-    Parameters
-    ----------
-    cpu_baseline : str or None
-        minimal set of required CPU features or special options
-        (see the sketch just below).
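A sketch of typical argument values accepted here (the strings are hypothetical; the special options are the ones documented under `cpu_dispatch` below):

    # cpu_baseline="min"              -> minimal feature set for the arch
    # cpu_dispatch="max -avx512_spr"  -> everything the compiler supports
    #                                    except AVX512_SPR; features already
    #                                    in the baseline are skipped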
- - cpu_dispatch : str or None - dispatched set of additional CPU features or special options. - - Special options can be: - - **MIN**: Enables the minimum CPU features that utilized via `_Config.conf_min_features` - - **MAX**: Enables all supported CPU features by the Compiler and platform. - - **NATIVE**: Enables all CPU features that supported by the current machine. - - **NONE**: Enables nothing - - **Operand +/-**: remove or add features, useful with options **MAX**, **MIN** and **NATIVE**. - NOTE: operand + is only added for nominal reason. - - NOTES: - - Case-insensitive among all CPU features and special options. - - Comma or space can be used as a separator. - - If the CPU feature is not supported by the user platform or compiler, - it will be skipped rather than raising a fatal error. - - Any specified CPU features to 'cpu_dispatch' will be skipped if its part of CPU baseline features - - 'cpu_baseline' force enables implied features. - - Attributes - ---------- - parse_baseline_names : list - Final CPU baseline's feature names(sorted from low to high) - parse_baseline_flags : list - Compiler flags of baseline features - parse_dispatch_names : list - Final CPU dispatch-able feature names(sorted from low to high) - parse_target_groups : dict - Dictionary containing initialized target groups that configured - through class attribute `conf_target_groups`. - - The key is represent the group name and value is a tuple - contains three items : - - bool, True if group has the 'baseline' option. - - list, list of CPU features. - - list, list of extra compiler flags. - - """ - def __init__(self, cpu_baseline, cpu_dispatch): - self._parse_policies = dict( - # POLICY NAME, (HAVE, NOT HAVE, [DEB]) - KEEP_BASELINE = ( - None, self._parse_policy_not_keepbase, - [] - ), - KEEP_SORT = ( - self._parse_policy_keepsort, - self._parse_policy_not_keepsort, - [] - ), - MAXOPT = ( - self._parse_policy_maxopt, None, - [] - ), - WERROR = ( - self._parse_policy_werror, None, - [] - ), - AUTOVEC = ( - self._parse_policy_autovec, None, - ["MAXOPT"] - ) - ) - if hasattr(self, "parse_is_cached"): - return - - self.parse_baseline_names = [] - self.parse_baseline_flags = [] - self.parse_dispatch_names = [] - self.parse_target_groups = {} - - if self.cc_noopt: - # skip parsing baseline and dispatch args and keep parsing target groups - cpu_baseline = cpu_dispatch = None - - self.dist_log("check requested baseline") - if cpu_baseline is not None: - cpu_baseline = self._parse_arg_features("cpu_baseline", cpu_baseline) - baseline_names = self.feature_names(cpu_baseline) - self.parse_baseline_flags = self.feature_flags(baseline_names) - self.parse_baseline_names = self.feature_sorted( - self.feature_implies_c(baseline_names) - ) - - self.dist_log("check requested dispatch-able features") - if cpu_dispatch is not None: - cpu_dispatch_ = self._parse_arg_features("cpu_dispatch", cpu_dispatch) - cpu_dispatch = { - f for f in cpu_dispatch_ - if f not in self.parse_baseline_names - } - conflict_baseline = cpu_dispatch_.difference(cpu_dispatch) - self.parse_dispatch_names = self.feature_sorted( - self.feature_names(cpu_dispatch) - ) - if len(conflict_baseline) > 0: - self.dist_log( - "skip features", conflict_baseline, "since its part of baseline" - ) - - self.dist_log("initialize targets groups") - for group_name, tokens in self.conf_target_groups.items(): - self.dist_log("parse target group", group_name) - GROUP_NAME = group_name.upper() - if not tokens or not tokens.strip(): - # allow empty groups, useful in case if 
there's a need - # to disable certain group since '_parse_target_tokens()' - # requires at least one valid target - self.parse_target_groups[GROUP_NAME] = ( - False, [], [] - ) - continue - has_baseline, features, extra_flags = \ - self._parse_target_tokens(tokens) - self.parse_target_groups[GROUP_NAME] = ( - has_baseline, features, extra_flags - ) - - self.parse_is_cached = True - - def parse_targets(self, source): - """ - Fetch and parse configuration statements that required for - defining the targeted CPU features, statements should be declared - in the top of source in between **C** comment and start - with a special mark **@targets**. - - Configuration statements are sort of keywords representing - CPU features names, group of statements and policies, combined - together to determine the required optimization. - - Parameters - ---------- - source : str - the path of **C** source file. - - Returns - ------- - - bool, True if group has the 'baseline' option - - list, list of CPU features - - list, list of extra compiler flags - """ - self.dist_log("looking for '@targets' inside -> ", source) - # get lines between /*@targets and */ - with open(source) as fd: - tokens = "" - max_to_reach = 1000 # good enough, isn't? - start_with = "@targets" - start_pos = -1 - end_with = "*/" - end_pos = -1 - for current_line, line in enumerate(fd): - if current_line == max_to_reach: - self.dist_fatal("reached the max of lines") - break - if start_pos == -1: - start_pos = line.find(start_with) - if start_pos == -1: - continue - start_pos += len(start_with) - tokens += line - end_pos = line.find(end_with) - if end_pos != -1: - end_pos += len(tokens) - len(line) - break - - if start_pos == -1: - self.dist_fatal("expected to find '%s' within a C comment" % start_with) - if end_pos == -1: - self.dist_fatal("expected to end with '%s'" % end_with) - - tokens = tokens[start_pos:end_pos] - return self._parse_target_tokens(tokens) - - _parse_regex_arg = re.compile(r'\s|,|([+-])') - def _parse_arg_features(self, arg_name, req_features): - if not isinstance(req_features, str): - self.dist_fatal("expected a string in '%s'" % arg_name) - - final_features = set() - # space and comma can be used as a separator - tokens = list(filter(None, re.split(self._parse_regex_arg, req_features))) - append = True # append is the default - for tok in tokens: - if tok[0] in ("#", "$"): - self.dist_fatal( - arg_name, "target groups and policies " - "aren't allowed from arguments, " - "only from dispatch-able sources" - ) - if tok == '+': - append = True - continue - if tok == '-': - append = False - continue - - TOK = tok.upper() # we use upper-case internally - features_to = set() - if TOK == "NONE": - pass - elif TOK == "NATIVE": - native = self.cc_flags["native"] - if not native: - self.dist_fatal(arg_name, - "native option isn't supported by the compiler" - ) - features_to = self.feature_names( - force_flags=native, macros=[("DETECT_FEATURES", 1)] - ) - elif TOK == "MAX": - features_to = self.feature_supported.keys() - elif TOK == "MIN": - features_to = self.feature_min - else: - if TOK in self.feature_supported: - features_to.add(TOK) - else: - if not self.feature_is_exist(TOK): - self.dist_fatal(arg_name, - ", '%s' isn't a known feature or option" % tok - ) - if append: - final_features = final_features.union(features_to) - else: - final_features = final_features.difference(features_to) - - append = True # back to default - - return final_features - - _parse_regex_target = re.compile(r'\s|[*,/]|([()])') - def 
_parse_target_tokens(self, tokens): - assert(isinstance(tokens, str)) - final_targets = [] # to keep it sorted as specified - extra_flags = [] - has_baseline = False - - skipped = set() - policies = set() - multi_target = None - - tokens = list(filter(None, re.split(self._parse_regex_target, tokens))) - if not tokens: - self.dist_fatal("expected one token at least") - - for tok in tokens: - TOK = tok.upper() - ch = tok[0] - if ch in ('+', '-'): - self.dist_fatal( - "+/- are 'not' allowed from target's groups or @targets, " - "only from cpu_baseline and cpu_dispatch parms" - ) - elif ch == '$': - if multi_target is not None: - self.dist_fatal( - "policies aren't allowed inside multi-target '()'" - ", only CPU features" - ) - policies.add(self._parse_token_policy(TOK)) - elif ch == '#': - if multi_target is not None: - self.dist_fatal( - "target groups aren't allowed inside multi-target '()'" - ", only CPU features" - ) - has_baseline, final_targets, extra_flags = \ - self._parse_token_group(TOK, has_baseline, final_targets, extra_flags) - elif ch == '(': - if multi_target is not None: - self.dist_fatal("unclosed multi-target, missing ')'") - multi_target = set() - elif ch == ')': - if multi_target is None: - self.dist_fatal("multi-target opener '(' wasn't found") - targets = self._parse_multi_target(multi_target) - if targets is None: - skipped.add(tuple(multi_target)) - else: - if len(targets) == 1: - targets = targets[0] - if targets and targets not in final_targets: - final_targets.append(targets) - multi_target = None # back to default - else: - if TOK == "BASELINE": - if multi_target is not None: - self.dist_fatal("baseline isn't allowed inside multi-target '()'") - has_baseline = True - continue - - if multi_target is not None: - multi_target.add(TOK) - continue - - if not self.feature_is_exist(TOK): - self.dist_fatal("invalid target name '%s'" % TOK) - - is_enabled = ( - TOK in self.parse_baseline_names or - TOK in self.parse_dispatch_names - ) - if is_enabled: - if TOK not in final_targets: - final_targets.append(TOK) - continue - - skipped.add(TOK) - - if multi_target is not None: - self.dist_fatal("unclosed multi-target, missing ')'") - if skipped: - self.dist_log( - "skip targets", skipped, - "not part of baseline or dispatch-able features" - ) - - final_targets = self.feature_untied(final_targets) - - # add polices dependencies - for p in list(policies): - _, _, deps = self._parse_policies[p] - for d in deps: - if d in policies: - continue - self.dist_log( - "policy '%s' force enables '%s'" % ( - p, d - )) - policies.add(d) - - # release policies filtrations - for p, (have, nhave, _) in self._parse_policies.items(): - func = None - if p in policies: - func = have - self.dist_log("policy '%s' is ON" % p) - else: - func = nhave - if not func: - continue - has_baseline, final_targets, extra_flags = func( - has_baseline, final_targets, extra_flags - ) - - return has_baseline, final_targets, extra_flags - - def _parse_token_policy(self, token): - """validate policy token""" - if len(token) <= 1 or token[-1:] == token[0]: - self.dist_fatal("'$' must stuck in the begin of policy name") - token = token[1:] - if token not in self._parse_policies: - self.dist_fatal( - "'%s' is an invalid policy name, available policies are" % token, - self._parse_policies.keys() - ) - return token - - def _parse_token_group(self, token, has_baseline, final_targets, extra_flags): - """validate group token""" - if len(token) <= 1 or token[-1:] == token[0]: - self.dist_fatal("'#' must stuck in the begin of 
group name") - - token = token[1:] - ghas_baseline, gtargets, gextra_flags = self.parse_target_groups.get( - token, (False, None, []) - ) - if gtargets is None: - self.dist_fatal( - "'%s' is an invalid target group name, " % token + \ - "available target groups are", - self.parse_target_groups.keys() - ) - if ghas_baseline: - has_baseline = True - # always keep sorting as specified - final_targets += [f for f in gtargets if f not in final_targets] - extra_flags += [f for f in gextra_flags if f not in extra_flags] - return has_baseline, final_targets, extra_flags - - def _parse_multi_target(self, targets): - """validate multi targets that defined between parentheses()""" - # remove any implied features and keep the origins - if not targets: - self.dist_fatal("empty multi-target '()'") - if not all([ - self.feature_is_exist(tar) for tar in targets - ]) : - self.dist_fatal("invalid target name in multi-target", targets) - if not all([ - ( - tar in self.parse_baseline_names or - tar in self.parse_dispatch_names - ) - for tar in targets - ]) : - return None - targets = self.feature_ahead(targets) - if not targets: - return None - # force sort multi targets, so it can be comparable - targets = self.feature_sorted(targets) - targets = tuple(targets) # hashable - return targets - - def _parse_policy_not_keepbase(self, has_baseline, final_targets, extra_flags): - """skip all baseline features""" - skipped = [] - for tar in final_targets[:]: - is_base = False - if isinstance(tar, str): - is_base = tar in self.parse_baseline_names - else: - # multi targets - is_base = all([ - f in self.parse_baseline_names - for f in tar - ]) - if is_base: - skipped.append(tar) - final_targets.remove(tar) - - if skipped: - self.dist_log("skip baseline features", skipped) - - return has_baseline, final_targets, extra_flags - - def _parse_policy_keepsort(self, has_baseline, final_targets, extra_flags): - """leave a notice that $keep_sort is on""" - self.dist_log( - "policy 'keep_sort' is on, dispatch-able targets", final_targets, "\n" - "are 'not' sorted depend on the highest interest but" - "as specified in the dispatch-able source or the extra group" - ) - return has_baseline, final_targets, extra_flags - - def _parse_policy_not_keepsort(self, has_baseline, final_targets, extra_flags): - """sorted depend on the highest interest""" - final_targets = self.feature_sorted(final_targets, reverse=True) - return has_baseline, final_targets, extra_flags - - def _parse_policy_maxopt(self, has_baseline, final_targets, extra_flags): - """append the compiler optimization flags""" - if self.cc_has_debug: - self.dist_log("debug mode is detected, policy 'maxopt' is skipped.") - elif self.cc_noopt: - self.dist_log("optimization is disabled, policy 'maxopt' is skipped.") - else: - flags = self.cc_flags["opt"] - if not flags: - self.dist_log( - "current compiler doesn't support optimization flags, " - "policy 'maxopt' is skipped", stderr=True - ) - else: - extra_flags += flags - return has_baseline, final_targets, extra_flags - - def _parse_policy_werror(self, has_baseline, final_targets, extra_flags): - """force warnings to treated as errors""" - flags = self.cc_flags["werror"] - if not flags: - self.dist_log( - "current compiler doesn't support werror flags, " - "warnings will 'not' treated as errors", stderr=True - ) - else: - self.dist_log("compiler warnings are treated as errors") - extra_flags += flags - return has_baseline, final_targets, extra_flags - - def _parse_policy_autovec(self, has_baseline, final_targets, extra_flags): 
- """skip features that has no auto-vectorized support by compiler""" - skipped = [] - for tar in final_targets[:]: - if isinstance(tar, str): - can = self.feature_can_autovec(tar) - else: # multiple target - can = all([ - self.feature_can_autovec(t) - for t in tar - ]) - if not can: - final_targets.remove(tar) - skipped.append(tar) - - if skipped: - self.dist_log("skip non auto-vectorized features", skipped) - - return has_baseline, final_targets, extra_flags - -class CCompilerOpt(_Config, _Distutils, _Cache, _CCompiler, _Feature, _Parse): - """ - A helper class for `CCompiler` aims to provide extra build options - to effectively control of compiler optimizations that are directly - related to CPU features. - """ - def __init__(self, ccompiler, cpu_baseline="min", cpu_dispatch="max", cache_path=None): - _Config.__init__(self) - _Distutils.__init__(self, ccompiler) - _Cache.__init__(self, cache_path, self.dist_info(), cpu_baseline, cpu_dispatch) - _CCompiler.__init__(self) - _Feature.__init__(self) - if not self.cc_noopt and self.cc_has_native: - self.dist_log( - "native flag is specified through environment variables. " - "force cpu-baseline='native'" - ) - cpu_baseline = "native" - _Parse.__init__(self, cpu_baseline, cpu_dispatch) - # keep the requested features untouched, need it later for report - # and trace purposes - self._requested_baseline = cpu_baseline - self._requested_dispatch = cpu_dispatch - # key is the dispatch-able source and value is a tuple - # contains two items (has_baseline[boolean], dispatched-features[list]) - self.sources_status = getattr(self, "sources_status", {}) - # every instance should has a separate one - self.cache_private.add("sources_status") - # set it at the end to make sure the cache writing was done after init - # this class - self.hit_cache = hasattr(self, "hit_cache") - - def is_cached(self): - """ - Returns True if the class loaded from the cache file - """ - return self.cache_infile and self.hit_cache - - def cpu_baseline_flags(self): - """ - Returns a list of final CPU baseline compiler flags - """ - return self.parse_baseline_flags - - def cpu_baseline_names(self): - """ - return a list of final CPU baseline feature names - """ - return self.parse_baseline_names - - def cpu_dispatch_names(self): - """ - return a list of final CPU dispatch feature names - """ - return self.parse_dispatch_names - - def try_dispatch(self, sources, src_dir=None, ccompiler=None, **kwargs): - """ - Compile one or more dispatch-able sources and generates object files, - also generates abstract C config headers and macros that - used later for the final runtime dispatching process. - - The mechanism behind it is to takes each source file that specified - in 'sources' and branching it into several files depend on - special configuration statements that must be declared in the - top of each source which contains targeted CPU features, - then it compiles every branched source with the proper compiler flags. - - Parameters - ---------- - sources : list - Must be a list of dispatch-able sources file paths, - and configuration statements must be declared inside - each file. - - src_dir : str - Path of parent directory for the generated headers and wrapped sources. - If None(default) the files will generated in-place. - - ccompiler : CCompiler - Distutils `CCompiler` instance to be used for compilation. - If None (default), the provided instance during the initialization - will be used instead. 
- - **kwargs : any - Arguments to pass on to the `CCompiler.compile()` - - Returns - ------- - list : generated object files - - Raises - ------ - CompileError - Raises by `CCompiler.compile()` on compiling failure. - DistutilsError - Some errors during checking the sanity of configuration statements. - - See Also - -------- - parse_targets : - Parsing the configuration statements of dispatch-able sources. - """ - to_compile = {} - baseline_flags = self.cpu_baseline_flags() - include_dirs = kwargs.setdefault("include_dirs", []) - - for src in sources: - output_dir = os.path.dirname(src) - if src_dir: - if not output_dir.startswith(src_dir): - output_dir = os.path.join(src_dir, output_dir) - if output_dir not in include_dirs: - # To allow including the generated config header(*.dispatch.h) - # by the dispatch-able sources - include_dirs.append(output_dir) - - has_baseline, targets, extra_flags = self.parse_targets(src) - nochange = self._generate_config(output_dir, src, targets, has_baseline) - for tar in targets: - tar_src = self._wrap_target(output_dir, src, tar, nochange=nochange) - flags = tuple(extra_flags + self.feature_flags(tar)) - to_compile.setdefault(flags, []).append(tar_src) - - if has_baseline: - flags = tuple(extra_flags + baseline_flags) - to_compile.setdefault(flags, []).append(src) - - self.sources_status[src] = (has_baseline, targets) - - # For these reasons, the sources are compiled in a separate loop: - # - Gathering all sources with the same flags to benefit from - # the parallel compiling as much as possible. - # - To generate all config headers of the dispatchable sources, - # before the compilation in case if there are dependency relationships - # among them. - objects = [] - for flags, srcs in to_compile.items(): - objects += self.dist_compile( - srcs, list(flags), ccompiler=ccompiler, **kwargs - ) - return objects - - def generate_dispatch_header(self, header_path): - """ - Generate the dispatch header which contains the #definitions and headers - for platform-specific instruction-sets for the enabled CPU baseline and - dispatch-able features. - - Its highly recommended to take a look at the generated header - also the generated source files via `try_dispatch()` - in order to get the full picture. - """ - self.dist_log("generate CPU dispatch header: (%s)" % header_path) - - baseline_names = self.cpu_baseline_names() - dispatch_names = self.cpu_dispatch_names() - baseline_len = len(baseline_names) - dispatch_len = len(dispatch_names) - - header_dir = os.path.dirname(header_path) - if not os.path.exists(header_dir): - self.dist_log( - f"dispatch header dir {header_dir} does not exist, creating it", - stderr=True - ) - os.makedirs(header_dir) - - with open(header_path, 'w') as f: - baseline_calls = ' \\\n'.join([ - ( - "\t%sWITH_CPU_EXPAND_(MACRO_TO_CALL(%s, __VA_ARGS__))" - ) % (self.conf_c_prefix, f) - for f in baseline_names - ]) - dispatch_calls = ' \\\n'.join([ - ( - "\t%sWITH_CPU_EXPAND_(MACRO_TO_CALL(%s, __VA_ARGS__))" - ) % (self.conf_c_prefix, f) - for f in dispatch_names - ]) - f.write(textwrap.dedent("""\ - /* - * AUTOGENERATED DON'T EDIT - * Please make changes to the code generator (distutils/ccompiler_opt.py) - */ - #define {pfx}WITH_CPU_BASELINE "{baseline_str}" - #define {pfx}WITH_CPU_DISPATCH "{dispatch_str}" - #define {pfx}WITH_CPU_BASELINE_N {baseline_len} - #define {pfx}WITH_CPU_DISPATCH_N {dispatch_len} - #define {pfx}WITH_CPU_EXPAND_(X) X - #define {pfx}WITH_CPU_BASELINE_CALL(MACRO_TO_CALL, ...) 
\\ - {baseline_calls} - #define {pfx}WITH_CPU_DISPATCH_CALL(MACRO_TO_CALL, ...) \\ - {dispatch_calls} - """).format( - pfx=self.conf_c_prefix, baseline_str=" ".join(baseline_names), - dispatch_str=" ".join(dispatch_names), baseline_len=baseline_len, - dispatch_len=dispatch_len, baseline_calls=baseline_calls, - dispatch_calls=dispatch_calls - )) - baseline_pre = '' - for name in baseline_names: - baseline_pre += self.feature_c_preprocessor(name, tabs=1) + '\n' - - dispatch_pre = '' - for name in dispatch_names: - dispatch_pre += textwrap.dedent("""\ - #ifdef {pfx}CPU_TARGET_{name} - {pre} - #endif /*{pfx}CPU_TARGET_{name}*/ - """).format( - pfx=self.conf_c_prefix_, name=name, pre=self.feature_c_preprocessor( - name, tabs=1 - )) - - f.write(textwrap.dedent("""\ - /******* baseline features *******/ - {baseline_pre} - /******* dispatch features *******/ - {dispatch_pre} - """).format( - pfx=self.conf_c_prefix_, baseline_pre=baseline_pre, - dispatch_pre=dispatch_pre - )) - - def report(self, full=False): - report = [] - platform_rows = [] - baseline_rows = [] - dispatch_rows = [] - report.append(("Platform", platform_rows)) - report.append(("", "")) - report.append(("CPU baseline", baseline_rows)) - report.append(("", "")) - report.append(("CPU dispatch", dispatch_rows)) - - ########## platform ########## - platform_rows.append(("Architecture", ( - "unsupported" if self.cc_on_noarch else self.cc_march) - )) - platform_rows.append(("Compiler", ( - "unix-like" if self.cc_is_nocc else self.cc_name) - )) - ########## baseline ########## - if self.cc_noopt: - baseline_rows.append(("Requested", "optimization disabled")) - else: - baseline_rows.append(("Requested", repr(self._requested_baseline))) - - baseline_names = self.cpu_baseline_names() - baseline_rows.append(( - "Enabled", (' '.join(baseline_names) if baseline_names else "none") - )) - baseline_flags = self.cpu_baseline_flags() - baseline_rows.append(( - "Flags", (' '.join(baseline_flags) if baseline_flags else "none") - )) - extra_checks = [] - for name in baseline_names: - extra_checks += self.feature_extra_checks(name) - baseline_rows.append(( - "Extra checks", (' '.join(extra_checks) if extra_checks else "none") - )) - - ########## dispatch ########## - if self.cc_noopt: - baseline_rows.append(("Requested", "optimization disabled")) - else: - dispatch_rows.append(("Requested", repr(self._requested_dispatch))) - - dispatch_names = self.cpu_dispatch_names() - dispatch_rows.append(( - "Enabled", (' '.join(dispatch_names) if dispatch_names else "none") - )) - ########## Generated ########## - # TODO: - # - collect object names from 'try_dispatch()' - # then get size of each object and printed - # - give more details about the features that not - # generated due compiler support - # - find a better output's design. 
- # - target_sources = {} - for source, (_, targets) in self.sources_status.items(): - for tar in targets: - target_sources.setdefault(tar, []).append(source) - - if not full or not target_sources: - generated = "" - for tar in self.feature_sorted(target_sources): - sources = target_sources[tar] - name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar) - generated += name + "[%d] " % len(sources) - dispatch_rows.append(("Generated", generated[:-1] if generated else "none")) - else: - dispatch_rows.append(("Generated", '')) - for tar in self.feature_sorted(target_sources): - sources = target_sources[tar] - pretty_name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar) - flags = ' '.join(self.feature_flags(tar)) - implies = ' '.join(self.feature_sorted(self.feature_implies(tar))) - detect = ' '.join(self.feature_detect(tar)) - extra_checks = [] - for name in ((tar,) if isinstance(tar, str) else tar): - extra_checks += self.feature_extra_checks(name) - extra_checks = (' '.join(extra_checks) if extra_checks else "none") - - dispatch_rows.append(('', '')) - dispatch_rows.append((pretty_name, implies)) - dispatch_rows.append(("Flags", flags)) - dispatch_rows.append(("Extra checks", extra_checks)) - dispatch_rows.append(("Detect", detect)) - for src in sources: - dispatch_rows.append(("", src)) - - ############################### - # TODO: add support for 'markdown' format - text = [] - secs_len = [len(secs) for secs, _ in report] - cols_len = [len(col) for _, rows in report for col, _ in rows] - tab = ' ' * 2 - pad = max(max(secs_len), max(cols_len)) - for sec, rows in report: - if not sec: - text.append("") # empty line - continue - sec += ' ' * (pad - len(sec)) - text.append(sec + tab + ': ') - for col, val in rows: - col += ' ' * (pad - len(col)) - text.append(tab + col + ': ' + val) - - return '\n'.join(text) - - def _wrap_target(self, output_dir, dispatch_src, target, nochange=False): - assert(isinstance(target, (str, tuple))) - if isinstance(target, str): - ext_name = target_name = target - else: - # multi-target - ext_name = '.'.join(target) - target_name = '__'.join(target) - - wrap_path = os.path.join(output_dir, os.path.basename(dispatch_src)) - wrap_path = "{0}.{2}{1}".format(*os.path.splitext(wrap_path), ext_name.lower()) - if nochange and os.path.exists(wrap_path): - return wrap_path - - self.dist_log("wrap dispatch-able target -> ", wrap_path) - # sorting for readability - features = self.feature_sorted(self.feature_implies_c(target)) - target_join = "#define %sCPU_TARGET_" % self.conf_c_prefix_ - target_defs = [target_join + f for f in features] - target_defs = '\n'.join(target_defs) - - with open(wrap_path, "w") as fd: - fd.write(textwrap.dedent("""\ - /** - * AUTOGENERATED DON'T EDIT - * Please make changes to the code generator \ - (distutils/ccompiler_opt.py) - */ - #define {pfx}CPU_TARGET_MODE - #define {pfx}CPU_TARGET_CURRENT {target_name} - {target_defs} - #include "{path}" - """).format( - pfx=self.conf_c_prefix_, target_name=target_name, - path=os.path.abspath(dispatch_src), target_defs=target_defs - )) - return wrap_path - - def _generate_config(self, output_dir, dispatch_src, targets, has_baseline=False): - config_path = os.path.basename(dispatch_src) - config_path = os.path.splitext(config_path)[0] + '.h' - config_path = os.path.join(output_dir, config_path) - # check if targets didn't change to avoid recompiling - cache_hash = self.cache_hash(targets, has_baseline) - try: - with open(config_path) as f: - last_hash = f.readline().split("cache_hash:") - if 
len(last_hash) == 2 and int(last_hash[1]) == cache_hash:
-                    return True
-        except OSError:
-            pass
-
-        os.makedirs(os.path.dirname(config_path), exist_ok=True)
-
-        self.dist_log("generate dispatched config -> ", config_path)
-        dispatch_calls = []
-        for tar in targets:
-            if isinstance(tar, str):
-                target_name = tar
-            else: # multi target
-                target_name = '__'.join([t for t in tar])
-            req_detect = self.feature_detect(tar)
-            req_detect = '&&'.join([
-                "CHK(%s)" % f for f in req_detect
-            ])
-            dispatch_calls.append(
-                "\t%sCPU_DISPATCH_EXPAND_(CB((%s), %s, __VA_ARGS__))" % (
-                self.conf_c_prefix_, req_detect, target_name
-            ))
-        dispatch_calls = ' \\\n'.join(dispatch_calls)
-
-        if has_baseline:
-            baseline_calls = (
-                "\t%sCPU_DISPATCH_EXPAND_(CB(__VA_ARGS__))"
-            ) % self.conf_c_prefix_
-        else:
-            baseline_calls = ''
-
-        with open(config_path, "w") as fd:
-            fd.write(textwrap.dedent("""\
-            // cache_hash:{cache_hash}
-            /**
-             * AUTOGENERATED DON'T EDIT
-             * Please make changes to the code generator (distutils/ccompiler_opt.py)
-             */
-            #ifndef {pfx}CPU_DISPATCH_EXPAND_
-                #define {pfx}CPU_DISPATCH_EXPAND_(X) X
-            #endif
-            #undef {pfx}CPU_DISPATCH_BASELINE_CALL
-            #undef {pfx}CPU_DISPATCH_CALL
-            #define {pfx}CPU_DISPATCH_BASELINE_CALL(CB, ...) \\
-            {baseline_calls}
-            #define {pfx}CPU_DISPATCH_CALL(CHK, CB, ...) \\
-            {dispatch_calls}
-            """).format(
-                pfx=self.conf_c_prefix_, baseline_calls=baseline_calls,
-                dispatch_calls=dispatch_calls, cache_hash=cache_hash
-            ))
-        return False
-
-def new_ccompiler_opt(compiler, dispatch_hpath, **kwargs):
-    """
-    Create a new instance of 'CCompilerOpt' and generate the dispatch header
-    which contains the #definitions and headers of platform-specific instruction-sets for
-    the enabled CPU baseline and dispatch-able features.
-
-    Parameters
-    ----------
-    compiler : CCompiler instance
-    dispatch_hpath : str
-        path of the dispatch header
-
-    **kwargs: passed as-is to `CCompilerOpt(...)`
-    Returns
-    -------
-    new instance of CCompilerOpt
-    """
-    opt = CCompilerOpt(compiler, **kwargs)
-    if not os.path.exists(dispatch_hpath) or not opt.is_cached():
-        opt.generate_dispatch_header(dispatch_hpath)
-    return opt
diff --git a/numpy/distutils/command/__init__.py b/numpy/distutils/command/__init__.py
deleted file mode 100644
index 3ba501de03b6..000000000000
--- a/numpy/distutils/command/__init__.py
+++ /dev/null
@@ -1,34 +0,0 @@
-"""distutils.command
-
-Package containing implementation of all the standard Distutils
-commands.
-
-"""
-__revision__ = "$Id: __init__.py,v 1.3 2005/05/16 11:08:49 pearu Exp $"
-
-distutils_all = [  #'build_py',
-                   'clean',
-                   'install_clib',
-                   'install_scripts',
-                   'bdist',
-                   'bdist_dumb',
-                   'bdist_wininst',
-                  ]
-
-__import__('distutils.command', globals(), locals(), distutils_all)
-
-__all__ = ['build',
-           'config_compiler',
-           'config',
-           'build_src',
-           'build_py',
-           'build_ext',
-           'build_clib',
-           'build_scripts',
-           'install',
-           'install_data',
-           'install_headers',
-           'install_lib',
-           'bdist_rpm',
-           'sdist',
-          ] + distutils_all
diff --git a/numpy/distutils/command/autodist.py b/numpy/distutils/command/autodist.py
deleted file mode 100644
index b72d0cab1a7d..000000000000
--- a/numpy/distutils/command/autodist.py
+++ /dev/null
@@ -1,148 +0,0 @@
-"""This module implements additional tests ala autoconf which can be useful.
- -""" -import textwrap - -# We put them here since they could be easily reused outside numpy.distutils - -def check_inline(cmd): - """Return the inline identifier (may be empty).""" - cmd._check_compiler() - body = textwrap.dedent(""" - #ifndef __cplusplus - static %(inline)s int static_func (void) - { - return 0; - } - %(inline)s int nostatic_func (void) - { - return 0; - } - #endif""") - - for kw in ['inline', '__inline__', '__inline']: - st = cmd.try_compile(body % {'inline': kw}, None, None) - if st: - return kw - - return '' - - -def check_restrict(cmd): - """Return the restrict identifier (may be empty).""" - cmd._check_compiler() - body = textwrap.dedent(""" - static int static_func (char * %(restrict)s a) - { - return 0; - } - """) - - for kw in ['restrict', '__restrict__', '__restrict']: - st = cmd.try_compile(body % {'restrict': kw}, None, None) - if st: - return kw - - return '' - - -def check_compiler_gcc(cmd): - """Check if the compiler is GCC.""" - - cmd._check_compiler() - body = textwrap.dedent(""" - int - main() - { - #if (! defined __GNUC__) - #error gcc required - #endif - return 0; - } - """) - return cmd.try_compile(body, None, None) - - -def check_gcc_version_at_least(cmd, major, minor=0, patchlevel=0): - """ - Check that the gcc version is at least the specified version.""" - - cmd._check_compiler() - version = '.'.join([str(major), str(minor), str(patchlevel)]) - body = textwrap.dedent(""" - int - main() - { - #if (! defined __GNUC__) || (__GNUC__ < %(major)d) || \\ - (__GNUC_MINOR__ < %(minor)d) || \\ - (__GNUC_PATCHLEVEL__ < %(patchlevel)d) - #error gcc >= %(version)s required - #endif - return 0; - } - """) - kw = {'version': version, 'major': major, 'minor': minor, - 'patchlevel': patchlevel} - - return cmd.try_compile(body % kw, None, None) - - -def check_gcc_function_attribute(cmd, attribute, name): - """Return True if the given function attribute is supported.""" - cmd._check_compiler() - body = textwrap.dedent(""" - #pragma GCC diagnostic error "-Wattributes" - #pragma clang diagnostic error "-Wattributes" - - int %s %s(void* unused) - { - return 0; - } - - int - main() - { - return 0; - } - """) % (attribute, name) - return cmd.try_compile(body, None, None) != 0 - - -def check_gcc_function_attribute_with_intrinsics(cmd, attribute, name, code, - include): - """Return True if the given function attribute is supported with - intrinsics.""" - cmd._check_compiler() - body = textwrap.dedent(""" - #include<%s> - int %s %s(void) - { - %s; - return 0; - } - - int - main() - { - return 0; - } - """) % (include, attribute, name, code) - return cmd.try_compile(body, None, None) != 0 - - -def check_gcc_variable_attribute(cmd, attribute): - """Return True if the given variable attribute is supported.""" - cmd._check_compiler() - body = textwrap.dedent(""" - #pragma GCC diagnostic error "-Wattributes" - #pragma clang diagnostic error "-Wattributes" - - int %s foo; - - int - main() - { - return 0; - } - """) % (attribute, ) - return cmd.try_compile(body, None, None) != 0 diff --git a/numpy/distutils/command/bdist_rpm.py b/numpy/distutils/command/bdist_rpm.py deleted file mode 100644 index 682e7a8eb8e2..000000000000 --- a/numpy/distutils/command/bdist_rpm.py +++ /dev/null @@ -1,22 +0,0 @@ -import os -import sys -if 'setuptools' in sys.modules: - from setuptools.command.bdist_rpm import bdist_rpm as old_bdist_rpm -else: - from distutils.command.bdist_rpm import bdist_rpm as old_bdist_rpm - -class bdist_rpm(old_bdist_rpm): - - def _make_spec_file(self): - spec_file = 
old_bdist_rpm._make_spec_file(self) - - # Replace hardcoded setup.py script name - # with the real setup script name. - setup_py = os.path.basename(sys.argv[0]) - if setup_py == 'setup.py': - return spec_file - new_spec_file = [] - for line in spec_file: - line = line.replace('setup.py', setup_py) - new_spec_file.append(line) - return new_spec_file diff --git a/numpy/distutils/command/build.py b/numpy/distutils/command/build.py deleted file mode 100644 index 80830d559c61..000000000000 --- a/numpy/distutils/command/build.py +++ /dev/null @@ -1,62 +0,0 @@ -import os -import sys -from distutils.command.build import build as old_build -from distutils.util import get_platform -from numpy.distutils.command.config_compiler import show_fortran_compilers - -class build(old_build): - - sub_commands = [('config_cc', lambda *args: True), - ('config_fc', lambda *args: True), - ('build_src', old_build.has_ext_modules), - ] + old_build.sub_commands - - user_options = old_build.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ('warn-error', None, - "turn all warnings into errors (-Werror)"), - ('cpu-baseline=', None, - "specify a list of enabled baseline CPU optimizations"), - ('cpu-dispatch=', None, - "specify a list of dispatched CPU optimizations"), - ('disable-optimization', None, - "disable CPU optimized code(dispatch,simd,fast...)"), - ('simd-test=', None, - "specify a list of CPU optimizations to be tested against NumPy SIMD interface"), - ] - - help_options = old_build.help_options + [ - ('help-fcompiler', None, "list available Fortran compilers", - show_fortran_compilers), - ] - - def initialize_options(self): - old_build.initialize_options(self) - self.fcompiler = None - self.warn_error = False - self.cpu_baseline = "min" - self.cpu_dispatch = "max -xop -fma4" # drop AMD legacy features by default - self.disable_optimization = False - """ - the '_simd' module is a very large. Adding more dispatched features - will increase binary size and compile time. By default we minimize - the targeted features to those most commonly used by the NumPy SIMD interface(NPYV), - NOTE: any specified features will be ignored if they're: - - part of the baseline(--cpu-baseline) - - not part of dispatch-able features(--cpu-dispatch) - - not supported by compiler or platform - """ - self.simd_test = "BASELINE SSE2 SSE42 XOP FMA4 (FMA3 AVX2) AVX512F " \ - "AVX512_SKX VSX VSX2 VSX3 VSX4 NEON ASIMD VX VXE VXE2" - - def finalize_options(self): - build_scripts = self.build_scripts - old_build.finalize_options(self) - plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2]) - if build_scripts is None: - self.build_scripts = os.path.join(self.build_base, - 'scripts' + plat_specifier) - - def run(self): - old_build.run(self) diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py deleted file mode 100644 index 26e2f4ed0f4a..000000000000 --- a/numpy/distutils/command/build_clib.py +++ /dev/null @@ -1,469 +0,0 @@ -""" Modified version of build_clib that handles fortran source files. 
-""" -import os -from glob import glob -import shutil -from distutils.command.build_clib import build_clib as old_build_clib -from distutils.errors import DistutilsSetupError, DistutilsError, \ - DistutilsFileError - -from numpy.distutils import log -from distutils.dep_util import newer_group -from numpy.distutils.misc_util import ( - filter_sources, get_lib_source_files, get_numpy_include_dirs, - has_cxx_sources, has_f_sources, is_sequence -) -from numpy.distutils.ccompiler_opt import new_ccompiler_opt - -# Fix Python distutils bug sf #1718574: -_l = old_build_clib.user_options -for _i in range(len(_l)): - if _l[_i][0] in ['build-clib', 'build-temp']: - _l[_i] = (_l[_i][0] + '=',) + _l[_i][1:] -# - - -class build_clib(old_build_clib): - - description = "build C/C++/F libraries used by Python extensions" - - user_options = old_build_clib.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ('inplace', 'i', 'Build in-place'), - ('parallel=', 'j', - "number of parallel jobs"), - ('warn-error', None, - "turn all warnings into errors (-Werror)"), - ('cpu-baseline=', None, - "specify a list of enabled baseline CPU optimizations"), - ('cpu-dispatch=', None, - "specify a list of dispatched CPU optimizations"), - ('disable-optimization', None, - "disable CPU optimized code(dispatch,simd,fast...)"), - ] - - boolean_options = old_build_clib.boolean_options + \ - ['inplace', 'warn-error', 'disable-optimization'] - - def initialize_options(self): - old_build_clib.initialize_options(self) - self.fcompiler = None - self.inplace = 0 - self.parallel = None - self.warn_error = None - self.cpu_baseline = None - self.cpu_dispatch = None - self.disable_optimization = None - - - def finalize_options(self): - if self.parallel: - try: - self.parallel = int(self.parallel) - except ValueError as e: - raise ValueError("--parallel/-j argument must be an integer") from e - old_build_clib.finalize_options(self) - self.set_undefined_options('build', - ('parallel', 'parallel'), - ('warn_error', 'warn_error'), - ('cpu_baseline', 'cpu_baseline'), - ('cpu_dispatch', 'cpu_dispatch'), - ('disable_optimization', 'disable_optimization') - ) - - def have_f_sources(self): - for (lib_name, build_info) in self.libraries: - if has_f_sources(build_info.get('sources', [])): - return True - return False - - def have_cxx_sources(self): - for (lib_name, build_info) in self.libraries: - if has_cxx_sources(build_info.get('sources', [])): - return True - return False - - def run(self): - if not self.libraries: - return - - # Make sure that library sources are complete. - languages = [] - - # Make sure that extension sources are complete. 
- self.run_command('build_src') - - for (lib_name, build_info) in self.libraries: - l = build_info.get('language', None) - if l and l not in languages: - languages.append(l) - - from distutils.ccompiler import new_compiler - self.compiler = new_compiler(compiler=self.compiler, - dry_run=self.dry_run, - force=self.force) - self.compiler.customize(self.distribution, - need_cxx=self.have_cxx_sources()) - - if self.warn_error: - self.compiler.compiler.append('-Werror') - self.compiler.compiler_so.append('-Werror') - - libraries = self.libraries - self.libraries = None - self.compiler.customize_cmd(self) - self.libraries = libraries - - self.compiler.show_customization() - - if not self.disable_optimization: - dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h") - dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath) - opt_cache_path = os.path.abspath( - os.path.join(self.build_temp, 'ccompiler_opt_cache_clib.py') - ) - if hasattr(self, "compiler_opt"): - # By default `CCompilerOpt` update the cache at the exit of - # the process, which may lead to duplicate building - # (see build_extension()/force_rebuild) if run() called - # multiple times within the same os process/thread without - # giving the chance the previous instances of `CCompilerOpt` - # to update the cache. - self.compiler_opt.cache_flush() - - self.compiler_opt = new_ccompiler_opt( - compiler=self.compiler, dispatch_hpath=dispatch_hpath, - cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch, - cache_path=opt_cache_path - ) - def report(copt): - log.info("\n########### CLIB COMPILER OPTIMIZATION ###########") - log.info(copt.report(full=True)) - - import atexit - atexit.register(report, self.compiler_opt) - - if self.have_f_sources(): - from numpy.distutils.fcompiler import new_fcompiler - self._f_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90='f90' in languages, - c_compiler=self.compiler) - if self._f_compiler is not None: - self._f_compiler.customize(self.distribution) - - libraries = self.libraries - self.libraries = None - self._f_compiler.customize_cmd(self) - self.libraries = libraries - - self._f_compiler.show_customization() - else: - self._f_compiler = None - - self.build_libraries(self.libraries) - - if self.inplace: - for l in self.distribution.installed_libraries: - libname = self.compiler.library_filename(l.name) - source = os.path.join(self.build_clib, libname) - target = os.path.join(l.target_dir, libname) - self.mkpath(l.target_dir) - shutil.copy(source, target) - - def get_source_files(self): - self.check_library_list(self.libraries) - filenames = [] - for lib in self.libraries: - filenames.extend(get_lib_source_files(lib)) - return filenames - - def build_libraries(self, libraries): - for (lib_name, build_info) in libraries: - self.build_a_library(build_info, lib_name, libraries) - - def assemble_flags(self, in_flags): - """ Assemble flags from flag list - - Parameters - ---------- - in_flags : None or sequence - None corresponds to empty list. Sequence elements can be strings - or callables that return lists of strings. Callable takes `self` as - single parameter. 
- - Returns - ------- - out_flags : list - """ - if in_flags is None: - return [] - out_flags = [] - for in_flag in in_flags: - if callable(in_flag): - out_flags += in_flag(self) - else: - out_flags.append(in_flag) - return out_flags - - def build_a_library(self, build_info, lib_name, libraries): - # default compilers - compiler = self.compiler - fcompiler = self._f_compiler - - sources = build_info.get('sources') - if sources is None or not is_sequence(sources): - raise DistutilsSetupError(("in 'libraries' option (library '%s'), " - "'sources' must be present and must be " - "a list of source filenames") % lib_name) - sources = list(sources) - - c_sources, cxx_sources, f_sources, fmodule_sources \ - = filter_sources(sources) - requiref90 = not not fmodule_sources or \ - build_info.get('language', 'c') == 'f90' - - # save source type information so that build_ext can use it. - source_languages = [] - if c_sources: - source_languages.append('c') - if cxx_sources: - source_languages.append('c++') - if requiref90: - source_languages.append('f90') - elif f_sources: - source_languages.append('f77') - build_info['source_languages'] = source_languages - - lib_file = compiler.library_filename(lib_name, - output_dir=self.build_clib) - depends = sources + build_info.get('depends', []) - - force_rebuild = self.force - if not self.disable_optimization and not self.compiler_opt.is_cached(): - log.debug("Detected changes on compiler optimizations") - force_rebuild = True - if not (force_rebuild or newer_group(depends, lib_file, 'newer')): - log.debug("skipping '%s' library (up-to-date)", lib_name) - return - else: - log.info("building '%s' library", lib_name) - - config_fc = build_info.get('config_fc', {}) - if fcompiler is not None and config_fc: - log.info('using additional config_fc from setup script ' - 'for fortran compiler: %s' - % (config_fc,)) - from numpy.distutils.fcompiler import new_fcompiler - fcompiler = new_fcompiler(compiler=fcompiler.compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=requiref90, - c_compiler=self.compiler) - if fcompiler is not None: - dist = self.distribution - base_config_fc = dist.get_option_dict('config_fc').copy() - base_config_fc.update(config_fc) - fcompiler.customize(base_config_fc) - - # check availability of Fortran compilers - if (f_sources or fmodule_sources) and fcompiler is None: - raise DistutilsError("library %s has Fortran sources" - " but no Fortran compiler found" % (lib_name)) - - if fcompiler is not None: - fcompiler.extra_f77_compile_args = build_info.get( - 'extra_f77_compile_args') or [] - fcompiler.extra_f90_compile_args = build_info.get( - 'extra_f90_compile_args') or [] - - macros = build_info.get('macros') - if macros is None: - macros = [] - include_dirs = build_info.get('include_dirs') - if include_dirs is None: - include_dirs = [] - # Flags can be strings, or callables that return a list of strings. 
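
To make the comment above concrete, a minimal sketch (not taken from the
original sources) of a `build_info` entry mixing both forms; `assemble_flags`
above keeps plain strings as-is and calls callables with the command instance:

    def debug_flags(cmd):
        # callable form: receives the build_clib command as its single argument
        return ['-g'] if cmd.debug else []

    build_info = {'extra_cflags': ['-O2', debug_flags]}  # hypothetical entry
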
-        extra_postargs = self.assemble_flags(
-            build_info.get('extra_compiler_args'))
-        extra_cflags = self.assemble_flags(
-            build_info.get('extra_cflags'))
-        extra_cxxflags = self.assemble_flags(
-            build_info.get('extra_cxxflags'))
-
-        include_dirs.extend(get_numpy_include_dirs())
-        # where compiled F90 module files are:
-        module_dirs = build_info.get('module_dirs') or []
-        module_build_dir = os.path.dirname(lib_file)
-        if requiref90:
-            self.mkpath(module_build_dir)
-
-        if compiler.compiler_type == 'msvc':
-            # this hack works around the msvc compiler attributes
-            # problem, msvc uses its own convention :(
-            c_sources += cxx_sources
-            cxx_sources = []
-            extra_cflags += extra_cxxflags
-
-        # filtering C dispatch-table sources when optimization is not disabled,
-        # otherwise treated as normal sources.
-        copt_c_sources = []
-        copt_cxx_sources = []
-        copt_baseline_flags = []
-        copt_macros = []
-        if not self.disable_optimization:
-            bsrc_dir = self.get_finalized_command("build_src").build_src
-            dispatch_hpath = os.path.join("numpy", "distutils", "include")
-            dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath)
-            include_dirs.append(dispatch_hpath)
-            # copt_build_src = None if self.inplace else bsrc_dir
-            copt_build_src = bsrc_dir
-            for _srcs, _dst, _ext in (
-                ((c_sources,), copt_c_sources, ('.dispatch.c',)),
-                ((c_sources, cxx_sources), copt_cxx_sources,
-                    ('.dispatch.cpp', '.dispatch.cxx'))
-            ):
-                for _src in _srcs:
-                    _dst += [
-                        _src.pop(_src.index(s))
-                        for s in _src[:] if s.endswith(_ext)
-                    ]
-            copt_baseline_flags = self.compiler_opt.cpu_baseline_flags()
-        else:
-            copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1))
-
-        objects = []
-        if copt_cxx_sources:
-            log.info("compiling C++ dispatch-able sources")
-            objects += self.compiler_opt.try_dispatch(
-                copt_cxx_sources,
-                output_dir=self.build_temp,
-                src_dir=copt_build_src,
-                macros=macros + copt_macros,
-                include_dirs=include_dirs,
-                debug=self.debug,
-                extra_postargs=extra_postargs + extra_cxxflags,
-                ccompiler=cxx_compiler
-            )
-
-        if copt_c_sources:
-            log.info("compiling C dispatch-able sources")
-            objects += self.compiler_opt.try_dispatch(
-                copt_c_sources,
-                output_dir=self.build_temp,
-                src_dir=copt_build_src,
-                macros=macros + copt_macros,
-                include_dirs=include_dirs,
-                debug=self.debug,
-                extra_postargs=extra_postargs + extra_cflags)
-
-        if c_sources:
-            log.info("compiling C sources")
-            objects += compiler.compile(
-                c_sources,
-                output_dir=self.build_temp,
-                macros=macros + copt_macros,
-                include_dirs=include_dirs,
-                debug=self.debug,
-                extra_postargs=(extra_postargs +
-                                copt_baseline_flags +
-                                extra_cflags))
-
-        if cxx_sources:
-            log.info("compiling C++ sources")
-            cxx_compiler = compiler.cxx_compiler()
-            cxx_objects = cxx_compiler.compile(
-                cxx_sources,
-                output_dir=self.build_temp,
-                macros=macros + copt_macros,
-                include_dirs=include_dirs,
-                debug=self.debug,
-                extra_postargs=(extra_postargs +
-                                copt_baseline_flags +
-                                extra_cxxflags))
-            objects.extend(cxx_objects)
-
-        if f_sources or fmodule_sources:
-            extra_postargs = []
-            f_objects = []
-
-            if requiref90:
-                if fcompiler.module_dir_switch is None:
-                    existing_modules = glob('*.mod')
-                extra_postargs += fcompiler.module_options(
-                    module_dirs, module_build_dir)
-
-            if fmodule_sources:
-                log.info("compiling Fortran 90 module sources")
-                f_objects += fcompiler.compile(fmodule_sources,
-                                               output_dir=self.build_temp,
-                                               macros=macros,
-                                               include_dirs=include_dirs,
-                                               debug=self.debug,
-                                               extra_postargs=extra_postargs)
-
-            if requiref90 and self._f_compiler.module_dir_switch is None:
-                # move
new compiled F90 module files to module_build_dir - for f in glob('*.mod'): - if f in existing_modules: - continue - t = os.path.join(module_build_dir, f) - if os.path.abspath(f) == os.path.abspath(t): - continue - if os.path.isfile(t): - os.remove(t) - try: - self.move_file(f, module_build_dir) - except DistutilsFileError: - log.warn('failed to move %r to %r' - % (f, module_build_dir)) - - if f_sources: - log.info("compiling Fortran sources") - f_objects += fcompiler.compile(f_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs) - else: - f_objects = [] - - if f_objects and not fcompiler.can_ccompiler_link(compiler): - # Default linker cannot link Fortran object files, and results - # need to be wrapped later. Instead of creating a real static - # library, just keep track of the object files. - listfn = os.path.join(self.build_clib, - lib_name + '.fobjects') - with open(listfn, 'w') as f: - f.write("\n".join(os.path.abspath(obj) for obj in f_objects)) - - listfn = os.path.join(self.build_clib, - lib_name + '.cobjects') - with open(listfn, 'w') as f: - f.write("\n".join(os.path.abspath(obj) for obj in objects)) - - # create empty "library" file for dependency tracking - lib_fname = os.path.join(self.build_clib, - lib_name + compiler.static_lib_extension) - with open(lib_fname, 'wb') as f: - pass - else: - # assume that default linker is suitable for - # linking Fortran object files - objects.extend(f_objects) - compiler.create_static_lib(objects, lib_name, - output_dir=self.build_clib, - debug=self.debug) - - # fix library dependencies - clib_libraries = build_info.get('libraries', []) - for lname, binfo in libraries: - if lname in clib_libraries: - clib_libraries.extend(binfo.get('libraries', [])) - if clib_libraries: - build_info['libraries'] = clib_libraries diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py deleted file mode 100644 index 42137e5f859d..000000000000 --- a/numpy/distutils/command/build_ext.py +++ /dev/null @@ -1,752 +0,0 @@ -""" Modified version of build_ext that handles fortran source files. 
- -""" -import os -import subprocess -from glob import glob - -from distutils.dep_util import newer_group -from distutils.command.build_ext import build_ext as old_build_ext -from distutils.errors import DistutilsFileError, DistutilsSetupError,\ - DistutilsError -from distutils.file_util import copy_file - -from numpy.distutils import log -from numpy.distutils.exec_command import filepath_from_subprocess_output -from numpy.distutils.system_info import combine_paths -from numpy.distutils.misc_util import ( - filter_sources, get_ext_source_files, get_numpy_include_dirs, - has_cxx_sources, has_f_sources, is_sequence -) -from numpy.distutils.command.config_compiler import show_fortran_compilers -from numpy.distutils.ccompiler_opt import new_ccompiler_opt, CCompilerOpt - -class build_ext (old_build_ext): - - description = "build C/C++/F extensions (compile/link to build directory)" - - user_options = old_build_ext.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ('parallel=', 'j', - "number of parallel jobs"), - ('warn-error', None, - "turn all warnings into errors (-Werror)"), - ('cpu-baseline=', None, - "specify a list of enabled baseline CPU optimizations"), - ('cpu-dispatch=', None, - "specify a list of dispatched CPU optimizations"), - ('disable-optimization', None, - "disable CPU optimized code(dispatch,simd,fast...)"), - ('simd-test=', None, - "specify a list of CPU optimizations to be tested against NumPy SIMD interface"), - ] - - help_options = old_build_ext.help_options + [ - ('help-fcompiler', None, "list available Fortran compilers", - show_fortran_compilers), - ] - - boolean_options = old_build_ext.boolean_options + ['warn-error', 'disable-optimization'] - - def initialize_options(self): - old_build_ext.initialize_options(self) - self.fcompiler = None - self.parallel = None - self.warn_error = None - self.cpu_baseline = None - self.cpu_dispatch = None - self.disable_optimization = None - self.simd_test = None - - def finalize_options(self): - if self.parallel: - try: - self.parallel = int(self.parallel) - except ValueError as e: - raise ValueError("--parallel/-j argument must be an integer") from e - - # Ensure that self.include_dirs and self.distribution.include_dirs - # refer to the same list object. finalize_options will modify - # self.include_dirs, but self.distribution.include_dirs is used - # during the actual build. - # self.include_dirs is None unless paths are specified with - # --include-dirs. - # The include paths will be passed to the compiler in the order: - # numpy paths, --include-dirs paths, Python include path. - if isinstance(self.include_dirs, str): - self.include_dirs = self.include_dirs.split(os.pathsep) - incl_dirs = self.include_dirs or [] - if self.distribution.include_dirs is None: - self.distribution.include_dirs = [] - self.include_dirs = self.distribution.include_dirs - self.include_dirs.extend(incl_dirs) - - old_build_ext.finalize_options(self) - self.set_undefined_options('build', - ('parallel', 'parallel'), - ('warn_error', 'warn_error'), - ('cpu_baseline', 'cpu_baseline'), - ('cpu_dispatch', 'cpu_dispatch'), - ('disable_optimization', 'disable_optimization'), - ('simd_test', 'simd_test') - ) - CCompilerOpt.conf_target_groups["simd_test"] = self.simd_test - - def run(self): - if not self.extensions: - return - - # Make sure that extension sources are complete. 
- self.run_command('build_src') - - if self.distribution.has_c_libraries(): - if self.inplace: - if self.distribution.have_run.get('build_clib'): - log.warn('build_clib already run, it is too late to ' - 'ensure in-place build of build_clib') - build_clib = self.distribution.get_command_obj( - 'build_clib') - else: - build_clib = self.distribution.get_command_obj( - 'build_clib') - build_clib.inplace = 1 - build_clib.ensure_finalized() - build_clib.run() - self.distribution.have_run['build_clib'] = 1 - - else: - self.run_command('build_clib') - build_clib = self.get_finalized_command('build_clib') - self.library_dirs.append(build_clib.build_clib) - else: - build_clib = None - - # Not including C libraries to the list of - # extension libraries automatically to prevent - # bogus linking commands. Extensions must - # explicitly specify the C libraries that they use. - - from distutils.ccompiler import new_compiler - from numpy.distutils.fcompiler import new_fcompiler - - compiler_type = self.compiler - # Initialize C compiler: - self.compiler = new_compiler(compiler=compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force) - self.compiler.customize(self.distribution) - self.compiler.customize_cmd(self) - - if self.warn_error: - self.compiler.compiler.append('-Werror') - self.compiler.compiler_so.append('-Werror') - - self.compiler.show_customization() - - if not self.disable_optimization: - dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h") - dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath) - opt_cache_path = os.path.abspath( - os.path.join(self.build_temp, 'ccompiler_opt_cache_ext.py') - ) - if hasattr(self, "compiler_opt"): - # By default `CCompilerOpt` update the cache at the exit of - # the process, which may lead to duplicate building - # (see build_extension()/force_rebuild) if run() called - # multiple times within the same os process/thread without - # giving the chance the previous instances of `CCompilerOpt` - # to update the cache. - self.compiler_opt.cache_flush() - - self.compiler_opt = new_ccompiler_opt( - compiler=self.compiler, dispatch_hpath=dispatch_hpath, - cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch, - cache_path=opt_cache_path - ) - def report(copt): - log.info("\n########### EXT COMPILER OPTIMIZATION ###########") - log.info(copt.report(full=True)) - - import atexit - atexit.register(report, self.compiler_opt) - - # Setup directory for storing generated extra DLL files on Windows - self.extra_dll_dir = os.path.join(self.build_temp, '.libs') - if not os.path.isdir(self.extra_dll_dir): - os.makedirs(self.extra_dll_dir) - - # Create mapping of libraries built by build_clib: - clibs = {} - if build_clib is not None: - for libname, build_info in build_clib.libraries or []: - if libname in clibs and clibs[libname] != build_info: - log.warn('library %r defined more than once,' - ' overwriting build_info\n%s... \nwith\n%s...' - % (libname, repr(clibs[libname])[:300], repr(build_info)[:300])) - clibs[libname] = build_info - # .. and distribution libraries: - for libname, build_info in self.distribution.libraries or []: - if libname in clibs: - # build_clib libraries have a precedence before distribution ones - continue - clibs[libname] = build_info - - # Determine if C++/Fortran 77/Fortran 90 compilers are needed. - # Update extension libraries, library_dirs, and macros. 
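
For context on the merging loop below, a hypothetical `clibs` entry; the
dictionary keys are the real ones read by the loop, the values are made up:

    clibs['npymath'] = {
        'libraries': ['m'],                      # extra libraries to link
        'library_dirs': ['build/temp/libs'],     # extra search paths
        'macros': [('HAVE_NPY_CONFIG_H', '1')],  # merged into define_macros
        'source_languages': ['c'],               # drives linker selection
    }
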
- all_languages = set() - for ext in self.extensions: - ext_languages = set() - c_libs = [] - c_lib_dirs = [] - macros = [] - for libname in ext.libraries: - if libname in clibs: - binfo = clibs[libname] - c_libs += binfo.get('libraries', []) - c_lib_dirs += binfo.get('library_dirs', []) - for m in binfo.get('macros', []): - if m not in macros: - macros.append(m) - - for l in clibs.get(libname, {}).get('source_languages', []): - ext_languages.add(l) - if c_libs: - new_c_libs = ext.libraries + c_libs - log.info('updating extension %r libraries from %r to %r' - % (ext.name, ext.libraries, new_c_libs)) - ext.libraries = new_c_libs - ext.library_dirs = ext.library_dirs + c_lib_dirs - if macros: - log.info('extending extension %r defined_macros with %r' - % (ext.name, macros)) - ext.define_macros = ext.define_macros + macros - - # determine extension languages - if has_f_sources(ext.sources): - ext_languages.add('f77') - if has_cxx_sources(ext.sources): - ext_languages.add('c++') - l = ext.language or self.compiler.detect_language(ext.sources) - if l: - ext_languages.add(l) - - # reset language attribute for choosing proper linker - # - # When we build extensions with multiple languages, we have to - # choose a linker. The rules here are: - # 1. if there is Fortran code, always prefer the Fortran linker, - # 2. otherwise prefer C++ over C, - # 3. Users can force a particular linker by using - # `language='c'` # or 'c++', 'f90', 'f77' - # in their config.add_extension() calls. - if 'c++' in ext_languages: - ext_language = 'c++' - else: - ext_language = 'c' # default - - has_fortran = False - if 'f90' in ext_languages: - ext_language = 'f90' - has_fortran = True - elif 'f77' in ext_languages: - ext_language = 'f77' - has_fortran = True - - if not ext.language or has_fortran: - if l and l != ext_language and ext.language: - log.warn('resetting extension %r language from %r to %r.' % - (ext.name, l, ext_language)) - - ext.language = ext_language - - # global language - all_languages.update(ext_languages) - - need_f90_compiler = 'f90' in all_languages - need_f77_compiler = 'f77' in all_languages - need_cxx_compiler = 'c++' in all_languages - - # Initialize C++ compiler: - if need_cxx_compiler: - self._cxx_compiler = new_compiler(compiler=compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force) - compiler = self._cxx_compiler - compiler.customize(self.distribution, need_cxx=need_cxx_compiler) - compiler.customize_cmd(self) - compiler.show_customization() - self._cxx_compiler = compiler.cxx_compiler() - else: - self._cxx_compiler = None - - # Initialize Fortran 77 compiler: - if need_f77_compiler: - ctype = self.fcompiler - self._f77_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=False, - c_compiler=self.compiler) - fcompiler = self._f77_compiler - if fcompiler: - ctype = fcompiler.compiler_type - fcompiler.customize(self.distribution) - if fcompiler and fcompiler.get_version(): - fcompiler.customize_cmd(self) - fcompiler.show_customization() - else: - self.warn('f77_compiler=%s is not available.' 
% - (ctype)) - self._f77_compiler = None - else: - self._f77_compiler = None - - # Initialize Fortran 90 compiler: - if need_f90_compiler: - ctype = self.fcompiler - self._f90_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=True, - c_compiler=self.compiler) - fcompiler = self._f90_compiler - if fcompiler: - ctype = fcompiler.compiler_type - fcompiler.customize(self.distribution) - if fcompiler and fcompiler.get_version(): - fcompiler.customize_cmd(self) - fcompiler.show_customization() - else: - self.warn('f90_compiler=%s is not available.' % - (ctype)) - self._f90_compiler = None - else: - self._f90_compiler = None - - # Build extensions - self.build_extensions() - - # Copy over any extra DLL files - # FIXME: In the case where there are more than two packages, - # we blindly assume that both packages need all of the libraries, - # resulting in a larger wheel than is required. This should be fixed, - # but it's so rare that I won't bother to handle it. - pkg_roots = { - self.get_ext_fullname(ext.name).split('.')[0] - for ext in self.extensions - } - for pkg_root in pkg_roots: - shared_lib_dir = os.path.join(pkg_root, '.libs') - if not self.inplace: - shared_lib_dir = os.path.join(self.build_lib, shared_lib_dir) - for fn in os.listdir(self.extra_dll_dir): - if not os.path.isdir(shared_lib_dir): - os.makedirs(shared_lib_dir) - if not fn.lower().endswith('.dll'): - continue - runtime_lib = os.path.join(self.extra_dll_dir, fn) - copy_file(runtime_lib, shared_lib_dir) - - def swig_sources(self, sources, extensions=None): - # Do nothing. Swig sources have been handled in build_src command. - return sources - - def build_extension(self, ext): - sources = ext.sources - if sources is None or not is_sequence(sources): - raise DistutilsSetupError( - ("in 'ext_modules' option (extension '%s'), " - "'sources' must be present and must be " - "a list of source filenames") % ext.name) - sources = list(sources) - - if not sources: - return - - fullname = self.get_ext_fullname(ext.name) - if self.inplace: - modpath = fullname.split('.') - package = '.'.join(modpath[0:-1]) - base = modpath[-1] - build_py = self.get_finalized_command('build_py') - package_dir = build_py.get_package_dir(package) - ext_filename = os.path.join(package_dir, - self.get_ext_filename(base)) - else: - ext_filename = os.path.join(self.build_lib, - self.get_ext_filename(fullname)) - depends = sources + ext.depends - - force_rebuild = self.force - if not self.disable_optimization and not self.compiler_opt.is_cached(): - log.debug("Detected changes on compiler optimizations") - force_rebuild = True - if not (force_rebuild or newer_group(depends, ext_filename, 'newer')): - log.debug("skipping '%s' extension (up-to-date)", ext.name) - return - else: - log.info("building '%s' extension", ext.name) - - extra_args = ext.extra_compile_args or [] - extra_cflags = getattr(ext, 'extra_c_compile_args', None) or [] - extra_cxxflags = getattr(ext, 'extra_cxx_compile_args', None) or [] - - macros = ext.define_macros[:] - for undef in ext.undef_macros: - macros.append((undef,)) - - c_sources, cxx_sources, f_sources, fmodule_sources = \ - filter_sources(ext.sources) - - if self.compiler.compiler_type == 'msvc': - if cxx_sources: - # Needed to compile kiva.agg._agg extension. 
- extra_args.append('/Zm1000') - extra_cflags += extra_cxxflags - # this hack works around the msvc compiler attributes - # problem, msvc uses its own convention :( - c_sources += cxx_sources - cxx_sources = [] - - # Set Fortran/C++ compilers for compilation and linking. - if ext.language == 'f90': - fcompiler = self._f90_compiler - elif ext.language == 'f77': - fcompiler = self._f77_compiler - else: # in case ext.language is c++, for instance - fcompiler = self._f90_compiler or self._f77_compiler - if fcompiler is not None: - fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr( - ext, 'extra_f77_compile_args') else [] - fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr( - ext, 'extra_f90_compile_args') else [] - cxx_compiler = self._cxx_compiler - - # check for the availability of required compilers - if cxx_sources and cxx_compiler is None: - raise DistutilsError("extension %r has C++ sources" - "but no C++ compiler found" % (ext.name)) - if (f_sources or fmodule_sources) and fcompiler is None: - raise DistutilsError("extension %r has Fortran sources " - "but no Fortran compiler found" % (ext.name)) - if ext.language in ['f77', 'f90'] and fcompiler is None: - self.warn("extension %r has Fortran libraries " - "but no Fortran linker found, using default linker" % (ext.name)) - if ext.language == 'c++' and cxx_compiler is None: - self.warn("extension %r has C++ libraries " - "but no C++ linker found, using default linker" % (ext.name)) - - kws = {'depends': ext.depends} - output_dir = self.build_temp - - include_dirs = ext.include_dirs + get_numpy_include_dirs() - - # filtering C dispatch-table sources when optimization is not disabled, - # otherwise treated as normal sources. - copt_c_sources = [] - copt_cxx_sources = [] - copt_baseline_flags = [] - copt_macros = [] - if not self.disable_optimization: - bsrc_dir = self.get_finalized_command("build_src").build_src - dispatch_hpath = os.path.join("numpy", "distutils", "include") - dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath) - include_dirs.append(dispatch_hpath) - - # copt_build_src = None if self.inplace else bsrc_dir - # Always generate the generated config files and - # dispatch-able sources inside the build directory, - # even if the build option `inplace` is enabled. - # This approach prevents conflicts with Meson-generated - # config headers. Since `spin build --clean` will not remove - # these headers, they might overwrite the generated Meson headers, - # causing compatibility issues. Maintaining separate directories - # ensures compatibility between distutils dispatch config headers - # and Meson headers, avoiding build disruptions. - # See gh-24450 for more details. 
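The hunk below pulls `*.dispatch.c` / `*.dispatch.cpp` sources out of the ordinary source lists so that `CCompilerOpt.try_dispatch` can compile each of them once per enabled CPU feature set. A simplified sketch of that suffix-based split (file names are invented):

    # Split sources on the .dispatch.* naming convention; everything else
    # is compiled normally with the baseline flags.
    def split_dispatch(sources,
                       exts=('.dispatch.c', '.dispatch.cpp', '.dispatch.cxx')):
        dispatchable = [s for s in sources if s.endswith(exts)]
        regular = [s for s in sources if s not in dispatchable]
        return regular, dispatchable

    regular, dispatchable = split_dispatch(
        ['umath.c', 'loops_arithm_fp.dispatch.c', 'scalarmath.c'])
    assert dispatchable == ['loops_arithm_fp.dispatch.c']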
- copt_build_src = bsrc_dir - for _srcs, _dst, _ext in ( - ((c_sources,), copt_c_sources, ('.dispatch.c',)), - ((c_sources, cxx_sources), copt_cxx_sources, - ('.dispatch.cpp', '.dispatch.cxx')) - ): - for _src in _srcs: - _dst += [ - _src.pop(_src.index(s)) - for s in _src[:] if s.endswith(_ext) - ] - copt_baseline_flags = self.compiler_opt.cpu_baseline_flags() - else: - copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1)) - - c_objects = [] - if copt_cxx_sources: - log.info("compiling C++ dispatch-able sources") - c_objects += self.compiler_opt.try_dispatch( - copt_cxx_sources, - output_dir=output_dir, - src_dir=copt_build_src, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_args + extra_cxxflags, - ccompiler=cxx_compiler, - **kws - ) - if copt_c_sources: - log.info("compiling C dispatch-able sources") - c_objects += self.compiler_opt.try_dispatch( - copt_c_sources, - output_dir=output_dir, - src_dir=copt_build_src, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_args + extra_cflags, - **kws) - if c_sources: - log.info("compiling C sources") - c_objects += self.compiler.compile( - c_sources, - output_dir=output_dir, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=(extra_args + copt_baseline_flags + - extra_cflags), - **kws) - if cxx_sources: - log.info("compiling C++ sources") - c_objects += cxx_compiler.compile( - cxx_sources, - output_dir=output_dir, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=(extra_args + copt_baseline_flags + - extra_cxxflags), - **kws) - - extra_postargs = [] - f_objects = [] - if fmodule_sources: - log.info("compiling Fortran 90 module sources") - module_dirs = ext.module_dirs[:] - module_build_dir = os.path.join( - self.build_temp, os.path.dirname( - self.get_ext_filename(fullname))) - - self.mkpath(module_build_dir) - if fcompiler.module_dir_switch is None: - existing_modules = glob('*.mod') - extra_postargs += fcompiler.module_options( - module_dirs, module_build_dir) - f_objects += fcompiler.compile(fmodule_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs, - depends=ext.depends) - - if fcompiler.module_dir_switch is None: - for f in glob('*.mod'): - if f in existing_modules: - continue - t = os.path.join(module_build_dir, f) - if os.path.abspath(f) == os.path.abspath(t): - continue - if os.path.isfile(t): - os.remove(t) - try: - self.move_file(f, module_build_dir) - except DistutilsFileError: - log.warn('failed to move %r to %r' % - (f, module_build_dir)) - if f_sources: - log.info("compiling Fortran sources") - f_objects += fcompiler.compile(f_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs, - depends=ext.depends) - - if f_objects and not fcompiler.can_ccompiler_link(self.compiler): - unlinkable_fobjects = f_objects - objects = c_objects - else: - unlinkable_fobjects = [] - objects = c_objects + f_objects - - if ext.extra_objects: - objects.extend(ext.extra_objects) - extra_args = ext.extra_link_args or [] - libraries = self.get_libraries(ext)[:] - library_dirs = ext.library_dirs[:] - - linker = self.compiler.link_shared_object - # Always use system linker when using MSVC compiler. 
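Before the linker selection continues below, a note on the `*.mod` bookkeeping above: when a Fortran compiler has no module-directory switch, freshly generated module files land in the current directory, so they are detected by diffing `glob('*.mod')` before and after the compile step and then moved into the build tree. A minimal standalone sketch of that detection (using `shutil.move` in place of the command's `move_file` helper):

    import os
    import shutil
    from glob import glob

    def relocate_new_modules(existing, module_build_dir):
        # 'existing' is the glob('*.mod') snapshot taken before compiling.
        for f in glob('*.mod'):
            if f in existing:
                continue                       # pre-existing, leave alone
            target = os.path.join(module_build_dir, f)
            if os.path.isfile(target):
                os.remove(target)              # replace a stale copy
            shutil.move(f, target)

    # usage: snapshot = glob('*.mod'); <run fcompiler.compile(...)>;
    #        relocate_new_modules(snapshot, module_build_dir)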
- if self.compiler.compiler_type in ('msvc', 'intelw', 'intelemw'): - # expand libraries with fcompiler libraries as we are - # not using fcompiler linker - self._libs_with_msvc_and_fortran( - fcompiler, libraries, library_dirs) - if ext.runtime_library_dirs: - # gcc adds RPATH to the link. On windows, copy the dll into - # self.extra_dll_dir instead. - for d in ext.runtime_library_dirs: - for f in glob(d + '/*.dll'): - copy_file(f, self.extra_dll_dir) - ext.runtime_library_dirs = [] - - elif ext.language in ['f77', 'f90'] and fcompiler is not None: - linker = fcompiler.link_shared_object - if ext.language == 'c++' and cxx_compiler is not None: - linker = cxx_compiler.link_shared_object - - if fcompiler is not None: - objects, libraries = self._process_unlinkable_fobjects( - objects, libraries, - fcompiler, library_dirs, - unlinkable_fobjects) - - linker(objects, ext_filename, - libraries=libraries, - library_dirs=library_dirs, - runtime_library_dirs=ext.runtime_library_dirs, - extra_postargs=extra_args, - export_symbols=self.get_export_symbols(ext), - debug=self.debug, - build_temp=self.build_temp, - target_lang=ext.language) - - def _add_dummy_mingwex_sym(self, c_sources): - build_src = self.get_finalized_command("build_src").build_src - build_clib = self.get_finalized_command("build_clib").build_clib - objects = self.compiler.compile([os.path.join(build_src, - "gfortran_vs2003_hack.c")], - output_dir=self.build_temp) - self.compiler.create_static_lib( - objects, "_gfortran_workaround", output_dir=build_clib, debug=self.debug) - - def _process_unlinkable_fobjects(self, objects, libraries, - fcompiler, library_dirs, - unlinkable_fobjects): - libraries = list(libraries) - objects = list(objects) - unlinkable_fobjects = list(unlinkable_fobjects) - - # Expand possible fake static libraries to objects; - # make sure to iterate over a copy of the list as - # "fake" libraries will be removed as they are - # encountered - for lib in libraries[:]: - for libdir in library_dirs: - fake_lib = os.path.join(libdir, lib + '.fobjects') - if os.path.isfile(fake_lib): - # Replace fake static library - libraries.remove(lib) - with open(fake_lib) as f: - unlinkable_fobjects.extend(f.read().splitlines()) - - # Expand C objects - c_lib = os.path.join(libdir, lib + '.cobjects') - with open(c_lib) as f: - objects.extend(f.read().splitlines()) - - # Wrap unlinkable objects to a linkable one - if unlinkable_fobjects: - fobjects = [os.path.abspath(obj) for obj in unlinkable_fobjects] - wrapped = fcompiler.wrap_unlinkable_objects( - fobjects, output_dir=self.build_temp, - extra_dll_dir=self.extra_dll_dir) - objects.extend(wrapped) - - return objects, libraries - - def _libs_with_msvc_and_fortran(self, fcompiler, c_libraries, - c_library_dirs): - if fcompiler is None: - return - - for libname in c_libraries: - if libname.startswith('msvc'): - continue - fileexists = False - for libdir in c_library_dirs or []: - libfile = os.path.join(libdir, '%s.lib' % (libname)) - if os.path.isfile(libfile): - fileexists = True - break - if fileexists: - continue - # make g77-compiled static libs available to MSVC - fileexists = False - for libdir in c_library_dirs: - libfile = os.path.join(libdir, 'lib%s.a' % (libname)) - if os.path.isfile(libfile): - # copy libname.a file to name.lib so that MSVC linker - # can find it - libfile2 = os.path.join(self.build_temp, libname + '.lib') - copy_file(libfile, libfile2) - if self.build_temp not in c_library_dirs: - c_library_dirs.append(self.build_temp) - fileexists = True - break - if 
fileexists: - continue - log.warn('could not find library %r in directories %s' - % (libname, c_library_dirs)) - - # Always use system linker when using MSVC compiler. - f_lib_dirs = [] - for dir in fcompiler.library_dirs: - # correct path when compiling in Cygwin but with normal Win - # Python - if dir.startswith('/usr/lib'): - try: - dir = subprocess.check_output(['cygpath', '-w', dir]) - except (OSError, subprocess.CalledProcessError): - pass - else: - dir = filepath_from_subprocess_output(dir) - f_lib_dirs.append(dir) - c_library_dirs.extend(f_lib_dirs) - - # make g77-compiled static libs available to MSVC - for lib in fcompiler.libraries: - if not lib.startswith('msvc'): - c_libraries.append(lib) - p = combine_paths(f_lib_dirs, 'lib' + lib + '.a') - if p: - dst_name = os.path.join(self.build_temp, lib + '.lib') - if not os.path.isfile(dst_name): - copy_file(p[0], dst_name) - if self.build_temp not in c_library_dirs: - c_library_dirs.append(self.build_temp) - - def get_source_files(self): - self.check_extensions_list(self.extensions) - filenames = [] - for ext in self.extensions: - filenames.extend(get_ext_source_files(ext)) - return filenames - - def get_outputs(self): - self.check_extensions_list(self.extensions) - - outputs = [] - for ext in self.extensions: - if not ext.sources: - continue - fullname = self.get_ext_fullname(ext.name) - outputs.append(os.path.join(self.build_lib, - self.get_ext_filename(fullname))) - return outputs diff --git a/numpy/distutils/command/build_py.py b/numpy/distutils/command/build_py.py deleted file mode 100644 index d30dc5bf42d8..000000000000 --- a/numpy/distutils/command/build_py.py +++ /dev/null @@ -1,31 +0,0 @@ -from distutils.command.build_py import build_py as old_build_py -from numpy.distutils.misc_util import is_string - -class build_py(old_build_py): - - def run(self): - build_src = self.get_finalized_command('build_src') - if build_src.py_modules_dict and self.packages is None: - self.packages = list(build_src.py_modules_dict.keys ()) - old_build_py.run(self) - - def find_package_modules(self, package, package_dir): - modules = old_build_py.find_package_modules(self, package, package_dir) - - # Find build_src generated *.py files. - build_src = self.get_finalized_command('build_src') - modules += build_src.py_modules_dict.get(package, []) - - return modules - - def find_modules(self): - old_py_modules = self.py_modules[:] - new_py_modules = [_m for _m in self.py_modules if is_string(_m)] - self.py_modules[:] = new_py_modules - modules = old_build_py.find_modules(self) - self.py_modules[:] = old_py_modules - - return modules - - # XXX: Fix find_source_files for item in py_modules such that item is 3-tuple - # and item[2] is source file. diff --git a/numpy/distutils/command/build_scripts.py b/numpy/distutils/command/build_scripts.py deleted file mode 100644 index d5cadb2745fe..000000000000 --- a/numpy/distutils/command/build_scripts.py +++ /dev/null @@ -1,49 +0,0 @@ -""" Modified version of build_scripts that handles building scripts from functions. 
- -""" -from distutils.command.build_scripts import build_scripts as old_build_scripts -from numpy.distutils import log -from numpy.distutils.misc_util import is_string - -class build_scripts(old_build_scripts): - - def generate_scripts(self, scripts): - new_scripts = [] - func_scripts = [] - for script in scripts: - if is_string(script): - new_scripts.append(script) - else: - func_scripts.append(script) - if not func_scripts: - return new_scripts - - build_dir = self.build_dir - self.mkpath(build_dir) - for func in func_scripts: - script = func(build_dir) - if not script: - continue - if is_string(script): - log.info(" adding '%s' to scripts" % (script,)) - new_scripts.append(script) - else: - [log.info(" adding '%s' to scripts" % (s,)) for s in script] - new_scripts.extend(list(script)) - return new_scripts - - def run (self): - if not self.scripts: - return - - self.scripts = self.generate_scripts(self.scripts) - # Now make sure that the distribution object has this list of scripts. - # setuptools' develop command requires that this be a list of filenames, - # not functions. - self.distribution.scripts = self.scripts - - return old_build_scripts.run(self) - - def get_source_files(self): - from numpy.distutils.misc_util import get_script_files - return get_script_files(self.scripts) diff --git a/numpy/distutils/command/build_src.py b/numpy/distutils/command/build_src.py deleted file mode 100644 index cfcc80caecd6..000000000000 --- a/numpy/distutils/command/build_src.py +++ /dev/null @@ -1,773 +0,0 @@ -""" Build swig and f2py sources. -""" -import os -import re -import sys -import shlex -import copy - -from distutils.command import build_ext -from distutils.dep_util import newer_group, newer -from distutils.util import get_platform -from distutils.errors import DistutilsError, DistutilsSetupError - - -# this import can't be done here, as it uses numpy stuff only available -# after it's installed -#import numpy.f2py -from numpy.distutils import log -from numpy.distutils.misc_util import ( - fortran_ext_match, appendpath, is_string, is_sequence, get_cmd - ) -from numpy.distutils.from_template import process_file as process_f_file -from numpy.distutils.conv_template import process_file as process_c_file - -def subst_vars(target, source, d): - """Substitute any occurrence of @foo@ by d['foo'] from source file into - target.""" - var = re.compile('@([a-zA-Z_]+)@') - with open(source, 'r') as fs: - with open(target, 'w') as ft: - for l in fs: - m = var.search(l) - if m: - ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)])) - else: - ft.write(l) - -class build_src(build_ext.build_ext): - - description = "build sources from SWIG, F2PY files or a function" - - user_options = [ - ('build-src=', 'd', "directory to \"build\" sources to"), - ('f2py-opts=', None, "list of f2py command line options"), - ('swig=', None, "path to the SWIG executable"), - ('swig-opts=', None, "list of SWIG command line options"), - ('swig-cpp', None, "make SWIG create C++ files (default is autodetected from sources)"), - ('f2pyflags=', None, "additional flags to f2py (use --f2py-opts= instead)"), # obsolete - ('swigflags=', None, "additional flags to swig (use --swig-opts= instead)"), # obsolete - ('force', 'f', "forcibly build everything (ignore file timestamps)"), - ('inplace', 'i', - "ignore build-lib and put compiled extensions into the source " - "directory alongside your pure Python modules"), - ('verbose-cfg', None, - "change logging level from WARN to INFO which will show all " - "compiler output") - ] - - 
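The `build_scripts` override above lets a `scripts` entry be a callable rather than a file name: `generate_scripts` calls it with the build directory and accepts a single file name, a sequence of file names, or a false value to skip the entry. A sketch of such a generator, before the remaining `build_src` options below (the script name and contents are purely illustrative):

    import os

    def make_cli_script(build_dir):
        # Write a launcher script into the build directory and hand the
        # path back to generate_scripts(); returning None would skip it.
        path = os.path.join(build_dir, 'mytool')
        with open(path, 'w') as f:
            f.write('#!/usr/bin/env python\n'
                    'from mypkg.cli import main\n'
                    'main()\n')
        return path

    # setup(..., scripts=[make_cli_script, 'plain_script.py'])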
boolean_options = ['force', 'inplace', 'verbose-cfg'] - - help_options = [] - - def initialize_options(self): - self.extensions = None - self.package = None - self.py_modules = None - self.py_modules_dict = None - self.build_src = None - self.build_lib = None - self.build_base = None - self.force = None - self.inplace = None - self.package_dir = None - self.f2pyflags = None # obsolete - self.f2py_opts = None - self.swigflags = None # obsolete - self.swig_opts = None - self.swig_cpp = None - self.swig = None - self.verbose_cfg = None - - def finalize_options(self): - self.set_undefined_options('build', - ('build_base', 'build_base'), - ('build_lib', 'build_lib'), - ('force', 'force')) - if self.package is None: - self.package = self.distribution.ext_package - self.extensions = self.distribution.ext_modules - self.libraries = self.distribution.libraries or [] - self.py_modules = self.distribution.py_modules or [] - self.data_files = self.distribution.data_files or [] - - if self.build_src is None: - plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2]) - self.build_src = os.path.join(self.build_base, 'src'+plat_specifier) - - # py_modules_dict is used in build_py.find_package_modules - self.py_modules_dict = {} - - if self.f2pyflags: - if self.f2py_opts: - log.warn('ignoring --f2pyflags as --f2py-opts already used') - else: - self.f2py_opts = self.f2pyflags - self.f2pyflags = None - if self.f2py_opts is None: - self.f2py_opts = [] - else: - self.f2py_opts = shlex.split(self.f2py_opts) - - if self.swigflags: - if self.swig_opts: - log.warn('ignoring --swigflags as --swig-opts already used') - else: - self.swig_opts = self.swigflags - self.swigflags = None - - if self.swig_opts is None: - self.swig_opts = [] - else: - self.swig_opts = shlex.split(self.swig_opts) - - # use options from build_ext command - build_ext = self.get_finalized_command('build_ext') - if self.inplace is None: - self.inplace = build_ext.inplace - if self.swig_cpp is None: - self.swig_cpp = build_ext.swig_cpp - for c in ['swig', 'swig_opt']: - o = '--'+c.replace('_', '-') - v = getattr(build_ext, c, None) - if v: - if getattr(self, c): - log.warn('both build_src and build_ext define %s option' % (o)) - else: - log.info('using "%s=%s" option from build_ext command' % (o, v)) - setattr(self, c, v) - - def run(self): - log.info("build_src") - if not (self.extensions or self.libraries): - return - self.build_sources() - - def build_sources(self): - - if self.inplace: - self.get_package_dir = \ - self.get_finalized_command('build_py').get_package_dir - - self.build_py_modules_sources() - - for libname_info in self.libraries: - self.build_library_sources(*libname_info) - - if self.extensions: - self.check_extensions_list(self.extensions) - - for ext in self.extensions: - self.build_extension_sources(ext) - - self.build_data_files_sources() - self.build_npy_pkg_config() - - def build_data_files_sources(self): - if not self.data_files: - return - log.info('building data_files sources') - from numpy.distutils.misc_util import get_data_files - new_data_files = [] - for data in self.data_files: - if isinstance(data, str): - new_data_files.append(data) - elif isinstance(data, tuple): - d, files = data - if self.inplace: - build_dir = self.get_package_dir('.'.join(d.split(os.sep))) - else: - build_dir = os.path.join(self.build_src, d) - funcs = [f for f in files if hasattr(f, '__call__')] - files = [f for f in files if not hasattr(f, '__call__')] - for f in funcs: - if f.__code__.co_argcount==1: - s = f(build_dir) 
- else: - s = f() - if s is not None: - if isinstance(s, list): - files.extend(s) - elif isinstance(s, str): - files.append(s) - else: - raise TypeError(repr(s)) - filenames = get_data_files((d, files)) - new_data_files.append((d, filenames)) - else: - raise TypeError(repr(data)) - self.data_files[:] = new_data_files - - - def _build_npy_pkg_config(self, info, gd): - template, install_dir, subst_dict = info - template_dir = os.path.dirname(template) - for k, v in gd.items(): - subst_dict[k] = v - - if self.inplace == 1: - generated_dir = os.path.join(template_dir, install_dir) - else: - generated_dir = os.path.join(self.build_src, template_dir, - install_dir) - generated = os.path.basename(os.path.splitext(template)[0]) - generated_path = os.path.join(generated_dir, generated) - if not os.path.exists(generated_dir): - os.makedirs(generated_dir) - - subst_vars(generated_path, template, subst_dict) - - # Where to install relatively to install prefix - full_install_dir = os.path.join(template_dir, install_dir) - return full_install_dir, generated_path - - def build_npy_pkg_config(self): - log.info('build_src: building npy-pkg config files') - - # XXX: another ugly workaround to circumvent distutils brain damage. We - # need the install prefix here, but finalizing the options of the - # install command when only building sources cause error. Instead, we - # copy the install command instance, and finalize the copy so that it - # does not disrupt how distutils want to do things when with the - # original install command instance. - install_cmd = copy.copy(get_cmd('install')) - if not install_cmd.finalized == 1: - install_cmd.finalize_options() - build_npkg = False - if self.inplace == 1: - top_prefix = '.' - build_npkg = True - elif hasattr(install_cmd, 'install_libbase'): - top_prefix = install_cmd.install_libbase - build_npkg = True - - if build_npkg: - for pkg, infos in self.distribution.installed_pkg_config.items(): - pkg_path = self.distribution.package_dir[pkg] - prefix = os.path.join(os.path.abspath(top_prefix), pkg_path) - d = {'prefix': prefix} - for info in infos: - install_dir, generated = self._build_npy_pkg_config(info, d) - self.distribution.data_files.append((install_dir, - [generated])) - - def build_py_modules_sources(self): - if not self.py_modules: - return - log.info('building py_modules sources') - new_py_modules = [] - for source in self.py_modules: - if is_sequence(source) and len(source)==3: - package, module_base, source = source - if self.inplace: - build_dir = self.get_package_dir(package) - else: - build_dir = os.path.join(self.build_src, - os.path.join(*package.split('.'))) - if hasattr(source, '__call__'): - target = os.path.join(build_dir, module_base + '.py') - source = source(target) - if source is None: - continue - modules = [(package, module_base, source)] - if package not in self.py_modules_dict: - self.py_modules_dict[package] = [] - self.py_modules_dict[package] += modules - else: - new_py_modules.append(source) - self.py_modules[:] = new_py_modules - - def build_library_sources(self, lib_name, build_info): - sources = list(build_info.get('sources', [])) - - if not sources: - return - - log.info('building library "%s" sources' % (lib_name)) - - sources = self.generate_sources(sources, (lib_name, build_info)) - - sources = self.template_sources(sources, (lib_name, build_info)) - - sources, h_files = self.filter_h_files(sources) - - if h_files: - log.info('%s - nothing done with h_files = %s', - self.package, h_files) - - #for f in h_files: - # 
self.distribution.headers.append((lib_name,f)) - - build_info['sources'] = sources - return - - def build_extension_sources(self, ext): - - sources = list(ext.sources) - - log.info('building extension "%s" sources' % (ext.name)) - - fullname = self.get_ext_fullname(ext.name) - - modpath = fullname.split('.') - package = '.'.join(modpath[0:-1]) - - if self.inplace: - self.ext_target_dir = self.get_package_dir(package) - - sources = self.generate_sources(sources, ext) - sources = self.template_sources(sources, ext) - sources = self.swig_sources(sources, ext) - sources = self.f2py_sources(sources, ext) - sources = self.pyrex_sources(sources, ext) - - sources, py_files = self.filter_py_files(sources) - - if package not in self.py_modules_dict: - self.py_modules_dict[package] = [] - modules = [] - for f in py_files: - module = os.path.splitext(os.path.basename(f))[0] - modules.append((package, module, f)) - self.py_modules_dict[package] += modules - - sources, h_files = self.filter_h_files(sources) - - if h_files: - log.info('%s - nothing done with h_files = %s', - package, h_files) - #for f in h_files: - # self.distribution.headers.append((package,f)) - - ext.sources = sources - - def generate_sources(self, sources, extension): - new_sources = [] - func_sources = [] - for source in sources: - if is_string(source): - new_sources.append(source) - else: - func_sources.append(source) - if not func_sources: - return new_sources - if self.inplace and not is_sequence(extension): - build_dir = self.ext_target_dir - else: - if is_sequence(extension): - name = extension[0] - # if 'include_dirs' not in extension[1]: - # extension[1]['include_dirs'] = [] - # incl_dirs = extension[1]['include_dirs'] - else: - name = extension.name - # incl_dirs = extension.include_dirs - #if self.build_src not in incl_dirs: - # incl_dirs.append(self.build_src) - build_dir = os.path.join(*([self.build_src] - +name.split('.')[:-1])) - self.mkpath(build_dir) - - if self.verbose_cfg: - new_level = log.INFO - else: - new_level = log.WARN - old_level = log.set_threshold(new_level) - - for func in func_sources: - source = func(extension, build_dir) - if not source: - continue - if is_sequence(source): - [log.info(" adding '%s' to sources." % (s,)) for s in source] - new_sources.extend(source) - else: - log.info(" adding '%s' to sources." 
% (source,)) - new_sources.append(source) - log.set_threshold(old_level) - return new_sources - - def filter_py_files(self, sources): - return self.filter_files(sources, ['.py']) - - def filter_h_files(self, sources): - return self.filter_files(sources, ['.h', '.hpp', '.inc']) - - def filter_files(self, sources, exts = []): - new_sources = [] - files = [] - for source in sources: - (base, ext) = os.path.splitext(source) - if ext in exts: - files.append(source) - else: - new_sources.append(source) - return new_sources, files - - def template_sources(self, sources, extension): - new_sources = [] - if is_sequence(extension): - depends = extension[1].get('depends') - include_dirs = extension[1].get('include_dirs') - else: - depends = extension.depends - include_dirs = extension.include_dirs - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.src': # Template file - if self.inplace: - target_dir = os.path.dirname(base) - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - self.mkpath(target_dir) - target_file = os.path.join(target_dir, os.path.basename(base)) - if (self.force or newer_group([source] + depends, target_file)): - if _f_pyf_ext_match(base): - log.info("from_template:> %s" % (target_file)) - outstr = process_f_file(source) - else: - log.info("conv_template:> %s" % (target_file)) - outstr = process_c_file(source) - with open(target_file, 'w') as fid: - fid.write(outstr) - if _header_ext_match(target_file): - d = os.path.dirname(target_file) - if d not in include_dirs: - log.info(" adding '%s' to include_dirs." % (d)) - include_dirs.append(d) - new_sources.append(target_file) - else: - new_sources.append(source) - return new_sources - - def pyrex_sources(self, sources, extension): - """Pyrex not supported; this remains for Cython support (see below)""" - new_sources = [] - ext_name = extension.name.split('.')[-1] - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.pyx': - target_file = self.generate_a_pyrex_source(base, ext_name, - source, - extension) - new_sources.append(target_file) - else: - new_sources.append(source) - return new_sources - - def generate_a_pyrex_source(self, base, ext_name, source, extension): - """Pyrex is not supported, but some projects monkeypatch this method. - - That allows compiling Cython code, see gh-6955. - This method will remain here for compatibility reasons. - """ - return [] - - def f2py_sources(self, sources, extension): - new_sources = [] - f2py_sources = [] - f_sources = [] - f2py_targets = {} - target_dirs = [] - ext_name = extension.name.split('.')[-1] - skip_f2py = 0 - - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.pyf': # F2PY interface file - if self.inplace: - target_dir = os.path.dirname(base) - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - if os.path.isfile(source): - name = get_f2py_modulename(source) - if name != ext_name: - raise DistutilsSetupError('mismatch of extension names: %s ' - 'provides %r but expected %r' % ( - source, name, ext_name)) - target_file = os.path.join(target_dir, name+'module.c') - else: - log.debug(' source %s does not exist: skipping f2py\'ing.' \ - % (source)) - name = ext_name - skip_f2py = 1 - target_file = os.path.join(target_dir, name+'module.c') - if not os.path.isfile(target_file): - log.warn(' target %s does not exist:\n '\ - 'Assuming %smodule.c was generated with '\ - '"build_src --inplace" command.' 
\ - % (target_file, name)) - target_dir = os.path.dirname(base) - target_file = os.path.join(target_dir, name+'module.c') - if not os.path.isfile(target_file): - raise DistutilsSetupError("%r missing" % (target_file,)) - log.info(' Yes! Using %r as up-to-date target.' \ - % (target_file)) - target_dirs.append(target_dir) - f2py_sources.append(source) - f2py_targets[source] = target_file - new_sources.append(target_file) - elif fortran_ext_match(ext): - f_sources.append(source) - else: - new_sources.append(source) - - if not (f2py_sources or f_sources): - return new_sources - - for d in target_dirs: - self.mkpath(d) - - f2py_options = extension.f2py_options + self.f2py_opts - - if self.distribution.libraries: - for name, build_info in self.distribution.libraries: - if name in extension.libraries: - f2py_options.extend(build_info.get('f2py_options', [])) - - log.info("f2py options: %s" % (f2py_options)) - - if f2py_sources: - if len(f2py_sources) != 1: - raise DistutilsSetupError( - 'only one .pyf file is allowed per extension module but got'\ - ' more: %r' % (f2py_sources,)) - source = f2py_sources[0] - target_file = f2py_targets[source] - target_dir = os.path.dirname(target_file) or '.' - depends = [source] + extension.depends - if (self.force or newer_group(depends, target_file, 'newer')) \ - and not skip_f2py: - log.info("f2py: %s" % (source)) - from numpy.f2py import f2py2e - f2py2e.run_main(f2py_options - + ['--build-dir', target_dir, source]) - else: - log.debug(" skipping '%s' f2py interface (up-to-date)" % (source)) - else: - #XXX TODO: --inplace support for sdist command - if is_sequence(extension): - name = extension[0] - else: name = extension.name - target_dir = os.path.join(*([self.build_src] - +name.split('.')[:-1])) - target_file = os.path.join(target_dir, ext_name + 'module.c') - new_sources.append(target_file) - depends = f_sources + extension.depends - if (self.force or newer_group(depends, target_file, 'newer')) \ - and not skip_f2py: - log.info("f2py:> %s" % (target_file)) - self.mkpath(target_dir) - from numpy.f2py import f2py2e - f2py2e.run_main(f2py_options + ['--lower', - '--build-dir', target_dir]+\ - ['-m', ext_name]+f_sources) - else: - log.debug(" skipping f2py fortran files for '%s' (up-to-date)"\ - % (target_file)) - - if not os.path.isfile(target_file): - raise DistutilsError("f2py target file %r not generated" % (target_file,)) - - build_dir = os.path.join(self.build_src, target_dir) - target_c = os.path.join(build_dir, 'fortranobject.c') - target_h = os.path.join(build_dir, 'fortranobject.h') - log.info(" adding '%s' to sources." % (target_c)) - new_sources.append(target_c) - if build_dir not in extension.include_dirs: - log.info(" adding '%s' to include_dirs." 
% (build_dir)) - extension.include_dirs.append(build_dir) - - if not skip_f2py: - import numpy.f2py - d = os.path.dirname(numpy.f2py.__file__) - source_c = os.path.join(d, 'src', 'fortranobject.c') - source_h = os.path.join(d, 'src', 'fortranobject.h') - if newer(source_c, target_c) or newer(source_h, target_h): - self.mkpath(os.path.dirname(target_c)) - self.copy_file(source_c, target_c) - self.copy_file(source_h, target_h) - else: - if not os.path.isfile(target_c): - raise DistutilsSetupError("f2py target_c file %r not found" % (target_c,)) - if not os.path.isfile(target_h): - raise DistutilsSetupError("f2py target_h file %r not found" % (target_h,)) - - for name_ext in ['-f2pywrappers.f', '-f2pywrappers2.f90']: - filename = os.path.join(target_dir, ext_name + name_ext) - if os.path.isfile(filename): - log.info(" adding '%s' to sources." % (filename)) - f_sources.append(filename) - - return new_sources + f_sources - - def swig_sources(self, sources, extension): - # Assuming SWIG 1.3.14 or later. See compatibility note in - # http://www.swig.org/Doc1.3/Python.html#Python_nn6 - - new_sources = [] - swig_sources = [] - swig_targets = {} - target_dirs = [] - py_files = [] # swig generated .py files - target_ext = '.c' - if '-c++' in extension.swig_opts: - typ = 'c++' - is_cpp = True - extension.swig_opts.remove('-c++') - elif self.swig_cpp: - typ = 'c++' - is_cpp = True - else: - typ = None - is_cpp = False - skip_swig = 0 - ext_name = extension.name.split('.')[-1] - - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.i': # SWIG interface file - # the code below assumes that the sources list - # contains not more than one .i SWIG interface file - if self.inplace: - target_dir = os.path.dirname(base) - py_target_dir = self.ext_target_dir - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - py_target_dir = target_dir - if os.path.isfile(source): - name = get_swig_modulename(source) - if name != ext_name[1:]: - raise DistutilsSetupError( - 'mismatch of extension names: %s provides %r' - ' but expected %r' % (source, name, ext_name[1:])) - if typ is None: - typ = get_swig_target(source) - is_cpp = typ=='c++' - else: - typ2 = get_swig_target(source) - if typ2 is None: - log.warn('source %r does not define swig target, assuming %s swig target' \ - % (source, typ)) - elif typ!=typ2: - log.warn('expected %r but source %r defines %r swig target' \ - % (typ, source, typ2)) - if typ2=='c++': - log.warn('resetting swig target to c++ (some targets may have .c extension)') - is_cpp = True - else: - log.warn('assuming that %r has c++ swig target' % (source)) - if is_cpp: - target_ext = '.cpp' - target_file = os.path.join(target_dir, '%s_wrap%s' \ - % (name, target_ext)) - else: - log.warn(' source %s does not exist: skipping swig\'ing.' \ - % (source)) - name = ext_name[1:] - skip_swig = 1 - target_file = _find_swig_target(target_dir, name) - if not os.path.isfile(target_file): - log.warn(' target %s does not exist:\n '\ - 'Assuming %s_wrap.{c,cpp} was generated with '\ - '"build_src --inplace" command.' \ - % (target_file, name)) - target_dir = os.path.dirname(base) - target_file = _find_swig_target(target_dir, name) - if not os.path.isfile(target_file): - raise DistutilsSetupError("%r missing" % (target_file,)) - log.warn(' Yes! Using %r as up-to-date target.' 
\ - % (target_file)) - target_dirs.append(target_dir) - new_sources.append(target_file) - py_files.append(os.path.join(py_target_dir, name+'.py')) - swig_sources.append(source) - swig_targets[source] = new_sources[-1] - else: - new_sources.append(source) - - if not swig_sources: - return new_sources - - if skip_swig: - return new_sources + py_files - - for d in target_dirs: - self.mkpath(d) - - swig = self.swig or self.find_swig() - swig_cmd = [swig, "-python"] + extension.swig_opts - if is_cpp: - swig_cmd.append('-c++') - for d in extension.include_dirs: - swig_cmd.append('-I'+d) - for source in swig_sources: - target = swig_targets[source] - depends = [source] + extension.depends - if self.force or newer_group(depends, target, 'newer'): - log.info("%s: %s" % (os.path.basename(swig) \ - + (is_cpp and '++' or ''), source)) - self.spawn(swig_cmd + self.swig_opts \ - + ["-o", target, '-outdir', py_target_dir, source]) - else: - log.debug(" skipping '%s' swig interface (up-to-date)" \ - % (source)) - - return new_sources + py_files - -_f_pyf_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match -_header_ext_match = re.compile(r'.*\.(inc|h|hpp)\Z', re.I).match - -#### SWIG related auxiliary functions #### -_swig_module_name_match = re.compile(r'\s*%module\s*(.*\(\s*package\s*=\s*"(?P[\w_]+)".*\)|)\s*(?P[\w_]+)', - re.I).match -_has_c_header = re.compile(r'-\*-\s*c\s*-\*-', re.I).search -_has_cpp_header = re.compile(r'-\*-\s*c\+\+\s*-\*-', re.I).search - -def get_swig_target(source): - with open(source) as f: - result = None - line = f.readline() - if _has_cpp_header(line): - result = 'c++' - if _has_c_header(line): - result = 'c' - return result - -def get_swig_modulename(source): - with open(source) as f: - name = None - for line in f: - m = _swig_module_name_match(line) - if m: - name = m.group('name') - break - return name - -def _find_swig_target(target_dir, name): - for ext in ['.cpp', '.c']: - target = os.path.join(target_dir, '%s_wrap%s' % (name, ext)) - if os.path.isfile(target): - break - return target - -#### F2PY related auxiliary functions #### - -_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]+)', - re.I).match -_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]*?' - r'__user__[\w_]*)', re.I).match - -def get_f2py_modulename(source): - name = None - with open(source) as f: - for line in f: - m = _f2py_module_name_match(line) - if m: - if _f2py_user_module_name_match(line): # skip *__user__* names - continue - name = m.group('name') - break - return name - -########################################## diff --git a/numpy/distutils/command/config.py b/numpy/distutils/command/config.py deleted file mode 100644 index 8bdfb7ec5823..000000000000 --- a/numpy/distutils/command/config.py +++ /dev/null @@ -1,516 +0,0 @@ -# Added Fortran compiler support to config. Currently useful only for -# try_compile call. try_run works but is untested for most of Fortran -# compilers (they must define linker_exe first). 
-# Pearu Peterson -import os -import signal -import subprocess -import sys -import textwrap -import warnings - -from distutils.command.config import config as old_config -from distutils.command.config import LANG_EXT -from distutils import log -from distutils.file_util import copy_file -from distutils.ccompiler import CompileError, LinkError -import distutils -from numpy.distutils.exec_command import filepath_from_subprocess_output -from numpy.distutils.mingw32ccompiler import generate_manifest -from numpy.distutils.command.autodist import (check_gcc_function_attribute, - check_gcc_function_attribute_with_intrinsics, - check_gcc_variable_attribute, - check_gcc_version_at_least, - check_inline, - check_restrict, - check_compiler_gcc) - -LANG_EXT['f77'] = '.f' -LANG_EXT['f90'] = '.f90' - -class config(old_config): - old_config.user_options += [ - ('fcompiler=', None, "specify the Fortran compiler type"), - ] - - def initialize_options(self): - self.fcompiler = None - old_config.initialize_options(self) - - def _check_compiler (self): - old_config._check_compiler(self) - from numpy.distutils.fcompiler import FCompiler, new_fcompiler - - if sys.platform == 'win32' and (self.compiler.compiler_type in - ('msvc', 'intelw', 'intelemw')): - # XXX: hack to circumvent a python 2.6 bug with msvc9compiler: - # initialize call query_vcvarsall, which throws an OSError, and - # causes an error along the way without much information. We try to - # catch it here, hoping it is early enough, and print a helpful - # message instead of Error: None. - if not self.compiler.initialized: - try: - self.compiler.initialize() - except OSError as e: - msg = textwrap.dedent("""\ - Could not initialize compiler instance: do you have Visual Studio - installed? If you are trying to build with MinGW, please use "python setup.py - build -c mingw32" instead. If you have Visual Studio installed, check it is - correctly installed, and the right version (VS 2015 as of this writing). - - Original exception was: %s, and the Compiler class was %s - ============================================================================""") \ - % (e, self.compiler.__class__.__name__) - print(textwrap.dedent("""\ - ============================================================================""")) - raise distutils.errors.DistutilsPlatformError(msg) from e - - # After MSVC is initialized, add an explicit /MANIFEST to linker - # flags. See issues gh-4245 and gh-4101 for details. Also - # relevant are issues 4431 and 16296 on the Python bug tracker. 
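An aside on the `LANG_EXT` registration near the top of this file, before the MSVC-specific handling continues below: distutils' config command picks the probe file's suffix from `LANG_EXT`, so adding the Fortran suffixes is what makes `try_compile(..., lang='f77')` emit a `.f` source for the Fortran compiler. A hedged sketch of what a caller relies on (assumes a configured Fortran compiler is available):

    from distutils.command.config import LANG_EXT

    LANG_EXT['f77'] = '.f'    # fixed-form probe files
    LANG_EXT['f90'] = '.f90'  # free-form probe files

    # A config command instance could then probe the compiler with the
    # smallest valid fixed-form program unit (six leading spaces, 'end'):
    #   ok = cfg.try_compile('      end\n', lang='f77')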
- from distutils import msvc9compiler - if msvc9compiler.get_build_version() >= 10: - for ldflags in [self.compiler.ldflags_shared, - self.compiler.ldflags_shared_debug]: - if '/MANIFEST' not in ldflags: - ldflags.append('/MANIFEST') - - if not isinstance(self.fcompiler, FCompiler): - self.fcompiler = new_fcompiler(compiler=self.fcompiler, - dry_run=self.dry_run, force=1, - c_compiler=self.compiler) - if self.fcompiler is not None: - self.fcompiler.customize(self.distribution) - if self.fcompiler.get_version(): - self.fcompiler.customize_cmd(self) - self.fcompiler.show_customization() - - def _wrap_method(self, mth, lang, args): - from distutils.ccompiler import CompileError - from distutils.errors import DistutilsExecError - save_compiler = self.compiler - if lang in ['f77', 'f90']: - self.compiler = self.fcompiler - if self.compiler is None: - raise CompileError('%s compiler is not set' % (lang,)) - try: - ret = mth(*((self,)+args)) - except (DistutilsExecError, CompileError) as e: - self.compiler = save_compiler - raise CompileError from e - self.compiler = save_compiler - return ret - - def _compile (self, body, headers, include_dirs, lang): - src, obj = self._wrap_method(old_config._compile, lang, - (body, headers, include_dirs, lang)) - # _compile in unixcompiler.py sometimes creates .d dependency files. - # Clean them up. - self.temp_files.append(obj + '.d') - return src, obj - - def _link (self, body, - headers, include_dirs, - libraries, library_dirs, lang): - if self.compiler.compiler_type=='msvc': - libraries = (libraries or [])[:] - library_dirs = (library_dirs or [])[:] - if lang in ['f77', 'f90']: - lang = 'c' # always use system linker when using MSVC compiler - if self.fcompiler: - for d in self.fcompiler.library_dirs or []: - # correct path when compiling in Cygwin but with - # normal Win Python - if d.startswith('/usr/lib'): - try: - d = subprocess.check_output(['cygpath', - '-w', d]) - except (OSError, subprocess.CalledProcessError): - pass - else: - d = filepath_from_subprocess_output(d) - library_dirs.append(d) - for libname in self.fcompiler.libraries or []: - if libname not in libraries: - libraries.append(libname) - for libname in libraries: - if libname.startswith('msvc'): continue - fileexists = False - for libdir in library_dirs or []: - libfile = os.path.join(libdir, '%s.lib' % (libname)) - if os.path.isfile(libfile): - fileexists = True - break - if fileexists: continue - # make g77-compiled static libs available to MSVC - fileexists = False - for libdir in library_dirs: - libfile = os.path.join(libdir, 'lib%s.a' % (libname)) - if os.path.isfile(libfile): - # copy libname.a file to name.lib so that MSVC linker - # can find it - libfile2 = os.path.join(libdir, '%s.lib' % (libname)) - copy_file(libfile, libfile2) - self.temp_files.append(libfile2) - fileexists = True - break - if fileexists: continue - log.warn('could not find library %r in directories %s' \ - % (libname, library_dirs)) - elif self.compiler.compiler_type == 'mingw32': - generate_manifest(self) - return self._wrap_method(old_config._link, lang, - (body, headers, include_dirs, - libraries, library_dirs, lang)) - - def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'): - self._check_compiler() - return self.try_compile( - "/* we need a dummy line to make distutils happy */", - [header], include_dirs) - - def check_decl(self, symbol, - headers=None, include_dirs=None): - self._check_compiler() - body = textwrap.dedent(""" - int main(void) - { - #ifndef %s - (void) %s; - 
#endif - ; - return 0; - }""") % (symbol, symbol) - - return self.try_compile(body, headers, include_dirs) - - def check_macro_true(self, symbol, - headers=None, include_dirs=None): - self._check_compiler() - body = textwrap.dedent(""" - int main(void) - { - #if %s - #else - #error false or undefined macro - #endif - ; - return 0; - }""") % (symbol,) - - return self.try_compile(body, headers, include_dirs) - - def check_type(self, type_name, headers=None, include_dirs=None, - library_dirs=None): - """Check type availability. Return True if the type can be compiled, - False otherwise""" - self._check_compiler() - - # First check the type can be compiled - body = textwrap.dedent(r""" - int main(void) { - if ((%(name)s *) 0) - return 0; - if (sizeof (%(name)s)) - return 0; - } - """) % {'name': type_name} - - st = False - try: - try: - self._compile(body % {'type': type_name}, - headers, include_dirs, 'c') - st = True - except distutils.errors.CompileError: - st = False - finally: - self._clean() - - return st - - def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None): - """Check size of a given type.""" - self._check_compiler() - - # First check the type can be compiled - body = textwrap.dedent(r""" - typedef %(type)s npy_check_sizeof_type; - int main (void) - { - static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)]; - test_array [0] = 0 - - ; - return 0; - } - """) - self._compile(body % {'type': type_name}, - headers, include_dirs, 'c') - self._clean() - - if expected: - body = textwrap.dedent(r""" - typedef %(type)s npy_check_sizeof_type; - int main (void) - { - static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)]; - test_array [0] = 0 - - ; - return 0; - } - """) - for size in expected: - try: - self._compile(body % {'type': type_name, 'size': size}, - headers, include_dirs, 'c') - self._clean() - return size - except CompileError: - pass - - # this fails to *compile* if size > sizeof(type) - body = textwrap.dedent(r""" - typedef %(type)s npy_check_sizeof_type; - int main (void) - { - static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)]; - test_array [0] = 0 - - ; - return 0; - } - """) - - # The principle is simple: we first find low and high bounds of size - # for the type, where low/high are looked up on a log scale. Then, we - # do a binary search to find the exact size between low and high - low = 0 - mid = 0 - while True: - try: - self._compile(body % {'type': type_name, 'size': mid}, - headers, include_dirs, 'c') - self._clean() - break - except CompileError: - #log.info("failure to test for bound %d" % mid) - low = mid + 1 - mid = 2 * mid + 1 - - high = mid - # Binary search: - while low != high: - mid = (high - low) // 2 + low - try: - self._compile(body % {'type': type_name, 'size': mid}, - headers, include_dirs, 'c') - self._clean() - high = mid - except CompileError: - low = mid + 1 - return low - - def check_func(self, func, - headers=None, include_dirs=None, - libraries=None, library_dirs=None, - decl=False, call=False, call_args=None): - # clean up distutils's config a bit: add void to main(), and - # return a value. - self._check_compiler() - body = [] - if decl: - if type(decl) == str: - body.append(decl) - else: - body.append("int %s (void);" % func) - # Handle MSVC intrinsics: force MS compiler to make a function call. 
- # Useful to test for some functions when built with optimization on, to - # avoid build error because the intrinsic and our 'fake' test - # declaration do not match. - body.append("#ifdef _MSC_VER") - body.append("#pragma function(%s)" % func) - body.append("#endif") - body.append("int main (void) {") - if call: - if call_args is None: - call_args = '' - body.append(" %s(%s);" % (func, call_args)) - else: - body.append(" %s;" % func) - body.append(" return 0;") - body.append("}") - body = '\n'.join(body) + "\n" - - return self.try_link(body, headers, include_dirs, - libraries, library_dirs) - - def check_funcs_once(self, funcs, - headers=None, include_dirs=None, - libraries=None, library_dirs=None, - decl=False, call=False, call_args=None): - """Check a list of functions at once. - - This is useful to speed up things, since all the functions in the funcs - list will be put in one compilation unit. - - Arguments - --------- - funcs : seq - list of functions to test - include_dirs : seq - list of header paths - libraries : seq - list of libraries to link the code snippet to - library_dirs : seq - list of library paths - decl : dict - for every (key, value), the declaration in the value will be - used for function in key. If a function is not in the - dictionary, no declaration will be used. - call : dict - for every item (f, value), if the value is True, a call will be - done to the function f. - """ - self._check_compiler() - body = [] - if decl: - for f, v in decl.items(): - if v: - body.append("int %s (void);" % f) - - # Handle MS intrinsics. See check_func for more info. - body.append("#ifdef _MSC_VER") - for func in funcs: - body.append("#pragma function(%s)" % func) - body.append("#endif") - - body.append("int main (void) {") - if call: - for f in funcs: - if f in call and call[f]: - if not (call_args and f in call_args and call_args[f]): - args = '' - else: - args = call_args[f] - body.append(" %s(%s);" % (f, args)) - else: - body.append(" %s;" % f) - else: - for f in funcs: - body.append(" %s;" % f) - body.append(" return 0;") - body.append("}") - body = '\n'.join(body) + "\n" - - return self.try_link(body, headers, include_dirs, - libraries, library_dirs) - - def check_inline(self): - """Return the inline keyword recognized by the compiler, empty string - otherwise.""" - return check_inline(self) - - def check_restrict(self): - """Return the restrict keyword recognized by the compiler, empty string - otherwise.""" - return check_restrict(self) - - def check_compiler_gcc(self): - """Return True if the C compiler is gcc""" - return check_compiler_gcc(self) - - def check_gcc_function_attribute(self, attribute, name): - return check_gcc_function_attribute(self, attribute, name) - - def check_gcc_function_attribute_with_intrinsics(self, attribute, name, - code, include): - return check_gcc_function_attribute_with_intrinsics(self, attribute, - name, code, include) - - def check_gcc_variable_attribute(self, attribute): - return check_gcc_variable_attribute(self, attribute) - - def check_gcc_version_at_least(self, major, minor=0, patchlevel=0): - """Return True if the GCC version is greater than or equal to the - specified version.""" - return check_gcc_version_at_least(self, major, minor, patchlevel) - - def get_output(self, body, headers=None, include_dirs=None, - libraries=None, library_dirs=None, - lang="c", use_tee=None): - """Try to compile, link to an executable, and run a program - built from 'body' and 'headers'. Returns the exit status code - of the program and its output. 
- """ - # 2008-11-16, RemoveMe - warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" - "Usage of get_output is deprecated: please do not \n" - "use it anymore, and avoid configuration checks \n" - "involving running executable on the target machine.\n" - "+++++++++++++++++++++++++++++++++++++++++++++++++\n", - DeprecationWarning, stacklevel=2) - self._check_compiler() - exitcode, output = 255, '' - try: - grabber = GrabStdout() - try: - src, obj, exe = self._link(body, headers, include_dirs, - libraries, library_dirs, lang) - grabber.restore() - except Exception: - output = grabber.data - grabber.restore() - raise - exe = os.path.join('.', exe) - try: - # specify cwd arg for consistency with - # historic usage pattern of exec_command() - # also, note that exe appears to be a string, - # which exec_command() handled, but we now - # use a list for check_output() -- this assumes - # that exe is always a single command - output = subprocess.check_output([exe], cwd='.') - except subprocess.CalledProcessError as exc: - exitstatus = exc.returncode - output = '' - except OSError: - # preserve the EnvironmentError exit status - # used historically in exec_command() - exitstatus = 127 - output = '' - else: - output = filepath_from_subprocess_output(output) - if hasattr(os, 'WEXITSTATUS'): - exitcode = os.WEXITSTATUS(exitstatus) - if os.WIFSIGNALED(exitstatus): - sig = os.WTERMSIG(exitstatus) - log.error('subprocess exited with signal %d' % (sig,)) - if sig == signal.SIGINT: - # control-C - raise KeyboardInterrupt - else: - exitcode = exitstatus - log.info("success!") - except (CompileError, LinkError): - log.info("failure.") - self._clean() - return exitcode, output - -class GrabStdout: - - def __init__(self): - self.sys_stdout = sys.stdout - self.data = '' - sys.stdout = self - - def write (self, data): - self.sys_stdout.write(data) - self.data += data - - def flush (self): - self.sys_stdout.flush() - - def restore(self): - sys.stdout = self.sys_stdout diff --git a/numpy/distutils/command/config_compiler.py b/numpy/distutils/command/config_compiler.py deleted file mode 100644 index ca4099886d8c..000000000000 --- a/numpy/distutils/command/config_compiler.py +++ /dev/null @@ -1,126 +0,0 @@ -from distutils.core import Command -from numpy.distutils import log - -#XXX: Linker flags - -def show_fortran_compilers(_cache=None): - # Using cache to prevent infinite recursion. - if _cache: - return - elif _cache is None: - _cache = [] - _cache.append(1) - from numpy.distutils.fcompiler import show_fcompilers - import distutils.core - dist = distutils.core._setup_distribution - show_fcompilers(dist) - -class config_fc(Command): - """ Distutils command to hold user specified options - to Fortran compilers. - - config_fc command is used by the FCompiler.customize() method. 
- """ - - description = "specify Fortran 77/Fortran 90 compiler information" - - user_options = [ - ('fcompiler=', None, "specify Fortran compiler type"), - ('f77exec=', None, "specify F77 compiler command"), - ('f90exec=', None, "specify F90 compiler command"), - ('f77flags=', None, "specify F77 compiler flags"), - ('f90flags=', None, "specify F90 compiler flags"), - ('opt=', None, "specify optimization flags"), - ('arch=', None, "specify architecture specific optimization flags"), - ('debug', 'g', "compile with debugging information"), - ('noopt', None, "compile without optimization"), - ('noarch', None, "compile without arch-dependent optimization"), - ] - - help_options = [ - ('help-fcompiler', None, "list available Fortran compilers", - show_fortran_compilers), - ] - - boolean_options = ['debug', 'noopt', 'noarch'] - - def initialize_options(self): - self.fcompiler = None - self.f77exec = None - self.f90exec = None - self.f77flags = None - self.f90flags = None - self.opt = None - self.arch = None - self.debug = None - self.noopt = None - self.noarch = None - - def finalize_options(self): - log.info('unifying config_fc, config, build_clib, build_ext, build commands --fcompiler options') - build_clib = self.get_finalized_command('build_clib') - build_ext = self.get_finalized_command('build_ext') - config = self.get_finalized_command('config') - build = self.get_finalized_command('build') - cmd_list = [self, config, build_clib, build_ext, build] - for a in ['fcompiler']: - l = [] - for c in cmd_list: - v = getattr(c, a) - if v is not None: - if not isinstance(v, str): v = v.compiler_type - if v not in l: l.append(v) - if not l: v1 = None - else: v1 = l[0] - if len(l)>1: - log.warn(' commands have different --%s options: %s'\ - ', using first in list as default' % (a, l)) - if v1: - for c in cmd_list: - if getattr(c, a) is None: setattr(c, a, v1) - - def run(self): - # Do nothing. - return - -class config_cc(Command): - """ Distutils command to hold user specified options - to C/C++ compilers. - """ - - description = "specify C/C++ compiler information" - - user_options = [ - ('compiler=', None, "specify C/C++ compiler type"), - ] - - def initialize_options(self): - self.compiler = None - - def finalize_options(self): - log.info('unifying config_cc, config, build_clib, build_ext, build commands --compiler options') - build_clib = self.get_finalized_command('build_clib') - build_ext = self.get_finalized_command('build_ext') - config = self.get_finalized_command('config') - build = self.get_finalized_command('build') - cmd_list = [self, config, build_clib, build_ext, build] - for a in ['compiler']: - l = [] - for c in cmd_list: - v = getattr(c, a) - if v is not None: - if not isinstance(v, str): v = v.compiler_type - if v not in l: l.append(v) - if not l: v1 = None - else: v1 = l[0] - if len(l)>1: - log.warn(' commands have different --%s options: %s'\ - ', using first in list as default' % (a, l)) - if v1: - for c in cmd_list: - if getattr(c, a) is None: setattr(c, a, v1) - return - - def run(self): - # Do nothing. - return diff --git a/numpy/distutils/command/develop.py b/numpy/distutils/command/develop.py deleted file mode 100644 index af24baf2e7e1..000000000000 --- a/numpy/distutils/command/develop.py +++ /dev/null @@ -1,15 +0,0 @@ -""" Override the develop command from setuptools so we can ensure that our -generated files (from build_src or build_scripts) are properly converted to real -files with filenames. 
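The --fcompiler/--compiler unification that config_fc and config_cc perform in finalize_options() boils down to a first-value-wins merge; a stand-alone sketch of the same rule (simplified, detached from the distutils command objects):

    def unify_option(commands, attr):
        # Gather distinct values of e.g. 'fcompiler' across commands,
        # warn on conflicts, and copy the first value found onto any
        # command that left the option unset.
        values = []
        for cmd in commands:
            v = getattr(cmd, attr, None)
            if v is not None and v not in values:
                values.append(v)
        if len(values) > 1:
            print('commands have different --%s options: %s, using first'
                  % (attr, values))
        for cmd in commands:
            if values and getattr(cmd, attr, None) is None:
                setattr(cmd, attr, values[0])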
- -""" -from setuptools.command.develop import develop as old_develop - -class develop(old_develop): - __doc__ = old_develop.__doc__ - def install_for_development(self): - # Build sources in-place, too. - self.reinitialize_command('build_src', inplace=1) - # Make sure scripts are built. - self.run_command('build_scripts') - old_develop.install_for_development(self) diff --git a/numpy/distutils/command/egg_info.py b/numpy/distutils/command/egg_info.py deleted file mode 100644 index 14c62b4d1b90..000000000000 --- a/numpy/distutils/command/egg_info.py +++ /dev/null @@ -1,25 +0,0 @@ -import sys - -from setuptools.command.egg_info import egg_info as _egg_info - -class egg_info(_egg_info): - def run(self): - if 'sdist' in sys.argv: - import warnings - import textwrap - msg = textwrap.dedent(""" - `build_src` is being run, this may lead to missing - files in your sdist! You want to use distutils.sdist - instead of the setuptools version: - - from distutils.command.sdist import sdist - cmdclass={'sdist': sdist}" - - See numpy's setup.py or gh-7131 for details.""") - warnings.warn(msg, UserWarning, stacklevel=2) - - # We need to ensure that build_src has been executed in order to give - # setuptools' egg_info command real filenames instead of functions which - # generate files. - self.run_command("build_src") - _egg_info.run(self) diff --git a/numpy/distutils/command/install.py b/numpy/distutils/command/install.py deleted file mode 100644 index efa9b4740fc4..000000000000 --- a/numpy/distutils/command/install.py +++ /dev/null @@ -1,79 +0,0 @@ -import sys -if 'setuptools' in sys.modules: - import setuptools.command.install as old_install_mod - have_setuptools = True -else: - import distutils.command.install as old_install_mod - have_setuptools = False -from distutils.file_util import write_file - -old_install = old_install_mod.install - -class install(old_install): - - # Always run install_clib - the command is cheap, so no need to bypass it; - # but it's not run by setuptools -- so it's run again in install_data - sub_commands = old_install.sub_commands + [ - ('install_clib', lambda x: True) - ] - - def finalize_options (self): - old_install.finalize_options(self) - self.install_lib = self.install_libbase - - def setuptools_run(self): - """ The setuptools version of the .run() method. - - We must pull in the entire code so we can override the level used in the - _getframe() call since we wrap this call by one more level. - """ - from distutils.command.install import install as distutils_install - - # Explicit request for old-style install? Just do it - if self.old_and_unmanageable or self.single_version_externally_managed: - return distutils_install.run(self) - - # Attempt to detect whether we were called from setup() or by another - # command. If we were called by setup(), our caller will be the - # 'run_command' method in 'distutils.dist', and *its* caller will be - # the 'run_commands' method. If we were called any other way, our - # immediate caller *might* be 'run_command', but it won't have been - # called by 'run_commands'. This is slightly kludgy, but seems to - # work. - # - caller = sys._getframe(3) - caller_module = caller.f_globals.get('__name__', '') - caller_name = caller.f_code.co_name - - if caller_module != 'distutils.dist' or caller_name!='run_commands': - # We weren't called from the command line or setup(), so we - # should run in backward-compatibility mode to support bdist_* - # commands. 
- distutils_install.run(self) - else: - self.do_egg_install() - - def run(self): - if not have_setuptools: - r = old_install.run(self) - else: - r = self.setuptools_run() - if self.record: - # bdist_rpm fails when INSTALLED_FILES contains - # paths with spaces. Such paths must be enclosed - # with double-quotes. - with open(self.record) as f: - lines = [] - need_rewrite = False - for l in f: - l = l.rstrip() - if ' ' in l: - need_rewrite = True - l = '"%s"' % (l) - lines.append(l) - if need_rewrite: - self.execute(write_file, - (self.record, lines), - "re-writing list of installed files to '%s'" % - self.record) - return r diff --git a/numpy/distutils/command/install_clib.py b/numpy/distutils/command/install_clib.py deleted file mode 100644 index aa2e5594c3c2..000000000000 --- a/numpy/distutils/command/install_clib.py +++ /dev/null @@ -1,40 +0,0 @@ -import os -from distutils.core import Command -from distutils.ccompiler import new_compiler -from numpy.distutils.misc_util import get_cmd - -class install_clib(Command): - description = "Command to install installable C libraries" - - user_options = [] - - def initialize_options(self): - self.install_dir = None - self.outfiles = [] - - def finalize_options(self): - self.set_undefined_options('install', ('install_lib', 'install_dir')) - - def run (self): - build_clib_cmd = get_cmd("build_clib") - if not build_clib_cmd.build_clib: - # can happen if the user specified `--skip-build` - build_clib_cmd.finalize_options() - build_dir = build_clib_cmd.build_clib - - # We need the compiler to get the library name -> filename association - if not build_clib_cmd.compiler: - compiler = new_compiler(compiler=None) - compiler.customize(self.distribution) - else: - compiler = build_clib_cmd.compiler - - for l in self.distribution.installed_libraries: - target_dir = os.path.join(self.install_dir, l.target_dir) - name = compiler.library_filename(l.name) - source = os.path.join(build_dir, name) - self.mkpath(target_dir) - self.outfiles.append(self.copy_file(source, target_dir)[0]) - - def get_outputs(self): - return self.outfiles diff --git a/numpy/distutils/command/install_data.py b/numpy/distutils/command/install_data.py deleted file mode 100644 index 0a2e68ae192a..000000000000 --- a/numpy/distutils/command/install_data.py +++ /dev/null @@ -1,24 +0,0 @@ -import sys -have_setuptools = ('setuptools' in sys.modules) - -from distutils.command.install_data import install_data as old_install_data - -#data installer with improved intelligence over distutils -#data files are copied into the project directory instead -#of willy-nilly -class install_data (old_install_data): - - def run(self): - old_install_data.run(self) - - if have_setuptools: - # Run install_clib again, since setuptools does not run sub-commands - # of install automatically - self.run_command('install_clib') - - def finalize_options (self): - self.set_undefined_options('install', - ('install_lib', 'install_dir'), - ('root', 'root'), - ('force', 'force'), - ) diff --git a/numpy/distutils/command/install_headers.py b/numpy/distutils/command/install_headers.py deleted file mode 100644 index 91eba6f17c29..000000000000 --- a/numpy/distutils/command/install_headers.py +++ /dev/null @@ -1,25 +0,0 @@ -import os -from distutils.command.install_headers import install_headers as old_install_headers - -class install_headers (old_install_headers): - - def run (self): - headers = self.distribution.headers - if not headers: - return - - prefix = os.path.dirname(self.install_dir) - for header in headers: - if 
isinstance(header, tuple): - # Kind of a hack, but I don't know where else to change this... - if header[0] == 'numpy._core': - header = ('numpy', header[1]) - if os.path.splitext(header[1])[1] == '.inc': - continue - d = os.path.join(*([prefix]+header[0].split('.'))) - header = header[1] - else: - d = self.install_dir - self.mkpath(d) - (out, _) = self.copy_file(header, d) - self.outfiles.append(out) diff --git a/numpy/distutils/command/sdist.py b/numpy/distutils/command/sdist.py deleted file mode 100644 index e34193883dea..000000000000 --- a/numpy/distutils/command/sdist.py +++ /dev/null @@ -1,27 +0,0 @@ -import sys -if 'setuptools' in sys.modules: - from setuptools.command.sdist import sdist as old_sdist -else: - from distutils.command.sdist import sdist as old_sdist - -from numpy.distutils.misc_util import get_data_files - -class sdist(old_sdist): - - def add_defaults (self): - old_sdist.add_defaults(self) - - dist = self.distribution - - if dist.has_data_files(): - for data in dist.data_files: - self.filelist.extend(get_data_files(data)) - - if dist.has_headers(): - headers = [] - for h in dist.headers: - if isinstance(h, str): headers.append(h) - else: headers.append(h[1]) - self.filelist.extend(headers) - - return diff --git a/numpy/distutils/core.py b/numpy/distutils/core.py deleted file mode 100644 index c4a14e59901f..000000000000 --- a/numpy/distutils/core.py +++ /dev/null @@ -1,215 +0,0 @@ -import sys -from distutils.core import Distribution - -if 'setuptools' in sys.modules: - have_setuptools = True - from setuptools import setup as old_setup - # easy_install imports math, it may be picked up from cwd - from setuptools.command import easy_install - try: - # very old versions of setuptools don't have this - from setuptools.command import bdist_egg - except ImportError: - have_setuptools = False -else: - from distutils.core import setup as old_setup - have_setuptools = False - -import warnings -import distutils.core -import distutils.dist - -from numpy.distutils.extension import Extension # noqa: F401 -from numpy.distutils.numpy_distribution import NumpyDistribution -from numpy.distutils.command import config, config_compiler, \ - build, build_py, build_ext, build_clib, build_src, build_scripts, \ - sdist, install_data, install_headers, install, bdist_rpm, \ - install_clib -from numpy.distutils.misc_util import is_sequence, is_string - -numpy_cmdclass = {'build': build.build, - 'build_src': build_src.build_src, - 'build_scripts': build_scripts.build_scripts, - 'config_cc': config_compiler.config_cc, - 'config_fc': config_compiler.config_fc, - 'config': config.config, - 'build_ext': build_ext.build_ext, - 'build_py': build_py.build_py, - 'build_clib': build_clib.build_clib, - 'sdist': sdist.sdist, - 'install_data': install_data.install_data, - 'install_headers': install_headers.install_headers, - 'install_clib': install_clib.install_clib, - 'install': install.install, - 'bdist_rpm': bdist_rpm.bdist_rpm, - } -if have_setuptools: - # Use our own versions of develop and egg_info to ensure that build_src is - # handled appropriately. 
- from numpy.distutils.command import develop, egg_info - numpy_cmdclass['bdist_egg'] = bdist_egg.bdist_egg - numpy_cmdclass['develop'] = develop.develop - numpy_cmdclass['easy_install'] = easy_install.easy_install - numpy_cmdclass['egg_info'] = egg_info.egg_info - -def _dict_append(d, **kws): - for k, v in kws.items(): - if k not in d: - d[k] = v - continue - dv = d[k] - if isinstance(dv, tuple): - d[k] = dv + tuple(v) - elif isinstance(dv, list): - d[k] = dv + list(v) - elif isinstance(dv, dict): - _dict_append(dv, **v) - elif is_string(dv): - d[k] = dv + v - else: - raise TypeError(repr(type(dv))) - -def _command_line_ok(_cache=None): - """ Return True if command line does not contain any - help or display requests. - """ - if _cache: - return _cache[0] - elif _cache is None: - _cache = [] - ok = True - display_opts = ['--'+n for n in Distribution.display_option_names] - for o in Distribution.display_options: - if o[1]: - display_opts.append('-'+o[1]) - for arg in sys.argv: - if arg.startswith('--help') or arg=='-h' or arg in display_opts: - ok = False - break - _cache.append(ok) - return ok - -def get_distribution(always=False): - dist = distutils.core._setup_distribution - # XXX Hack to get numpy installable with easy_install. - # The problem is easy_install runs it's own setup(), which - # sets up distutils.core._setup_distribution. However, - # when our setup() runs, that gets overwritten and lost. - # We can't use isinstance, as the DistributionWithoutHelpCommands - # class is local to a function in setuptools.command.easy_install - if dist is not None and \ - 'DistributionWithoutHelpCommands' in repr(dist): - dist = None - if always and dist is None: - dist = NumpyDistribution() - return dist - -def setup(**attr): - - cmdclass = numpy_cmdclass.copy() - - new_attr = attr.copy() - if 'cmdclass' in new_attr: - cmdclass.update(new_attr['cmdclass']) - new_attr['cmdclass'] = cmdclass - - if 'configuration' in new_attr: - # To avoid calling configuration if there are any errors - # or help request in command in the line. 
- configuration = new_attr.pop('configuration') - - old_dist = distutils.core._setup_distribution - old_stop = distutils.core._setup_stop_after - distutils.core._setup_distribution = None - distutils.core._setup_stop_after = "commandline" - try: - dist = setup(**new_attr) - finally: - distutils.core._setup_distribution = old_dist - distutils.core._setup_stop_after = old_stop - if dist.help or not _command_line_ok(): - # probably displayed help, skip running any commands - return dist - - # create setup dictionary and append to new_attr - config = configuration() - if hasattr(config, 'todict'): - config = config.todict() - _dict_append(new_attr, **config) - - # Move extension source libraries to libraries - libraries = [] - for ext in new_attr.get('ext_modules', []): - new_libraries = [] - for item in ext.libraries: - if is_sequence(item): - lib_name, build_info = item - _check_append_ext_library(libraries, lib_name, build_info) - new_libraries.append(lib_name) - elif is_string(item): - new_libraries.append(item) - else: - raise TypeError("invalid description of extension module " - "library %r" % (item,)) - ext.libraries = new_libraries - if libraries: - if 'libraries' not in new_attr: - new_attr['libraries'] = [] - for item in libraries: - _check_append_library(new_attr['libraries'], item) - - # sources in ext_modules or libraries may contain header files - if ('ext_modules' in new_attr or 'libraries' in new_attr) \ - and 'headers' not in new_attr: - new_attr['headers'] = [] - - # Use our custom NumpyDistribution class instead of distutils' one - new_attr['distclass'] = NumpyDistribution - - return old_setup(**new_attr) - -def _check_append_library(libraries, item): - for libitem in libraries: - if is_sequence(libitem): - if is_sequence(item): - if item[0]==libitem[0]: - if item[1] is libitem[1]: - return - warnings.warn("[0] libraries list contains %r with" - " different build_info" % (item[0],), - stacklevel=2) - break - else: - if item==libitem[0]: - warnings.warn("[1] libraries list contains %r with" - " no build_info" % (item[0],), - stacklevel=2) - break - else: - if is_sequence(item): - if item[0]==libitem: - warnings.warn("[2] libraries list contains %r with" - " no build_info" % (item[0],), - stacklevel=2) - break - else: - if item==libitem: - return - libraries.append(item) - -def _check_append_ext_library(libraries, lib_name, build_info): - for item in libraries: - if is_sequence(item): - if item[0]==lib_name: - if item[1] is build_info: - return - warnings.warn("[3] libraries list contains %r with" - " different build_info" % (lib_name,), - stacklevel=2) - break - elif item==lib_name: - warnings.warn("[4] libraries list contains %r with" - " no build_info" % (lib_name,), - stacklevel=2) - break - libraries.append((lib_name, build_info)) diff --git a/numpy/distutils/cpuinfo.py b/numpy/distutils/cpuinfo.py deleted file mode 100644 index 77620210981d..000000000000 --- a/numpy/distutils/cpuinfo.py +++ /dev/null @@ -1,683 +0,0 @@ -#!/usr/bin/env python3 -""" -cpuinfo - -Copyright 2002 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy (BSD style) license. See LICENSE.txt that came with -this distribution for specifics. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
-Pearu Peterson - -""" -__all__ = ['cpu'] - -import os -import platform -import re -import sys -import types -import warnings - -from subprocess import getstatusoutput - - -def getoutput(cmd, successful_status=(0,), stacklevel=1): - try: - status, output = getstatusoutput(cmd) - except OSError as e: - warnings.warn(str(e), UserWarning, stacklevel=stacklevel) - return False, "" - if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status: - return True, output - return False, output - -def command_info(successful_status=(0,), stacklevel=1, **kw): - info = {} - for key in kw: - ok, output = getoutput(kw[key], successful_status=successful_status, - stacklevel=stacklevel+1) - if ok: - info[key] = output.strip() - return info - -def command_by_line(cmd, successful_status=(0,), stacklevel=1): - ok, output = getoutput(cmd, successful_status=successful_status, - stacklevel=stacklevel+1) - if not ok: - return - for line in output.splitlines(): - yield line.strip() - -def key_value_from_command(cmd, sep, successful_status=(0,), - stacklevel=1): - d = {} - for line in command_by_line(cmd, successful_status=successful_status, - stacklevel=stacklevel+1): - l = [s.strip() for s in line.split(sep, 1)] - if len(l) == 2: - d[l[0]] = l[1] - return d - -class CPUInfoBase: - """Holds CPU information and provides methods for requiring - the availability of various CPU features. - """ - - def _try_call(self, func): - try: - return func() - except Exception: - pass - - def __getattr__(self, name): - if not name.startswith('_'): - if hasattr(self, '_'+name): - attr = getattr(self, '_'+name) - if isinstance(attr, types.MethodType): - return lambda func=self._try_call,attr=attr : func(attr) - else: - return lambda : None - raise AttributeError(name) - - def _getNCPUs(self): - return 1 - - def __get_nbits(self): - abits = platform.architecture()[0] - nbits = re.compile(r'(\d+)bit').search(abits).group(1) - return nbits - - def _is_32bit(self): - return self.__get_nbits() == '32' - - def _is_64bit(self): - return self.__get_nbits() == '64' - -class LinuxCPUInfo(CPUInfoBase): - - info = None - - def __init__(self): - if self.info is not None: - return - info = [ {} ] - ok, output = getoutput('uname -m') - if ok: - info[0]['uname_m'] = output.strip() - try: - fo = open('/proc/cpuinfo') - except OSError as e: - warnings.warn(str(e), UserWarning, stacklevel=2) - else: - for line in fo: - name_value = [s.strip() for s in line.split(':', 1)] - if len(name_value) != 2: - continue - name, value = name_value - if not info or name in info[-1]: # next processor - info.append({}) - info[-1][name] = value - fo.close() - self.__class__.info = info - - def _not_impl(self): pass - - # Athlon - - def _is_AMD(self): - return self.info[0]['vendor_id']=='AuthenticAMD' - - def _is_AthlonK6_2(self): - return self._is_AMD() and self.info[0]['model'] == '2' - - def _is_AthlonK6_3(self): - return self._is_AMD() and self.info[0]['model'] == '3' - - def _is_AthlonK6(self): - return re.match(r'.*?AMD-K6', self.info[0]['model name']) is not None - - def _is_AthlonK7(self): - return re.match(r'.*?AMD-K7', self.info[0]['model name']) is not None - - def _is_AthlonMP(self): - return re.match(r'.*?Athlon\(tm\) MP\b', - self.info[0]['model name']) is not None - - def _is_AMD64(self): - return self.is_AMD() and self.info[0]['family'] == '15' - - def _is_Athlon64(self): - return re.match(r'.*?Athlon\(tm\) 64\b', - self.info[0]['model name']) is not None - - def _is_AthlonHX(self): - return re.match(r'.*?Athlon HX\b', - self.info[0]['model 
name']) is not None - - def _is_Opteron(self): - return re.match(r'.*?Opteron\b', - self.info[0]['model name']) is not None - - def _is_Hammer(self): - return re.match(r'.*?Hammer\b', - self.info[0]['model name']) is not None - - # Alpha - - def _is_Alpha(self): - return self.info[0]['cpu']=='Alpha' - - def _is_EV4(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4' - - def _is_EV5(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5' - - def _is_EV56(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56' - - def _is_PCA56(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56' - - # Intel - - #XXX - _is_i386 = _not_impl - - def _is_Intel(self): - return self.info[0]['vendor_id']=='GenuineIntel' - - def _is_i486(self): - return self.info[0]['cpu']=='i486' - - def _is_i586(self): - return self.is_Intel() and self.info[0]['cpu family'] == '5' - - def _is_i686(self): - return self.is_Intel() and self.info[0]['cpu family'] == '6' - - def _is_Celeron(self): - return re.match(r'.*?Celeron', - self.info[0]['model name']) is not None - - def _is_Pentium(self): - return re.match(r'.*?Pentium', - self.info[0]['model name']) is not None - - def _is_PentiumII(self): - return re.match(r'.*?Pentium.*?II\b', - self.info[0]['model name']) is not None - - def _is_PentiumPro(self): - return re.match(r'.*?PentiumPro\b', - self.info[0]['model name']) is not None - - def _is_PentiumMMX(self): - return re.match(r'.*?Pentium.*?MMX\b', - self.info[0]['model name']) is not None - - def _is_PentiumIII(self): - return re.match(r'.*?Pentium.*?III\b', - self.info[0]['model name']) is not None - - def _is_PentiumIV(self): - return re.match(r'.*?Pentium.*?(IV|4)\b', - self.info[0]['model name']) is not None - - def _is_PentiumM(self): - return re.match(r'.*?Pentium.*?M\b', - self.info[0]['model name']) is not None - - def _is_Prescott(self): - return self.is_PentiumIV() and self.has_sse3() - - def _is_Nocona(self): - return (self.is_Intel() - and (self.info[0]['cpu family'] == '6' - or self.info[0]['cpu family'] == '15') - and (self.has_sse3() and not self.has_ssse3()) - and re.match(r'.*?\blm\b', self.info[0]['flags']) is not None) - - def _is_Core2(self): - return (self.is_64bit() and self.is_Intel() and - re.match(r'.*?Core\(TM\)2\b', - self.info[0]['model name']) is not None) - - def _is_Itanium(self): - return re.match(r'.*?Itanium\b', - self.info[0]['family']) is not None - - def _is_XEON(self): - return re.match(r'.*?XEON\b', - self.info[0]['model name'], re.IGNORECASE) is not None - - _is_Xeon = _is_XEON - - # Varia - - def _is_singleCPU(self): - return len(self.info) == 1 - - def _getNCPUs(self): - return len(self.info) - - def _has_fdiv_bug(self): - return self.info[0]['fdiv_bug']=='yes' - - def _has_f00f_bug(self): - return self.info[0]['f00f_bug']=='yes' - - def _has_mmx(self): - return re.match(r'.*?\bmmx\b', self.info[0]['flags']) is not None - - def _has_sse(self): - return re.match(r'.*?\bsse\b', self.info[0]['flags']) is not None - - def _has_sse2(self): - return re.match(r'.*?\bsse2\b', self.info[0]['flags']) is not None - - def _has_sse3(self): - return re.match(r'.*?\bpni\b', self.info[0]['flags']) is not None - - def _has_ssse3(self): - return re.match(r'.*?\bssse3\b', self.info[0]['flags']) is not None - - def _has_3dnow(self): - return re.match(r'.*?\b3dnow\b', self.info[0]['flags']) is not None - - def _has_3dnowext(self): - return re.match(r'.*?\b3dnowext\b', self.info[0]['flags']) is not None - -class 
IRIXCPUInfo(CPUInfoBase): - info = None - - def __init__(self): - if self.info is not None: - return - info = key_value_from_command('sysconf', sep=' ', - successful_status=(0, 1)) - self.__class__.info = info - - def _not_impl(self): pass - - def _is_singleCPU(self): - return self.info.get('NUM_PROCESSORS') == '1' - - def _getNCPUs(self): - return int(self.info.get('NUM_PROCESSORS', 1)) - - def __cputype(self, n): - return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n) - def _is_r2000(self): return self.__cputype(2000) - def _is_r3000(self): return self.__cputype(3000) - def _is_r3900(self): return self.__cputype(3900) - def _is_r4000(self): return self.__cputype(4000) - def _is_r4100(self): return self.__cputype(4100) - def _is_r4300(self): return self.__cputype(4300) - def _is_r4400(self): return self.__cputype(4400) - def _is_r4600(self): return self.__cputype(4600) - def _is_r4650(self): return self.__cputype(4650) - def _is_r5000(self): return self.__cputype(5000) - def _is_r6000(self): return self.__cputype(6000) - def _is_r8000(self): return self.__cputype(8000) - def _is_r10000(self): return self.__cputype(10000) - def _is_r12000(self): return self.__cputype(12000) - def _is_rorion(self): return self.__cputype('orion') - - def get_ip(self): - try: return self.info.get('MACHINE') - except Exception: pass - def __machine(self, n): - return self.info.get('MACHINE').lower() == 'ip%s' % (n) - def _is_IP19(self): return self.__machine(19) - def _is_IP20(self): return self.__machine(20) - def _is_IP21(self): return self.__machine(21) - def _is_IP22(self): return self.__machine(22) - def _is_IP22_4k(self): return self.__machine(22) and self._is_r4000() - def _is_IP22_5k(self): return self.__machine(22) and self._is_r5000() - def _is_IP24(self): return self.__machine(24) - def _is_IP25(self): return self.__machine(25) - def _is_IP26(self): return self.__machine(26) - def _is_IP27(self): return self.__machine(27) - def _is_IP28(self): return self.__machine(28) - def _is_IP30(self): return self.__machine(30) - def _is_IP32(self): return self.__machine(32) - def _is_IP32_5k(self): return self.__machine(32) and self._is_r5000() - def _is_IP32_10k(self): return self.__machine(32) and self._is_r10000() - - -class DarwinCPUInfo(CPUInfoBase): - info = None - - def __init__(self): - if self.info is not None: - return - info = command_info(arch='arch', - machine='machine') - info['sysctl_hw'] = key_value_from_command('sysctl hw', sep='=') - self.__class__.info = info - - def _not_impl(self): pass - - def _getNCPUs(self): - return int(self.info['sysctl_hw'].get('hw.ncpu', 1)) - - def _is_Power_Macintosh(self): - return self.info['sysctl_hw']['hw.machine']=='Power Macintosh' - - def _is_i386(self): - return self.info['arch']=='i386' - def _is_ppc(self): - return self.info['arch']=='ppc' - - def __machine(self, n): - return self.info['machine'] == 'ppc%s'%n - def _is_ppc601(self): return self.__machine(601) - def _is_ppc602(self): return self.__machine(602) - def _is_ppc603(self): return self.__machine(603) - def _is_ppc603e(self): return self.__machine('603e') - def _is_ppc604(self): return self.__machine(604) - def _is_ppc604e(self): return self.__machine('604e') - def _is_ppc620(self): return self.__machine(620) - def _is_ppc630(self): return self.__machine(630) - def _is_ppc740(self): return self.__machine(740) - def _is_ppc7400(self): return self.__machine(7400) - def _is_ppc7450(self): return self.__machine(7450) - def _is_ppc750(self): return self.__machine(750) - def 
_is_ppc403(self): return self.__machine(403)
-    def _is_ppc505(self): return self.__machine(505)
-    def _is_ppc801(self): return self.__machine(801)
-    def _is_ppc821(self): return self.__machine(821)
-    def _is_ppc823(self): return self.__machine(823)
-    def _is_ppc860(self): return self.__machine(860)
-
-
-class SunOSCPUInfo(CPUInfoBase):
-
-    info = None
-
-    def __init__(self):
-        if self.info is not None:
-            return
-        info = command_info(arch='arch',
-                            mach='mach',
-                            uname_i='uname_i',
-                            isainfo_b='isainfo -b',
-                            isainfo_n='isainfo -n',
-                            )
-        info['uname_X'] = key_value_from_command('uname -X', sep='=')
-        for line in command_by_line('psrinfo -v 0'):
-            m = re.match(r'\s*The (?P<p>[\w\d]+) processor operates at', line)
-            if m:
-                info['processor'] = m.group('p')
-                break
-        self.__class__.info = info
-
-    def _not_impl(self): pass
-
-    def _is_i386(self):
-        return self.info['isainfo_n']=='i386'
-    def _is_sparc(self):
-        return self.info['isainfo_n']=='sparc'
-    def _is_sparcv9(self):
-        return self.info['isainfo_n']=='sparcv9'
-
-    def _getNCPUs(self):
-        return int(self.info['uname_X'].get('NumCPU', 1))
-
-    def _is_sun4(self):
-        return self.info['arch']=='sun4'
-
-    def _is_SUNW(self):
-        return re.match(r'SUNW', self.info['uname_i']) is not None
-    def _is_sparcstation5(self):
-        return re.match(r'.*SPARCstation-5', self.info['uname_i']) is not None
-    def _is_ultra1(self):
-        return re.match(r'.*Ultra-1', self.info['uname_i']) is not None
-    def _is_ultra250(self):
-        return re.match(r'.*Ultra-250', self.info['uname_i']) is not None
-    def _is_ultra2(self):
-        return re.match(r'.*Ultra-2', self.info['uname_i']) is not None
-    def _is_ultra30(self):
-        return re.match(r'.*Ultra-30', self.info['uname_i']) is not None
-    def _is_ultra4(self):
-        return re.match(r'.*Ultra-4', self.info['uname_i']) is not None
-    def _is_ultra5_10(self):
-        return re.match(r'.*Ultra-5_10', self.info['uname_i']) is not None
-    def _is_ultra5(self):
-        return re.match(r'.*Ultra-5', self.info['uname_i']) is not None
-    def _is_ultra60(self):
-        return re.match(r'.*Ultra-60', self.info['uname_i']) is not None
-    def _is_ultra80(self):
-        return re.match(r'.*Ultra-80', self.info['uname_i']) is not None
-    def _is_ultraenterprice(self):
-        return re.match(r'.*Ultra-Enterprise', self.info['uname_i']) is not None
-    def _is_ultraenterprice10k(self):
-        return re.match(r'.*Ultra-Enterprise-10000', self.info['uname_i']) is not None
-    def _is_sunfire(self):
-        return re.match(r'.*Sun-Fire', self.info['uname_i']) is not None
-    def _is_ultra(self):
-        return re.match(r'.*Ultra', self.info['uname_i']) is not None
-
-    def _is_cpusparcv7(self):
-        return self.info['processor']=='sparcv7'
-    def _is_cpusparcv8(self):
-        return self.info['processor']=='sparcv8'
-    def _is_cpusparcv9(self):
-        return self.info['processor']=='sparcv9'
-
-class Win32CPUInfo(CPUInfoBase):
-
-    info = None
-    pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor"
-    # XXX: what does the value of
-    #   HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0
-    # mean?
-
-    def __init__(self):
-        if self.info is not None:
-            return
-        info = []
-        try:
-            #XXX: Bad style to use so long `try:...except:...`. Fix it!
-            import winreg
-
-            prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)"
-                              r"\s+stepping\s+(?P<STP>\d+)", re.IGNORECASE)
-            chnd=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, self.pkey)
-            pnum=0
-            while True:
-                try:
-                    proc=winreg.EnumKey(chnd, pnum)
-                except winreg.error:
-                    break
-                else:
-                    pnum+=1
-                    info.append({"Processor":proc})
-                    phnd=winreg.OpenKey(chnd, proc)
-                    pidx=0
-                    while True:
-                        try:
-                            name, value, vtpe=winreg.EnumValue(phnd, pidx)
-                        except winreg.error:
-                            break
-                        else:
-                            pidx=pidx+1
-                            info[-1][name]=value
-                            if name=="Identifier":
-                                srch=prgx.search(value)
-                                if srch:
-                                    info[-1]["Family"]=int(srch.group("FML"))
-                                    info[-1]["Model"]=int(srch.group("MDL"))
-                                    info[-1]["Stepping"]=int(srch.group("STP"))
-        except Exception as e:
-            print(e, '(ignoring)')
-        self.__class__.info = info
-
-    def _not_impl(self): pass
-
-    # Athlon
-
-    def _is_AMD(self):
-        return self.info[0]['VendorIdentifier']=='AuthenticAMD'
-
-    def _is_Am486(self):
-        return self.is_AMD() and self.info[0]['Family']==4
-
-    def _is_Am5x86(self):
-        return self.is_AMD() and self.info[0]['Family']==4
-
-    def _is_AMDK5(self):
-        return self.is_AMD() and self.info[0]['Family']==5 \
-               and self.info[0]['Model'] in [0, 1, 2, 3]
-
-    def _is_AMDK6(self):
-        return self.is_AMD() and self.info[0]['Family']==5 \
-               and self.info[0]['Model'] in [6, 7]
-
-    def _is_AMDK6_2(self):
-        return self.is_AMD() and self.info[0]['Family']==5 \
-               and self.info[0]['Model']==8
-
-    def _is_AMDK6_3(self):
-        return self.is_AMD() and self.info[0]['Family']==5 \
-               and self.info[0]['Model']==9
-
-    def _is_AMDK7(self):
-        return self.is_AMD() and self.info[0]['Family'] == 6
-
-    # To reliably distinguish between the different types of AMD64 chips
-    # (Athlon64, Operton, Athlon64 X2, Semperon, Turion 64, etc.) would
-    # require looking at the 'brand' from cpuid
-
-    def _is_AMD64(self):
-        return self.is_AMD() and self.info[0]['Family'] == 15
-
-    # Intel
-
-    def _is_Intel(self):
-        return self.info[0]['VendorIdentifier']=='GenuineIntel'
-
-    def _is_i386(self):
-        return self.info[0]['Family']==3
-
-    def _is_i486(self):
-        return self.info[0]['Family']==4
-
-    def _is_i586(self):
-        return self.is_Intel() and self.info[0]['Family']==5
-
-    def _is_i686(self):
-        return self.is_Intel() and self.info[0]['Family']==6
-
-    def _is_Pentium(self):
-        return self.is_Intel() and self.info[0]['Family']==5
-
-    def _is_PentiumMMX(self):
-        return self.is_Intel() and self.info[0]['Family']==5 \
-               and self.info[0]['Model']==4
-
-    def _is_PentiumPro(self):
-        return self.is_Intel() and self.info[0]['Family']==6 \
-               and self.info[0]['Model']==1
-
-    def _is_PentiumII(self):
-        return self.is_Intel() and self.info[0]['Family']==6 \
-               and self.info[0]['Model'] in [3, 5, 6]
-
-    def _is_PentiumIII(self):
-        return self.is_Intel() and self.info[0]['Family']==6 \
-               and self.info[0]['Model'] in [7, 8, 9, 10, 11]
-
-    def _is_PentiumIV(self):
-        return self.is_Intel() and self.info[0]['Family']==15
-
-    def _is_PentiumM(self):
-        return self.is_Intel() and self.info[0]['Family'] == 6 \
-               and self.info[0]['Model'] in [9, 13, 14]
-
-    def _is_Core2(self):
-        return self.is_Intel() and self.info[0]['Family'] == 6 \
-               and self.info[0]['Model'] in [15, 16, 17]
-
-    # Varia
-
-    def _is_singleCPU(self):
-        return len(self.info) == 1
-
-    def _getNCPUs(self):
-        return len(self.info)
-
-    def _has_mmx(self):
-        if self.is_Intel():
-            return (self.info[0]['Family']==5 and self.info[0]['Model']==4) \
-                   or (self.info[0]['Family'] in [6, 15])
-        elif self.is_AMD():
-            return self.info[0]['Family'] in [5, 6, 15]
-        else:
-            return False
-
-
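The Identifier parsing above can be sanity-checked against a typical registry value (the string below is illustrative):

    import re
    prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)"
                      r"\s+stepping\s+(?P<STP>\d+)", re.IGNORECASE)
    m = prgx.search("x86 Family 6 Model 158 Stepping 10")
    assert (int(m.group("FML")), int(m.group("MDL")),
            int(m.group("STP"))) == (6, 158, 10)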
def _has_sse(self): - if self.is_Intel(): - return ((self.info[0]['Family']==6 and - self.info[0]['Model'] in [7, 8, 9, 10, 11]) - or self.info[0]['Family']==15) - elif self.is_AMD(): - return ((self.info[0]['Family']==6 and - self.info[0]['Model'] in [6, 7, 8, 10]) - or self.info[0]['Family']==15) - else: - return False - - def _has_sse2(self): - if self.is_Intel(): - return self.is_Pentium4() or self.is_PentiumM() \ - or self.is_Core2() - elif self.is_AMD(): - return self.is_AMD64() - else: - return False - - def _has_3dnow(self): - return self.is_AMD() and self.info[0]['Family'] in [5, 6, 15] - - def _has_3dnowext(self): - return self.is_AMD() and self.info[0]['Family'] in [6, 15] - -if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?) - cpuinfo = LinuxCPUInfo -elif sys.platform.startswith('irix'): - cpuinfo = IRIXCPUInfo -elif sys.platform == 'darwin': - cpuinfo = DarwinCPUInfo -elif sys.platform.startswith('sunos'): - cpuinfo = SunOSCPUInfo -elif sys.platform.startswith('win32'): - cpuinfo = Win32CPUInfo -elif sys.platform.startswith('cygwin'): - cpuinfo = LinuxCPUInfo -#XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices. -else: - cpuinfo = CPUInfoBase - -cpu = cpuinfo() - -#if __name__ == "__main__": -# -# cpu.is_blaa() -# cpu.is_Intel() -# cpu.is_Alpha() -# -# print('CPU information:'), -# for name in dir(cpuinfo): -# if name[0]=='_' and name[1]!='_': -# r = getattr(cpu,name[1:])() -# if r: -# if r!=1: -# print('%s=%s' %(name[1:],r)) -# else: -# print(name[1:]), -# print() diff --git a/numpy/distutils/exec_command.py b/numpy/distutils/exec_command.py deleted file mode 100644 index c701465d9ade..000000000000 --- a/numpy/distutils/exec_command.py +++ /dev/null @@ -1,315 +0,0 @@ -""" -exec_command - -Implements exec_command function that is (almost) equivalent to -commands.getstatusoutput function but on NT, DOS systems the -returned status is actually correct (though, the returned status -values may be different by a factor). In addition, exec_command -takes keyword arguments for (re-)defining environment variables. - -Provides functions: - - exec_command --- execute command in a specified directory and - in the modified environment. - find_executable --- locate a command using info from environment - variable PATH. Equivalent to posix `which` - command. - -Author: Pearu Peterson -Created: 11 January 2003 - -Requires: Python 2.x - -Successfully tested on: - -======== ============ ================================================= -os.name sys.platform comments -======== ============ ================================================= -posix linux2 Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3 - PyCrust 0.9.3, Idle 1.0.2 -posix linux2 Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2 -posix sunos5 SunOS 5.9, Python 2.2, 2.3.2 -posix darwin Darwin 7.2.0, Python 2.3 -nt win32 Windows Me - Python 2.3(EE), Idle 1.0, PyCrust 0.7.2 - Python 2.1.1 Idle 0.8 -nt win32 Windows 98, Python 2.1.1. Idle 0.8 -nt win32 Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests - fail i.e. redefining environment variables may - not work. FIXED: don't use cygwin echo! - Comment: also `cmd /c echo` will not work - but redefining environment variables do work. -posix cygwin Cygwin 98-4.10, Python 2.3.3(cygming special) -nt win32 Windows XP, Python 2.3.3 -======== ============ ================================================= - -Known bugs: - -* Tests, that send messages to stderr, fail when executed from MSYS prompt - because the messages are lost at some point. 
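Since exec_command is deprecated in favour of subprocess (see the deprecation notice further down), a rough modern stand-in for its (status, output) contract looks like this; a sketch only, not part of the module:

    import subprocess

    def getstatusoutput_compat(command, cwd=None):
        # Run via the shell, fold stderr into stdout, and return
        # (returncode, text), approximating exec_command's behaviour.
        proc = subprocess.run(command, shell=True, cwd=cwd,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT, text=True)
        return proc.returncode, proc.stdout.rstrip('\n')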
- -""" -__all__ = ['exec_command', 'find_executable'] - -import os -import sys -import subprocess -import locale -import warnings - -from numpy.distutils.misc_util import is_sequence, make_temp_file -from numpy.distutils import log - -def filepath_from_subprocess_output(output): - """ - Convert `bytes` in the encoding used by a subprocess into a filesystem-appropriate `str`. - - Inherited from `exec_command`, and possibly incorrect. - """ - mylocale = locale.getpreferredencoding(False) - if mylocale is None: - mylocale = 'ascii' - output = output.decode(mylocale, errors='replace') - output = output.replace('\r\n', '\n') - # Another historical oddity - if output[-1:] == '\n': - output = output[:-1] - return output - - -def forward_bytes_to_stdout(val): - """ - Forward bytes from a subprocess call to the console, without attempting to - decode them. - - The assumption is that the subprocess call already returned bytes in - a suitable encoding. - """ - if hasattr(sys.stdout, 'buffer'): - # use the underlying binary output if there is one - sys.stdout.buffer.write(val) - elif hasattr(sys.stdout, 'encoding'): - # round-trip the encoding if necessary - sys.stdout.write(val.decode(sys.stdout.encoding)) - else: - # make a best-guess at the encoding - sys.stdout.write(val.decode('utf8', errors='replace')) - - -def temp_file_name(): - # 2019-01-30, 1.17 - warnings.warn('temp_file_name is deprecated since NumPy v1.17, use ' - 'tempfile.mkstemp instead', DeprecationWarning, stacklevel=1) - fo, name = make_temp_file() - fo.close() - return name - -def get_pythonexe(): - pythonexe = sys.executable - if os.name in ['nt', 'dos']: - fdir, fn = os.path.split(pythonexe) - fn = fn.upper().replace('PYTHONW', 'PYTHON') - pythonexe = os.path.join(fdir, fn) - assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,) - return pythonexe - -def find_executable(exe, path=None, _cache={}): - """Return full path of an executable or None. - - Symbolic links are not followed. - """ - key = exe, path - try: - return _cache[key] - except KeyError: - pass - log.debug('find_executable(%r)' % exe) - orig_exe = exe - - if path is None: - path = os.environ.get('PATH', os.defpath) - if os.name=='posix': - realpath = os.path.realpath - else: - realpath = lambda a:a - - if exe.startswith('"'): - exe = exe[1:-1] - - suffixes = [''] - if os.name in ['nt', 'dos', 'os2']: - fn, ext = os.path.splitext(exe) - extra_suffixes = ['.exe', '.com', '.bat'] - if ext.lower() not in extra_suffixes: - suffixes = extra_suffixes - - if os.path.isabs(exe): - paths = [''] - else: - paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ] - - for path in paths: - fn = os.path.join(path, exe) - for s in suffixes: - f_ext = fn+s - if not os.path.islink(f_ext): - f_ext = realpath(f_ext) - if os.path.isfile(f_ext) and os.access(f_ext, os.X_OK): - log.info('Found executable %s' % f_ext) - _cache[key] = f_ext - return f_ext - - log.warn('Could not locate executable %s' % orig_exe) - return None - -############################################################ - -def _preserve_environment( names ): - log.debug('_preserve_environment(%r)' % (names)) - env = {name: os.environ.get(name) for name in names} - return env - -def _update_environment( **env ): - log.debug('_update_environment(...)') - for name, value in env.items(): - os.environ[name] = value or '' - -def exec_command(command, execute_in='', use_shell=None, use_tee=None, - _with_python = 1, **env ): - """ - Return (status,output) of executed command. - - .. 
deprecated:: 1.17 - Use subprocess.Popen instead - - Parameters - ---------- - command : str - A concatenated string of executable and arguments. - execute_in : str - Before running command ``cd execute_in`` and after ``cd -``. - use_shell : {bool, None}, optional - If True, execute ``sh -c command``. Default None (True) - use_tee : {bool, None}, optional - If True use tee. Default None (True) - - - Returns - ------- - res : str - Both stdout and stderr messages. - - Notes - ----- - On NT, DOS systems the returned status is correct for external commands. - Wild cards will not work for non-posix systems or when use_shell=0. - - """ - # 2019-01-30, 1.17 - warnings.warn('exec_command is deprecated since NumPy v1.17, use ' - 'subprocess.Popen instead', DeprecationWarning, stacklevel=1) - log.debug('exec_command(%r,%s)' % (command, - ','.join(['%s=%r'%kv for kv in env.items()]))) - - if use_tee is None: - use_tee = os.name=='posix' - if use_shell is None: - use_shell = os.name=='posix' - execute_in = os.path.abspath(execute_in) - oldcwd = os.path.abspath(os.getcwd()) - - if __name__[-12:] == 'exec_command': - exec_dir = os.path.dirname(os.path.abspath(__file__)) - elif os.path.isfile('exec_command.py'): - exec_dir = os.path.abspath('.') - else: - exec_dir = os.path.abspath(sys.argv[0]) - if os.path.isfile(exec_dir): - exec_dir = os.path.dirname(exec_dir) - - if oldcwd!=execute_in: - os.chdir(execute_in) - log.debug('New cwd: %s' % execute_in) - else: - log.debug('Retaining cwd: %s' % oldcwd) - - oldenv = _preserve_environment( list(env.keys()) ) - _update_environment( **env ) - - try: - st = _exec_command(command, - use_shell=use_shell, - use_tee=use_tee, - **env) - finally: - if oldcwd!=execute_in: - os.chdir(oldcwd) - log.debug('Restored cwd to %s' % oldcwd) - _update_environment(**oldenv) - - return st - - -def _exec_command(command, use_shell=None, use_tee = None, **env): - """ - Internal workhorse for exec_command(). - """ - if use_shell is None: - use_shell = os.name=='posix' - if use_tee is None: - use_tee = os.name=='posix' - - if os.name == 'posix' and use_shell: - # On POSIX, subprocess always uses /bin/sh, override - sh = os.environ.get('SHELL', '/bin/sh') - if is_sequence(command): - command = [sh, '-c', ' '.join(command)] - else: - command = [sh, '-c', command] - use_shell = False - - elif os.name == 'nt' and is_sequence(command): - # On Windows, join the string for CreateProcess() ourselves as - # subprocess does it a bit differently - command = ' '.join(_quote_arg(arg) for arg in command) - - # Inherit environment by default - env = env or None - try: - # text is set to False so that communicate() - # will return bytes. We need to decode the output ourselves - # so that Python will not raise a UnicodeDecodeError when - # it encounters an invalid character; rather, we simply replace it - proc = subprocess.Popen(command, shell=use_shell, env=env, text=False, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - except OSError: - # Return 127, as os.spawn*() and /bin/sh do - return 127, '' - - text, err = proc.communicate() - mylocale = locale.getpreferredencoding(False) - if mylocale is None: - mylocale = 'ascii' - text = text.decode(mylocale, errors='replace') - text = text.replace('\r\n', '\n') - # Another historical oddity - if text[-1:] == '\n': - text = text[:-1] - - if use_tee and text: - print(text) - return proc.returncode, text - - -def _quote_arg(arg): - """ - Quote the argument for safe use in a shell command line. 
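The expected behaviour of the quoting helper defined here, shown as assertions (a sketch; only space-containing, quote-free arguments are wrapped):

    assert _quote_arg(r'C:\Program Files\tool.exe') == \
           '"C:\\Program Files\\tool.exe"'
    assert _quote_arg('plain-arg') == 'plain-arg'
    # arguments that already contain a quote pass through untouched
    assert _quote_arg('-I"C:\\inc dir"') == '-I"C:\\inc dir"'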
- """ - # If there is a quote in the string, assume relevant parts of the - # string are already quoted (e.g. '-I"C:\\Program Files\\..."') - if '"' not in arg and ' ' in arg: - return '"%s"' % arg - return arg - -############################################################ diff --git a/numpy/distutils/extension.py b/numpy/distutils/extension.py deleted file mode 100644 index 06e6441e65df..000000000000 --- a/numpy/distutils/extension.py +++ /dev/null @@ -1,101 +0,0 @@ -"""distutils.extension - -Provides the Extension class, used to describe C/C++ extension -modules in setup scripts. - -Overridden to support f2py. - -""" -import re -from distutils.extension import Extension as old_Extension - - -cxx_ext_re = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match -fortran_pyf_ext_re = re.compile(r'.*\.(f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match - - -class Extension(old_Extension): - """ - Parameters - ---------- - name : str - Extension name. - sources : list of str - List of source file locations relative to the top directory of - the package. - extra_compile_args : list of str - Extra command line arguments to pass to the compiler. - extra_f77_compile_args : list of str - Extra command line arguments to pass to the fortran77 compiler. - extra_f90_compile_args : list of str - Extra command line arguments to pass to the fortran90 compiler. - """ - def __init__( - self, name, sources, - include_dirs=None, - define_macros=None, - undef_macros=None, - library_dirs=None, - libraries=None, - runtime_library_dirs=None, - extra_objects=None, - extra_compile_args=None, - extra_link_args=None, - export_symbols=None, - swig_opts=None, - depends=None, - language=None, - f2py_options=None, - module_dirs=None, - extra_c_compile_args=None, - extra_cxx_compile_args=None, - extra_f77_compile_args=None, - extra_f90_compile_args=None,): - - old_Extension.__init__( - self, name, [], - include_dirs=include_dirs, - define_macros=define_macros, - undef_macros=undef_macros, - library_dirs=library_dirs, - libraries=libraries, - runtime_library_dirs=runtime_library_dirs, - extra_objects=extra_objects, - extra_compile_args=extra_compile_args, - extra_link_args=extra_link_args, - export_symbols=export_symbols) - - # Avoid assert statements checking that sources contains strings: - self.sources = sources - - # Python 2.4 distutils new features - self.swig_opts = swig_opts or [] - # swig_opts is assumed to be a list. Here we handle the case where it - # is specified as a string instead. 
- if isinstance(self.swig_opts, str): - import warnings - msg = "swig_opts is specified as a string instead of a list" - warnings.warn(msg, SyntaxWarning, stacklevel=2) - self.swig_opts = self.swig_opts.split() - - # Python 2.3 distutils new features - self.depends = depends or [] - self.language = language - - # numpy_distutils features - self.f2py_options = f2py_options or [] - self.module_dirs = module_dirs or [] - self.extra_c_compile_args = extra_c_compile_args or [] - self.extra_cxx_compile_args = extra_cxx_compile_args or [] - self.extra_f77_compile_args = extra_f77_compile_args or [] - self.extra_f90_compile_args = extra_f90_compile_args or [] - - return - - def has_cxx_sources(self): - return any(cxx_ext_re(str(source)) for source in self.sources) - - def has_f2py_sources(self): - return any(fortran_pyf_ext_re(source) for source in self.sources) - -# class Extension diff --git a/numpy/distutils/fcompiler/__init__.py b/numpy/distutils/fcompiler/__init__.py deleted file mode 100644 index 5160e2abf54f..000000000000 --- a/numpy/distutils/fcompiler/__init__.py +++ /dev/null @@ -1,1035 +0,0 @@ -"""numpy.distutils.fcompiler - -Contains FCompiler, an abstract base class that defines the interface -for the numpy.distutils Fortran compiler abstraction model. - -Terminology: - -To be consistent, where the term 'executable' is used, it means the single -file, like 'gcc', that is executed, and should be a string. In contrast, -'command' means the entire command line, like ['gcc', '-c', 'file.c'], and -should be a list. - -But note that FCompiler.executables is actually a dictionary of commands. - -""" -__all__ = ['FCompiler', 'new_fcompiler', 'show_fcompilers', - 'dummy_fortran_file'] - -import os -import sys -import re -from pathlib import Path - -from distutils.sysconfig import get_python_lib -from distutils.fancy_getopt import FancyGetopt -from distutils.errors import DistutilsModuleError, \ - DistutilsExecError, CompileError, LinkError, DistutilsPlatformError -from distutils.util import split_quoted, strtobool - -from numpy.distutils.ccompiler import CCompiler, gen_lib_options -from numpy.distutils import log -from numpy.distutils.misc_util import is_string, all_strings, is_sequence, \ - make_temp_file, get_shared_lib_extension -from numpy.distutils.exec_command import find_executable -from numpy.distutils import _shell_utils - -from .environment import EnvironmentConfig - -__metaclass__ = type - - -FORTRAN_COMMON_FIXED_EXTENSIONS = ['.for', '.ftn', '.f77', '.f'] - - -class CompilerNotFound(Exception): - pass - -def flaglist(s): - if is_string(s): - return split_quoted(s) - else: - return s - -def str2bool(s): - if is_string(s): - return strtobool(s) - return bool(s) - -def is_sequence_of_strings(seq): - return is_sequence(seq) and all_strings(seq) - -class FCompiler(CCompiler): - """Abstract base class to define the interface that must be implemented - by real Fortran compiler classes. - - Methods that subclasses may redefine: - - update_executables(), find_executables(), get_version() - get_flags(), get_flags_opt(), get_flags_arch(), get_flags_debug() - get_flags_f77(), get_flags_opt_f77(), get_flags_arch_f77(), - get_flags_debug_f77(), get_flags_f90(), get_flags_opt_f90(), - get_flags_arch_f90(), get_flags_debug_f90(), - get_flags_fix(), get_flags_linker_so() - - DON'T call these methods (except get_version) after - constructing a compiler instance or inside any other method. - All methods, except update_executables() and find_executables(), - may call the get_version() method. 
- - After constructing a compiler instance, always call customize(dist=None) - method that finalizes compiler construction and makes the following - attributes available: - compiler_f77 - compiler_f90 - compiler_fix - linker_so - archiver - ranlib - libraries - library_dirs - """ - - # These are the environment variables and distutils keys used. - # Each configuration description is - # (, , , , ) - # The hook names are handled by the self._environment_hook method. - # - names starting with 'self.' call methods in this class - # - names starting with 'exe.' return the key in the executables dict - # - names like 'flags.YYY' return self.get_flag_YYY() - # convert is either None or a function to convert a string to the - # appropriate type used. - - distutils_vars = EnvironmentConfig( - distutils_section='config_fc', - noopt = (None, None, 'noopt', str2bool, False), - noarch = (None, None, 'noarch', str2bool, False), - debug = (None, None, 'debug', str2bool, False), - verbose = (None, None, 'verbose', str2bool, False), - ) - - command_vars = EnvironmentConfig( - distutils_section='config_fc', - compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None, False), - compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None, False), - compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None, False), - version_cmd = ('exe.version_cmd', None, None, None, False), - linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None, False), - linker_exe = ('exe.linker_exe', 'LD', 'ld', None, False), - archiver = (None, 'AR', 'ar', None, False), - ranlib = (None, 'RANLIB', 'ranlib', None, False), - ) - - flag_vars = EnvironmentConfig( - distutils_section='config_fc', - f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist, True), - f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist, True), - free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist, True), - fix = ('flags.fix', None, None, flaglist, False), - opt = ('flags.opt', 'FOPT', 'opt', flaglist, True), - opt_f77 = ('flags.opt_f77', None, None, flaglist, False), - opt_f90 = ('flags.opt_f90', None, None, flaglist, False), - arch = ('flags.arch', 'FARCH', 'arch', flaglist, False), - arch_f77 = ('flags.arch_f77', None, None, flaglist, False), - arch_f90 = ('flags.arch_f90', None, None, flaglist, False), - debug = ('flags.debug', 'FDEBUG', 'fdebug', flaglist, True), - debug_f77 = ('flags.debug_f77', None, None, flaglist, False), - debug_f90 = ('flags.debug_f90', None, None, flaglist, False), - flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist, True), - linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist, True), - linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist, True), - ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist, True), - ) - - language_map = {'.f': 'f77', - '.for': 'f77', - '.F': 'f77', # XXX: needs preprocessor - '.ftn': 'f77', - '.f77': 'f77', - '.f90': 'f90', - '.F90': 'f90', # XXX: needs preprocessor - '.f95': 'f90', - } - language_order = ['f90', 'f77'] - - - # These will be set by the subclass - - compiler_type = None - compiler_aliases = () - version_pattern = None - - possible_executables = [] - executables = { - 'version_cmd': ["f77", "-v"], - 'compiler_f77': ["f77"], - 'compiler_f90': ["f90"], - 'compiler_fix': ["f90", "-fixed"], - 'linker_so': ["f90", "-shared"], - 'linker_exe': ["f90"], - 'archiver': ["ar", "-cr"], - 'ranlib': None, - } - - # If compiler does not support compiling Fortran 90 then it can - # suggest using another compiler. 
For example, gnu would suggest - # gnu95 compiler type when there are F90 sources. - suggested_f90_compiler = None - - compile_switch = "-c" - object_switch = "-o " # Ending space matters! It will be stripped - # but if it is missing then object_switch - # will be prefixed to object file name by - # string concatenation. - library_switch = "-o " # Ditto! - - # Switch to specify where module files are created and searched - # for USE statement. Normally it is a string and also here ending - # space matters. See above. - module_dir_switch = None - - # Switch to specify where module files are searched for USE statement. - module_include_switch = '-I' - - pic_flags = [] # Flags to create position-independent code - - src_extensions = ['.for', '.ftn', '.f77', '.f', '.f90', '.f95', '.F', '.F90', '.FOR'] - obj_extension = ".o" - - shared_lib_extension = get_shared_lib_extension() - static_lib_extension = ".a" # or .lib - static_lib_format = "lib%s%s" # or %s%s - shared_lib_format = "%s%s" - exe_extension = "" - - _exe_cache = {} - - _executable_keys = ['version_cmd', 'compiler_f77', 'compiler_f90', - 'compiler_fix', 'linker_so', 'linker_exe', 'archiver', - 'ranlib'] - - # This will be set by new_fcompiler when called in - # command/{build_ext.py, build_clib.py, config.py} files. - c_compiler = None - - # extra_{f77,f90}_compile_args are set by build_ext.build_extension method - extra_f77_compile_args = [] - extra_f90_compile_args = [] - - def __init__(self, *args, **kw): - CCompiler.__init__(self, *args, **kw) - self.distutils_vars = self.distutils_vars.clone(self._environment_hook) - self.command_vars = self.command_vars.clone(self._environment_hook) - self.flag_vars = self.flag_vars.clone(self._environment_hook) - self.executables = self.executables.copy() - for e in self._executable_keys: - if e not in self.executables: - self.executables[e] = None - - # Some methods depend on .customize() being called first, so - # this keeps track of whether that's happened yet. - self._is_customised = False - - def __copy__(self): - obj = self.__new__(self.__class__) - obj.__dict__.update(self.__dict__) - obj.distutils_vars = obj.distutils_vars.clone(obj._environment_hook) - obj.command_vars = obj.command_vars.clone(obj._environment_hook) - obj.flag_vars = obj.flag_vars.clone(obj._environment_hook) - obj.executables = obj.executables.copy() - return obj - - def copy(self): - return self.__copy__() - - # Use properties for the attributes used by CCompiler. Setting them - # as attributes from the self.executables dictionary is error-prone, - # so we get them from there each time. - def _command_property(key): - def fget(self): - assert self._is_customised - return self.executables[key] - return property(fget=fget) - version_cmd = _command_property('version_cmd') - compiler_f77 = _command_property('compiler_f77') - compiler_f90 = _command_property('compiler_f90') - compiler_fix = _command_property('compiler_fix') - linker_so = _command_property('linker_so') - linker_exe = _command_property('linker_exe') - archiver = _command_property('archiver') - ranlib = _command_property('ranlib') - - # Make our terminology consistent. 
-    def set_executable(self, key, value):
-        self.set_command(key, value)
-
-    def set_commands(self, **kw):
-        for k, v in kw.items():
-            self.set_command(k, v)
-
-    def set_command(self, key, value):
-        if not key in self._executable_keys:
-            raise ValueError(
-                "unknown executable '%s' for class %s" %
-                (key, self.__class__.__name__))
-        if is_string(value):
-            value = split_quoted(value)
-        assert value is None or is_sequence_of_strings(value[1:]), (key, value)
-        self.executables[key] = value
-
-    ######################################################################
-    ## Methods that subclasses may redefine. But don't call these methods!
-    ## They are private to FCompiler class and may return unexpected
-    ## results if used elsewhere. So, you have been warned..
-
-    def find_executables(self):
-        """Go through the self.executables dictionary, and attempt to
-        find and assign appropriate executables.
-
-        Executable names are looked for in the environment (environment
-        variables, the distutils.cfg, and command line), the 0th-element of
-        the command list, and the self.possible_executables list.
-
-        Also, if the 0th element is "<F77>" or "<F90>", the Fortran 77
-        or the Fortran 90 compiler executable is used, unless overridden
-        by an environment setting.
-
-        Subclasses should call this if overridden.
-        """
-        assert self._is_customised
-        exe_cache = self._exe_cache
-        def cached_find_executable(exe):
-            if exe in exe_cache:
-                return exe_cache[exe]
-            fc_exe = find_executable(exe)
-            exe_cache[exe] = exe_cache[fc_exe] = fc_exe
-            return fc_exe
-        def verify_command_form(name, value):
-            if value is not None and not is_sequence_of_strings(value):
-                raise ValueError(
-                    "%s value %r is invalid in class %s" %
-                    (name, value, self.__class__.__name__))
-        def set_exe(exe_key, f77=None, f90=None):
-            cmd = self.executables.get(exe_key, None)
-            if not cmd:
-                return None
-            # Note that we get cmd[0] here if the environment doesn't
-            # have anything set
-            exe_from_environ = getattr(self.command_vars, exe_key)
-            if not exe_from_environ:
-                possibles = [f90, f77] + self.possible_executables
-            else:
-                possibles = [exe_from_environ] + self.possible_executables
-
-            seen = set()
-            unique_possibles = []
-            for e in possibles:
-                if e == '<F77>':
-                    e = f77
-                elif e == '<F90>':
-                    e = f90
-                if not e or e in seen:
-                    continue
-                seen.add(e)
-                unique_possibles.append(e)
-
-            for exe in unique_possibles:
-                fc_exe = cached_find_executable(exe)
-                if fc_exe:
-                    cmd[0] = fc_exe
-                    return fc_exe
-            self.set_command(exe_key, None)
-            return None
-
-        ctype = self.compiler_type
-        f90 = set_exe('compiler_f90')
-        if not f90:
-            f77 = set_exe('compiler_f77')
-            if f77:
-                log.warn('%s: no Fortran 90 compiler found' % ctype)
-            else:
-                raise CompilerNotFound('%s: f90 nor f77' % ctype)
-        else:
-            f77 = set_exe('compiler_f77', f90=f90)
-            if not f77:
-                log.warn('%s: no Fortran 77 compiler found' % ctype)
-            set_exe('compiler_fix', f90=f90)
-
-        set_exe('linker_so', f77=f77, f90=f90)
-        set_exe('linker_exe', f77=f77, f90=f90)
-        set_exe('version_cmd', f77=f77, f90=f90)
-        set_exe('archiver')
-        set_exe('ranlib')
-
-    def update_executables(self):
-        """Called at the beginning of customisation. Subclasses should
-        override this if they need to set up the executables dictionary.
-
-        Note that self.find_executables() is run afterwards, so the
-        self.executables dictionary values can contain <F77> or <F90> as
-        the command, which will be replaced by the found F77 or F90
-        compiler.
- """ - pass - - def get_flags(self): - """List of flags common to all compiler types.""" - return [] + self.pic_flags - - def _get_command_flags(self, key): - cmd = self.executables.get(key, None) - if cmd is None: - return [] - return cmd[1:] - - def get_flags_f77(self): - """List of Fortran 77 specific flags.""" - return self._get_command_flags('compiler_f77') - def get_flags_f90(self): - """List of Fortran 90 specific flags.""" - return self._get_command_flags('compiler_f90') - def get_flags_free(self): - """List of Fortran 90 free format specific flags.""" - return [] - def get_flags_fix(self): - """List of Fortran 90 fixed format specific flags.""" - return self._get_command_flags('compiler_fix') - def get_flags_linker_so(self): - """List of linker flags to build a shared library.""" - return self._get_command_flags('linker_so') - def get_flags_linker_exe(self): - """List of linker flags to build an executable.""" - return self._get_command_flags('linker_exe') - def get_flags_ar(self): - """List of archiver flags. """ - return self._get_command_flags('archiver') - def get_flags_opt(self): - """List of architecture independent compiler flags.""" - return [] - def get_flags_arch(self): - """List of architecture dependent compiler flags.""" - return [] - def get_flags_debug(self): - """List of compiler flags to compile with debugging information.""" - return [] - - get_flags_opt_f77 = get_flags_opt_f90 = get_flags_opt - get_flags_arch_f77 = get_flags_arch_f90 = get_flags_arch - get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debug - - def get_libraries(self): - """List of compiler libraries.""" - return self.libraries[:] - def get_library_dirs(self): - """List of compiler library directories.""" - return self.library_dirs[:] - - def get_version(self, force=False, ok_status=[0]): - assert self._is_customised - version = CCompiler.get_version(self, force=force, ok_status=ok_status) - if version is None: - raise CompilerNotFound() - return version - - - ############################################################ - - ## Public methods: - - def customize(self, dist = None): - """Customize Fortran compiler. - - This method gets Fortran compiler specific information from - (i) class definition, (ii) environment, (iii) distutils config - files, and (iv) command line (later overrides earlier). - - This method should be always called after constructing a - compiler instance. But not in __init__ because Distribution - instance is needed for (iii) and (iv). - """ - log.info('customize %s' % (self.__class__.__name__)) - - self._is_customised = True - - self.distutils_vars.use_distribution(dist) - self.command_vars.use_distribution(dist) - self.flag_vars.use_distribution(dist) - - self.update_executables() - - # find_executables takes care of setting the compiler commands, - # version_cmd, linker_so, linker_exe, ar, and ranlib - self.find_executables() - - noopt = self.distutils_vars.get('noopt', False) - noarch = self.distutils_vars.get('noarch', noopt) - debug = self.distutils_vars.get('debug', False) - - f77 = self.command_vars.compiler_f77 - f90 = self.command_vars.compiler_f90 - - f77flags = [] - f90flags = [] - freeflags = [] - fixflags = [] - - if f77: - f77 = _shell_utils.NativeParser.split(f77) - f77flags = self.flag_vars.f77 - if f90: - f90 = _shell_utils.NativeParser.split(f90) - f90flags = self.flag_vars.f90 - freeflags = self.flag_vars.free - # XXX Assuming that free format is default for f90 compiler. 
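(Aside, not part of the file: each `self.flag_vars.<name>` lookup used
above and below resolves its value from up to three sources, in the order
(i) class hook, (ii) environment variable, (iii) distutils config file /
command line, mirroring EnvironmentConfig._get_var() in environment.py
further below. A self-contained sketch of that precedence; the function
name and sample values here are illustrative only:

    import os

    def resolve(hook_value, envvar, config, confvar,
                convert=lambda x: x, append=False):
        var = hook_value                    # (i) from the class definition
        env = os.environ.get(envvar) if envvar else None
        if env is not None:
            env = convert(env)
            if var and append and \
                    os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '1') == '1':
                var = var + env             # (ii) env appends to hook flags
            else:
                var = env                   # (ii) env replaces hook value
        if confvar and confvar in config:
            var = convert(config[confvar])  # (iii) config file wins last
        return var

    os.environ['FOPT'] = '-O2'              # hypothetical user setting
    print(resolve(['-O3'], 'FOPT', {}, 'opt',
                  convert=str.split, append=True))
    # -> ['-O3', '-O2']
)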
-        fix = self.command_vars.compiler_fix
-        # NOTE: this and similar examples are probably just
-        # excluding --coverage flag when F90 = gfortran --coverage
-        # instead of putting that flag somewhere more appropriate
-        # this and similar examples where a Fortran compiler
-        # environment variable has been customized by CI or a user
-        # should perhaps eventually be more thoroughly tested and more
-        # robustly handled
-        if fix:
-            fix = _shell_utils.NativeParser.split(fix)
-            fixflags = self.flag_vars.fix + f90flags
-
-        oflags, aflags, dflags = [], [], []
-        # examine get_flags_<tag>_<name> for extra flags
-        # only add them if the method is different from get_flags_<tag>
-        def get_flags(tag, flags):
-            # note that self.flag_vars.<tag> calls self.get_flags_<tag>()
-            flags.extend(getattr(self.flag_vars, tag))
-            this_get = getattr(self, 'get_flags_' + tag)
-            for name, c, flagvar in [('f77', f77, f77flags),
-                                     ('f90', f90, f90flags),
-                                     ('f90', fix, fixflags)]:
-                t = '%s_%s' % (tag, name)
-                if c and this_get is not getattr(self, 'get_flags_' + t):
-                    flagvar.extend(getattr(self.flag_vars, t))
-        if not noopt:
-            get_flags('opt', oflags)
-            if not noarch:
-                get_flags('arch', aflags)
-        if debug:
-            get_flags('debug', dflags)
-
-        fflags = self.flag_vars.flags + dflags + oflags + aflags
-
-        if f77:
-            self.set_commands(compiler_f77=f77+f77flags+fflags)
-        if f90:
-            self.set_commands(compiler_f90=f90+freeflags+f90flags+fflags)
-        if fix:
-            self.set_commands(compiler_fix=fix+fixflags+fflags)
-
-
-        #XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS
-        linker_so = self.linker_so
-        if linker_so:
-            linker_so_flags = self.flag_vars.linker_so
-            if sys.platform.startswith('aix'):
-                python_lib = get_python_lib(standard_lib=1)
-                ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix')
-                python_exp = os.path.join(python_lib, 'config', 'python.exp')
-                linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp]
-            if sys.platform.startswith('os400'):
-                from distutils.sysconfig import get_config_var
-                python_config = get_config_var('LIBPL')
-                ld_so_aix = os.path.join(python_config, 'ld_so_aix')
-                python_exp = os.path.join(python_config, 'python.exp')
-                linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp]
-            self.set_commands(linker_so=linker_so+linker_so_flags)
-
-        linker_exe = self.linker_exe
-        if linker_exe:
-            linker_exe_flags = self.flag_vars.linker_exe
-            self.set_commands(linker_exe=linker_exe+linker_exe_flags)
-
-        ar = self.command_vars.archiver
-        if ar:
-            arflags = self.flag_vars.ar
-            self.set_commands(archiver=[ar]+arflags)
-
-        self.set_library_dirs(self.get_library_dirs())
-        self.set_libraries(self.get_libraries())
-
-    def dump_properties(self):
-        """Print out the attributes of a compiler instance."""
-        props = []
-        for key in list(self.executables.keys()) + \
-                ['version', 'libraries', 'library_dirs',
-                 'object_switch', 'compile_switch']:
-            if hasattr(self, key):
-                v = getattr(self, key)
-                props.append((key, None, '= '+repr(v)))
-        props.sort()
-
-        pretty_printer = FancyGetopt(props)
-        for l in pretty_printer.generate_help("%s instance properties:" \
-                                              % (self.__class__.__name__)):
-            if l[:4]=='  --':
-                l = '  ' + l[4:]
-            print(l)
-
-    ###################
-
-    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
-        """Compile 'src' to product 'obj'."""
-        src_flags = {}
-        if Path(src).suffix.lower() in FORTRAN_COMMON_FIXED_EXTENSIONS \
-           and not has_f90_header(src):
-            flavor = ':f77'
-            compiler = self.compiler_f77
-            src_flags = get_f77flags(src)
-            extra_compile_args = self.extra_f77_compile_args or []
-        elif is_free_format(src):
-
flavor = ':f90' - compiler = self.compiler_f90 - if compiler is None: - raise DistutilsExecError('f90 not supported by %s needed for %s'\ - % (self.__class__.__name__, src)) - extra_compile_args = self.extra_f90_compile_args or [] - else: - flavor = ':fix' - compiler = self.compiler_fix - if compiler is None: - raise DistutilsExecError('f90 (fixed) not supported by %s needed for %s'\ - % (self.__class__.__name__, src)) - extra_compile_args = self.extra_f90_compile_args or [] - if self.object_switch[-1]==' ': - o_args = [self.object_switch.strip(), obj] - else: - o_args = [self.object_switch.strip()+obj] - - assert self.compile_switch.strip() - s_args = [self.compile_switch, src] - - if extra_compile_args: - log.info('extra %s options: %r' \ - % (flavor[1:], ' '.join(extra_compile_args))) - - extra_flags = src_flags.get(self.compiler_type, []) - if extra_flags: - log.info('using compile options from source: %r' \ - % ' '.join(extra_flags)) - - command = compiler + cc_args + extra_flags + s_args + o_args \ - + extra_postargs + extra_compile_args - - display = '%s: %s' % (os.path.basename(compiler[0]) + flavor, - src) - try: - self.spawn(command, display=display) - except DistutilsExecError as e: - msg = str(e) - raise CompileError(msg) from None - - def module_options(self, module_dirs, module_build_dir): - options = [] - if self.module_dir_switch is not None: - if self.module_dir_switch[-1]==' ': - options.extend([self.module_dir_switch.strip(), module_build_dir]) - else: - options.append(self.module_dir_switch.strip()+module_build_dir) - else: - print('XXX: module_build_dir=%r option ignored' % (module_build_dir)) - print('XXX: Fix module_dir_switch for ', self.__class__.__name__) - if self.module_include_switch is not None: - for d in [module_build_dir]+module_dirs: - options.append('%s%s' % (self.module_include_switch, d)) - else: - print('XXX: module_dirs=%r option ignored' % (module_dirs)) - print('XXX: Fix module_include_switch for ', self.__class__.__name__) - return options - - def library_option(self, lib): - return "-l" + lib - def library_dir_option(self, dir): - return "-L" + dir - - def link(self, target_desc, objects, - output_filename, output_dir=None, libraries=None, - library_dirs=None, runtime_library_dirs=None, - export_symbols=None, debug=0, extra_preargs=None, - extra_postargs=None, build_temp=None, target_lang=None): - objects, output_dir = self._fix_object_args(objects, output_dir) - libraries, library_dirs, runtime_library_dirs = \ - self._fix_lib_args(libraries, library_dirs, runtime_library_dirs) - - lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, - libraries) - if is_string(output_dir): - output_filename = os.path.join(output_dir, output_filename) - elif output_dir is not None: - raise TypeError("'output_dir' must be a string or None") - - if self._need_link(objects, output_filename): - if self.library_switch[-1]==' ': - o_args = [self.library_switch.strip(), output_filename] - else: - o_args = [self.library_switch.strip()+output_filename] - - if is_string(self.objects): - ld_args = objects + [self.objects] - else: - ld_args = objects + self.objects - ld_args = ld_args + lib_opts + o_args - if debug: - ld_args[:0] = ['-g'] - if extra_preargs: - ld_args[:0] = extra_preargs - if extra_postargs: - ld_args.extend(extra_postargs) - self.mkpath(os.path.dirname(output_filename)) - if target_desc == CCompiler.EXECUTABLE: - linker = self.linker_exe[:] - else: - linker = self.linker_so[:] - command = linker + ld_args - try: - self.spawn(command) - 
except DistutilsExecError as e: - msg = str(e) - raise LinkError(msg) from None - else: - log.debug("skipping %s (up-to-date)", output_filename) - - def _environment_hook(self, name, hook_name): - if hook_name is None: - return None - if is_string(hook_name): - if hook_name.startswith('self.'): - hook_name = hook_name[5:] - hook = getattr(self, hook_name) - return hook() - elif hook_name.startswith('exe.'): - hook_name = hook_name[4:] - var = self.executables[hook_name] - if var: - return var[0] - else: - return None - elif hook_name.startswith('flags.'): - hook_name = hook_name[6:] - hook = getattr(self, 'get_flags_' + hook_name) - return hook() - else: - return hook_name() - - def can_ccompiler_link(self, ccompiler): - """ - Check if the given C compiler can link objects produced by - this compiler. - """ - return True - - def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir): - """ - Convert a set of object files that are not compatible with the default - linker, to a file that is compatible. - - Parameters - ---------- - objects : list - List of object files to include. - output_dir : str - Output directory to place generated object files. - extra_dll_dir : str - Output directory to place extra DLL files that need to be - included on Windows. - - Returns - ------- - converted_objects : list of str - List of converted object files. - Note that the number of output files is not necessarily - the same as inputs. - - """ - raise NotImplementedError() - - ## class FCompiler - -_default_compilers = ( - # sys.platform mappings - ('win32', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95', - 'intelvem', 'intelem', 'flang')), - ('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')), - ('linux.*', ('arm', 'gnu95', 'intel', 'lahey', 'pg', 'nv', 'absoft', 'nag', - 'vast', 'compaq', 'intele', 'intelem', 'gnu', 'g95', - 'pathf95', 'nagfor', 'fujitsu')), - ('darwin.*', ('gnu95', 'nag', 'nagfor', 'absoft', 'ibm', 'intel', 'gnu', - 'g95', 'pg')), - ('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')), - ('irix.*', ('mips', 'gnu', 'gnu95',)), - ('aix.*', ('ibm', 'gnu', 'gnu95',)), - # os.name mappings - ('posix', ('gnu', 'gnu95',)), - ('nt', ('gnu', 'gnu95',)), - ('mac', ('gnu95', 'gnu', 'pg')), - ) - -fcompiler_class = None -fcompiler_aliases = None - -def load_all_fcompiler_classes(): - """Cache all the FCompiler classes found in modules in the - numpy.distutils.fcompiler package. - """ - from glob import glob - global fcompiler_class, fcompiler_aliases - if fcompiler_class is not None: - return - pys = os.path.join(os.path.dirname(__file__), '*.py') - fcompiler_class = {} - fcompiler_aliases = {} - for fname in glob(pys): - module_name, ext = os.path.splitext(os.path.basename(fname)) - module_name = 'numpy.distutils.fcompiler.' 
+ module_name - __import__ (module_name) - module = sys.modules[module_name] - if hasattr(module, 'compilers'): - for cname in module.compilers: - klass = getattr(module, cname) - desc = (klass.compiler_type, klass, klass.description) - fcompiler_class[klass.compiler_type] = desc - for alias in klass.compiler_aliases: - if alias in fcompiler_aliases: - raise ValueError("alias %r defined for both %s and %s" - % (alias, klass.__name__, - fcompiler_aliases[alias][1].__name__)) - fcompiler_aliases[alias] = desc - -def _find_existing_fcompiler(compiler_types, - osname=None, platform=None, - requiref90=False, - c_compiler=None): - from numpy.distutils.core import get_distribution - dist = get_distribution(always=True) - for compiler_type in compiler_types: - v = None - try: - c = new_fcompiler(plat=platform, compiler=compiler_type, - c_compiler=c_compiler) - c.customize(dist) - v = c.get_version() - if requiref90 and c.compiler_f90 is None: - v = None - new_compiler = c.suggested_f90_compiler - if new_compiler: - log.warn('Trying %r compiler as suggested by %r ' - 'compiler for f90 support.' % (compiler_type, - new_compiler)) - c = new_fcompiler(plat=platform, compiler=new_compiler, - c_compiler=c_compiler) - c.customize(dist) - v = c.get_version() - if v is not None: - compiler_type = new_compiler - if requiref90 and c.compiler_f90 is None: - raise ValueError('%s does not support compiling f90 codes, ' - 'skipping.' % (c.__class__.__name__)) - except DistutilsModuleError: - log.debug("_find_existing_fcompiler: compiler_type='%s' raised DistutilsModuleError", compiler_type) - except CompilerNotFound: - log.debug("_find_existing_fcompiler: compiler_type='%s' not found", compiler_type) - if v is not None: - return compiler_type - return None - -def available_fcompilers_for_platform(osname=None, platform=None): - if osname is None: - osname = os.name - if platform is None: - platform = sys.platform - matching_compiler_types = [] - for pattern, compiler_type in _default_compilers: - if re.match(pattern, platform) or re.match(pattern, osname): - for ct in compiler_type: - if ct not in matching_compiler_types: - matching_compiler_types.append(ct) - if not matching_compiler_types: - matching_compiler_types.append('gnu') - return matching_compiler_types - -def get_default_fcompiler(osname=None, platform=None, requiref90=False, - c_compiler=None): - """Determine the default Fortran compiler to use for the given - platform.""" - matching_compiler_types = available_fcompilers_for_platform(osname, - platform) - log.info("get_default_fcompiler: matching types: '%s'", - matching_compiler_types) - compiler_type = _find_existing_fcompiler(matching_compiler_types, - osname=osname, - platform=platform, - requiref90=requiref90, - c_compiler=c_compiler) - return compiler_type - -# Flag to avoid rechecking for Fortran compiler every time -failed_fcompilers = set() - -def new_fcompiler(plat=None, - compiler=None, - verbose=0, - dry_run=0, - force=0, - requiref90=False, - c_compiler = None): - """Generate an instance of some FCompiler subclass for the supplied - platform/compiler combination. 
- """ - global failed_fcompilers - fcompiler_key = (plat, compiler) - if fcompiler_key in failed_fcompilers: - return None - - load_all_fcompiler_classes() - if plat is None: - plat = os.name - if compiler is None: - compiler = get_default_fcompiler(plat, requiref90=requiref90, - c_compiler=c_compiler) - if compiler in fcompiler_class: - module_name, klass, long_description = fcompiler_class[compiler] - elif compiler in fcompiler_aliases: - module_name, klass, long_description = fcompiler_aliases[compiler] - else: - msg = "don't know how to compile Fortran code on platform '%s'" % plat - if compiler is not None: - msg = msg + " with '%s' compiler." % compiler - msg = msg + " Supported compilers are: %s)" \ - % (','.join(fcompiler_class.keys())) - log.warn(msg) - failed_fcompilers.add(fcompiler_key) - return None - - compiler = klass(verbose=verbose, dry_run=dry_run, force=force) - compiler.c_compiler = c_compiler - return compiler - -def show_fcompilers(dist=None): - """Print list of available compilers (used by the "--help-fcompiler" - option to "config_fc"). - """ - if dist is None: - from distutils.dist import Distribution - from numpy.distutils.command.config_compiler import config_fc - dist = Distribution() - dist.script_name = os.path.basename(sys.argv[0]) - dist.script_args = ['config_fc'] + sys.argv[1:] - try: - dist.script_args.remove('--help-fcompiler') - except ValueError: - pass - dist.cmdclass['config_fc'] = config_fc - dist.parse_config_files() - dist.parse_command_line() - compilers = [] - compilers_na = [] - compilers_ni = [] - if not fcompiler_class: - load_all_fcompiler_classes() - platform_compilers = available_fcompilers_for_platform() - for compiler in platform_compilers: - v = None - log.set_verbosity(-2) - try: - c = new_fcompiler(compiler=compiler, verbose=dist.verbose) - c.customize(dist) - v = c.get_version() - except (DistutilsModuleError, CompilerNotFound) as e: - log.debug("show_fcompilers: %s not found" % (compiler,)) - log.debug(repr(e)) - - if v is None: - compilers_na.append(("fcompiler="+compiler, None, - fcompiler_class[compiler][2])) - else: - c.dump_properties() - compilers.append(("fcompiler="+compiler, None, - fcompiler_class[compiler][2] + ' (%s)' % v)) - - compilers_ni = list(set(fcompiler_class.keys()) - set(platform_compilers)) - compilers_ni = [("fcompiler="+fc, None, fcompiler_class[fc][2]) - for fc in compilers_ni] - - compilers.sort() - compilers_na.sort() - compilers_ni.sort() - pretty_printer = FancyGetopt(compilers) - pretty_printer.print_help("Fortran compilers found:") - pretty_printer = FancyGetopt(compilers_na) - pretty_printer.print_help("Compilers available for this " - "platform, but not found:") - if compilers_ni: - pretty_printer = FancyGetopt(compilers_ni) - pretty_printer.print_help("Compilers not available on this platform:") - print("For compiler details, run 'config_fc --verbose' setup command.") - - -def dummy_fortran_file(): - fo, name = make_temp_file(suffix='.f') - fo.write(" subroutine dummy()\n end\n") - fo.close() - return name[:-2] - - -_has_f_header = re.compile(r'-\*-\s*fortran\s*-\*-', re.I).search -_has_f90_header = re.compile(r'-\*-\s*f90\s*-\*-', re.I).search -_has_fix_header = re.compile(r'-\*-\s*fix\s*-\*-', re.I).search -_free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]', re.I).match - -def is_free_format(file): - """Check if file is in free format Fortran.""" - # f90 allows both fixed and free format, assuming fixed unless - # signs of free format are detected. 
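(Aside, not part of the file: the function body below implements this
heuristic. As a quick illustration using the _free_f90_start regex defined
above, with two hypothetical first lines of source:

    import re
    _free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]', re.I).match

    print(bool(_free_f90_start('x = 1'[:5])))            # True: free form
    print(bool(_free_f90_start('      subroutine'[:5]))) # False: fixed form
)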
-    result = 0
-    with open(file, encoding='latin1') as f:
-        line = f.readline()
-        n = 10000 # the number of non-comment lines to scan for hints
-        if _has_f_header(line) or _has_fix_header(line):
-            n = 0
-        elif _has_f90_header(line):
-            n = 0
-            result = 1
-        while n>0 and line:
-            line = line.rstrip()
-            if line and line[0]!='!':
-                n -= 1
-                if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&':
-                    result = 1
-                    break
-            line = f.readline()
-    return result
-
-def has_f90_header(src):
-    with open(src, encoding='latin1') as f:
-        line = f.readline()
-    return _has_f90_header(line) or _has_fix_header(line)
-
-_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P<fcname>\w+)\s*\)\s*=\s*(?P<fflags>.*)', re.I)
-def get_f77flags(src):
-    """
-    Search the first 20 lines of fortran 77 code for line pattern
-    `CF77FLAGS(<fcompiler type>)=<f77 flags>`
-    Return a dictionary {<fcompiler type>:<f77 flags>}.
-    """
-    flags = {}
-    with open(src, encoding='latin1') as f:
-        i = 0
-        for line in f:
-            i += 1
-            if i>20: break
-            m = _f77flags_re.match(line)
-            if not m: continue
-            fcname = m.group('fcname').strip()
-            fflags = m.group('fflags').strip()
-            flags[fcname] = split_quoted(fflags)
-    return flags
-
-# TODO: implement get_f90flags and use it in _compile similarly to get_f77flags
-
-if __name__ == '__main__':
-    show_fcompilers()
diff --git a/numpy/distutils/fcompiler/absoft.py b/numpy/distutils/fcompiler/absoft.py
deleted file mode 100644
index e013def5d1a4..000000000000
--- a/numpy/distutils/fcompiler/absoft.py
+++ /dev/null
@@ -1,158 +0,0 @@
-
-# Absoft Corporation ceased operations on 12/31/2022.
-# Thus, all links to <http://www.absoft.com> are invalid.
-
-# Notes:
-#  - when using -g77 then use -DUNDERSCORE_G77 to compile f2py
-#    generated extension modules (works for f2py v2.45.241_1936 and up)
-import os
-
-from numpy.distutils.cpuinfo import cpu
-from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file
-from numpy.distutils.misc_util import cyg2win32
-
-compilers = ['AbsoftFCompiler']
-
-class AbsoftFCompiler(FCompiler):
-
-    compiler_type = 'absoft'
-    description = 'Absoft Corp Fortran Compiler'
-    #version_pattern = r'FORTRAN 77 Compiler (?P<version>[^\s*,]*).*?Absoft Corp'
-    version_pattern = r'(f90:.*?(Absoft Pro FORTRAN Version|FORTRAN 77 Compiler'\
-                       r'|Absoft Fortran Compiler Version'\
-                       r'|Copyright Absoft Corporation.*?Version))'\
-                       r' (?P<version>[^\s*,]*)(.*?Absoft Corp|)'
-
-    # on windows: f90 -V -c dummy.f
-    # f90: Copyright Absoft Corporation 1994-1998 mV2; Cray Research, Inc. 1994-1996 CF90 (2.x.x.x f36t87) Version 2.3 Wed Apr 19, 2006  13:05:16
-
-    # samt5735(8)$ f90 -V -c dummy.f
-    # f90: Copyright Absoft Corporation 1994-2002; Absoft Pro FORTRAN Version 8.0
-    # Note that fink installs g77 as f77, so need to use f90 for detection.
-
-    executables = {
-        'version_cmd'  : None,          # set by update_executables
-        'compiler_f77' : ["f77"],
-        'compiler_fix' : ["f90"],
-        'compiler_f90' : ["f90"],
-        'linker_so'    : ["<F90>"],
-        'archiver'     : ["ar", "-cr"],
-        'ranlib'       : ["ranlib"]
-        }
-
-    if os.name=='nt':
-        library_switch = '/out:'      #No space after /out:!
-
-    module_dir_switch = None
-    module_include_switch = '-p'
-
-    def update_executables(self):
-        f = cyg2win32(dummy_fortran_file())
-        self.executables['version_cmd'] = ['<F90>', '-V', '-c',
-                                           f+'.f', '-o', f+'.o']
-
-    def get_flags_linker_so(self):
-        if os.name=='nt':
-            opt = ['/dll']
-        # The "-K shared" switches are being left in for pre-9.0 versions
-        # of Absoft though I don't think versions earlier than 9 can
-        # actually be used to build shared libraries.  In fact, version
-        # 8 of Absoft doesn't recognize "-K shared" and will fail.
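(Aside, not part of the file: get_version() here appears to return a plain
version string, so the comparisons in the elif chain just below are
lexicographic; a hypothetical '10.0' would sort below '9.0'. A quick check
of the difference against distutils' LooseVersion:

    print('10.0' >= '9.0')                               # False: string compare
    from distutils.version import LooseVersion
    print(LooseVersion('10.0') >= LooseVersion('9.0'))   # True: numeric compare
)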
- elif self.get_version() >= '9.0': - opt = ['-shared'] - else: - opt = ["-K", "shared"] - return opt - - def library_dir_option(self, dir): - if os.name=='nt': - return ['-link', '/PATH:%s' % (dir)] - return "-L" + dir - - def library_option(self, lib): - if os.name=='nt': - return '%s.lib' % (lib) - return "-l" + lib - - def get_library_dirs(self): - opt = FCompiler.get_library_dirs(self) - d = os.environ.get('ABSOFT') - if d: - if self.get_version() >= '10.0': - # use shared libraries, the static libraries were not compiled -fPIC - prefix = 'sh' - else: - prefix = '' - if cpu.is_64bit(): - suffix = '64' - else: - suffix = '' - opt.append(os.path.join(d, '%slib%s' % (prefix, suffix))) - return opt - - def get_libraries(self): - opt = FCompiler.get_libraries(self) - if self.get_version() >= '11.0': - opt.extend(['af90math', 'afio', 'af77math', 'amisc']) - elif self.get_version() >= '10.0': - opt.extend(['af90math', 'afio', 'af77math', 'U77']) - elif self.get_version() >= '8.0': - opt.extend(['f90math', 'fio', 'f77math', 'U77']) - else: - opt.extend(['fio', 'f90math', 'fmath', 'U77']) - if os.name =='nt': - opt.append('COMDLG32') - return opt - - def get_flags(self): - opt = FCompiler.get_flags(self) - if os.name != 'nt': - opt.extend(['-s']) - if self.get_version(): - if self.get_version()>='8.2': - opt.append('-fpic') - return opt - - def get_flags_f77(self): - opt = FCompiler.get_flags_f77(self) - opt.extend(['-N22', '-N90', '-N110']) - v = self.get_version() - if os.name == 'nt': - if v and v>='8.0': - opt.extend(['-f', '-N15']) - else: - opt.append('-f') - if v: - if v<='4.6': - opt.append('-B108') - else: - # Though -N15 is undocumented, it works with - # Absoft 8.0 on Linux - opt.append('-N15') - return opt - - def get_flags_f90(self): - opt = FCompiler.get_flags_f90(self) - opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", - "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) - if self.get_version(): - if self.get_version()>'4.6': - opt.extend(["-YDEALLOC=ALL"]) - return opt - - def get_flags_fix(self): - opt = FCompiler.get_flags_fix(self) - opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", - "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) - opt.extend(["-f", "fixed"]) - return opt - - def get_flags_opt(self): - opt = ['-O'] - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='absoft').get_version()) diff --git a/numpy/distutils/fcompiler/arm.py b/numpy/distutils/fcompiler/arm.py deleted file mode 100644 index 3eb7e9af9c8c..000000000000 --- a/numpy/distutils/fcompiler/arm.py +++ /dev/null @@ -1,71 +0,0 @@ -import sys - -from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file -from sys import platform -from os.path import join, dirname, normpath - -compilers = ['ArmFlangCompiler'] - -import functools - -class ArmFlangCompiler(FCompiler): - compiler_type = 'arm' - description = 'Arm Compiler' - version_pattern = r'\s*Arm.*version (?P[\d.-]+).*' - - ar_exe = 'lib.exe' - possible_executables = ['armflang'] - - executables = { - 'version_cmd': ["", "--version"], - 'compiler_f77': ["armflang", "-fPIC"], - 'compiler_fix': ["armflang", "-fPIC", "-ffixed-form"], - 'compiler_f90': ["armflang", "-fPIC"], - 'linker_so': ["armflang", "-fPIC", "-shared"], - 'archiver': ["ar", "-cr"], - 'ranlib': None - } - - pic_flags = ["-fPIC", "-DPIC"] - c_compiler = 'arm' - module_dir_switch = '-module ' # Don't remove ending 
space! - - def get_libraries(self): - opt = FCompiler.get_libraries(self) - opt.extend(['flang', 'flangrti', 'ompstub']) - return opt - - @functools.lru_cache(maxsize=128) - def get_library_dirs(self): - """List of compiler library directories.""" - opt = FCompiler.get_library_dirs(self) - flang_dir = dirname(self.executables['compiler_f77'][0]) - opt.append(normpath(join(flang_dir, '..', 'lib'))) - - return opt - - def get_flags(self): - return [] - - def get_flags_free(self): - return [] - - def get_flags_debug(self): - return ['-g'] - - def get_flags_opt(self): - return ['-O3'] - - def get_flags_arch(self): - return [] - - def runtime_library_dir_option(self, dir): - return '-Wl,-rpath=%s' % dir - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='armflang').get_version()) - diff --git a/numpy/distutils/fcompiler/compaq.py b/numpy/distutils/fcompiler/compaq.py deleted file mode 100644 index 01314c136acf..000000000000 --- a/numpy/distutils/fcompiler/compaq.py +++ /dev/null @@ -1,120 +0,0 @@ - -#http://www.compaq.com/fortran/docs/ -import os -import sys - -from numpy.distutils.fcompiler import FCompiler -from distutils.errors import DistutilsPlatformError - -compilers = ['CompaqFCompiler'] -if os.name != 'posix' or sys.platform[:6] == 'cygwin' : - # Otherwise we'd get a false positive on posix systems with - # case-insensitive filesystems (like darwin), because we'll pick - # up /bin/df - compilers.append('CompaqVisualFCompiler') - -class CompaqFCompiler(FCompiler): - - compiler_type = 'compaq' - description = 'Compaq Fortran Compiler' - version_pattern = r'Compaq Fortran (?P[^\s]*).*' - - if sys.platform[:5]=='linux': - fc_exe = 'fort' - else: - fc_exe = 'f90' - - executables = { - 'version_cmd' : ['', "-version"], - 'compiler_f77' : [fc_exe, "-f77rtl", "-fixed"], - 'compiler_fix' : [fc_exe, "-fixed"], - 'compiler_f90' : [fc_exe], - 'linker_so' : [''], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - module_dir_switch = '-module ' # not tested - module_include_switch = '-I' - - def get_flags(self): - return ['-assume no2underscore', '-nomixed_str_len_arg'] - def get_flags_debug(self): - return ['-g', '-check bounds'] - def get_flags_opt(self): - return ['-O4', '-align dcommons', '-assume bigarrays', - '-assume nozsize', '-math_library fast'] - def get_flags_arch(self): - return ['-arch host', '-tune host'] - def get_flags_linker_so(self): - if sys.platform[:5]=='linux': - return ['-shared'] - return ['-shared', '-Wl,-expect_unresolved,*'] - -class CompaqVisualFCompiler(FCompiler): - - compiler_type = 'compaqv' - description = 'DIGITAL or Compaq Visual Fortran Compiler' - version_pattern = (r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler' - r' Version (?P[^\s]*).*') - - compile_switch = '/compile_only' - object_switch = '/object:' - library_switch = '/OUT:' #No space after /OUT:! 
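(Aside, not part of the file: the trailing-space convention flagged in the
comments above and here is interpreted by the base class when it assembles
argv; a condensed restatement of the logic from FCompiler._compile()/link():

    def output_args(switch, path):
        if switch[-1] == ' ':
            return [switch.strip(), path]    # two argv entries
        return [switch.strip() + path]       # fused into one entry

    print(output_args('-o ', 'x.o'))         # -> ['-o', 'x.o']
    print(output_args('/OUT:', 'x.lib'))     # -> ['/OUT:x.lib']
)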
- - static_lib_extension = ".lib" - static_lib_format = "%s%s" - module_dir_switch = '/module:' - module_include_switch = '/I' - - ar_exe = 'lib.exe' - fc_exe = 'DF' - - if sys.platform=='win32': - from numpy.distutils.msvccompiler import MSVCCompiler - - try: - m = MSVCCompiler() - m.initialize() - ar_exe = m.lib - except DistutilsPlatformError: - pass - except AttributeError as e: - if '_MSVCCompiler__root' in str(e): - print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (e)) - else: - raise - except OSError as e: - if not "vcvarsall.bat" in str(e): - print("Unexpected OSError in", __file__) - raise - except ValueError as e: - if not "'path'" in str(e): - print("Unexpected ValueError in", __file__) - raise - - executables = { - 'version_cmd' : ['', "/what"], - 'compiler_f77' : [fc_exe, "/f77rtl", "/fixed"], - 'compiler_fix' : [fc_exe, "/fixed"], - 'compiler_f90' : [fc_exe], - 'linker_so' : [''], - 'archiver' : [ar_exe, "/OUT:"], - 'ranlib' : None - } - - def get_flags(self): - return ['/nologo', '/MD', '/WX', '/iface=(cref,nomixed_str_len_arg)', - '/names:lowercase', '/assume:underscore'] - def get_flags_opt(self): - return ['/Ox', '/fast', '/optimize:5', '/unroll:0', '/math_library:fast'] - def get_flags_arch(self): - return ['/threads'] - def get_flags_debug(self): - return ['/debug'] - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='compaq').get_version()) diff --git a/numpy/distutils/fcompiler/environment.py b/numpy/distutils/fcompiler/environment.py deleted file mode 100644 index ecd4d9989279..000000000000 --- a/numpy/distutils/fcompiler/environment.py +++ /dev/null @@ -1,88 +0,0 @@ -import os -from distutils.dist import Distribution - -__metaclass__ = type - -class EnvironmentConfig: - def __init__(self, distutils_section='ALL', **kw): - self._distutils_section = distutils_section - self._conf_keys = kw - self._conf = None - self._hook_handler = None - - def dump_variable(self, name): - conf_desc = self._conf_keys[name] - hook, envvar, confvar, convert, append = conf_desc - if not convert: - convert = lambda x : x - print('%s.%s:' % (self._distutils_section, name)) - v = self._hook_handler(name, hook) - print(' hook : %s' % (convert(v),)) - if envvar: - v = os.environ.get(envvar, None) - print(' environ: %s' % (convert(v),)) - if confvar and self._conf: - v = self._conf.get(confvar, (None, None))[1] - print(' config : %s' % (convert(v),)) - - def dump_variables(self): - for name in self._conf_keys: - self.dump_variable(name) - - def __getattr__(self, name): - try: - conf_desc = self._conf_keys[name] - except KeyError: - raise AttributeError( - f"'EnvironmentConfig' object has no attribute '{name}'" - ) from None - - return self._get_var(name, conf_desc) - - def get(self, name, default=None): - try: - conf_desc = self._conf_keys[name] - except KeyError: - return default - var = self._get_var(name, conf_desc) - if var is None: - var = default - return var - - def _get_var(self, name, conf_desc): - hook, envvar, confvar, convert, append = conf_desc - if convert is None: - convert = lambda x: x - var = self._hook_handler(name, hook) - if envvar is not None: - envvar_contents = os.environ.get(envvar) - if envvar_contents is not None: - envvar_contents = convert(envvar_contents) - if var and append: - if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '1') == '1': - var.extend(envvar_contents) - else: - # NPY_DISTUTILS_APPEND_FLAGS was explicitly set to 0 - # to 
keep old (overwrite flags rather than append to - # them) behavior - var = envvar_contents - else: - var = envvar_contents - if confvar is not None and self._conf: - if confvar in self._conf: - source, confvar_contents = self._conf[confvar] - var = convert(confvar_contents) - return var - - - def clone(self, hook_handler): - ec = self.__class__(distutils_section=self._distutils_section, - **self._conf_keys) - ec._hook_handler = hook_handler - return ec - - def use_distribution(self, dist): - if isinstance(dist, Distribution): - self._conf = dist.get_option_dict(self._distutils_section) - else: - self._conf = dist diff --git a/numpy/distutils/fcompiler/fujitsu.py b/numpy/distutils/fcompiler/fujitsu.py deleted file mode 100644 index ddce67456d18..000000000000 --- a/numpy/distutils/fcompiler/fujitsu.py +++ /dev/null @@ -1,46 +0,0 @@ -""" -fujitsu - -Supports Fujitsu compiler function. -This compiler is developed by Fujitsu and is used in A64FX on Fugaku. -""" -from numpy.distutils.fcompiler import FCompiler - -compilers = ['FujitsuFCompiler'] - -class FujitsuFCompiler(FCompiler): - compiler_type = 'fujitsu' - description = 'Fujitsu Fortran Compiler' - - possible_executables = ['frt'] - version_pattern = r'frt \(FRT\) (?P[a-z\d.]+)' - # $ frt --version - # frt (FRT) x.x.x yyyymmdd - - executables = { - 'version_cmd' : ["", "--version"], - 'compiler_f77' : ["frt", "-Fixed"], - 'compiler_fix' : ["frt", "-Fixed"], - 'compiler_f90' : ["frt"], - 'linker_so' : ["frt", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - pic_flags = ['-KPIC'] - module_dir_switch = '-M' - module_include_switch = '-I' - - def get_flags_opt(self): - return ['-O3'] - def get_flags_debug(self): - return ['-g'] - def runtime_library_dir_option(self, dir): - return f'-Wl,-rpath={dir}' - def get_libraries(self): - return ['fj90f', 'fj90i', 'fjsrcinfo'] - -if __name__ == '__main__': - from distutils import log - from numpy.distutils import customized_fcompiler - log.set_verbosity(2) - print(customized_fcompiler('fujitsu').get_version()) diff --git a/numpy/distutils/fcompiler/g95.py b/numpy/distutils/fcompiler/g95.py deleted file mode 100644 index e109a972a872..000000000000 --- a/numpy/distutils/fcompiler/g95.py +++ /dev/null @@ -1,42 +0,0 @@ -# http://g95.sourceforge.net/ -from numpy.distutils.fcompiler import FCompiler - -compilers = ['G95FCompiler'] - -class G95FCompiler(FCompiler): - compiler_type = 'g95' - description = 'G95 Fortran Compiler' - -# version_pattern = r'G95 \((GCC (?P[\d.]+)|.*?) \(g95!\) (?P.*)\).*' - # $ g95 --version - # G95 (GCC 4.0.3 (g95!) May 22 2006) - - version_pattern = r'G95 \((GCC (?P[\d.]+)|.*?) \(g95 (?P.*)!\) (?P.*)\).*' - # $ g95 --version - # G95 (GCC 4.0.3 (g95 0.90!) 
Aug 22 2006) - - executables = { - 'version_cmd' : ["", "--version"], - 'compiler_f77' : ["g95", "-ffixed-form"], - 'compiler_fix' : ["g95", "-ffixed-form"], - 'compiler_f90' : ["g95"], - 'linker_so' : ["", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - pic_flags = ['-fpic'] - module_dir_switch = '-fmod=' - module_include_switch = '-I' - - def get_flags(self): - return ['-fno-second-underscore'] - def get_flags_opt(self): - return ['-O'] - def get_flags_debug(self): - return ['-g'] - -if __name__ == '__main__': - from distutils import log - from numpy.distutils import customized_fcompiler - log.set_verbosity(2) - print(customized_fcompiler('g95').get_version()) diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py deleted file mode 100644 index 474ee35945b2..000000000000 --- a/numpy/distutils/fcompiler/gnu.py +++ /dev/null @@ -1,555 +0,0 @@ -import re -import os -import sys -import warnings -import platform -import tempfile -import hashlib -import base64 -import subprocess -from subprocess import Popen, PIPE, STDOUT -from numpy.distutils.exec_command import filepath_from_subprocess_output -from numpy.distutils.fcompiler import FCompiler -from distutils.version import LooseVersion - -compilers = ['GnuFCompiler', 'Gnu95FCompiler'] - -TARGET_R = re.compile(r"Target: ([a-zA-Z0-9_\-]*)") - -# XXX: handle cross compilation - - -def is_win64(): - return sys.platform == "win32" and platform.architecture()[0] == "64bit" - - -class GnuFCompiler(FCompiler): - compiler_type = 'gnu' - compiler_aliases = ('g77', ) - description = 'GNU Fortran 77 compiler' - - def gnu_version_match(self, version_string): - """Handle the different versions of GNU fortran compilers""" - # Strip warning(s) that may be emitted by gfortran - while version_string.startswith('gfortran: warning'): - version_string =\ - version_string[version_string.find('\n') + 1:].strip() - - # Gfortran versions from after 2010 will output a simple string - # (usually "x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older - # gfortrans may still return long version strings (``-dumpversion`` was - # an alias for ``--version``) - if len(version_string) <= 20: - # Try to find a valid version string - m = re.search(r'([0-9.]+)', version_string) - if m: - # g77 provides a longer version string that starts with GNU - # Fortran - if version_string.startswith('GNU Fortran'): - return ('g77', m.group(1)) - - # gfortran only outputs a version string such as #.#.#, so check - # if the match is at the start of the string - elif m.start() == 0: - return ('gfortran', m.group(1)) - else: - # Output probably from --version, try harder: - m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string) - if m: - return ('gfortran', m.group(1)) - m = re.search( - r'GNU Fortran.*?\-?([0-9-.]+\.[0-9-.]+)', version_string) - if m: - v = m.group(1) - if v.startswith(('0', '2', '3')): - # the '0' is for early g77's - return ('g77', v) - else: - # at some point in the 4.x series, the ' 95' was dropped - # from the version string - return ('gfortran', v) - - # If still nothing, raise an error to make the problem easy to find. 
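(Aside, not part of the file: worked examples of the two matching paths
above, using hypothetical compiler banners; the error raised just below
covers anything that matches neither:

    import re

    s = '4.8.5'                  # short '-dumpversion' style output
    m = re.search(r'([0-9.]+)', s)
    print(m.start() == 0, m.group(1))   # True 4.8.5 -> ('gfortran', '4.8.5')

    s = 'GNU Fortran (GCC) 3.4.6'      # long '--version' style banner
    m = re.search(r'GNU Fortran.*?\-?([0-9-.]+\.[0-9-.]+)', s)
    print(m.group(1))                   # 3.4.6; leading '3' -> ('g77', '3.4.6')
)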
- err = 'A valid Fortran version was not found in this string:\n' - raise ValueError(err + version_string) - - def version_match(self, version_string): - v = self.gnu_version_match(version_string) - if not v or v[0] != 'g77': - return None - return v[1] - - possible_executables = ['g77', 'f77'] - executables = { - 'version_cmd' : [None, "-dumpversion"], - 'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"], - 'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes - 'compiler_fix' : None, - 'linker_so' : [None, "-g", "-Wall"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"], - 'linker_exe' : [None, "-g", "-Wall"] - } - module_dir_switch = None - module_include_switch = None - - # Cygwin: f771: warning: -fPIC ignored for target (all code is - # position independent) - if os.name != 'nt' and sys.platform != 'cygwin': - pic_flags = ['-fPIC'] - - # use -mno-cygwin for g77 when Python is not Cygwin-Python - if sys.platform == 'win32': - for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']: - executables[key].append('-mno-cygwin') - - g2c = 'g2c' - suggested_f90_compiler = 'gnu95' - - def get_flags_linker_so(self): - opt = self.linker_so[1:] - if sys.platform == 'darwin': - target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None) - # If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value - # and leave it alone. But, distutils will complain if the - # environment's value is different from the one in the Python - # Makefile used to build Python. We let distutils handle this - # error checking. - if not target: - # If MACOSX_DEPLOYMENT_TARGET is not set in the environment, - # we try to get it first from sysconfig and then - # fall back to setting it to 10.9 This is a reasonable default - # even when using the official Python dist and those derived - # from it. - import sysconfig - target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') - if not target: - target = '10.9' - s = f'Env. variable MACOSX_DEPLOYMENT_TARGET set to {target}' - warnings.warn(s, stacklevel=2) - os.environ['MACOSX_DEPLOYMENT_TARGET'] = str(target) - opt.extend(['-undefined', 'dynamic_lookup', '-bundle']) - else: - opt.append("-shared") - if sys.platform.startswith('sunos'): - # SunOS often has dynamically loaded symbols defined in the - # static library libg2c.a The linker doesn't like this. To - # ignore the problem, use the -mimpure-text flag. It isn't - # the safest thing, but seems to work. 'man gcc' says: - # ".. Instead of using -mimpure-text, you should compile all - # source code with -fpic or -fPIC." 
- opt.append('-mimpure-text') - return opt - - def get_libgcc_dir(self): - try: - output = subprocess.check_output(self.compiler_f77 + - ['-print-libgcc-file-name']) - except (OSError, subprocess.CalledProcessError): - pass - else: - output = filepath_from_subprocess_output(output) - return os.path.dirname(output) - return None - - def get_libgfortran_dir(self): - if sys.platform[:5] == 'linux': - libgfortran_name = 'libgfortran.so' - elif sys.platform == 'darwin': - libgfortran_name = 'libgfortran.dylib' - else: - libgfortran_name = None - - libgfortran_dir = None - if libgfortran_name: - find_lib_arg = ['-print-file-name={0}'.format(libgfortran_name)] - try: - output = subprocess.check_output( - self.compiler_f77 + find_lib_arg) - except (OSError, subprocess.CalledProcessError): - pass - else: - output = filepath_from_subprocess_output(output) - libgfortran_dir = os.path.dirname(output) - return libgfortran_dir - - def get_library_dirs(self): - opt = [] - if sys.platform[:5] != 'linux': - d = self.get_libgcc_dir() - if d: - # if windows and not cygwin, libg2c lies in a different folder - if sys.platform == 'win32' and not d.startswith('/usr/lib'): - d = os.path.normpath(d) - path = os.path.join(d, "lib%s.a" % self.g2c) - if not os.path.exists(path): - root = os.path.join(d, *((os.pardir, ) * 4)) - d2 = os.path.abspath(os.path.join(root, 'lib')) - path = os.path.join(d2, "lib%s.a" % self.g2c) - if os.path.exists(path): - opt.append(d2) - opt.append(d) - # For Macports / Linux, libgfortran and libgcc are not co-located - lib_gfortran_dir = self.get_libgfortran_dir() - if lib_gfortran_dir: - opt.append(lib_gfortran_dir) - return opt - - def get_libraries(self): - opt = [] - d = self.get_libgcc_dir() - if d is not None: - g2c = self.g2c + '-pic' - f = self.static_lib_format % (g2c, self.static_lib_extension) - if not os.path.isfile(os.path.join(d, f)): - g2c = self.g2c - else: - g2c = self.g2c - - if g2c is not None: - opt.append(g2c) - c_compiler = self.c_compiler - if sys.platform == 'win32' and c_compiler and \ - c_compiler.compiler_type == 'msvc': - opt.append('gcc') - if sys.platform == 'darwin': - opt.append('cc_dynamic') - return opt - - def get_flags_debug(self): - return ['-g'] - - def get_flags_opt(self): - v = self.get_version() - if v and v <= '3.3.3': - # With this compiler version building Fortran BLAS/LAPACK - # with -O3 caused failures in lib.lapack heevr,syevr tests. 
- opt = ['-O2'] - else: - opt = ['-O3'] - opt.append('-funroll-loops') - return opt - - def _c_arch_flags(self): - """ Return detected arch flags from CFLAGS """ - import sysconfig - try: - cflags = sysconfig.get_config_vars()['CFLAGS'] - except KeyError: - return [] - arch_re = re.compile(r"-arch\s+(\w+)") - arch_flags = [] - for arch in arch_re.findall(cflags): - arch_flags += ['-arch', arch] - return arch_flags - - def get_flags_arch(self): - return [] - - def runtime_library_dir_option(self, dir): - if sys.platform == 'win32' or sys.platform == 'cygwin': - # Linux/Solaris/Unix support RPATH, Windows does not - raise NotImplementedError - - # TODO: could use -Xlinker here, if it's supported - assert "," not in dir - - if sys.platform == 'darwin': - return f'-Wl,-rpath,{dir}' - elif sys.platform.startswith(('aix', 'os400')): - # AIX RPATH is called LIBPATH - return f'-Wl,-blibpath:{dir}' - else: - return f'-Wl,-rpath={dir}' - - -class Gnu95FCompiler(GnuFCompiler): - compiler_type = 'gnu95' - compiler_aliases = ('gfortran', ) - description = 'GNU Fortran 95 compiler' - - def version_match(self, version_string): - v = self.gnu_version_match(version_string) - if not v or v[0] != 'gfortran': - return None - v = v[1] - if LooseVersion(v) >= "4": - # gcc-4 series releases do not support -mno-cygwin option - pass - else: - # use -mno-cygwin flag for gfortran when Python is not - # Cygwin-Python - if sys.platform == 'win32': - for key in [ - 'version_cmd', 'compiler_f77', 'compiler_f90', - 'compiler_fix', 'linker_so', 'linker_exe' - ]: - self.executables[key].append('-mno-cygwin') - return v - - possible_executables = ['gfortran', 'f95'] - executables = { - 'version_cmd' : ["", "-dumpversion"], - 'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form", - "-fno-second-underscore"], - 'compiler_f90' : [None, "-Wall", "-g", - "-fno-second-underscore"], - 'compiler_fix' : [None, "-Wall", "-g","-ffixed-form", - "-fno-second-underscore"], - 'linker_so' : ["", "-Wall", "-g"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"], - 'linker_exe' : [None, "-Wall"] - } - - module_dir_switch = '-J' - module_include_switch = '-I' - - if sys.platform.startswith(('aix', 'os400')): - executables['linker_so'].append('-lpthread') - if platform.architecture()[0][:2] == '64': - for key in ['compiler_f77', 'compiler_f90','compiler_fix','linker_so', 'linker_exe']: - executables[key].append('-maix64') - - g2c = 'gfortran' - - def _universal_flags(self, cmd): - """Return a list of -arch flags for every supported architecture.""" - if not sys.platform == 'darwin': - return [] - arch_flags = [] - # get arches the C compiler gets. 
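(Aside, not part of the file: the assignment just below collects the -arch
flags the C compiler was built with; _c_arch_flags() above extracts them
from CFLAGS with a small regex. For a hypothetical CFLAGS value:

    import re
    arch_re = re.compile(r"-arch\s+(\w+)")
    cflags = '-arch x86_64 -arch arm64 -O2'   # hypothetical CFLAGS
    arch_flags = []
    for arch in arch_re.findall(cflags):
        arch_flags += ['-arch', arch]
    print(arch_flags)   # -> ['-arch', 'x86_64', '-arch', 'arm64']
)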
- c_archs = self._c_arch_flags() - if "i386" in c_archs: - c_archs[c_archs.index("i386")] = "i686" - # check the arches the Fortran compiler supports, and compare with - # arch flags from C compiler - for arch in ["ppc", "i686", "x86_64", "ppc64", "s390x"]: - if _can_target(cmd, arch) and arch in c_archs: - arch_flags.extend(["-arch", arch]) - return arch_flags - - def get_flags(self): - flags = GnuFCompiler.get_flags(self) - arch_flags = self._universal_flags(self.compiler_f90) - if arch_flags: - flags[:0] = arch_flags - return flags - - def get_flags_linker_so(self): - flags = GnuFCompiler.get_flags_linker_so(self) - arch_flags = self._universal_flags(self.linker_so) - if arch_flags: - flags[:0] = arch_flags - return flags - - def get_library_dirs(self): - opt = GnuFCompiler.get_library_dirs(self) - if sys.platform == 'win32': - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - target = self.get_target() - if target: - d = os.path.normpath(self.get_libgcc_dir()) - root = os.path.join(d, *((os.pardir, ) * 4)) - path = os.path.join(root, "lib") - mingwdir = os.path.normpath(path) - if os.path.exists(os.path.join(mingwdir, "libmingwex.a")): - opt.append(mingwdir) - # For Macports / Linux, libgfortran and libgcc are not co-located - lib_gfortran_dir = self.get_libgfortran_dir() - if lib_gfortran_dir: - opt.append(lib_gfortran_dir) - return opt - - def get_libraries(self): - opt = GnuFCompiler.get_libraries(self) - if sys.platform == 'darwin': - opt.remove('cc_dynamic') - if sys.platform == 'win32': - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - if "gcc" in opt: - i = opt.index("gcc") - opt.insert(i + 1, "mingwex") - opt.insert(i + 1, "mingw32") - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - return [] - else: - pass - return opt - - def get_target(self): - try: - p = subprocess.Popen( - self.compiler_f77 + ['-v'], - stdin=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - stdout, stderr = p.communicate() - output = (stdout or b"") + (stderr or b"") - except (OSError, subprocess.CalledProcessError): - pass - else: - output = filepath_from_subprocess_output(output) - m = TARGET_R.search(output) - if m: - return m.group(1) - return "" - - def _hash_files(self, filenames): - h = hashlib.sha1() - for fn in filenames: - with open(fn, 'rb') as f: - while True: - block = f.read(131072) - if not block: - break - h.update(block) - text = base64.b32encode(h.digest()) - text = text.decode('ascii') - return text.rstrip('=') - - def _link_wrapper_lib(self, objects, output_dir, extra_dll_dir, - chained_dlls, is_archive): - """Create a wrapper shared library for the given objects - - Return an MSVC-compatible lib - """ - - c_compiler = self.c_compiler - if c_compiler.compiler_type != "msvc": - raise ValueError("This method only supports MSVC") - - object_hash = self._hash_files(list(objects) + list(chained_dlls)) - - if is_win64(): - tag = 'win_amd64' - else: - tag = 'win32' - - basename = 'lib' + os.path.splitext( - os.path.basename(objects[0]))[0][:8] - root_name = basename + '.' 
+ object_hash + '.gfortran-' + tag - dll_name = root_name + '.dll' - def_name = root_name + '.def' - lib_name = root_name + '.lib' - dll_path = os.path.join(extra_dll_dir, dll_name) - def_path = os.path.join(output_dir, def_name) - lib_path = os.path.join(output_dir, lib_name) - - if os.path.isfile(lib_path): - # Nothing to do - return lib_path, dll_path - - if is_archive: - objects = (["-Wl,--whole-archive"] + list(objects) + - ["-Wl,--no-whole-archive"]) - self.link_shared_object( - objects, - dll_name, - output_dir=extra_dll_dir, - extra_postargs=list(chained_dlls) + [ - '-Wl,--allow-multiple-definition', - '-Wl,--output-def,' + def_path, - '-Wl,--export-all-symbols', - '-Wl,--enable-auto-import', - '-static', - '-mlong-double-64', - ]) - - # No PowerPC! - if is_win64(): - specifier = '/MACHINE:X64' - else: - specifier = '/MACHINE:X86' - - # MSVC specific code - lib_args = ['/def:' + def_path, '/OUT:' + lib_path, specifier] - if not c_compiler.initialized: - c_compiler.initialize() - c_compiler.spawn([c_compiler.lib] + lib_args) - - return lib_path, dll_path - - def can_ccompiler_link(self, compiler): - # MSVC cannot link objects compiled by GNU fortran - return compiler.compiler_type not in ("msvc", ) - - def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir): - """ - Convert a set of object files that are not compatible with the default - linker, to a file that is compatible. - """ - if self.c_compiler.compiler_type == "msvc": - # Compile a DLL and return the lib for the DLL as - # the object. Also keep track of previous DLLs that - # we have compiled so that we can link against them. - - # If there are .a archives, assume they are self-contained - # static libraries, and build separate DLLs for each - archives = [] - plain_objects = [] - for obj in objects: - if obj.lower().endswith('.a'): - archives.append(obj) - else: - plain_objects.append(obj) - - chained_libs = [] - chained_dlls = [] - for archive in archives[::-1]: - lib, dll = self._link_wrapper_lib( - [archive], - output_dir, - extra_dll_dir, - chained_dlls=chained_dlls, - is_archive=True) - chained_libs.insert(0, lib) - chained_dlls.insert(0, dll) - - if not plain_objects: - return chained_libs - - lib, dll = self._link_wrapper_lib( - plain_objects, - output_dir, - extra_dll_dir, - chained_dlls=chained_dlls, - is_archive=False) - return [lib] + chained_libs - else: - raise ValueError("Unsupported C compiler") - - -def _can_target(cmd, arch): - """Return true if the architecture supports the -arch flag""" - newcmd = cmd[:] - fid, filename = tempfile.mkstemp(suffix=".f") - os.close(fid) - try: - d = os.path.dirname(filename) - output = os.path.splitext(filename)[0] + ".o" - try: - newcmd.extend(["-arch", arch, "-c", filename]) - p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d) - p.communicate() - return p.returncode == 0 - finally: - if os.path.exists(output): - os.remove(output) - finally: - os.remove(filename) - - -if __name__ == '__main__': - from distutils import log - from numpy.distutils import customized_fcompiler - log.set_verbosity(2) - - print(customized_fcompiler('gnu').get_version()) - try: - print(customized_fcompiler('g95').get_version()) - except Exception as e: - print(e) diff --git a/numpy/distutils/fcompiler/hpux.py b/numpy/distutils/fcompiler/hpux.py deleted file mode 100644 index 09e6483bf5ad..000000000000 --- a/numpy/distutils/fcompiler/hpux.py +++ /dev/null @@ -1,41 +0,0 @@ -from numpy.distutils.fcompiler import FCompiler - -compilers = ['HPUXFCompiler'] - -class 
HPUXFCompiler(FCompiler):
-
-    compiler_type = 'hpux'
-    description = 'HP Fortran 90 Compiler'
-    version_pattern = r'HP F90 (?P<version>[^\s*,]*)'
-
-    executables = {
-        'version_cmd'  : ["f90", "+version"],
-        'compiler_f77' : ["f90"],
-        'compiler_fix' : ["f90"],
-        'compiler_f90' : ["f90"],
-        'linker_so'    : ["ld", "-b"],
-        'archiver'     : ["ar", "-cr"],
-        'ranlib'       : ["ranlib"]
-        }
-    module_dir_switch = None #XXX: fix me
-    module_include_switch = None #XXX: fix me
-    pic_flags = ['+Z']
-    def get_flags(self):
-        return self.pic_flags + ['+ppu', '+DD64']
-    def get_flags_opt(self):
-        return ['-O3']
-    def get_libraries(self):
-        return ['m']
-    def get_library_dirs(self):
-        opt = ['/usr/lib/hpux64']
-        return opt
-    def get_version(self, force=0, ok_status=[256, 0, 1]):
-        # XXX status==256 may indicate 'unrecognized option' or
-        # 'no input file'. So, version_cmd needs more work.
-        return FCompiler.get_version(self, force, ok_status)
-
-if __name__ == '__main__':
-    from distutils import log
-    log.set_verbosity(10)
-    from numpy.distutils import customized_fcompiler
-    print(customized_fcompiler(compiler='hpux').get_version())
diff --git a/numpy/distutils/fcompiler/ibm.py b/numpy/distutils/fcompiler/ibm.py
deleted file mode 100644
index 29927518c703..000000000000
--- a/numpy/distutils/fcompiler/ibm.py
+++ /dev/null
@@ -1,97 +0,0 @@
-import os
-import re
-import sys
-import subprocess
-
-from numpy.distutils.fcompiler import FCompiler
-from numpy.distutils.exec_command import find_executable
-from numpy.distutils.misc_util import make_temp_file
-from distutils import log
-
-compilers = ['IBMFCompiler']
-
-class IBMFCompiler(FCompiler):
-    compiler_type = 'ibm'
-    description = 'IBM XL Fortran Compiler'
-    version_pattern = r'(xlf\(1\)\s*|)IBM XL Fortran ((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)(?P<version>[^\s*]*)'
-    #IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004
-
-    executables = {
-        'version_cmd'  : ["<F77>", "-qversion"],
-        'compiler_f77' : ["xlf"],
-        'compiler_fix' : ["xlf90", "-qfixed"],
-        'compiler_f90' : ["xlf90"],
-        'linker_so'    : ["xlf95"],
-        'archiver'     : ["ar", "-cr"],
-        'ranlib'       : ["ranlib"]
-        }
-
-    def get_version(self,*args,**kwds):
-        version = FCompiler.get_version(self,*args,**kwds)
-
-        if version is None and sys.platform.startswith('aix'):
-            # use lslpp to find out xlf version
-            lslpp = find_executable('lslpp')
-            xlf = find_executable('xlf')
-            if os.path.exists(xlf) and os.path.exists(lslpp):
-                try:
-                    o = subprocess.check_output([lslpp, '-Lc', 'xlfcmp'])
-                except (OSError, subprocess.CalledProcessError):
-                    pass
-                else:
-                    m = re.search(r'xlfcmp:(?P<version>\d+([.]\d+)+)', o)
-                    if m: version = m.group('version')
-
-        xlf_dir = '/etc/opt/ibmcmp/xlf'
-        if version is None and os.path.isdir(xlf_dir):
-            # linux:
-            # If the output of xlf does not contain version info
-            # (that's the case with xlf 8.1, for instance) then
-            # let's try another method:
-            l = sorted(os.listdir(xlf_dir))
-            l.reverse()
-            l = [d for d in l if os.path.isfile(os.path.join(xlf_dir, d, 'xlf.cfg'))]
-            if l:
-                from distutils.version import LooseVersion
-                self.version = version = LooseVersion(l[0])
-        return version
-
-    def get_flags(self):
-        return ['-qextname']
-
-    def get_flags_debug(self):
-        return ['-g']
-
-    def get_flags_linker_so(self):
-        opt = []
-        if sys.platform=='darwin':
-            opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress')
-        else:
-            opt.append('-bshared')
-        version = self.get_version(ok_status=[0, 40])
-        if version is not None:
-            if sys.platform.startswith('aix'):
-                xlf_cfg = '/etc/xlf.cfg'
-            else:
                xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version
-            fo, new_cfg = make_temp_file(suffix='_xlf.cfg')
-            log.info('Creating '+new_cfg)
-            with open(xlf_cfg) as fi:
-                crt1_match = re.compile(r'\s*crt\s*=\s*(?P<path>.*)/crt1.o').match
-                for line in fi:
-                    m = crt1_match(line)
-                    if m:
-                        fo.write('crt = %s/bundle1.o\n' % (m.group('path')))
-                    else:
-                        fo.write(line)
-            fo.close()
-            opt.append('-F'+new_cfg)
-        return opt
-
-    def get_flags_opt(self):
-        return ['-O3']
-
-if __name__ == '__main__':
-    from numpy.distutils import customized_fcompiler
-    log.set_verbosity(2)
-    print(customized_fcompiler(compiler='ibm').get_version())
diff --git a/numpy/distutils/fcompiler/intel.py b/numpy/distutils/fcompiler/intel.py
deleted file mode 100644
index 1d6065904110..000000000000
--- a/numpy/distutils/fcompiler/intel.py
+++ /dev/null
@@ -1,211 +0,0 @@
-# http://developer.intel.com/software/products/compilers/flin/
-import sys
-
-from numpy.distutils.ccompiler import simple_version_match
-from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file
-
-compilers = ['IntelFCompiler', 'IntelVisualFCompiler',
-             'IntelItaniumFCompiler', 'IntelItaniumVisualFCompiler',
-             'IntelEM64VisualFCompiler', 'IntelEM64TFCompiler']
-
-
-def intel_version_match(type):
-    # Match against the important stuff in the version string
-    return simple_version_match(start=r'Intel.*?Fortran.*?(?:%s).*?Version' % (type,))
-
-
-class BaseIntelFCompiler(FCompiler):
-    def update_executables(self):
-        f = dummy_fortran_file()
-        self.executables['version_cmd'] = ['<F77>', '-FI', '-V', '-c',
-                                           f + '.f', '-o', f + '.o']
-
-    def runtime_library_dir_option(self, dir):
-        # TODO: could use -Xlinker here, if it's supported
-        assert "," not in dir
-
-        return '-Wl,-rpath=%s' % dir
-
-
-class IntelFCompiler(BaseIntelFCompiler):
-
-    compiler_type = 'intel'
-    compiler_aliases = ('ifort',)
-    description = 'Intel Fortran Compiler for 32-bit apps'
-    version_match = intel_version_match('32-bit|IA-32')
-
-    possible_executables = ['ifort', 'ifc']
-
-    executables = {
-        'version_cmd'  : None,          # set by update_executables
-        'compiler_f77' : [None, "-72", "-w90", "-w95"],
-        'compiler_f90' : [None],
-        'compiler_fix' : [None, "-FI"],
-        'linker_so'    : ["<F90>", "-shared"],
-        'archiver'     : ["ar", "-cr"],
-        'ranlib'       : ["ranlib"]
-        }
-
-    pic_flags = ['-fPIC']
-    module_dir_switch = '-module '  # Don't remove ending space!
-    module_include_switch = '-I'
-
-    def get_flags_free(self):
-        return ['-FR']
-
-    def get_flags(self):
-        return ['-fPIC']
-
-    def get_flags_opt(self):  # Scipy test failures with -O2
-        v = self.get_version()
-        mpopt = 'openmp' if v and v < '15' else 'qopenmp'
-        return ['-fp-model', 'strict', '-O1',
-                '-assume', 'minus0', '-{}'.format(mpopt)]
-
-    def get_flags_arch(self):
-        return []
-
-    def get_flags_linker_so(self):
-        opt = FCompiler.get_flags_linker_so(self)
-        v = self.get_version()
-        if v and v >= '8.0':
-            opt.append('-nofor_main')
-        if sys.platform == 'darwin':
-            # Here, it's -dynamiclib
-            try:
-                idx = opt.index('-shared')
-                opt.remove('-shared')
-            except ValueError:
-                idx = 0
-            opt[idx:idx] = ['-dynamiclib', '-Wl,-undefined,dynamic_lookup']
-        return opt
-
-
-class IntelItaniumFCompiler(IntelFCompiler):
-    compiler_type = 'intele'
-    compiler_aliases = ()
-    description = 'Intel Fortran Compiler for Itanium apps'
-
-    version_match = intel_version_match('Itanium|IA-64')
-
-    possible_executables = ['ifort', 'efort', 'efc']
-
-    executables = {
-        'version_cmd'  : None,
-        'compiler_f77' : [None, "-FI", "-w90", "-w95"],
-        'compiler_fix' : [None, "-FI"],
-        'compiler_f90' : [None],
-        'linker_so'    : ['<F90>', "-shared"],
-        'archiver'     : ["ar", "-cr"],
-        'ranlib'       : ["ranlib"]
-        }
-
-
-class IntelEM64TFCompiler(IntelFCompiler):
-    compiler_type = 'intelem'
-    compiler_aliases = ()
-    description = 'Intel Fortran Compiler for 64-bit apps'
-
-    version_match = intel_version_match('EM64T-based|Intel\\(R\\) 64|64|IA-64|64-bit')
-
-    possible_executables = ['ifort', 'efort', 'efc']
-
-    executables = {
-        'version_cmd'  : None,
-        'compiler_f77' : [None, "-FI"],
-        'compiler_fix' : [None, "-FI"],
-        'compiler_f90' : [None],
-        'linker_so'    : ['<F90>', "-shared"],
-        'archiver'     : ["ar", "-cr"],
-        'ranlib'       : ["ranlib"]
-        }
-
-# Is there no difference in the version string between the above compilers
-# and the Visual compilers?
-
-
-class IntelVisualFCompiler(BaseIntelFCompiler):
-    compiler_type = 'intelv'
-    description = 'Intel Visual Fortran Compiler for 32-bit apps'
-    version_match = intel_version_match('32-bit|IA-32')
-
-    def update_executables(self):
-        f = dummy_fortran_file()
-        self.executables['version_cmd'] = ['<F77>', '/FI', '/c',
-                                           f + '.f', '/o', f + '.o']
-
-    ar_exe = 'lib.exe'
-    possible_executables = ['ifort', 'ifl']
-
-    executables = {
-        'version_cmd'  : None,
-        'compiler_f77' : [None],
-        'compiler_fix' : [None],
-        'compiler_f90' : [None],
-        'linker_so'    : [None],
-        'archiver'     : [ar_exe, "/verbose", "/OUT:"],
-        'ranlib'       : None
-        }
-
-    compile_switch = '/c '
-    object_switch = '/Fo'     # No space after /Fo!
-    library_switch = '/OUT:'  # No space after /OUT:!
-    module_dir_switch = '/module:'  # No space after /module:
-    module_include_switch = '/I'
-
-    def get_flags(self):
-        opt = ['/nologo', '/MD', '/nbs', '/names:lowercase',
-               '/assume:underscore', '/fpp']
-        return opt
-
-    def get_flags_free(self):
-        return []
-
-    def get_flags_debug(self):
-        return ['/4Yb', '/d2']
-
-    def get_flags_opt(self):
-        return ['/O1', '/assume:minus0']  # Scipy test failures with /O2
-
-    def get_flags_arch(self):
-        return ["/arch:IA32", "/QaxSSE3"]
-
-    def runtime_library_dir_option(self, dir):
-        raise NotImplementedError
-
-
-class IntelItaniumVisualFCompiler(IntelVisualFCompiler):
-    compiler_type = 'intelev'
-    description = 'Intel Visual Fortran Compiler for Itanium apps'
-
-    version_match = intel_version_match('Itanium')
-
-    possible_executables = ['efl']  # XXX this is a wild guess
-    ar_exe = IntelVisualFCompiler.ar_exe
-
-    executables = {
-        'version_cmd'  : None,
-        'compiler_f77' : [None, "-FI", "-w90", "-w95"],
-        'compiler_fix' : [None, "-FI", "-4L72", "-w"],
-        'compiler_f90' : [None],
-        'linker_so'    : ['<F90>', "-shared"],
-        'archiver'     : [ar_exe, "/verbose", "/OUT:"],
-        'ranlib'       : None
-        }
-
-
-class IntelEM64VisualFCompiler(IntelVisualFCompiler):
-    compiler_type = 'intelvem'
-    description = 'Intel Visual Fortran Compiler for 64-bit apps'
-
-    version_match = simple_version_match(start=r'Intel\(R\).*?64,')
-
-    def get_flags_arch(self):
-        return []
-
-
-if __name__ == '__main__':
-    from distutils import log
-    log.set_verbosity(2)
-    from numpy.distutils import customized_fcompiler
-    print(customized_fcompiler(compiler='intel').get_version())
diff --git a/numpy/distutils/fcompiler/lahey.py b/numpy/distutils/fcompiler/lahey.py
deleted file mode 100644
index e925838268b8..000000000000
--- a/numpy/distutils/fcompiler/lahey.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import os
-
-from numpy.distutils.fcompiler import FCompiler
-
-compilers = ['LaheyFCompiler']
-
-class LaheyFCompiler(FCompiler):
-
-    compiler_type = 'lahey'
-    description = 'Lahey/Fujitsu Fortran 95 Compiler'
-    version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P<version>[^\s*]*)'
-
-    executables = {
-        'version_cmd'  : ["<F90>", "--version"],
-        'compiler_f77' : ["lf95", "--fix"],
-        'compiler_fix' : ["lf95", "--fix"],
-        'compiler_f90' : ["lf95"],
-        'linker_so'    : ["lf95", "-shared"],
-        'archiver'     : ["ar", "-cr"],
-        'ranlib'       : ["ranlib"]
-        }
-
-    module_dir_switch = None  #XXX Fix me
-    module_include_switch = None #XXX Fix me
-
-    def get_flags_opt(self):
-        return ['-O']
-    def get_flags_debug(self):
-        return ['-g', '--chk', '--chkglobal']
-    def get_library_dirs(self):
-        opt = []
-        d = os.environ.get('LAHEY')
-        if d:
-            opt.append(os.path.join(d, 'lib'))
-        return opt
-    def get_libraries(self):
-        opt = []
-        opt.extend(['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6'])
-        return opt
-
-if __name__ == '__main__':
-    from distutils import log
-    log.set_verbosity(2)
-    from numpy.distutils import customized_fcompiler
-    print(customized_fcompiler(compiler='lahey').get_version())
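
Aside (illustrative, not part of the patch): every version_pattern in these
deleted compiler classes feeds version detection through a named regex group,
(?P<version>...). A minimal sketch of that mechanism, reusing the Lahey
pattern above on an invented version banner:

    import re

    pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P<version>[^\s*]*)'
    banner = 'Lahey/Fujitsu Fortran 95 Compiler Release L8.10b'  # hypothetical output
    match = re.search(pattern, banner)
    print(match.group('version'))  # -> L8.10b
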
["f77", "-f77"], - 'compiler_fix' : ["f90", "-fixedform"], - 'compiler_f90' : ["f90"], - 'linker_so' : ["f90", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : None - } - module_dir_switch = None #XXX: fix me - module_include_switch = None #XXX: fix me - pic_flags = ['-KPIC'] - - def get_flags(self): - return self.pic_flags + ['-n32'] - def get_flags_opt(self): - return ['-O3'] - def get_flags_arch(self): - opt = [] - for a in '19 20 21 22_4k 22_5k 24 25 26 27 28 30 32_5k 32_10k'.split(): - if getattr(cpu, 'is_IP%s'%a)(): - opt.append('-TARG:platform=IP%s' % a) - break - return opt - def get_flags_arch_f77(self): - r = None - if cpu.is_r10000(): r = 10000 - elif cpu.is_r12000(): r = 12000 - elif cpu.is_r8000(): r = 8000 - elif cpu.is_r5000(): r = 5000 - elif cpu.is_r4000(): r = 4000 - if r is not None: - return ['r%s' % (r)] - return [] - def get_flags_arch_f90(self): - r = self.get_flags_arch_f77() - if r: - r[0] = '-' + r[0] - return r - -if __name__ == '__main__': - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='mips').get_version()) diff --git a/numpy/distutils/fcompiler/nag.py b/numpy/distutils/fcompiler/nag.py deleted file mode 100644 index 939201f44e02..000000000000 --- a/numpy/distutils/fcompiler/nag.py +++ /dev/null @@ -1,87 +0,0 @@ -import sys -import re -from numpy.distutils.fcompiler import FCompiler - -compilers = ['NAGFCompiler', 'NAGFORCompiler'] - -class BaseNAGFCompiler(FCompiler): - version_pattern = r'NAG.* Release (?P[^(\s]*)' - - def version_match(self, version_string): - m = re.search(self.version_pattern, version_string) - if m: - return m.group('version') - else: - return None - - def get_flags_linker_so(self): - return ["-Wl,-shared"] - def get_flags_opt(self): - return ['-O4'] - def get_flags_arch(self): - return [] - -class NAGFCompiler(BaseNAGFCompiler): - - compiler_type = 'nag' - description = 'NAGWare Fortran 95 Compiler' - - executables = { - 'version_cmd' : ["", "-V"], - 'compiler_f77' : ["f95", "-fixed"], - 'compiler_fix' : ["f95", "-fixed"], - 'compiler_f90' : ["f95"], - 'linker_so' : [""], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_flags_linker_so(self): - if sys.platform == 'darwin': - return ['-unsharedf95', '-Wl,-bundle,-flat_namespace,-undefined,suppress'] - return BaseNAGFCompiler.get_flags_linker_so(self) - def get_flags_arch(self): - version = self.get_version() - if version and version < '5.1': - return ['-target=native'] - else: - return BaseNAGFCompiler.get_flags_arch(self) - def get_flags_debug(self): - return ['-g', '-gline', '-g90', '-nan', '-C'] - -class NAGFORCompiler(BaseNAGFCompiler): - - compiler_type = 'nagfor' - description = 'NAG Fortran Compiler' - - executables = { - 'version_cmd' : ["nagfor", "-V"], - 'compiler_f77' : ["nagfor", "-fixed"], - 'compiler_fix' : ["nagfor", "-fixed"], - 'compiler_f90' : ["nagfor"], - 'linker_so' : ["nagfor"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_flags_linker_so(self): - if sys.platform == 'darwin': - return ['-unsharedrts', - '-Wl,-bundle,-flat_namespace,-undefined,suppress'] - return BaseNAGFCompiler.get_flags_linker_so(self) - def get_flags_debug(self): - version = self.get_version() - if version and version > '6.1': - return ['-g', '-u', '-nan', '-C=all', '-thread_safe', - '-kind=unique', '-Warn=allocation', '-Warn=subnormal'] - else: - return ['-g', '-nan', '-C=all', '-u', '-thread_safe'] - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils 
-    compiler = customized_fcompiler(compiler='nagfor')
-    print(compiler.get_version())
-    print(compiler.get_flags_debug())
diff --git a/numpy/distutils/fcompiler/none.py b/numpy/distutils/fcompiler/none.py
deleted file mode 100644
index ef411fffc7cb..000000000000
--- a/numpy/distutils/fcompiler/none.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from numpy.distutils.fcompiler import FCompiler
-from numpy.distutils import customized_fcompiler
-
-compilers = ['NoneFCompiler']
-
-class NoneFCompiler(FCompiler):
-
-    compiler_type = 'none'
-    description = 'Fake Fortran compiler'
-
-    executables = {'compiler_f77': None,
-                   'compiler_f90': None,
-                   'compiler_fix': None,
-                   'linker_so': None,
-                   'linker_exe': None,
-                   'archiver': None,
-                   'ranlib': None,
-                   'version_cmd': None,
-                   }
-
-    def find_executables(self):
-        pass
-
-
-if __name__ == '__main__':
-    from distutils import log
-    log.set_verbosity(2)
-    print(customized_fcompiler(compiler='none').get_version())
diff --git a/numpy/distutils/fcompiler/nv.py b/numpy/distutils/fcompiler/nv.py
deleted file mode 100644
index f518c8b0027a..000000000000
--- a/numpy/distutils/fcompiler/nv.py
+++ /dev/null
@@ -1,53 +0,0 @@
-from numpy.distutils.fcompiler import FCompiler
-
-compilers = ['NVHPCFCompiler']
-
-class NVHPCFCompiler(FCompiler):
-    """ NVIDIA High Performance Computing (HPC) SDK Fortran Compiler
-
-    https://developer.nvidia.com/hpc-sdk
-
-    Since august 2020 the NVIDIA HPC SDK includes the compilers formerly known as The Portland Group compilers,
-    https://www.pgroup.com/index.htm.
-    See also `numpy.distutils.fcompiler.pg`.
-    """
-
-    compiler_type = 'nv'
-    description = 'NVIDIA HPC SDK'
-    version_pattern = r'\s*(nvfortran|.+ \(aka nvfortran\)) (?P<version>[\d.-]+).*'
-
-    executables = {
-        'version_cmd': ["<F90>", "-V"],
-        'compiler_f77': ["nvfortran"],
-        'compiler_fix': ["nvfortran", "-Mfixed"],
-        'compiler_f90': ["nvfortran"],
-        'linker_so': ["<F90>"],
-        'archiver': ["ar", "-cr"],
-        'ranlib': ["ranlib"]
-    }
-    pic_flags = ['-fpic']
-
-    module_dir_switch = '-module '
-    module_include_switch = '-I'
-
-    def get_flags(self):
-        opt = ['-Minform=inform', '-Mnosecond_underscore']
-        return self.pic_flags + opt
-
-    def get_flags_opt(self):
-        return ['-fast']
-
-    def get_flags_debug(self):
-        return ['-g']
-
-    def get_flags_linker_so(self):
-        return ["-shared", '-fpic']
-
-    def runtime_library_dir_option(self, dir):
-        return '-R%s' % dir
-
-if __name__ == '__main__':
-    from distutils import log
-    log.set_verbosity(2)
-    from numpy.distutils import customized_fcompiler
-    print(customized_fcompiler(compiler='nv').get_version())
diff --git a/numpy/distutils/fcompiler/pathf95.py b/numpy/distutils/fcompiler/pathf95.py
deleted file mode 100644
index 0768cb12e87a..000000000000
--- a/numpy/distutils/fcompiler/pathf95.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from numpy.distutils.fcompiler import FCompiler
-
-compilers = ['PathScaleFCompiler']
-
-class PathScaleFCompiler(FCompiler):
-
-    compiler_type = 'pathf95'
-    description = 'PathScale Fortran Compiler'
-    version_pattern = r'PathScale\(TM\) Compiler Suite: Version (?P<version>[\d.]+)'
-
-    executables = {
-        'version_cmd'  : ["pathf95", "-version"],
-        'compiler_f77' : ["pathf95", "-fixedform"],
-        'compiler_fix' : ["pathf95", "-fixedform"],
-        'compiler_f90' : ["pathf95"],
-        'linker_so'    : ["pathf95", "-shared"],
-        'archiver'     : ["ar", "-cr"],
-        'ranlib'       : ["ranlib"]
-        }
-    pic_flags = ['-fPIC']
-    module_dir_switch = '-module '  # Don't remove ending space!
-    module_include_switch = '-I'
-
-    def get_flags_opt(self):
-        return ['-O3']
-    def get_flags_debug(self):
-        return ['-g']
-
-if __name__ == '__main__':
-    from distutils import log
-    log.set_verbosity(2)
-    from numpy.distutils import customized_fcompiler
-    print(customized_fcompiler(compiler='pathf95').get_version())
diff --git a/numpy/distutils/fcompiler/pg.py b/numpy/distutils/fcompiler/pg.py
deleted file mode 100644
index 72442c4fec61..000000000000
--- a/numpy/distutils/fcompiler/pg.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# http://www.pgroup.com
-import sys
-
-from numpy.distutils.fcompiler import FCompiler
-from sys import platform
-from os.path import join, dirname, normpath
-
-compilers = ['PGroupFCompiler', 'PGroupFlangCompiler']
-
-
-class PGroupFCompiler(FCompiler):
-
-    compiler_type = 'pg'
-    description = 'Portland Group Fortran Compiler'
-    version_pattern = r'\s*pg(f77|f90|hpf|fortran) (?P<version>[\d.-]+).*'
-
-    if platform == 'darwin':
-        executables = {
-            'version_cmd': ["<F77>", "-V"],
-            'compiler_f77': ["pgfortran", "-dynamiclib"],
-            'compiler_fix': ["pgfortran", "-Mfixed", "-dynamiclib"],
-            'compiler_f90': ["pgfortran", "-dynamiclib"],
-            'linker_so': ["libtool"],
-            'archiver': ["ar", "-cr"],
-            'ranlib': ["ranlib"]
-        }
-        pic_flags = ['']
-    else:
-        executables = {
-            'version_cmd': ["<F77>", "-V"],
-            'compiler_f77': ["pgfortran"],
-            'compiler_fix': ["pgfortran", "-Mfixed"],
-            'compiler_f90': ["pgfortran"],
-            'linker_so': ["<F90>"],
-            'archiver': ["ar", "-cr"],
-            'ranlib': ["ranlib"]
-        }
-        pic_flags = ['-fpic']
-
-    module_dir_switch = '-module '
-    module_include_switch = '-I'
-
-    def get_flags(self):
-        opt = ['-Minform=inform', '-Mnosecond_underscore']
-        return self.pic_flags + opt
-
-    def get_flags_opt(self):
-        return ['-fast']
-
-    def get_flags_debug(self):
-        return ['-g']
-
-    if platform == 'darwin':
-        def get_flags_linker_so(self):
-            return ["-dynamic", '-undefined', 'dynamic_lookup']
-
-    else:
-        def get_flags_linker_so(self):
-            return ["-shared", '-fpic']
-
-    def runtime_library_dir_option(self, dir):
-        return '-R%s' % dir
-
-
-import functools
-
-class PGroupFlangCompiler(FCompiler):
-    compiler_type = 'flang'
-    description = 'Portland Group Fortran LLVM Compiler'
-    version_pattern = r'\s*(flang|clang) version (?P<version>[\d.-]+).*'
-
-    ar_exe = 'lib.exe'
-    possible_executables = ['flang']
-
-    executables = {
-        'version_cmd': ["<F77>", "--version"],
-        'compiler_f77': ["flang"],
-        'compiler_fix': ["flang"],
-        'compiler_f90': ["flang"],
-        'linker_so': [None],
-        'archiver': [ar_exe, "/verbose", "/OUT:"],
-        'ranlib': None
-    }
-
-    library_switch = '/OUT:'  # No space after /OUT:!
-    module_dir_switch = '-module '  # Don't remove ending space!
-
-    def get_libraries(self):
-        opt = FCompiler.get_libraries(self)
-        opt.extend(['flang', 'flangrti', 'ompstub'])
-        return opt
-
-    @functools.lru_cache(maxsize=128)
-    def get_library_dirs(self):
-        """List of compiler library directories."""
-        opt = FCompiler.get_library_dirs(self)
-        flang_dir = dirname(self.executables['compiler_f77'][0])
-        opt.append(normpath(join(flang_dir, '..', 'lib')))
-
-        return opt
-
-    def get_flags(self):
-        return []
-
-    def get_flags_free(self):
-        return []
-
-    def get_flags_debug(self):
-        return ['-g']
-
-    def get_flags_opt(self):
-        return ['-O3']
-
-    def get_flags_arch(self):
-        return []
-
-    def runtime_library_dir_option(self, dir):
-        raise NotImplementedError
-
-
-if __name__ == '__main__':
-    from distutils import log
-    log.set_verbosity(2)
-    from numpy.distutils import customized_fcompiler
-    if 'flang' in sys.argv:
-        print(customized_fcompiler(compiler='flang').get_version())
-    else:
-        print(customized_fcompiler(compiler='pg').get_version())
diff --git a/numpy/distutils/fcompiler/sun.py b/numpy/distutils/fcompiler/sun.py
deleted file mode 100644
index d039f0b25705..000000000000
--- a/numpy/distutils/fcompiler/sun.py
+++ /dev/null
@@ -1,51 +0,0 @@
-from numpy.distutils.ccompiler import simple_version_match
-from numpy.distutils.fcompiler import FCompiler
-
-compilers = ['SunFCompiler']
-
-class SunFCompiler(FCompiler):
-
-    compiler_type = 'sun'
-    description = 'Sun or Forte Fortran 95 Compiler'
-    # ex:
-    # f90: Sun WorkShop 6 update 2 Fortran 95 6.2 Patch 111690-10 2003/08/28
-    version_match = simple_version_match(
-                      start=r'f9[05]: (Sun|Forte|WorkShop).*Fortran 95')
-
-    executables = {
-        'version_cmd'  : ["<F90>", "-V"],
-        'compiler_f77' : ["f90"],
-        'compiler_fix' : ["f90", "-fixed"],
-        'compiler_f90' : ["f90"],
-        'linker_so'    : ["<F90>", "-Bdynamic", "-G"],
-        'archiver'     : ["ar", "-cr"],
-        'ranlib'       : ["ranlib"]
-        }
-    module_dir_switch = '-moddir='
-    module_include_switch = '-M'
-    pic_flags = ['-xcode=pic32']
-
-    def get_flags_f77(self):
-        ret = ["-ftrap=%none"]
-        if (self.get_version() or '') >= '7':
-            ret.append("-f77")
-        else:
-            ret.append("-fixed")
-        return ret
-    def get_opt(self):
-        return ['-fast', '-dalign']
-    def get_arch(self):
-        return ['-xtarget=generic']
-    def get_libraries(self):
-        opt = []
-        opt.extend(['fsu', 'sunmath', 'mvec'])
-        return opt
-
-    def runtime_library_dir_option(self, dir):
-        return '-R%s' % dir
-
-if __name__ == '__main__':
-    from distutils import log
-    log.set_verbosity(2)
-    from numpy.distutils import customized_fcompiler
-    print(customized_fcompiler(compiler='sun').get_version())
diff --git a/numpy/distutils/fcompiler/vast.py b/numpy/distutils/fcompiler/vast.py
deleted file mode 100644
index 92a1647ba437..000000000000
--- a/numpy/distutils/fcompiler/vast.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import os
-
-from numpy.distutils.fcompiler.gnu import GnuFCompiler
-
-compilers = ['VastFCompiler']
-
-class VastFCompiler(GnuFCompiler):
-    compiler_type = 'vast'
-    compiler_aliases = ()
-    description = 'Pacific-Sierra Research Fortran 90 Compiler'
-    version_pattern = (r'\s*Pacific-Sierra Research vf90 '
-                       r'(Personal|Professional)\s+(?P<version>[^\s]*)')
-
-    # VAST f90 does not support -o with -c. So, object files are created
-    # to the current directory and then moved to build directory
-    object_switch = ' && function _mvfile { mv -v `basename $1` $1 ; } && _mvfile '
-
-    executables = {
-        'version_cmd'  : ["vf90", "-v"],
-        'compiler_f77' : ["g77"],
-        'compiler_fix' : ["f90", "-Wv,-ya"],
-        'compiler_f90' : ["f90"],
-        'linker_so'    : ["<F90>"],
-        'archiver'     : ["ar", "-cr"],
-        'ranlib'       : ["ranlib"]
-        }
-    module_dir_switch = None  #XXX Fix me
-    module_include_switch = None #XXX Fix me
-
-    def find_executables(self):
-        pass
-
-    def get_version_cmd(self):
-        f90 = self.compiler_f90[0]
-        d, b = os.path.split(f90)
-        vf90 = os.path.join(d, 'v'+b)
-        return vf90
-
-    def get_flags_arch(self):
-        vast_version = self.get_version()
-        gnu = GnuFCompiler()
-        gnu.customize(None)
-        self.version = gnu.get_version()
-        opt = GnuFCompiler.get_flags_arch(self)
-        self.version = vast_version
-        return opt
-
-if __name__ == '__main__':
-    from distutils import log
-    log.set_verbosity(2)
-    from numpy.distutils import customized_fcompiler
-    print(customized_fcompiler(compiler='vast').get_version())
diff --git a/numpy/distutils/from_template.py b/numpy/distutils/from_template.py
deleted file mode 100644
index 90d1f4c384c7..000000000000
--- a/numpy/distutils/from_template.py
+++ /dev/null
@@ -1,261 +0,0 @@
-#!/usr/bin/env python3
-"""
-
-process_file(filename)
-
-  takes templated file .xxx.src and produces .xxx file where .xxx
-  is .pyf .f90 or .f using the following template rules:
-
-  '<..>' denotes a template.
-
-  All function and subroutine blocks in a source file with names that
-  contain '<..>' will be replicated according to the rules in '<..>'.
-
-  The number of comma-separated words in '<..>' will determine the number of
-  replicates.
-
-  '<..>' may have two different forms, named and short. For example,
-
-  named:
-   <p=d,s,z,c> where anywhere inside a block '<p>' will be replaced with
-   'd', 's', 'z', and 'c' for each replicate of the block.
-
-   <_c>  is already defined: <_c=s,d,c,z>
-   <_t>  is already defined: <_t=real,double precision,complex,double complex>
-
-  short:
-   <s,d,c,z>, a short form of the named, useful when no <p> appears inside
-   a block.
-
-  In general, '<..>' contains a comma separated list of arbitrary
-  expressions. If these expression must contain a comma|leftarrow|rightarrow,
-  then prepend the comma|leftarrow|rightarrow with a backslash.
-
-  If an expression matches '\\<index>' then it will be replaced
-  by <index>-th expression.
-
-  Note that all '<..>' forms in a block must have the same number of
-  comma-separated entries.
-
-  Predefined named template rules:
-
-  <prefix=s,d,c,z>
-  <ftype=real,double precision,complex,double complex>
-  <ftypereal=real,double precision,\\0,\\1>
-  <ctype=float,double,complex_float,complex_double>
-  <ctypereal=float,double,\\0,\\1>
-
-"""
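
Aside (illustrative, not part of the deleted module): a minimal sketch of the
template rules above, using the predefined <prefix> and <ftype> names on an
invented subroutine:

    from numpy.distutils.from_template import process_str

    src = """
          subroutine <prefix>swap(n, x, y)
          <ftype> x(n), y(n)
          end subroutine <prefix>swap
    """
    # Prints four replicates -- sswap/dswap/cswap/zswap -- with <ftype>
    # expanded pairwise to real, double precision, complex, double complex.
    print(process_str(src))
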
-__all__ = ['process_str', 'process_file']
-
-import os
-import sys
-import re
-
-routine_start_re = re.compile(r'(\n|\A)((     (\$|\*))|)\s*(subroutine|function)\b', re.I)
-routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I)
-function_start_re = re.compile(r'\n     (\$|\*)\s*function\b', re.I)
-
-def parse_structure(astr):
-    """ Return a list of tuples for each function or subroutine each
-    tuple is the start and end of a subroutine or function to be
-    expanded.
-    """
-
-    spanlist = []
-    ind = 0
-    while True:
-        m = routine_start_re.search(astr, ind)
-        if m is None:
-            break
-        start = m.start()
-        if function_start_re.match(astr, start, m.end()):
-            while True:
-                i = astr.rfind('\n', ind, start)
-                if i==-1:
-                    break
-                start = i
-                if astr[i:i+7]!='\n     $':
-                    break
-            start += 1
-        m = routine_end_re.search(astr, m.end())
-        ind = end = m and m.end()-1 or len(astr)
-        spanlist.append((start, end))
-    return spanlist
-
-template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>")
-named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>")
-list_re = re.compile(r"<\s*((.*?))\s*>")
-
-def find_repl_patterns(astr):
-    reps = named_re.findall(astr)
-    names = {}
-    for rep in reps:
-        name = rep[0].strip() or unique_key(names)
-        repl = rep[1].replace(r'\,', '@comma@')
-        thelist = conv(repl)
-        names[name] = thelist
-    return names
-
-def find_and_remove_repl_patterns(astr):
-    names = find_repl_patterns(astr)
-    astr = re.subn(named_re, '', astr)[0]
-    return astr, names
-
-item_re = re.compile(r"\A\\(?P<index>\d+)\Z")
-def conv(astr):
-    b = astr.split(',')
-    l = [x.strip() for x in b]
-    for i in range(len(l)):
-        m = item_re.match(l[i])
-        if m:
-            j = int(m.group('index'))
-            l[i] = l[j]
-    return ','.join(l)
-
-def unique_key(adict):
-    """ Obtain a unique key given a dictionary."""
-    allkeys = list(adict.keys())
-    done = False
-    n = 1
-    while not done:
-        newkey = '__l%s' % (n)
-        if newkey in allkeys:
-            n += 1
-        else:
-            done = True
-    return newkey
-
-
-template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z')
-def expand_sub(substr, names):
-    substr = substr.replace(r'\>', '@rightarrow@')
-    substr = substr.replace(r'\<', '@leftarrow@')
-    lnames = find_repl_patterns(substr)
-    substr = named_re.sub(r"<\1>", substr)  # get rid of definition templates
-
-    def listrepl(mobj):
-        thelist = conv(mobj.group(1).replace(r'\,', '@comma@'))
-        if template_name_re.match(thelist):
-            return "<%s>" % (thelist)
-        name = None
-        for key in lnames.keys():    # see if list is already in dictionary
-            if lnames[key] == thelist:
-                name = key
-        if name is None:      # this list is not in the dictionary yet
-            name = unique_key(lnames)
-            lnames[name] = thelist
-        return "<%s>" % name
-
-    substr = list_re.sub(listrepl, substr)  # convert all lists to named templates
-                                            # newnames are constructed as needed
-
-    numsubs = None
-    base_rule = None
-    rules = {}
-    for r in template_re.findall(substr):
-        if r not in rules:
-            thelist = lnames.get(r, names.get(r, None))
-            if thelist is None:
-                raise ValueError('No replicates found for <%s>' % (r))
-            if r not in names and not thelist.startswith('_'):
-                names[r] = thelist
-            rule = [i.replace('@comma@', ',') for i in thelist.split(',')]
-            num = len(rule)
-
-            if numsubs is None:
-                numsubs = num
-                rules[r] = rule
-                base_rule = r
-            elif num == numsubs:
-                rules[r] = rule
-            else:
-                print("Mismatch in number of replacements (base <%s=%s>)"
-                      " for <%s=%s>. Ignoring." %
-                      (base_rule, ','.join(rules[base_rule]), r, thelist))
-    if not rules:
-        return substr
-
-    def namerepl(mobj):
-        name = mobj.group(1)
-        return rules.get(name, (k+1)*[name])[k]
-
-    newstr = ''
-    for k in range(numsubs):
-        newstr += template_re.sub(namerepl, substr) + '\n\n'
-
-    newstr = newstr.replace('@rightarrow@', '>')
-    newstr = newstr.replace('@leftarrow@', '<')
-    return newstr
-
-def process_str(allstr):
-    newstr = allstr
-    writestr = ''
-
-    struct = parse_structure(newstr)
-
-    oldend = 0
-    names = {}
-    names.update(_special_names)
-    for sub in struct:
-        cleanedstr, defs = find_and_remove_repl_patterns(newstr[oldend:sub[0]])
-        writestr += cleanedstr
-        names.update(defs)
-        writestr += expand_sub(newstr[sub[0]:sub[1]], names)
-        oldend = sub[1]
-    writestr += newstr[oldend:]
-
-    return writestr
-
-include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+\.src)['\"]", re.I)
-
-def resolve_includes(source):
-    d = os.path.dirname(source)
-    with open(source) as fid:
-        lines = []
-        for line in fid:
-            m = include_src_re.match(line)
-            if m:
-                fn = m.group('name')
-                if not os.path.isabs(fn):
-                    fn = os.path.join(d, fn)
-                if os.path.isfile(fn):
-                    lines.extend(resolve_includes(fn))
-                else:
-                    lines.append(line)
-            else:
-                lines.append(line)
-    return lines
-
-def process_file(source):
-    lines = resolve_includes(source)
-    return process_str(''.join(lines))
-
-_special_names = find_repl_patterns('''
-<_c=s,d,c,z>
-<_t=real,double precision,complex,double complex>
-<prefix=s,d,c,z>
-<ftype=real,double precision,complex,double complex>
-<ctype=float,double,complex_float,complex_double>
-<ftypereal=real,double precision,\\0,\\1>
-<ctypereal=float,double,\\0,\\1>
-''')
-
-def main():
-    try:
-        file = sys.argv[1]
-    except IndexError:
-        fid = sys.stdin
-        outfile = sys.stdout
-    else:
-        fid = open(file, 'r')
-        (base, ext) = os.path.splitext(file)
-        newname = base
-        outfile = open(newname, 'w')
-
-    allstr = fid.read()
-    writestr = process_str(allstr)
-    outfile.write(writestr)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/numpy/distutils/fujitsuccompiler.py b/numpy/distutils/fujitsuccompiler.py
deleted file mode 100644
index c25900b34f1d..000000000000
--- a/numpy/distutils/fujitsuccompiler.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from distutils.unixccompiler import UnixCCompiler
-
-class FujitsuCCompiler(UnixCCompiler):
-
-    """
-    Fujitsu compiler.
- """ - - compiler_type = 'fujitsu' - cc_exe = 'fcc' - cxx_exe = 'FCC' - - def __init__(self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__(self, verbose, dry_run, force) - cc_compiler = self.cc_exe - cxx_compiler = self.cxx_exe - self.set_executables( - compiler=cc_compiler + - ' -O3 -Nclang -fPIC', - compiler_so=cc_compiler + - ' -O3 -Nclang -fPIC', - compiler_cxx=cxx_compiler + - ' -O3 -Nclang -fPIC', - linker_exe=cc_compiler + - ' -lfj90i -lfj90f -lfjsrcinfo -lelf -shared', - linker_so=cc_compiler + - ' -lfj90i -lfj90f -lfjsrcinfo -lelf -shared' - ) diff --git a/numpy/distutils/intelccompiler.py b/numpy/distutils/intelccompiler.py deleted file mode 100644 index 77fb39889a29..000000000000 --- a/numpy/distutils/intelccompiler.py +++ /dev/null @@ -1,106 +0,0 @@ -import platform - -from distutils.unixccompiler import UnixCCompiler -from numpy.distutils.exec_command import find_executable -from numpy.distutils.ccompiler import simple_version_match -if platform.system() == 'Windows': - from numpy.distutils.msvc9compiler import MSVCCompiler - - -class IntelCCompiler(UnixCCompiler): - """A modified Intel compiler compatible with a GCC-built Python.""" - compiler_type = 'intel' - cc_exe = 'icc' - cc_args = 'fPIC' - - def __init__(self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__(self, verbose, dry_run, force) - - v = self.get_version() - mpopt = 'openmp' if v and v < '15' else 'qopenmp' - self.cc_exe = ('icc -fPIC -fp-model strict -O3 ' - '-fomit-frame-pointer -{}').format(mpopt) - compiler = self.cc_exe - - if platform.system() == 'Darwin': - shared_flag = '-Wl,-undefined,dynamic_lookup' - else: - shared_flag = '-shared' - self.set_executables(compiler=compiler, - compiler_so=compiler, - compiler_cxx=compiler, - archiver='xiar' + ' cru', - linker_exe=compiler + ' -shared-intel', - linker_so=compiler + ' ' + shared_flag + - ' -shared-intel') - - -class IntelItaniumCCompiler(IntelCCompiler): - compiler_type = 'intele' - cc_exe = 'icc' - - -class IntelEM64TCCompiler(UnixCCompiler): - """ - A modified Intel x86_64 compiler compatible with a 64bit GCC-built Python. - """ - compiler_type = 'intelem' - cc_exe = 'icc -m64' - cc_args = '-fPIC' - - def __init__(self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__(self, verbose, dry_run, force) - - v = self.get_version() - mpopt = 'openmp' if v and v < '15' else 'qopenmp' - self.cc_exe = ('icc -std=c99 -m64 -fPIC -fp-model strict -O3 ' - '-fomit-frame-pointer -{}').format(mpopt) - compiler = self.cc_exe - - if platform.system() == 'Darwin': - shared_flag = '-Wl,-undefined,dynamic_lookup' - else: - shared_flag = '-shared' - self.set_executables(compiler=compiler, - compiler_so=compiler, - compiler_cxx=compiler, - archiver='xiar' + ' cru', - linker_exe=compiler + ' -shared-intel', - linker_so=compiler + ' ' + shared_flag + - ' -shared-intel') - - -if platform.system() == 'Windows': - class IntelCCompilerW(MSVCCompiler): - """ - A modified Intel compiler compatible with an MSVC-built Python. 
- """ - compiler_type = 'intelw' - compiler_cxx = 'icl' - - def __init__(self, verbose=0, dry_run=0, force=0): - MSVCCompiler.__init__(self, verbose, dry_run, force) - version_match = simple_version_match(start=r'Intel\(R\).*?32,') - self.__version = version_match - - def initialize(self, plat_name=None): - MSVCCompiler.initialize(self, plat_name) - self.cc = self.find_exe('icl.exe') - self.lib = self.find_exe('xilib') - self.linker = self.find_exe('xilink') - self.compile_options = ['/nologo', '/O3', '/MD', '/W3', - '/Qstd=c99'] - self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', - '/Qstd=c99', '/Z7', '/D_DEBUG'] - - class IntelEM64TCCompilerW(IntelCCompilerW): - """ - A modified Intel x86_64 compiler compatible with - a 64bit MSVC-built Python. - """ - compiler_type = 'intelemw' - - def __init__(self, verbose=0, dry_run=0, force=0): - MSVCCompiler.__init__(self, verbose, dry_run, force) - version_match = simple_version_match(start=r'Intel\(R\).*?64,') - self.__version = version_match diff --git a/numpy/distutils/lib2def.py b/numpy/distutils/lib2def.py deleted file mode 100644 index 851682c63310..000000000000 --- a/numpy/distutils/lib2def.py +++ /dev/null @@ -1,116 +0,0 @@ -import re -import sys -import subprocess - -__doc__ = """This module generates a DEF file from the symbols in -an MSVC-compiled DLL import library. It correctly discriminates between -data and functions. The data is collected from the output of the program -nm(1). - -Usage: - python lib2def.py [libname.lib] [output.def] -or - python lib2def.py [libname.lib] > output.def - -libname.lib defaults to python.lib and output.def defaults to stdout - -Author: Robert Kern -Last Update: April 30, 1999 -""" - -__version__ = '0.1a' - -py_ver = "%d%d" % tuple(sys.version_info[:2]) - -DEFAULT_NM = ['nm', '-Cs'] - -DEF_HEADER = """LIBRARY python%s.dll -;CODE PRELOAD MOVEABLE DISCARDABLE -;DATA PRELOAD SINGLE - -EXPORTS -""" % py_ver -# the header of the DEF file - -FUNC_RE = re.compile(r"^(.*) in python%s\.dll" % py_ver, re.MULTILINE) -DATA_RE = re.compile(r"^_imp__(.*) in python%s\.dll" % py_ver, re.MULTILINE) - -def parse_cmd(): - """Parses the command-line arguments. - -libfile, deffile = parse_cmd()""" - if len(sys.argv) == 3: - if sys.argv[1][-4:] == '.lib' and sys.argv[2][-4:] == '.def': - libfile, deffile = sys.argv[1:] - elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib': - deffile, libfile = sys.argv[1:] - else: - print("I'm assuming that your first argument is the library") - print("and the second is the DEF file.") - elif len(sys.argv) == 2: - if sys.argv[1][-4:] == '.def': - deffile = sys.argv[1] - libfile = 'python%s.lib' % py_ver - elif sys.argv[1][-4:] == '.lib': - deffile = None - libfile = sys.argv[1] - else: - libfile = 'python%s.lib' % py_ver - deffile = None - return libfile, deffile - -def getnm(nm_cmd=['nm', '-Cs', 'python%s.lib' % py_ver], shell=True): - """Returns the output of nm_cmd via a pipe. - -nm_output = getnm(nm_cmd = 'nm -Cs py_lib')""" - p = subprocess.Popen(nm_cmd, shell=shell, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, text=True) - nm_output, nm_err = p.communicate() - if p.returncode != 0: - raise RuntimeError('failed to run "%s": "%s"' % ( - ' '.join(nm_cmd), nm_err)) - return nm_output - -def parse_nm(nm_output): - """Returns a tuple of lists: dlist for the list of data -symbols and flist for the list of function symbols. 
- -dlist, flist = parse_nm(nm_output)""" - data = DATA_RE.findall(nm_output) - func = FUNC_RE.findall(nm_output) - - flist = [] - for sym in data: - if sym in func and (sym[:2] == 'Py' or sym[:3] == '_Py' or sym[:4] == 'init'): - flist.append(sym) - - dlist = [] - for sym in data: - if sym not in flist and (sym[:2] == 'Py' or sym[:3] == '_Py'): - dlist.append(sym) - - dlist.sort() - flist.sort() - return dlist, flist - -def output_def(dlist, flist, header, file = sys.stdout): - """Outputs the final DEF file to a file defaulting to stdout. - -output_def(dlist, flist, header, file = sys.stdout)""" - for data_sym in dlist: - header = header + '\t%s DATA\n' % data_sym - header = header + '\n' # blank line - for func_sym in flist: - header = header + '\t%s\n' % func_sym - file.write(header) - -if __name__ == '__main__': - libfile, deffile = parse_cmd() - if deffile is None: - deffile = sys.stdout - else: - deffile = open(deffile, 'w') - nm_cmd = DEFAULT_NM + [str(libfile)] - nm_output = getnm(nm_cmd, shell=False) - dlist, flist = parse_nm(nm_output) - output_def(dlist, flist, DEF_HEADER, deffile) diff --git a/numpy/distutils/line_endings.py b/numpy/distutils/line_endings.py deleted file mode 100644 index 686e5ebd937f..000000000000 --- a/numpy/distutils/line_endings.py +++ /dev/null @@ -1,77 +0,0 @@ -""" Functions for converting from DOS to UNIX line endings - -""" -import os -import re -import sys - - -def dos2unix(file): - "Replace CRLF with LF in argument files. Print names of changed files." - if os.path.isdir(file): - print(file, "Directory!") - return - - with open(file, "rb") as fp: - data = fp.read() - if '\0' in data: - print(file, "Binary!") - return - - newdata = re.sub("\r\n", "\n", data) - if newdata != data: - print('dos2unix:', file) - with open(file, "wb") as f: - f.write(newdata) - return file - else: - print(file, 'ok') - -def dos2unix_one_dir(modified_files, dir_name, file_names): - for file in file_names: - full_path = os.path.join(dir_name, file) - file = dos2unix(full_path) - if file is not None: - modified_files.append(file) - -def dos2unix_dir(dir_name): - modified_files = [] - os.path.walk(dir_name, dos2unix_one_dir, modified_files) - return modified_files -#---------------------------------- - -def unix2dos(file): - "Replace LF with CRLF in argument files. Print names of changed files." 
- if os.path.isdir(file): - print(file, "Directory!") - return - - with open(file, "rb") as fp: - data = fp.read() - if '\0' in data: - print(file, "Binary!") - return - newdata = re.sub("\r\n", "\n", data) - newdata = re.sub("\n", "\r\n", newdata) - if newdata != data: - print('unix2dos:', file) - with open(file, "wb") as f: - f.write(newdata) - return file - else: - print(file, 'ok') - -def unix2dos_one_dir(modified_files, dir_name, file_names): - for file in file_names: - full_path = os.path.join(dir_name, file) - unix2dos(full_path) - if file is not None: - modified_files.append(file) - -def unix2dos_dir(dir_name): - modified_files = [] - os.path.walk(dir_name, unix2dos_one_dir, modified_files) - return modified_files - -if __name__ == "__main__": - dos2unix_dir(sys.argv[1]) diff --git a/numpy/distutils/log.py b/numpy/distutils/log.py deleted file mode 100644 index 3347f56d6fe9..000000000000 --- a/numpy/distutils/log.py +++ /dev/null @@ -1,111 +0,0 @@ -# Colored log -import sys -from distutils.log import * # noqa: F403 -from distutils.log import Log as old_Log -from distutils.log import _global_log - -from numpy.distutils.misc_util import (red_text, default_text, cyan_text, - green_text, is_sequence, is_string) - - -def _fix_args(args,flag=1): - if is_string(args): - return args.replace('%', '%%') - if flag and is_sequence(args): - return tuple([_fix_args(a, flag=0) for a in args]) - return args - - -class Log(old_Log): - def _log(self, level, msg, args): - if level >= self.threshold: - if args: - msg = msg % _fix_args(args) - if 0: - if msg.startswith('copying ') and msg.find(' -> ') != -1: - return - if msg.startswith('byte-compiling '): - return - print(_global_color_map[level](msg)) - sys.stdout.flush() - - def good(self, msg, *args): - """ - If we log WARN messages, log this message as a 'nice' anti-warn - message. - - """ - if WARN >= self.threshold: - if args: - print(green_text(msg % _fix_args(args))) - else: - print(green_text(msg)) - sys.stdout.flush() - - -_global_log.__class__ = Log - -good = _global_log.good - -def set_threshold(level, force=False): - prev_level = _global_log.threshold - if prev_level > DEBUG or force: - # If we're running at DEBUG, don't change the threshold, as there's - # likely a good reason why we're running at this level. - _global_log.threshold = level - if level <= DEBUG: - info('set_threshold: setting threshold to DEBUG level,' - ' it can be changed only with force argument') - else: - info('set_threshold: not changing threshold from DEBUG level' - ' %s to %s' % (prev_level, level)) - return prev_level - -def get_threshold(): - return _global_log.threshold - -def set_verbosity(v, force=False): - prev_level = _global_log.threshold - if v < 0: - set_threshold(ERROR, force) - elif v == 0: - set_threshold(WARN, force) - elif v == 1: - set_threshold(INFO, force) - elif v >= 2: - set_threshold(DEBUG, force) - return {FATAL:-2,ERROR:-1,WARN:0,INFO:1,DEBUG:2}.get(prev_level, 1) - - -_global_color_map = { - DEBUG:cyan_text, - INFO:default_text, - WARN:red_text, - ERROR:red_text, - FATAL:red_text -} - -# don't use INFO,.. flags in set_verbosity, these flags are for set_threshold. 
-set_verbosity(0, force=True) - - -_error = error -_warn = warn -_info = info -_debug = debug - - -def error(msg, *a, **kw): - _error(f"ERROR: {msg}", *a, **kw) - - -def warn(msg, *a, **kw): - _warn(f"WARN: {msg}", *a, **kw) - - -def info(msg, *a, **kw): - _info(f"INFO: {msg}", *a, **kw) - - -def debug(msg, *a, **kw): - _debug(f"DEBUG: {msg}", *a, **kw) diff --git a/numpy/distutils/mingw/gfortran_vs2003_hack.c b/numpy/distutils/mingw/gfortran_vs2003_hack.c deleted file mode 100644 index 485a675d8a1f..000000000000 --- a/numpy/distutils/mingw/gfortran_vs2003_hack.c +++ /dev/null @@ -1,6 +0,0 @@ -int _get_output_format(void) -{ - return 0; -} - -int _imp____lc_codepage = 0; diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py deleted file mode 100644 index 944ba2d03b33..000000000000 --- a/numpy/distutils/mingw32ccompiler.py +++ /dev/null @@ -1,620 +0,0 @@ -""" -Support code for building Python extensions on Windows. - - # NT stuff - # 1. Make sure libpython.a exists for gcc. If not, build it. - # 2. Force windows to use gcc (we're struggling with MSVC and g77 support) - # 3. Force windows to use g77 - -""" -import os -import sys -import subprocess -import re -import textwrap - -# Overwrite certain distutils.ccompiler functions: -import numpy.distutils.ccompiler # noqa: F401 -from numpy.distutils import log -# NT stuff -# 1. Make sure libpython.a exists for gcc. If not, build it. -# 2. Force windows to use gcc (we're struggling with MSVC and g77 support) -# --> this is done in numpy/distutils/ccompiler.py -# 3. Force windows to use g77 - -import distutils.cygwinccompiler -from distutils.unixccompiler import UnixCCompiler - -try: - from distutils.msvccompiler import get_build_version as get_build_msvc_version -except ImportError: - def get_build_msvc_version(): - return None - -from distutils.errors import UnknownFileError -from numpy.distutils.misc_util import (msvc_runtime_library, - msvc_runtime_version, - msvc_runtime_major, - get_build_architecture) - -def get_msvcr_replacement(): - """Replacement for outdated version of get_msvcr from cygwinccompiler""" - msvcr = msvc_runtime_library() - return [] if msvcr is None else [msvcr] - - -# Useful to generate table of symbols from a dll -_START = re.compile(r'\[Ordinal/Name Pointer\] Table') -_TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)') - -# the same as cygwin plus some additional parameters -class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler): - """ A modified MingW32 compiler compatible with an MSVC built Python. - - """ - - compiler_type = 'mingw32' - - def __init__ (self, - verbose=0, - dry_run=0, - force=0): - - distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, verbose, - dry_run, force) - - # **changes: eric jones 4/11/01 - # 1. Check for import library on Windows. Build if it doesn't exist. - - build_import_library() - - # Check for custom msvc runtime library on Windows. Build if it doesn't exist. 
- msvcr_success = build_msvcr_library() - msvcr_dbg_success = build_msvcr_library(debug=True) - if msvcr_success or msvcr_dbg_success: - # add preprocessor statement for using customized msvcr lib - self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR') - - # Define the MSVC version as hint for MinGW - msvcr_version = msvc_runtime_version() - if msvcr_version: - self.define_macro('__MSVCRT_VERSION__', '0x%04i' % msvcr_version) - - # MS_WIN64 should be defined when building for amd64 on windows, - # but python headers define it only for MS compilers, which has all - # kind of bad consequences, like using Py_ModuleInit4 instead of - # Py_ModuleInit4_64, etc... So we add it here - if get_build_architecture() == 'AMD64': - self.set_executables( - compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall', - compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall ' - '-Wstrict-prototypes', - linker_exe='gcc -g', - linker_so='gcc -g -shared') - else: - self.set_executables( - compiler='gcc -O2 -Wall', - compiler_so='gcc -O2 -Wall -Wstrict-prototypes', - linker_exe='g++ ', - linker_so='g++ -shared') - # added for python2.3 support - # we can't pass it through set_executables because pre 2.2 would fail - self.compiler_cxx = ['g++'] - - # Maybe we should also append -mthreads, but then the finished dlls - # need another dll (mingwm10.dll see Mingw32 docs) (-mthreads: Support - # thread-safe exception handling on `Mingw32') - - # no additional libraries needed - #self.dll_libraries=[] - return - - # __init__ () - - def link(self, - target_desc, - objects, - output_filename, - output_dir, - libraries, - library_dirs, - runtime_library_dirs, - export_symbols = None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None): - # Include the appropriate MSVC runtime library if Python was built - # with MSVC >= 7.0 (MinGW standard is msvcrt) - runtime_library = msvc_runtime_library() - if runtime_library: - if not libraries: - libraries = [] - libraries.append(runtime_library) - args = (self, - target_desc, - objects, - output_filename, - output_dir, - libraries, - library_dirs, - runtime_library_dirs, - None, #export_symbols, we do this in our def-file - debug, - extra_preargs, - extra_postargs, - build_temp, - target_lang) - func = UnixCCompiler.link - func(*args[:func.__code__.co_argcount]) - return - - def object_filenames (self, - source_filenames, - strip_dir=0, - output_dir=''): - if output_dir is None: output_dir = '' - obj_names = [] - for src_name in source_filenames: - # use normcase to make sure '.rc' is really '.rc' and not '.RC' - (base, ext) = os.path.splitext (os.path.normcase(src_name)) - - # added these lines to strip off windows drive letters - # without it, .o files are placed next to .c files - # instead of the build directory - drv, base = os.path.splitdrive(base) - if drv: - base = base[1:] - - if ext not in (self.src_extensions + ['.rc', '.res']): - raise UnknownFileError( - "unknown file type '%s' (from '%s')" % \ - (ext, src_name)) - if strip_dir: - base = os.path.basename (base) - if ext == '.res' or ext == '.rc': - # these need to be compiled to object files - obj_names.append (os.path.join (output_dir, - base + ext + self.obj_extension)) - else: - obj_names.append (os.path.join (output_dir, - base + self.obj_extension)) - return obj_names - - # object_filenames () - - -def find_python_dll(): - # We can't do much here: - # - find it in the virtualenv (sys.prefix) - # - find it in python main dir (sys.base_prefix, if in a virtualenv) - # - in system32, - # - 
otherwise (Sxs), I don't know how to get it. - stems = [sys.prefix] - if sys.base_prefix != sys.prefix: - stems.append(sys.base_prefix) - - sub_dirs = ['', 'lib', 'bin'] - # generate possible combinations of directory trees and sub-directories - lib_dirs = [] - for stem in stems: - for folder in sub_dirs: - lib_dirs.append(os.path.join(stem, folder)) - - # add system directory as well - if 'SYSTEMROOT' in os.environ: - lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'System32')) - - # search in the file system for possible candidates - major_version, minor_version = tuple(sys.version_info[:2]) - implementation = sys.implementation.name - if implementation == 'cpython': - dllname = f'python{major_version}{minor_version}.dll' - elif implementation == 'pypy': - dllname = f'libpypy{major_version}.{minor_version}-c.dll' - else: - dllname = f'Unknown platform {implementation}' - print("Looking for %s" % dllname) - for folder in lib_dirs: - dll = os.path.join(folder, dllname) - if os.path.exists(dll): - return dll - - raise ValueError("%s not found in %s" % (dllname, lib_dirs)) - -def dump_table(dll): - st = subprocess.check_output(["objdump.exe", "-p", dll]) - return st.split(b'\n') - -def generate_def(dll, dfile): - """Given a dll file location, get all its exported symbols and dump them - into the given def file. - - The .def file will be overwritten""" - dump = dump_table(dll) - for i in range(len(dump)): - if _START.match(dump[i].decode()): - break - else: - raise ValueError("Symbol table not found") - - syms = [] - for j in range(i+1, len(dump)): - m = _TABLE.match(dump[j].decode()) - if m: - syms.append((int(m.group(1).strip()), m.group(2))) - else: - break - - if len(syms) == 0: - log.warn('No symbols found in %s' % dll) - - with open(dfile, 'w') as d: - d.write('LIBRARY %s\n' % os.path.basename(dll)) - d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n') - d.write(';DATA PRELOAD SINGLE\n') - d.write('\nEXPORTS\n') - for s in syms: - #d.write('@%d %s\n' % (s[0], s[1])) - d.write('%s\n' % s[1]) - -def find_dll(dll_name): - - arch = {'AMD64' : 'amd64', - 'ARM64' : 'arm64', - 'Intel' : 'x86'}[get_build_architecture()] - - def _find_dll_in_winsxs(dll_name): - # Walk through the WinSxS directory to find the dll. - winsxs_path = os.path.join(os.environ.get('WINDIR', r'C:\WINDOWS'), - 'winsxs') - if not os.path.exists(winsxs_path): - return None - for root, dirs, files in os.walk(winsxs_path): - if dll_name in files and arch in root: - return os.path.join(root, dll_name) - return None - - def _find_dll_in_path(dll_name): - # First, look in the Python directory, then scan PATH for - # the given dll name. - for path in [sys.prefix] + os.environ['PATH'].split(';'): - filepath = os.path.join(path, dll_name) - if os.path.exists(filepath): - return os.path.abspath(filepath) - - return _find_dll_in_winsxs(dll_name) or _find_dll_in_path(dll_name) - -def build_msvcr_library(debug=False): - if os.name != 'nt': - return False - - # If the version number is None, then we couldn't find the MSVC runtime at - # all, because we are running on a Python distribution which is customed - # compiled; trust that the compiler is the same as the one available to us - # now, and that it is capable of linking with the correct runtime without - # any extra options. 
- msvcr_ver = msvc_runtime_major() - if msvcr_ver is None: - log.debug('Skip building import library: ' - 'Runtime is not compiled with MSVC') - return False - - # Skip using a custom library for versions < MSVC 8.0 - if msvcr_ver < 80: - log.debug('Skip building msvcr library:' - ' custom functionality not present') - return False - - msvcr_name = msvc_runtime_library() - if debug: - msvcr_name += 'd' - - # Skip if custom library already exists - out_name = "lib%s.a" % msvcr_name - out_file = os.path.join(sys.prefix, 'libs', out_name) - if os.path.isfile(out_file): - log.debug('Skip building msvcr library: "%s" exists' % - (out_file,)) - return True - - # Find the msvcr dll - msvcr_dll_name = msvcr_name + '.dll' - dll_file = find_dll(msvcr_dll_name) - if not dll_file: - log.warn('Cannot build msvcr library: "%s" not found' % - msvcr_dll_name) - return False - - def_name = "lib%s.def" % msvcr_name - def_file = os.path.join(sys.prefix, 'libs', def_name) - - log.info('Building msvcr library: "%s" (from %s)' \ - % (out_file, dll_file)) - - # Generate a symbol definition file from the msvcr dll - generate_def(dll_file, def_file) - - # Create a custom mingw library for the given symbol definitions - cmd = ['dlltool', '-d', def_file, '-l', out_file] - retcode = subprocess.call(cmd) - - # Clean up symbol definitions - os.remove(def_file) - - return (not retcode) - -def build_import_library(): - if os.name != 'nt': - return - - arch = get_build_architecture() - if arch == 'AMD64': - return _build_import_library_amd64() - if arch == 'ARM64': - return _build_import_library_arm64() - elif arch == 'Intel': - return _build_import_library_x86() - else: - raise ValueError("Unhandled arch %s" % arch) - -def _check_for_import_lib(): - """Check if an import library for the Python runtime already exists.""" - major_version, minor_version = tuple(sys.version_info[:2]) - - # patterns for the file name of the library itself - patterns = ['libpython%d%d.a', - 'libpython%d%d.dll.a', - 'libpython%d.%d.dll.a'] - - # directory trees that may contain the library - stems = [sys.prefix] - if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix: - stems.append(sys.base_prefix) - elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix: - stems.append(sys.real_prefix) - - # possible subdirectories within those trees where it is placed - sub_dirs = ['libs', 'lib'] - - # generate a list of candidate locations - candidates = [] - for pat in patterns: - filename = pat % (major_version, minor_version) - for stem_dir in stems: - for folder in sub_dirs: - candidates.append(os.path.join(stem_dir, folder, filename)) - - # test the filesystem to see if we can find any of these - for fullname in candidates: - if os.path.isfile(fullname): - # already exists, in location given - return (True, fullname) - - # needs to be built, preferred location given first - return (False, candidates[0]) - -def _build_import_library_amd64(): - out_exists, out_file = _check_for_import_lib() - if out_exists: - log.debug('Skip building import library: "%s" exists', out_file) - return - - # get the runtime dll for which we are building import library - dll_file = find_python_dll() - log.info('Building import library (arch=AMD64): "%s" (from %s)' % - (out_file, dll_file)) - - # generate symbol list from this library - def_name = "python%d%d.def" % tuple(sys.version_info[:2]) - def_file = os.path.join(sys.prefix, 'libs', def_name) - generate_def(dll_file, def_file) - - # generate import library from this symbol list - cmd = ['dlltool', 
'-d', def_file, '-l', out_file]
-    subprocess.check_call(cmd)
-
-def _build_import_library_arm64():
-    out_exists, out_file = _check_for_import_lib()
-    if out_exists:
-        log.debug('Skip building import library: "%s" exists', out_file)
-        return
-
-    # get the runtime dll for which we are building import library
-    dll_file = find_python_dll()
-    log.info('Building import library (arch=ARM64): "%s" (from %s)' %
-             (out_file, dll_file))
-
-    # generate symbol list from this library
-    def_name = "python%d%d.def" % tuple(sys.version_info[:2])
-    def_file = os.path.join(sys.prefix, 'libs', def_name)
-    generate_def(dll_file, def_file)
-
-    # generate import library from this symbol list
-    cmd = ['dlltool', '-d', def_file, '-l', out_file]
-    subprocess.check_call(cmd)
-
-def _build_import_library_x86():
-    """ Build the import libraries for Mingw32-gcc on Windows
-    """
-    out_exists, out_file = _check_for_import_lib()
-    if out_exists:
-        log.debug('Skip building import library: "%s" exists', out_file)
-        return
-
-    lib_name = "python%d%d.lib" % tuple(sys.version_info[:2])
-    lib_file = os.path.join(sys.prefix, 'libs', lib_name)
-    if not os.path.isfile(lib_file):
-        # didn't find library file in virtualenv, try base distribution, too,
-        # and use that instead if found there. for Python 2.7 venvs, the base
-        # directory is in attribute real_prefix instead of base_prefix.
-        if hasattr(sys, 'base_prefix'):
-            base_lib = os.path.join(sys.base_prefix, 'libs', lib_name)
-        elif hasattr(sys, 'real_prefix'):
-            base_lib = os.path.join(sys.real_prefix, 'libs', lib_name)
-        else:
-            base_lib = ''  # os.path.isfile('') == False
-
-        if os.path.isfile(base_lib):
-            lib_file = base_lib
-        else:
-            log.warn('Cannot build import library: "%s" not found', lib_file)
-            return
-    log.info('Building import library (ARCH=x86): "%s"', out_file)
-
-    from numpy.distutils import lib2def
-
-    def_name = "python%d%d.def" % tuple(sys.version_info[:2])
-    def_file = os.path.join(sys.prefix, 'libs', def_name)
-    nm_output = lib2def.getnm(
-            lib2def.DEFAULT_NM + [lib_file], shell=False)
-    dlist, flist = lib2def.parse_nm(nm_output)
-    with open(def_file, 'w') as fid:
-        lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, fid)
-
-    dll_name = find_python_dll()
-
-    cmd = ["dlltool",
-           "--dllname", dll_name,
-           "--def", def_file,
-           "--output-lib", out_file]
-    status = subprocess.check_output(cmd)
-    if status:
-        log.warn('Failed to build import library for gcc. Linking will fail.')
-    return
-
-#=====================================
-# Dealing with Visual Studio MANIFESTS
-#=====================================
-
-# Functions to deal with Visual Studio manifests. Manifests are a mechanism to
-# enforce strong DLL versioning on Windows, and have nothing to do with the
-# distutils MANIFEST. Manifests are XML files with version info, used by the
-# OS loader; they are necessary when linking against a DLL that is not in the
-# system path. In particular, the official Python 2.6 binary is built against
-# the MS runtime 9 (the one from VS 2008), which is not available on most
-# Windows systems; the Python 2.6 installer does install it in the Win SxS
-# (Side by side) directory, but this requires the manifest for this to work.
-# This is a big mess, thanks MS for a wonderful system.
-
-# XXX: ideally, we should use exactly the same version as used by python. I
-# submitted a patch to get this version, but it was only included for python
-# 2.6.1 and above. So for versions below, we use a "best guess".
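-
-# Illustration only -- not part of the build flow. With the helpers defined
-# below, embedding a manifest for a binary linked against the VS 2008 runtime
-# would look roughly like this (a hedged sketch; the file name is made up):
-#
-#   manxml = msvc_manifest_xml(9, 0)        # SxS manifest XML for msvcr90
-#   with open('foo.exe.manifest', 'w') as f:
-#       f.write(manxml)
-#   rc_text = manifest_rc('foo.exe.manifest', type='exe')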
-_MSVCRVER_TO_FULLVER = {}
-if sys.platform == 'win32':
-    try:
-        import msvcrt
-        # I took one version in my SxS directory: no idea if it is the good
-        # one, and we can't retrieve it from python
-        _MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42"
-        _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8"
-        # Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0
-        # on Windows XP:
-        _MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460"
-        crt_ver = getattr(msvcrt, 'CRT_ASSEMBLY_VERSION', None)
-        if crt_ver is not None:  # Available at least back to Python 3.3
-            maj, min = re.match(r'(\d+)\.(\d)', crt_ver).groups()
-            _MSVCRVER_TO_FULLVER[maj + min] = crt_ver
-            del maj, min
-        del crt_ver
-    except ImportError:
-        # If we are here, means python was not built with MSVC. Not sure what
-        # to do in that case: manifest building will fail, but it should not be
-        # used in that case anyway
-        log.warn('Cannot import msvcrt: using manifest will not be possible')
-
-def msvc_manifest_xml(maj, min):
-    """Given a major and minor version of the MSVCR, returns the
-    corresponding XML file."""
-    try:
-        fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)]
-    except KeyError:
-        raise ValueError("Version %d,%d of MSVCRT not supported yet" %
-                         (maj, min)) from None
-    # Don't be fooled, it looks like an XML, but it is not. In particular, it
-    # should not have any space before starting, and its size should be
-    # divisible by 4, most likely for alignment constraints when the xml is
-    # embedded in the binary...
-    # This template was copied directly from the python 2.6 binary (using
-    # strings.exe from mingw on python.exe).
-    template = textwrap.dedent("""\
-        <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-        <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
-          <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
-            <security>
-              <requestedPrivileges>
-                <requestedExecutionLevel level="asInvoker" uiAccess="false"></requestedExecutionLevel>
-              </requestedPrivileges>
-            </security>
-          </trustInfo>
-          <dependency>
-            <dependentAssembly>
-              <assemblyIdentity type="win32" name="Microsoft.VC%(maj)d%(min)d.CRT" version="%(fullver)s" processorArchitecture="*" publicKeyToken="1fc8b3b9a1e18e3b"></assemblyIdentity>
-            </dependentAssembly>
-          </dependency>
-        </assembly>""")
-
-    return template % {'fullver': fullver, 'maj': maj, 'min': min}
-
-def manifest_rc(name, type='dll'):
-    """Return the rc file used to generate the res file which will be embedded
-    as manifest for given manifest file name, of given type ('dll' or
-    'exe').
- - Parameters - ---------- - name : str - name of the manifest file to embed - type : str {'dll', 'exe'} - type of the binary which will embed the manifest - - """ - if type == 'dll': - rctype = 2 - elif type == 'exe': - rctype = 1 - else: - raise ValueError("Type %s not supported" % type) - - return """\ -#include "winuser.h" -%d RT_MANIFEST %s""" % (rctype, name) - -def check_embedded_msvcr_match_linked(msver): - """msver is the ms runtime version used for the MANIFEST.""" - # check msvcr major version are the same for linking and - # embedding - maj = msvc_runtime_major() - if maj: - if not maj == int(msver): - raise ValueError( - "Discrepancy between linked msvcr " \ - "(%d) and the one about to be embedded " \ - "(%d)" % (int(msver), maj)) - -def configtest_name(config): - base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c")) - return os.path.splitext(base)[0] - -def manifest_name(config): - # Get configest name (including suffix) - root = configtest_name(config) - exext = config.compiler.exe_extension - return root + exext + ".manifest" - -def rc_name(config): - # Get configtest name (including suffix) - root = configtest_name(config) - return root + ".rc" - -def generate_manifest(config): - msver = get_build_msvc_version() - if msver is not None: - if msver >= 8: - check_embedded_msvcr_match_linked(msver) - ma_str, mi_str = str(msver).split('.') - # Write the manifest file - manxml = msvc_manifest_xml(int(ma_str), int(mi_str)) - with open(manifest_name(config), "w") as man: - config.temp_files.append(manifest_name(config)) - man.write(manxml) diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py deleted file mode 100644 index ca7bcf0fbdd0..000000000000 --- a/numpy/distutils/misc_util.py +++ /dev/null @@ -1,2484 +0,0 @@ -import os -import re -import sys -import copy -import glob -import atexit -import tempfile -import subprocess -import shutil -import multiprocessing -import textwrap -import importlib.util -from threading import local as tlocal -from functools import reduce - -import distutils -from distutils.errors import DistutilsError - -# stores temporary directory of each thread to only create one per thread -_tdata = tlocal() - -# store all created temporary directories so they can be deleted on exit -_tmpdirs = [] -def clean_up_temporary_directory(): - if _tmpdirs is not None: - for d in _tmpdirs: - try: - shutil.rmtree(d) - except OSError: - pass - -atexit.register(clean_up_temporary_directory) - -__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict', - 'dict_append', 'appendpath', 'generate_config_py', - 'get_cmd', 'allpath', 'get_mathlibs', - 'terminal_has_colors', 'red_text', 'green_text', 'yellow_text', - 'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings', - 'has_f_sources', 'has_cxx_sources', 'filter_sources', - 'get_dependencies', 'is_local_src_dir', 'get_ext_source_files', - 'get_script_files', 'get_lib_source_files', 'get_data_files', - 'dot_join', 'get_frame', 'minrelpath', 'njoin', - 'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language', - 'get_build_architecture', 'get_info', 'get_pkg_info', - 'get_num_build_jobs', 'sanitize_cxx_flags', - 'exec_mod_from_location'] - -class InstallableLib: - """ - Container to hold information on an installable library. - - Parameters - ---------- - name : str - Name of the installed library. - build_info : dict - Dictionary holding build information. - target_dir : str - Absolute path specifying where to install the library. 
- - See Also - -------- - Configuration.add_installed_library - - Notes - ----- - The three parameters are stored as attributes with the same names. - - """ - def __init__(self, name, build_info, target_dir): - self.name = name - self.build_info = build_info - self.target_dir = target_dir - - -def get_num_build_jobs(): - """ - Get number of parallel build jobs set by the --parallel command line - argument of setup.py - If the command did not receive a setting the environment variable - NPY_NUM_BUILD_JOBS is checked. If that is unset, return the number of - processors on the system, with a maximum of 8 (to prevent - overloading the system if there a lot of CPUs). - - Returns - ------- - out : int - number of parallel jobs that can be run - - """ - from numpy.distutils.core import get_distribution - try: - cpu_count = len(os.sched_getaffinity(0)) - except AttributeError: - cpu_count = multiprocessing.cpu_count() - cpu_count = min(cpu_count, 8) - envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", cpu_count)) - dist = get_distribution() - # may be None during configuration - if dist is None: - return envjobs - - # any of these three may have the job set, take the largest - cmdattr = (getattr(dist.get_command_obj('build'), 'parallel', None), - getattr(dist.get_command_obj('build_ext'), 'parallel', None), - getattr(dist.get_command_obj('build_clib'), 'parallel', None)) - if all(x is None for x in cmdattr): - return envjobs - else: - return max(x for x in cmdattr if x is not None) - -def quote_args(args): - """Quote list of arguments. - - .. deprecated:: 1.22. - """ - import warnings - warnings.warn('"quote_args" is deprecated.', - DeprecationWarning, stacklevel=2) - # don't used _nt_quote_args as it does not check if - # args items already have quotes or not. - args = list(args) - for i in range(len(args)): - a = args[i] - if ' ' in a and a[0] not in '"\'': - args[i] = '"%s"' % (a) - return args - -def allpath(name): - "Convert a /-separated pathname to one using the OS's path separator." - split = name.split('/') - return os.path.join(*split) - -def rel_path(path, parent_path): - """Return path relative to parent_path.""" - # Use realpath to avoid issues with symlinked dirs (see gh-7707) - pd = os.path.realpath(os.path.abspath(parent_path)) - apath = os.path.realpath(os.path.abspath(path)) - if len(apath) < len(pd): - return path - if apath == pd: - return '' - if pd == apath[:len(pd)]: - assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)])) - path = apath[len(pd)+1:] - return path - -def get_path_from_frame(frame, parent_path=None): - """Return path of the module given a frame object from the call stack. - - Returned path is relative to parent_path when given, - otherwise it is absolute path. - """ - - # First, try to find if the file name is in the frame. - try: - caller_file = eval('__file__', frame.f_globals, frame.f_locals) - d = os.path.dirname(os.path.abspath(caller_file)) - except NameError: - # __file__ is not defined, so let's try __name__. We try this second - # because setuptools spoofs __name__ to be '__main__' even though - # sys.modules['__main__'] might be something else, like easy_install(1). 
- caller_name = eval('__name__', frame.f_globals, frame.f_locals) - __import__(caller_name) - mod = sys.modules[caller_name] - if hasattr(mod, '__file__'): - d = os.path.dirname(os.path.abspath(mod.__file__)) - else: - # we're probably running setup.py as execfile("setup.py") - # (likely we're building an egg) - d = os.path.abspath('.') - - if parent_path is not None: - d = rel_path(d, parent_path) - - return d or '.' - -def njoin(*path): - """Join two or more pathname components + - - convert a /-separated pathname to one using the OS's path separator. - - resolve `..` and `.` from path. - - Either passing n arguments as in njoin('a','b'), or a sequence - of n names as in njoin(['a','b']) is handled, or a mixture of such arguments. - """ - paths = [] - for p in path: - if is_sequence(p): - # njoin(['a', 'b'], 'c') - paths.append(njoin(*p)) - else: - assert is_string(p) - paths.append(p) - path = paths - if not path: - # njoin() - joined = '' - else: - # njoin('a', 'b') - joined = os.path.join(*path) - if os.path.sep != '/': - joined = joined.replace('/', os.path.sep) - return minrelpath(joined) - -def get_mathlibs(path=None): - """Return the MATHLIB line from numpyconfig.h - """ - if path is not None: - config_file = os.path.join(path, '_numpyconfig.h') - else: - # Look for the file in each of the numpy include directories. - dirs = get_numpy_include_dirs() - for path in dirs: - fn = os.path.join(path, '_numpyconfig.h') - if os.path.exists(fn): - config_file = fn - break - else: - raise DistutilsError('_numpyconfig.h not found in numpy include ' - 'dirs %r' % (dirs,)) - - with open(config_file) as fid: - mathlibs = [] - s = '#define MATHLIB' - for line in fid: - if line.startswith(s): - value = line[len(s):].strip() - if value: - mathlibs.extend(value.split(',')) - return mathlibs - -def minrelpath(path): - """Resolve `..` and '.' from path. - """ - if not is_string(path): - return path - if '.' not in path: - return path - l = path.split(os.sep) - while l: - try: - i = l.index('.', 1) - except ValueError: - break - del l[i] - j = 1 - while l: - try: - i = l.index('..', j) - except ValueError: - break - if l[i-1]=='..': - j += 1 - else: - del l[i], l[i-1] - j = 1 - if not l: - return '' - return os.sep.join(l) - -def sorted_glob(fileglob): - """sorts output of python glob for https://bugs.python.org/issue30461 - to allow extensions to have reproducible build results""" - return sorted(glob.glob(fileglob)) - -def _fix_paths(paths, local_path, include_non_existing): - assert is_sequence(paths), repr(type(paths)) - new_paths = [] - assert not is_string(paths), repr(paths) - for n in paths: - if is_string(n): - if '*' in n or '?' in n: - p = sorted_glob(n) - p2 = sorted_glob(njoin(local_path, n)) - if p2: - new_paths.extend(p2) - elif p: - new_paths.extend(p) - else: - if include_non_existing: - new_paths.append(n) - print('could not resolve pattern in %r: %r' % - (local_path, n)) - else: - n2 = njoin(local_path, n) - if os.path.exists(n2): - new_paths.append(n2) - else: - if os.path.exists(n): - new_paths.append(n) - elif include_non_existing: - new_paths.append(n) - if not os.path.exists(n): - print('non-existing path in %r: %r' % - (local_path, n)) - - elif is_sequence(n): - new_paths.extend(_fix_paths(n, local_path, include_non_existing)) - else: - new_paths.append(n) - return [minrelpath(p) for p in new_paths] - -def gpaths(paths, local_path='', include_non_existing=True): - """Apply glob to paths and prepend local_path if needed. 
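-
-    For example, with a hypothetical tree containing mypkg/src/foo.c::
-
-        >>> gpaths('src/*.c', local_path='mypkg')      #doctest: +SKIP
-        ['mypkg/src/foo.c']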
- """ - if is_string(paths): - paths = (paths,) - return _fix_paths(paths, local_path, include_non_existing) - -def make_temp_file(suffix='', prefix='', text=True): - if not hasattr(_tdata, 'tempdir'): - _tdata.tempdir = tempfile.mkdtemp() - _tmpdirs.append(_tdata.tempdir) - fid, name = tempfile.mkstemp(suffix=suffix, - prefix=prefix, - dir=_tdata.tempdir, - text=text) - fo = os.fdopen(fid, 'w') - return fo, name - -# Hooks for colored terminal output. -# See also https://web.archive.org/web/20100314204946/http://www.livinglogic.de/Python/ansistyle -def terminal_has_colors(): - if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ: - # Avoid importing curses that causes illegal operation - # with a message: - # PYTHON2 caused an invalid page fault in - # module CYGNURSES7.DLL as 015f:18bbfc28 - # Details: Python 2.3.3 [GCC 3.3.1 (cygming special)] - # ssh to Win32 machine from debian - # curses.version is 2.2 - # CYGWIN_98-4.10, release 1.5.7(0.109/3/2)) - return 0 - if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty(): - try: - import curses - curses.setupterm() - if (curses.tigetnum("colors") >= 0 - and curses.tigetnum("pairs") >= 0 - and ((curses.tigetstr("setf") is not None - and curses.tigetstr("setb") is not None) - or (curses.tigetstr("setaf") is not None - and curses.tigetstr("setab") is not None) - or curses.tigetstr("scp") is not None)): - return 1 - except Exception: - pass - return 0 - -if terminal_has_colors(): - _colour_codes = dict(black=0, red=1, green=2, yellow=3, - blue=4, magenta=5, cyan=6, white=7, default=9) - def colour_text(s, fg=None, bg=None, bold=False): - seq = [] - if bold: - seq.append('1') - if fg: - fgcode = 30 + _colour_codes.get(fg.lower(), 0) - seq.append(str(fgcode)) - if bg: - bgcode = 40 + _colour_codes.get(bg.lower(), 7) - seq.append(str(bgcode)) - if seq: - return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s) - else: - return s -else: - def colour_text(s, fg=None, bg=None): - return s - -def default_text(s): - return colour_text(s, 'default') -def red_text(s): - return colour_text(s, 'red') -def green_text(s): - return colour_text(s, 'green') -def yellow_text(s): - return colour_text(s, 'yellow') -def cyan_text(s): - return colour_text(s, 'cyan') -def blue_text(s): - return colour_text(s, 'blue') - -######################### - -def cyg2win32(path: str) -> str: - """Convert a path from Cygwin-native to Windows-native. - - Uses the cygpath utility (part of the Base install) to do the - actual conversion. Falls back to returning the original path if - this fails. - - Handles the default ``/cygdrive`` mount prefix as well as the - ``/proc/cygdrive`` portable prefix, custom cygdrive prefixes such - as ``/`` or ``/mnt``, and absolute paths such as ``/usr/src/`` or - ``/home/username`` - - Parameters - ---------- - path : str - The path to convert - - Returns - ------- - converted_path : str - The converted path - - Notes - ----- - Documentation for cygpath utility: - https://cygwin.com/cygwin-ug-net/cygpath.html - Documentation for the C function it wraps: - https://cygwin.com/cygwin-api/func-cygwin-conv-path.html - - """ - if sys.platform != "cygwin": - return path - return subprocess.check_output( - ["/usr/bin/cygpath", "--windows", path], text=True - ) - - -def mingw32(): - """Return true when using mingw32 environment. 
-    """
-    if sys.platform=='win32':
-        if os.environ.get('OSTYPE', '')=='msys':
-            return True
-        if os.environ.get('MSYSTEM', '')=='MINGW32':
-            return True
-    return False
-
-def msvc_runtime_version():
-    "Return version of MSVC runtime library, as given by the _MSC_VER macro"
-    msc_pos = sys.version.find('MSC v.')
-    if msc_pos != -1:
-        msc_ver = int(sys.version[msc_pos+6:msc_pos+10])
-    else:
-        msc_ver = None
-    return msc_ver
-
-def msvc_runtime_library():
-    "Return name of MSVC runtime library if Python was built with MSVC >= 7"
-    ver = msvc_runtime_major()
-    if ver:
-        if ver < 140:
-            return "msvcr%i" % ver
-        else:
-            return "vcruntime%i" % ver
-    else:
-        return None
-
-def msvc_runtime_major():
-    "Return major version of MSVC runtime coded like get_build_msvc_version"
-    major = {1300: 70,   # MSVC 7.0
-             1310: 71,   # MSVC 7.1
-             1400: 80,   # MSVC 8
-             1500: 90,   # MSVC 9  (aka 2008)
-             1600: 100,  # MSVC 10 (aka 2010)
-             1900: 140,  # MSVC 14 (aka 2015)
-    }.get(msvc_runtime_version(), None)
-    return major
-
-#########################
-
-#XXX need support for .C that is also C++
-cxx_ext_match = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match
-fortran_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f)\Z', re.I).match
-f90_ext_match = re.compile(r'.*\.(f90|f95)\Z', re.I).match
-f90_module_name_match = re.compile(r'\s*module\s*(?P<name>[\w_]+)', re.I).match
-def _get_f90_modules(source):
-    """Return a list of Fortran f90 module names that
-    given source file defines.
-    """
-    if not f90_ext_match(source):
-        return []
-    modules = []
-    with open(source) as f:
-        for line in f:
-            m = f90_module_name_match(line)
-            if m:
-                name = m.group('name')
-                modules.append(name)
-                # break  # XXX can we assume that there is one module per file?
-    return modules
-
-def is_string(s):
-    return isinstance(s, str)
-
-def all_strings(lst):
-    """Return True if all items in lst are string objects. """
-    return all(is_string(item) for item in lst)
-
-def is_sequence(seq):
-    if is_string(seq):
-        return False
-    try:
-        len(seq)
-    except Exception:
-        return False
-    return True
-
-def is_glob_pattern(s):
-    return is_string(s) and ('*' in s or '?' in s)
-
-def as_list(seq):
-    if is_sequence(seq):
-        return list(seq)
-    else:
-        return [seq]
-
-def get_language(sources):
-    # not used in numpy/scipy packages, use build_ext.detect_language instead
-    """Determine language value (c,f77,f90) from sources """
-    language = None
-    for source in sources:
-        if isinstance(source, str):
-            if f90_ext_match(source):
-                language = 'f90'
-                break
-            elif fortran_ext_match(source):
-                language = 'f77'
-    return language
-
-def has_f_sources(sources):
-    """Return True if sources contains Fortran files """
-    return any(fortran_ext_match(source) for source in sources)
-
-def has_cxx_sources(sources):
-    """Return True if sources contains C++ files """
-    return any(cxx_ext_match(source) for source in sources)
-
-def filter_sources(sources):
-    """Return four lists of filenames containing
-    C, C++, Fortran, and Fortran 90 module sources,
-    respectively.
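-
-    For example (a sketch; ``mod.f90`` is assumed to define a module)::
-
-        >>> filter_sources(['a.c', 'b.cpp', 'c.f', 'mod.f90'])  #doctest: +SKIP
-        (['a.c'], ['b.cpp'], ['c.f'], ['mod.f90'])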
- """ - c_sources = [] - cxx_sources = [] - f_sources = [] - fmodule_sources = [] - for source in sources: - if fortran_ext_match(source): - modules = _get_f90_modules(source) - if modules: - fmodule_sources.append(source) - else: - f_sources.append(source) - elif cxx_ext_match(source): - cxx_sources.append(source) - else: - c_sources.append(source) - return c_sources, cxx_sources, f_sources, fmodule_sources - - -def _get_headers(directory_list): - # get *.h files from list of directories - headers = [] - for d in directory_list: - head = sorted_glob(os.path.join(d, "*.h")) #XXX: *.hpp files?? - headers.extend(head) - return headers - -def _get_directories(list_of_sources): - # get unique directories from list of sources. - direcs = [] - for f in list_of_sources: - d = os.path.split(f) - if d[0] != '' and not d[0] in direcs: - direcs.append(d[0]) - return direcs - -def _commandline_dep_string(cc_args, extra_postargs, pp_opts): - """ - Return commandline representation used to determine if a file needs - to be recompiled - """ - cmdline = 'commandline: ' - cmdline += ' '.join(cc_args) - cmdline += ' '.join(extra_postargs) - cmdline += ' '.join(pp_opts) + '\n' - return cmdline - - -def get_dependencies(sources): - #XXX scan sources for include statements - return _get_headers(_get_directories(sources)) - -def is_local_src_dir(directory): - """Return true if directory is local directory. - """ - if not is_string(directory): - return False - abs_dir = os.path.abspath(directory) - c = os.path.commonprefix([os.getcwd(), abs_dir]) - new_dir = abs_dir[len(c):].split(os.sep) - if new_dir and not new_dir[0]: - new_dir = new_dir[1:] - if new_dir and new_dir[0]=='build': - return False - new_dir = os.sep.join(new_dir) - return os.path.isdir(new_dir) - -def general_source_files(top_path): - pruned_directories = {'CVS':1, '.svn':1, 'build':1} - prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') - for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): - pruned = [ d for d in dirnames if d not in pruned_directories ] - dirnames[:] = pruned - for f in filenames: - if not prune_file_pat.search(f): - yield os.path.join(dirpath, f) - -def general_source_directories_files(top_path): - """Return a directory name relative to top_path and - files contained. - """ - pruned_directories = ['CVS', '.svn', 'build'] - prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') - for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): - pruned = [ d for d in dirnames if d not in pruned_directories ] - dirnames[:] = pruned - for d in dirnames: - dpath = os.path.join(dirpath, d) - rpath = rel_path(dpath, top_path) - files = [] - for f in os.listdir(dpath): - fn = os.path.join(dpath, f) - if os.path.isfile(fn) and not prune_file_pat.search(fn): - files.append(fn) - yield rpath, files - dpath = top_path - rpath = rel_path(dpath, top_path) - filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \ - if not prune_file_pat.search(f)] - files = [f for f in filenames if os.path.isfile(f)] - yield rpath, files - - -def get_ext_source_files(ext): - # Get sources and any include files in the same directory. 
- filenames = [] - sources = [_m for _m in ext.sources if is_string(_m)] - filenames.extend(sources) - filenames.extend(get_dependencies(sources)) - for d in ext.depends: - if is_local_src_dir(d): - filenames.extend(list(general_source_files(d))) - elif os.path.isfile(d): - filenames.append(d) - return filenames - -def get_script_files(scripts): - scripts = [_m for _m in scripts if is_string(_m)] - return scripts - -def get_lib_source_files(lib): - filenames = [] - sources = lib[1].get('sources', []) - sources = [_m for _m in sources if is_string(_m)] - filenames.extend(sources) - filenames.extend(get_dependencies(sources)) - depends = lib[1].get('depends', []) - for d in depends: - if is_local_src_dir(d): - filenames.extend(list(general_source_files(d))) - elif os.path.isfile(d): - filenames.append(d) - return filenames - -def get_shared_lib_extension(is_python_ext=False): - """Return the correct file extension for shared libraries. - - Parameters - ---------- - is_python_ext : bool, optional - Whether the shared library is a Python extension. Default is False. - - Returns - ------- - so_ext : str - The shared library extension. - - Notes - ----- - For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X, - and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on - POSIX systems according to PEP 3149. - - """ - confvars = distutils.sysconfig.get_config_vars() - so_ext = confvars.get('EXT_SUFFIX', '') - - if not is_python_ext: - # hardcode known values, config vars (including SHLIB_SUFFIX) are - # unreliable (see #3182) - # darwin, windows and debug linux are wrong in 3.3.1 and older - if (sys.platform.startswith('linux') or - sys.platform.startswith('gnukfreebsd')): - so_ext = '.so' - elif sys.platform.startswith('darwin'): - so_ext = '.dylib' - elif sys.platform.startswith('win'): - so_ext = '.dll' - else: - # fall back to config vars for unknown platforms - # fix long extension for Python >=3.2, see PEP 3149. - if 'SOABI' in confvars: - # Does nothing unless SOABI config var exists - so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1) - - return so_ext - -def get_data_files(data): - if is_string(data): - return [data] - sources = data[1] - filenames = [] - for s in sources: - if hasattr(s, '__call__'): - continue - if is_local_src_dir(s): - filenames.extend(list(general_source_files(s))) - elif is_string(s): - if os.path.isfile(s): - filenames.append(s) - else: - print('Not existing data file:', s) - else: - raise TypeError(repr(s)) - return filenames - -def dot_join(*args): - return '.'.join([a for a in args if a]) - -def get_frame(level=0): - """Return frame object from call stack with given level. - """ - try: - return sys._getframe(level+1) - except AttributeError: - frame = sys.exc_info()[2].tb_frame - for _ in range(level+1): - frame = frame.f_back - return frame - - -###################### - -class Configuration: - - _list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs', - 'libraries', 'headers', 'scripts', 'py_modules', - 'installed_libraries', 'define_macros'] - _dict_keys = ['package_dir', 'installed_pkg_config'] - _extra_keys = ['name', 'version'] - - numpy_include_dirs = [] - - def __init__(self, - package_name=None, - parent_name=None, - top_path=None, - package_path=None, - caller_level=1, - setup_name='setup.py', - **attrs): - """Construct configuration instance of a package. 
-
-        package_name -- name of the package
-                        Ex.: 'distutils'
-        parent_name  -- name of the parent package
-                        Ex.: 'numpy'
-        top_path     -- directory of the toplevel package
-                        Ex.: the directory where the numpy package source sits
-        package_path -- directory of package. Will be computed by magic from the
-                        directory of the caller module if not specified
-                        Ex.: the directory where numpy.distutils is
-        caller_level -- frame level to caller namespace, internal parameter.
-        """
-        self.name = dot_join(parent_name, package_name)
-        self.version = None
-
-        caller_frame = get_frame(caller_level)
-        self.local_path = get_path_from_frame(caller_frame, top_path)
-        # local_path -- directory of a file (usually setup.py) that
-        # defines a configuration() function.
-        if top_path is None:
-            top_path = self.local_path
-            self.local_path = ''
-        if package_path is None:
-            package_path = self.local_path
-        elif os.path.isdir(njoin(self.local_path, package_path)):
-            package_path = njoin(self.local_path, package_path)
-        if not os.path.isdir(package_path or '.'):
-            raise ValueError("%r is not a directory" % (package_path,))
-        self.top_path = top_path
-        self.package_path = package_path
-        # this is the relative path in the installed package
-        self.path_in_package = os.path.join(*self.name.split('.'))
-
-        self.list_keys = self._list_keys[:]
-        self.dict_keys = self._dict_keys[:]
-
-        for n in self.list_keys:
-            v = copy.copy(attrs.get(n, []))
-            setattr(self, n, as_list(v))
-
-        for n in self.dict_keys:
-            v = copy.copy(attrs.get(n, {}))
-            setattr(self, n, v)
-
-        known_keys = self.list_keys + self.dict_keys
-        self.extra_keys = self._extra_keys[:]
-        for n in attrs.keys():
-            if n in known_keys:
-                continue
-            a = attrs[n]
-            setattr(self, n, a)
-            if isinstance(a, list):
-                self.list_keys.append(n)
-            elif isinstance(a, dict):
-                self.dict_keys.append(n)
-            else:
-                self.extra_keys.append(n)
-
-        if os.path.exists(njoin(package_path, '__init__.py')):
-            self.packages.append(self.name)
-            self.package_dir[self.name] = package_path
-
-        self.options = dict(
-            ignore_setup_xxx_py = False,
-            assume_default_configuration = False,
-            delegate_options_to_subpackages = False,
-            quiet = False,
-            )
-
-        caller_instance = None
-        for i in range(1, 3):
-            try:
-                f = get_frame(i)
-            except ValueError:
-                break
-            try:
-                caller_instance = eval('self', f.f_globals, f.f_locals)
-                break
-            except NameError:
-                pass
-        if isinstance(caller_instance, self.__class__):
-            if caller_instance.options['delegate_options_to_subpackages']:
-                self.set_options(**caller_instance.options)
-
-        self.setup_name = setup_name
-
-    def todict(self):
-        """
-        Return a dictionary compatible with the keyword arguments of distutils
-        setup function.
-
-        Examples
-        --------
-        >>> setup(**config.todict())  #doctest: +SKIP
-        """
-
-        self._optimize_data_files()
-        d = {}
-        known_keys = self.list_keys + self.dict_keys + self.extra_keys
-        for n in known_keys:
-            a = getattr(self, n)
-            if a:
-                d[n] = a
-        return d
-
-    def info(self, message):
-        if not self.options['quiet']:
-            print(message)
-
-    def warn(self, message):
-        sys.stderr.write('Warning: %s\n' % (message,))
-
-    def set_options(self, **options):
-        """
-        Configure Configuration instance.
- - The following options are available: - - ignore_setup_xxx_py - - assume_default_configuration - - delegate_options_to_subpackages - - quiet - - """ - for key, value in options.items(): - if key in self.options: - self.options[key] = value - else: - raise ValueError('Unknown option: '+key) - - def get_distribution(self): - """Return the distutils distribution object for self.""" - from numpy.distutils.core import get_distribution - return get_distribution() - - def _wildcard_get_subpackage(self, subpackage_name, - parent_name, - caller_level = 1): - l = subpackage_name.split('.') - subpackage_path = njoin([self.local_path]+l) - dirs = [_m for _m in sorted_glob(subpackage_path) if os.path.isdir(_m)] - config_list = [] - for d in dirs: - if not os.path.isfile(njoin(d, '__init__.py')): - continue - if 'build' in d.split(os.sep): - continue - n = '.'.join(d.split(os.sep)[-len(l):]) - c = self.get_subpackage(n, - parent_name = parent_name, - caller_level = caller_level+1) - config_list.extend(c) - return config_list - - def _get_configuration_from_setup_py(self, setup_py, - subpackage_name, - subpackage_path, - parent_name, - caller_level = 1): - # In case setup_py imports local modules: - sys.path.insert(0, os.path.dirname(setup_py)) - try: - setup_name = os.path.splitext(os.path.basename(setup_py))[0] - n = dot_join(self.name, subpackage_name, setup_name) - setup_module = exec_mod_from_location( - '_'.join(n.split('.')), setup_py) - if not hasattr(setup_module, 'configuration'): - if not self.options['assume_default_configuration']: - self.warn('Assuming default configuration '\ - '(%s does not define configuration())'\ - % (setup_module)) - config = Configuration(subpackage_name, parent_name, - self.top_path, subpackage_path, - caller_level = caller_level + 1) - else: - pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1])) - args = (pn,) - if setup_module.configuration.__code__.co_argcount > 1: - args = args + (self.top_path,) - config = setup_module.configuration(*args) - if config.name!=dot_join(parent_name, subpackage_name): - self.warn('Subpackage %r configuration returned as %r' % \ - (dot_join(parent_name, subpackage_name), config.name)) - finally: - del sys.path[0] - return config - - def get_subpackage(self,subpackage_name, - subpackage_path=None, - parent_name=None, - caller_level = 1): - """Return list of subpackage configurations. - - Parameters - ---------- - subpackage_name : str or None - Name of the subpackage to get the configuration. '*' in - subpackage_name is handled as a wildcard. - subpackage_path : str - If None, then the path is assumed to be the local path plus the - subpackage_name. If a setup.py file is not found in the - subpackage_path, then a default configuration is used. - parent_name : str - Parent name. 
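-
-        Examples
-        --------
-        A hedged sketch; the subpackage name is hypothetical::
-
-            >>> config.get_subpackage('linalg')  #doctest: +SKIP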
-        """
-        if subpackage_name is None:
-            if subpackage_path is None:
-                raise ValueError(
-                    "either subpackage_name or subpackage_path must be specified")
-            subpackage_name = os.path.basename(subpackage_path)
-
-        # handle wildcards
-        l = subpackage_name.split('.')
-        if subpackage_path is None and '*' in subpackage_name:
-            return self._wildcard_get_subpackage(subpackage_name,
-                                                 parent_name,
-                                                 caller_level = caller_level+1)
-        assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name))
-        if subpackage_path is None:
-            subpackage_path = njoin([self.local_path] + l)
-        else:
-            subpackage_path = njoin([subpackage_path] + l[:-1])
-            subpackage_path = self.paths([subpackage_path])[0]
-        setup_py = njoin(subpackage_path, self.setup_name)
-        if not self.options['ignore_setup_xxx_py']:
-            if not os.path.isfile(setup_py):
-                setup_py = njoin(subpackage_path,
-                                 'setup_%s.py' % (subpackage_name))
-        if not os.path.isfile(setup_py):
-            if not self.options['assume_default_configuration']:
-                self.warn('Assuming default configuration '\
-                          '(%s/{setup_%s,setup}.py was not found)' \
-                          % (os.path.dirname(setup_py), subpackage_name))
-            config = Configuration(subpackage_name, parent_name,
-                                   self.top_path, subpackage_path,
-                                   caller_level = caller_level+1)
-        else:
-            config = self._get_configuration_from_setup_py(
-                setup_py,
-                subpackage_name,
-                subpackage_path,
-                parent_name,
-                caller_level = caller_level + 1)
-        if config:
-            return [config]
-        else:
-            return []
-
-    def add_subpackage(self,subpackage_name,
-                       subpackage_path=None,
-                       standalone = False):
-        """Add a sub-package to the current Configuration instance.
-
-        This is useful in a setup.py script for adding sub-packages to a
-        package.
-
-        Parameters
-        ----------
-        subpackage_name : str
-            name of the subpackage
-        subpackage_path : str
-            if given, the path such that the subpackage is located in
-            subpackage_path / subpackage_name. If None, the subpackage is
-            assumed to be located in the local path / subpackage_name.
-        standalone : bool
-        """
-
-        if standalone:
-            parent_name = None
-        else:
-            parent_name = self.name
-        config_list = self.get_subpackage(subpackage_name, subpackage_path,
-                                          parent_name = parent_name,
-                                          caller_level = 2)
-        if not config_list:
-            self.warn('No configuration returned, assuming unavailable.')
-        for config in config_list:
-            d = config
-            if isinstance(config, Configuration):
-                d = config.todict()
-            assert isinstance(d, dict), repr(type(d))
-
-            self.info('Appending %s configuration to %s' \
-                      % (d.get('name'), self.name))
-            self.dict_append(**d)
-
-        dist = self.get_distribution()
-        if dist is not None:
-            self.warn('distutils distribution has been initialized,'\
-                      ' it may be too late to add a subpackage '+ subpackage_name)
-
-    def add_data_dir(self, data_path):
-        """Recursively add files under data_path to data_files list.
-
-        Recursively add files under data_path to the list of data_files to be
-        installed (and distributed). The data_path can be either a relative
-        path-name, or an absolute path-name, or a 2-tuple where the first
-        argument shows where in the install directory the data directory
-        should be installed to.
-
-        Parameters
-        ----------
-        data_path : seq or str
-            Argument can be either
-
-                * 2-sequence (<datadir suffix>, <path to data directory>)
-                * path to data directory where python datadir suffix defaults
-                  to package dir.
-
-        Notes
-        -----
-        Rules for installation paths::
-
-            foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
-            (gun, foo/bar) -> parent/gun
-            foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b
-            (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun
-            (gun/*, foo/*) -> parent/gun/a, parent/gun/b
-            /foo/bar -> (bar, /foo/bar) -> parent/bar
-            (gun, /foo/bar) -> parent/gun
-            (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar
-
-        Examples
-        --------
-        For example suppose the source directory contains fun/foo.dat and
-        fun/bar/car.dat:
-
-        >>> self.add_data_dir('fun')                       #doctest: +SKIP
-        >>> self.add_data_dir(('sun', 'fun'))              #doctest: +SKIP
-        >>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP
-
-        Will install data-files to the locations::
-
-            <package install directory>/
-              fun/
-                foo.dat
-                bar/
-                  car.dat
-              sun/
-                foo.dat
-                bar/
-                  car.dat
-              gun/
-                foo.dat
-                car.dat
-
-        """
-        if is_sequence(data_path):
-            d, data_path = data_path
-        else:
-            d = None
-        if is_sequence(data_path):
-            [self.add_data_dir((d, p)) for p in data_path]
-            return
-        if not is_string(data_path):
-            raise TypeError("not a string: %r" % (data_path,))
-        if d is None:
-            if os.path.isabs(data_path):
-                return self.add_data_dir((os.path.basename(data_path), data_path))
-            return self.add_data_dir((data_path, data_path))
-        paths = self.paths(data_path, include_non_existing=False)
-        if is_glob_pattern(data_path):
-            if is_glob_pattern(d):
-                pattern_list = allpath(d).split(os.sep)
-                pattern_list.reverse()
-                # /a/*//b/ -> /a/*/b
-                rl = list(range(len(pattern_list)-1)); rl.reverse()
-                for i in rl:
-                    if not pattern_list[i]:
-                        del pattern_list[i]
-                #
-                for path in paths:
-                    if not os.path.isdir(path):
-                        print('Not a directory, skipping', path)
-                        continue
-                    rpath = rel_path(path, self.local_path)
-                    path_list = rpath.split(os.sep)
-                    path_list.reverse()
-                    target_list = []
-                    i = 0
-                    for s in pattern_list:
-                        if is_glob_pattern(s):
-                            if i>=len(path_list):
-                                raise ValueError('cannot fill pattern %r with %r' \
-                                      % (d, path))
-                            target_list.append(path_list[i])
-                        else:
-                            assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath))
-                            target_list.append(s)
-                        i += 1
-                    if path_list[i:]:
-                        self.warn('mismatch of pattern_list=%s and path_list=%s'\
-                                  % (pattern_list, path_list))
-                    target_list.reverse()
-                    self.add_data_dir((os.sep.join(target_list), path))
-            else:
-                for path in paths:
-                    self.add_data_dir((d, path))
-            return
-        assert not is_glob_pattern(d), repr(d)
-
-        dist = self.get_distribution()
-        if dist is not None and dist.data_files is not None:
-            data_files = dist.data_files
-        else:
-            data_files = self.data_files
-
-        for path in paths:
-            for d1, f in list(general_source_directories_files(path)):
-                target_path = os.path.join(self.path_in_package, d, d1)
-                data_files.append((target_path, f))
-
-    def _optimize_data_files(self):
-        data_dict = {}
-        for p, files in self.data_files:
-            if p not in data_dict:
-                data_dict[p] = set()
-            for f in files:
-                data_dict[p].add(f)
-        self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()]
-
-    def add_data_files(self,*files):
-        """Add data files to configuration data_files.
-
-        Parameters
-        ----------
-        files : sequence
-            Argument(s) can be either
-
-                * 2-sequence (<datadir prefix>, <path to data file(s)>)
-                * paths to data files where python datadir prefix defaults
-                  to package dir.
-
-        Notes
-        -----
-        The form of each element of the files sequence is very flexible
-        allowing many combinations of where to get the files from the package
-        and where they should ultimately be installed on the system.
The most
-        basic usage is for an element of the files argument sequence to be a
-        simple filename. This will cause that file from the local path to be
-        installed to the installation path of the self.name package (package
-        path). The file argument can also be a relative path in which case the
-        entire relative path will be installed into the package directory.
-        Finally, the file can be an absolute path name in which case the file
-        will be found at the absolute path name but installed to the package
-        path.
-
-        This basic behavior can be augmented by passing a 2-tuple in as the
-        file argument. The first element of the tuple should specify the
-        relative path (under the package install directory) where the
-        remaining sequence of files should be installed to (it has nothing to
-        do with the file-names in the source distribution). The second element
-        of the tuple is the sequence of files that should be installed. The
-        files in this sequence can be filenames, relative paths, or absolute
-        paths. For absolute paths the file will be installed in the top-level
-        package installation directory (regardless of the first argument).
-        Filenames and relative path names will be installed in the package
-        install directory under the path name given as the first element of
-        the tuple.
-
-        Rules for installation paths:
-
-          #. file.txt -> (., file.txt) -> parent/file.txt
-          #. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt
-          #. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt
-          #. ``*``.txt -> parent/a.txt, parent/b.txt
-          #. foo/``*``.txt -> parent/foo/a.txt, parent/foo/b.txt
-          #. ``*/*.txt`` -> (``*``, ``*``/``*``.txt) -> parent/c/a.txt, parent/d/b.txt
-          #. (sun, file.txt) -> parent/sun/file.txt
-          #. (sun, bar/file.txt) -> parent/sun/file.txt
-          #. (sun, /foo/bar/file.txt) -> parent/sun/file.txt
-          #. (sun, ``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
-          #. (sun, bar/``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
-          #. (sun/``*``, ``*``/``*``.txt) -> parent/sun/c/a.txt, parent/d/b.txt
-
-        An additional feature is that the path to a data-file can actually be
-        a function that takes no arguments and returns the actual path(s) to
-        the data-files. This is useful when the data files are generated while
-        building the package.
-
-        Examples
-        --------
-        Add files to the list of data_files to be included with the package.
-
-        >>> self.add_data_files('foo.dat',
-        ...     ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']),
-        ...     'bar/cat.dat',
-        ...     '/full/path/to/can.dat')                   #doctest: +SKIP
-
-        will install these data files to::
-
-            <package install directory>/
-              foo.dat
-              fun/
-                gun.dat
-                nun/
-                  pun.dat
-              sun.dat
-              bar/
-                cat.dat
-              can.dat
-
-        where <package install directory> is the package (or sub-package)
-        directory such as '/usr/lib/python2.4/site-packages/mypackage' ('C:
-        \\Python2.4 \\Lib \\site-packages \\mypackage') or
-        '/usr/lib/python2.4/site-packages/mypackage/mysubpackage' ('C:
-        \\Python2.4 \\Lib \\site-packages \\mypackage \\mysubpackage').
-        """
-
-        if len(files)>1:
-            for f in files:
-                self.add_data_files(f)
-            return
-        assert len(files)==1
-        if is_sequence(files[0]):
-            d, files = files[0]
-        else:
-            d = None
-        if is_string(files):
-            filepat = files
-        elif is_sequence(files):
-            if len(files)==1:
-                filepat = files[0]
-            else:
-                for f in files:
-                    self.add_data_files((d, f))
-                return
-        else:
-            raise TypeError(repr(type(files)))
-
-        if d is None:
-            if hasattr(filepat, '__call__'):
-                d = ''
-            elif os.path.isabs(filepat):
-                d = ''
-            else:
-                d = os.path.dirname(filepat)
-            self.add_data_files((d, files))
-            return
-
-        paths = self.paths(filepat, include_non_existing=False)
-        if is_glob_pattern(filepat):
-            if is_glob_pattern(d):
-                pattern_list = d.split(os.sep)
-                pattern_list.reverse()
-                for path in paths:
-                    path_list = path.split(os.sep)
-                    path_list.reverse()
-                    path_list.pop()  # filename
-                    target_list = []
-                    i = 0
-                    for s in pattern_list:
-                        if is_glob_pattern(s):
-                            target_list.append(path_list[i])
-                            i += 1
-                        else:
-                            target_list.append(s)
-                    target_list.reverse()
-                    self.add_data_files((os.sep.join(target_list), path))
-            else:
-                self.add_data_files((d, paths))
-            return
-        assert not is_glob_pattern(d), repr((d, filepat))
-
-        dist = self.get_distribution()
-        if dist is not None and dist.data_files is not None:
-            data_files = dist.data_files
-        else:
-            data_files = self.data_files
-
-        data_files.append((os.path.join(self.path_in_package, d), paths))
-
-    ### XXX Implement add_py_modules
-
-    def add_define_macros(self, macros):
-        """Add define macros to configuration
-
-        Add the given sequence of macro name and value duples to the beginning
-        of the define_macros list. This list will be visible to all extension
-        modules of the current package.
-        """
-        dist = self.get_distribution()
-        if dist is not None:
-            if not hasattr(dist, 'define_macros'):
-                dist.define_macros = []
-            dist.define_macros.extend(macros)
-        else:
-            self.define_macros.extend(macros)
-
-
-    def add_include_dirs(self,*paths):
-        """Add paths to configuration include directories.
-
-        Add the given sequence of paths to the beginning of the include_dirs
-        list. This list will be visible to all extension modules of the
-        current package.
-        """
-        include_dirs = self.paths(paths)
-        dist = self.get_distribution()
-        if dist is not None:
-            if dist.include_dirs is None:
-                dist.include_dirs = []
-            dist.include_dirs.extend(include_dirs)
-        else:
-            self.include_dirs.extend(include_dirs)
-
-    def add_headers(self,*files):
-        """Add installable headers to configuration.
-
-        Add the given sequence of files to the beginning of the headers list.
-        By default, headers will be installed under
-        <python-include>/<self.name.replace('.','/')>/ directory. If an item
-        of files is a tuple, then its first argument specifies the actual
-        installation location relative to the <python-include> path.
-
-        Parameters
-        ----------
-        files : str or seq
-            Argument(s) can be either:
-
-                * 2-sequence (<includedir suffix>, <path to header file(s)>)
-                * path(s) to header file(s) where python includedir suffix will
-                  default to package name.
-        """
-        headers = []
-        for path in files:
-            if is_string(path):
-                [headers.append((self.name, p)) for p in self.paths(path)]
-            else:
-                if not isinstance(path, (tuple, list)) or len(path) != 2:
-                    raise TypeError(repr(path))
-                [headers.append((path[0], p)) for p in self.paths(path[1])]
-        dist = self.get_distribution()
-        if dist is not None:
-            if dist.headers is None:
-                dist.headers = []
-            dist.headers.extend(headers)
-        else:
-            self.headers.extend(headers)
-
-    def paths(self,*paths,**kws):
-        """Apply glob to paths and prepend local_path if needed.
-
-        Applies glob.glob(...)
to each path in the sequence (if needed) and - prepends the local_path if needed. Because this is called on all - source lists, this allows wildcard characters to be specified in lists - of sources for extension modules and libraries and scripts and allows - path-names be relative to the source directory. - - """ - include_non_existing = kws.get('include_non_existing', True) - return gpaths(paths, - local_path = self.local_path, - include_non_existing=include_non_existing) - - def _fix_paths_dict(self, kw): - for k in kw.keys(): - v = kw[k] - if k in ['sources', 'depends', 'include_dirs', 'library_dirs', - 'module_dirs', 'extra_objects']: - new_v = self.paths(v) - kw[k] = new_v - - def add_extension(self,name,sources,**kw): - """Add extension to configuration. - - Create and add an Extension instance to the ext_modules list. This - method also takes the following optional keyword arguments that are - passed on to the Extension constructor. - - Parameters - ---------- - name : str - name of the extension - sources : seq - list of the sources. The list of sources may contain functions - (called source generators) which must take an extension instance - and a build directory as inputs and return a source file or list of - source files or None. If None is returned then no sources are - generated. If the Extension instance has no sources after - processing all source generators, then no extension module is - built. - include_dirs : - define_macros : - undef_macros : - library_dirs : - libraries : - runtime_library_dirs : - extra_objects : - extra_compile_args : - extra_link_args : - extra_f77_compile_args : - extra_f90_compile_args : - export_symbols : - swig_opts : - depends : - The depends list contains paths to files or directories that the - sources of the extension module depend on. If any path in the - depends list is newer than the extension module, then the module - will be rebuilt. - language : - f2py_options : - module_dirs : - extra_info : dict or list - dict or list of dict of keywords to be appended to keywords. - - Notes - ----- - The self.paths(...) method is applied to all lists that may contain - paths. 
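-
-        Examples
-        --------
-        A minimal sketch; the file names are hypothetical::
-
-            >>> config.add_extension('fib', sources=['fibmodule.c'],
-            ...                      include_dirs=['include'])  #doctest: +SKIP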
- """ - ext_args = copy.copy(kw) - ext_args['name'] = dot_join(self.name, name) - ext_args['sources'] = sources - - if 'extra_info' in ext_args: - extra_info = ext_args['extra_info'] - del ext_args['extra_info'] - if isinstance(extra_info, dict): - extra_info = [extra_info] - for info in extra_info: - assert isinstance(info, dict), repr(info) - dict_append(ext_args,**info) - - self._fix_paths_dict(ext_args) - - # Resolve out-of-tree dependencies - libraries = ext_args.get('libraries', []) - libnames = [] - ext_args['libraries'] = [] - for libname in libraries: - if isinstance(libname, tuple): - self._fix_paths_dict(libname[1]) - - # Handle library names of the form libname@relative/path/to/library - if '@' in libname: - lname, lpath = libname.split('@', 1) - lpath = os.path.abspath(njoin(self.local_path, lpath)) - if os.path.isdir(lpath): - c = self.get_subpackage(None, lpath, - caller_level = 2) - if isinstance(c, Configuration): - c = c.todict() - for l in [l[0] for l in c.get('libraries', [])]: - llname = l.split('__OF__', 1)[0] - if llname == lname: - c.pop('name', None) - dict_append(ext_args,**c) - break - continue - libnames.append(libname) - - ext_args['libraries'] = libnames + ext_args['libraries'] - ext_args['define_macros'] = \ - self.define_macros + ext_args.get('define_macros', []) - - from numpy.distutils.core import Extension - ext = Extension(**ext_args) - self.ext_modules.append(ext) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add an extension '+name) - return ext - - def add_library(self,name,sources,**build_info): - """ - Add library to configuration. - - Parameters - ---------- - name : str - Name of the extension. - sources : sequence - List of the sources. The list of sources may contain functions - (called source generators) which must take an extension instance - and a build directory as inputs and return a source file or list of - source files or None. If None is returned then no sources are - generated. If the Extension instance has no sources after - processing all source generators, then no extension module is - built. - build_info : dict, optional - The following keys are allowed: - - * depends - * macros - * include_dirs - * extra_compiler_args - * extra_f77_compile_args - * extra_f90_compile_args - * f2py_options - * language - - """ - self._add_library(name, sources, None, build_info) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add a library '+ name) - - def _add_library(self, name, sources, install_dir, build_info): - """Common implementation for add_library and add_installed_library. Do - not use directly""" - build_info = copy.copy(build_info) - build_info['sources'] = sources - - # Sometimes, depends is not set up to an empty list by default, and if - # depends is not given to add_library, distutils barfs (#1134) - if not 'depends' in build_info: - build_info['depends'] = [] - - self._fix_paths_dict(build_info) - - # Add to libraries list so that it is build with build_clib - self.libraries.append((name, build_info)) - - def add_installed_library(self, name, sources, install_dir, build_info=None): - """ - Similar to add_library, but the specified library is installed. 
- - Most C libraries used with ``distutils`` are only used to build python - extensions, but libraries built through this method will be installed - so that they can be reused by third-party packages. - - Parameters - ---------- - name : str - Name of the installed library. - sources : sequence - List of the library's source files. See `add_library` for details. - install_dir : str - Path to install the library, relative to the current sub-package. - build_info : dict, optional - The following keys are allowed: - - * depends - * macros - * include_dirs - * extra_compiler_args - * extra_f77_compile_args - * extra_f90_compile_args - * f2py_options - * language - - Returns - ------- - None - - See Also - -------- - add_library, add_npy_pkg_config, get_info - - Notes - ----- - The best way to encode the options required to link against the specified - C libraries is to use a "libname.ini" file, and use `get_info` to - retrieve the required options (see `add_npy_pkg_config` for more - information). - - """ - if not build_info: - build_info = {} - - install_dir = os.path.join(self.package_path, install_dir) - self._add_library(name, sources, install_dir, build_info) - self.installed_libraries.append(InstallableLib(name, build_info, install_dir)) - - def add_npy_pkg_config(self, template, install_dir, subst_dict=None): - """ - Generate and install a npy-pkg config file from a template. - - The config file generated from `template` is installed in the - given install directory, using `subst_dict` for variable substitution. - - Parameters - ---------- - template : str - The path of the template, relatively to the current package path. - install_dir : str - Where to install the npy-pkg config file, relatively to the current - package path. - subst_dict : dict, optional - If given, any string of the form ``@key@`` will be replaced by - ``subst_dict[key]`` in the template file when installed. The install - prefix is always available through the variable ``@prefix@``, since the - install prefix is not easy to get reliably from setup.py. - - See also - -------- - add_installed_library, get_info - - Notes - ----- - This works for both standard installs and in-place builds, i.e. the - ``@prefix@`` refer to the source directory for in-place builds. - - Examples - -------- - :: - - config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar}) - - Assuming the foo.ini.in file has the following content:: - - [meta] - Name=@foo@ - Version=1.0 - Description=dummy description - - [default] - Cflags=-I@prefix@/include - Libs= - - The generated file will have the following content:: - - [meta] - Name=bar - Version=1.0 - Description=dummy description - - [default] - Cflags=-Iprefix_dir/include - Libs= - - and will be installed as foo.ini in the 'lib' subpath. - - When cross-compiling with numpy distutils, it might be necessary to - use modified npy-pkg-config files. Using the default/generated files - will link with the host libraries (i.e. libnpymath.a). For - cross-compilation you of-course need to link with target libraries, - while using the host Python installation. - - You can copy out the numpy/_core/lib/npy-pkg-config directory, add a - pkgdir value to the .ini files and set NPY_PKG_CONFIG_PATH environment - variable to point to the directory with the modified npy-pkg-config - files. 
- - Example npymath.ini modified for cross-compilation:: - - [meta] - Name=npymath - Description=Portable, core math library implementing C99 standard - Version=0.1 - - [variables] - pkgname=numpy._core - pkgdir=/build/arm-linux-gnueabi/sysroot/usr/lib/python3.7/site-packages/numpy/_core - prefix=${pkgdir} - libdir=${prefix}/lib - includedir=${prefix}/include - - [default] - Libs=-L${libdir} -lnpymath - Cflags=-I${includedir} - Requires=mlib - - [msvc] - Libs=/LIBPATH:${libdir} npymath.lib - Cflags=/INCLUDE:${includedir} - Requires=mlib - - """ - if subst_dict is None: - subst_dict = {} - template = os.path.join(self.package_path, template) - - if self.name in self.installed_pkg_config: - self.installed_pkg_config[self.name].append((template, install_dir, - subst_dict)) - else: - self.installed_pkg_config[self.name] = [(template, install_dir, - subst_dict)] - - - def add_scripts(self,*files): - """Add scripts to configuration. - - Add the sequence of files to the beginning of the scripts list. - Scripts will be installed under the /bin/ directory. - - """ - scripts = self.paths(files) - dist = self.get_distribution() - if dist is not None: - if dist.scripts is None: - dist.scripts = [] - dist.scripts.extend(scripts) - else: - self.scripts.extend(scripts) - - def dict_append(self,**dict): - for key in self.list_keys: - a = getattr(self, key) - a.extend(dict.get(key, [])) - for key in self.dict_keys: - a = getattr(self, key) - a.update(dict.get(key, {})) - known_keys = self.list_keys + self.dict_keys + self.extra_keys - for key in dict.keys(): - if key not in known_keys: - a = getattr(self, key, None) - if a and a==dict[key]: continue - self.warn('Inheriting attribute %r=%r from %r' \ - % (key, dict[key], dict.get('name', '?'))) - setattr(self, key, dict[key]) - self.extra_keys.append(key) - elif key in self.extra_keys: - self.info('Ignoring attempt to set %r (from %r to %r)' \ - % (key, getattr(self, key), dict[key])) - elif key in known_keys: - # key is already processed above - pass - else: - raise ValueError("Don't know about key=%r" % (key)) - - def __str__(self): - from pprint import pformat - known_keys = self.list_keys + self.dict_keys + self.extra_keys - s = '<'+5*'-' + '\n' - s += 'Configuration of '+self.name+':\n' - known_keys.sort() - for k in known_keys: - a = getattr(self, k, None) - if a: - s += '%s = %s\n' % (k, pformat(a)) - s += 5*'-' + '>' - return s - - def get_config_cmd(self): - """ - Returns the numpy.distutils config command instance. - """ - cmd = get_cmd('config') - cmd.ensure_finalized() - cmd.dump_source = 0 - cmd.noisy = 0 - old_path = os.environ.get('PATH') - if old_path: - path = os.pathsep.join(['.', old_path]) - os.environ['PATH'] = path - return cmd - - def get_build_temp_dir(self): - """ - Return a path to a temporary directory where temporary files should be - placed. - """ - cmd = get_cmd('build') - cmd.ensure_finalized() - return cmd.build_temp - - def have_f77c(self): - """Check for availability of Fortran 77 compiler. - - Use it inside source generating function to ensure that - setup distribution instance has been initialized. - - Notes - ----- - True if a Fortran 77 compiler is available (because a simple Fortran 77 - code was able to be compiled successfully). - """ - simple_fortran_subroutine = ''' - subroutine simple - end - ''' - config_cmd = self.get_config_cmd() - flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77') - return flag - - def have_f90c(self): - """Check for availability of Fortran 90 compiler. 
-
-        Use it inside source generating function to ensure that
-        setup distribution instance has been initialized.
-
-        Notes
-        -----
-        True if a Fortran 90 compiler is available (because a simple Fortran
-        90 code was able to be compiled successfully).
-        """
-        simple_fortran_subroutine = '''
-        subroutine simple
-        end
-        '''
-        config_cmd = self.get_config_cmd()
-        flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90')
-        return flag
-
-    def append_to(self, extlib):
-        """Append libraries, include_dirs to extension or library item.
-        """
-        if is_sequence(extlib):
-            lib_name, build_info = extlib
-            dict_append(build_info,
-                        libraries=self.libraries,
-                        include_dirs=self.include_dirs)
-        else:
-            from numpy.distutils.core import Extension
-            assert isinstance(extlib, Extension), repr(extlib)
-            extlib.libraries.extend(self.libraries)
-            extlib.include_dirs.extend(self.include_dirs)
-
-    def _get_svn_revision(self, path):
-        """Return path's SVN revision number.
-        """
-        try:
-            output = subprocess.check_output(['svnversion'], cwd=path)
-        except (subprocess.CalledProcessError, OSError):
-            pass
-        else:
-            m = re.match(rb'(?P<revision>\d+)', output)
-            if m:
-                return int(m.group('revision'))
-
-        if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None):
-            entries = njoin(path, '_svn', 'entries')
-        else:
-            entries = njoin(path, '.svn', 'entries')
-        if os.path.isfile(entries):
-            with open(entries) as f:
-                fstr = f.read()
-            if fstr[:5] == '<?xml':  # pre 1.4
-                m = re.search(r'revision="(?P<revision>\d+)"', fstr)
-                if m:
-                    return int(m.group('revision'))
-            else:  # non-xml entries file --- check to be sure that
-                m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
-                if m:
-                    return int(m.group('revision'))
-        return None
-
-    def _get_hg_revision(self, path):
-        """Return path's Mercurial revision number.
-        """
-        try:
-            output = subprocess.check_output(
-                ['hg', 'identify', '--num'], cwd=path)
-        except (subprocess.CalledProcessError, OSError):
-            pass
-        else:
-            m = re.match(rb'(?P<revision>\d+)', output)
-            if m:
-                return int(m.group('revision'))
-
-        branch_fn = njoin(path, '.hg', 'branch')
-        branch_cache_fn = njoin(path, '.hg', 'branch.cache')
-
-        if os.path.isfile(branch_fn):
-            branch0 = None
-            with open(branch_fn) as f:
-                revision0 = f.read().strip()
-
-            branch_map = {}
-            with open(branch_cache_fn) as f:
-                for line in f:
-                    branch1, revision1 = line.split()[:2]
-                    if revision1==revision0:
-                        branch0 = branch1
-                    try:
-                        revision1 = int(revision1)
-                    except ValueError:
-                        continue
-                    branch_map[branch1] = revision1
-
-            return branch_map.get(branch0)
-
-        return None
-
-
-    def get_version(self, version_file=None, version_variable=None):
-        """Try to get version string of a package.
-
-        Return a version string of the current package or None if the version
-        information could not be detected.
-
-        Notes
-        -----
-        This method scans files named
-        __version__.py, <packagename>_version.py, version.py, and
-        __svn_version__.py for string variables version, __version__, and
-        <packagename>_version, until a version number is found.
-        """
-        version = getattr(self, 'version', None)
-        if version is not None:
-            return version
-
-        # Get version from version file.
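-        # For a package named ``foo`` this looks for foo_version.py (among
-        # others) and reads a ``version``/``__version__`` attribute from it;
-        # a minimal version file (contents invented for illustration) is just:
-        #
-        #     version = '1.0.0'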
- if version_file is None: - files = ['__version__.py', - self.name.split('.')[-1]+'_version.py', - 'version.py', - '__svn_version__.py', - '__hg_version__.py'] - else: - files = [version_file] - if version_variable is None: - version_vars = ['version', - '__version__', - self.name.split('.')[-1]+'_version'] - else: - version_vars = [version_variable] - for f in files: - fn = njoin(self.local_path, f) - if os.path.isfile(fn): - info = ('.py', 'U', 1) - name = os.path.splitext(os.path.basename(fn))[0] - n = dot_join(self.name, name) - try: - version_module = exec_mod_from_location( - '_'.join(n.split('.')), fn) - except ImportError as e: - self.warn(str(e)) - version_module = None - if version_module is None: - continue - - for a in version_vars: - version = getattr(version_module, a, None) - if version is not None: - break - - # Try if versioneer module - try: - version = version_module.get_versions()['version'] - except AttributeError: - pass - - if version is not None: - break - - if version is not None: - self.version = version - return version - - # Get version as SVN or Mercurial revision number - revision = self._get_svn_revision(self.local_path) - if revision is None: - revision = self._get_hg_revision(self.local_path) - - if revision is not None: - version = str(revision) - self.version = version - - return version - - def make_svn_version_py(self, delete=True): - """Appends a data function to the data_files list that will generate - __svn_version__.py file to the current package directory. - - Generate package __svn_version__.py file from SVN revision number, - it will be removed after python exits but will be available - when sdist, etc commands are executed. - - Notes - ----- - If __svn_version__.py existed before, nothing is done. - - This is - intended for working with source directories that are in an SVN - repository. - """ - target = njoin(self.local_path, '__svn_version__.py') - revision = self._get_svn_revision(self.local_path) - if os.path.isfile(target) or revision is None: - return - else: - def generate_svn_version_py(): - if not os.path.isfile(target): - version = str(revision) - self.info('Creating %s (version=%r)' % (target, version)) - with open(target, 'w') as f: - f.write('version = %r\n' % (version)) - - def rm_file(f=target,p=self.info): - if delete: - try: os.remove(f); p('removed '+f) - except OSError: pass - try: os.remove(f+'c'); p('removed '+f+'c') - except OSError: pass - - atexit.register(rm_file) - - return target - - self.add_data_files(('', generate_svn_version_py())) - - def make_hg_version_py(self, delete=True): - """Appends a data function to the data_files list that will generate - __hg_version__.py file to the current package directory. - - Generate package __hg_version__.py file from Mercurial revision, - it will be removed after python exits but will be available - when sdist, etc commands are executed. - - Notes - ----- - If __hg_version__.py existed before, nothing is done. - - This is intended for working with source directories that are - in a Mercurial repository. 
- """ - target = njoin(self.local_path, '__hg_version__.py') - revision = self._get_hg_revision(self.local_path) - if os.path.isfile(target) or revision is None: - return - else: - def generate_hg_version_py(): - if not os.path.isfile(target): - version = str(revision) - self.info('Creating %s (version=%r)' % (target, version)) - with open(target, 'w') as f: - f.write('version = %r\n' % (version)) - - def rm_file(f=target,p=self.info): - if delete: - try: os.remove(f); p('removed '+f) - except OSError: pass - try: os.remove(f+'c'); p('removed '+f+'c') - except OSError: pass - - atexit.register(rm_file) - - return target - - self.add_data_files(('', generate_hg_version_py())) - - def make_config_py(self,name='__config__'): - """Generate package __config__.py file containing system_info - information used during building the package. - - This file is installed to the - package installation directory. - - """ - self.py_modules.append((self.name, name, generate_config_py)) - - def get_info(self,*names): - """Get resources information. - - Return information (from system_info.get_info) for all of the names in - the argument list in a single dictionary. - """ - from .system_info import get_info, dict_append - info_dict = {} - for a in names: - dict_append(info_dict,**get_info(a)) - return info_dict - - -def get_cmd(cmdname, _cache={}): - if cmdname not in _cache: - import distutils.core - dist = distutils.core._setup_distribution - if dist is None: - from distutils.errors import DistutilsInternalError - raise DistutilsInternalError( - 'setup distribution instance not initialized') - cmd = dist.get_command_obj(cmdname) - _cache[cmdname] = cmd - return _cache[cmdname] - -def get_numpy_include_dirs(): - # numpy_include_dirs are set by numpy/_core/setup.py, otherwise [] - include_dirs = Configuration.numpy_include_dirs[:] - if not include_dirs: - import numpy - include_dirs = [ numpy.get_include() ] - # else running numpy/_core/setup.py - return include_dirs - -def get_npy_pkg_dir(): - """Return the path where to find the npy-pkg-config directory. - - If the NPY_PKG_CONFIG_PATH environment variable is set, the value of that - is returned. Otherwise, a path inside the location of the numpy module is - returned. - - The NPY_PKG_CONFIG_PATH can be useful when cross-compiling, maintaining - customized npy-pkg-config .ini files for the cross-compilation - environment, and using them when cross-compiling. - - """ - d = os.environ.get('NPY_PKG_CONFIG_PATH') - if d is not None: - return d - spec = importlib.util.find_spec('numpy') - d = os.path.join(os.path.dirname(spec.origin), - '_core', 'lib', 'npy-pkg-config') - return d - -def get_pkg_info(pkgname, dirs=None): - """ - Return library info for the given package. - - Parameters - ---------- - pkgname : str - Name of the package (should match the name of the .ini file, without - the extension, e.g. foo for the file foo.ini). - dirs : sequence, optional - If given, should be a sequence of additional directories where to look - for npy-pkg-config files. Those directories are searched prior to the - NumPy directory. - - Returns - ------- - pkginfo : class instance - The `LibraryInfo` instance containing the build information. - - Raises - ------ - PkgNotFound - If the package is not found. 
-
-    See Also
-    --------
-    Configuration.add_npy_pkg_config, Configuration.add_installed_library,
-    get_info
-
-    """
-    from numpy.distutils.npy_pkg_config import read_config
-
-    if dirs:
-        dirs.append(get_npy_pkg_dir())
-    else:
-        dirs = [get_npy_pkg_dir()]
-    return read_config(pkgname, dirs)
-
-def get_info(pkgname, dirs=None):
-    """
-    Return an info dict for a given C library.
-
-    The info dict contains the necessary options to use the C library.
-
-    Parameters
-    ----------
-    pkgname : str
-        Name of the package (should match the name of the .ini file, without
-        the extension, e.g. foo for the file foo.ini).
-    dirs : sequence, optional
-        If given, should be a sequence of additional directories where to look
-        for npy-pkg-config files. Those directories are searched prior to the
-        NumPy directory.
-
-    Returns
-    -------
-    info : dict
-        The dictionary with build information.
-
-    Raises
-    ------
-    PkgNotFound
-        If the package is not found.
-
-    See Also
-    --------
-    Configuration.add_npy_pkg_config, Configuration.add_installed_library,
-    get_pkg_info
-
-    Examples
-    --------
-    To get the necessary information for the npymath library from NumPy:
-
-    >>> npymath_info = np.distutils.misc_util.get_info('npymath')
-    >>> npymath_info #doctest: +SKIP
-    {'define_macros': [], 'libraries': ['npymath'], 'library_dirs':
-    ['.../numpy/_core/lib'], 'include_dirs': ['.../numpy/_core/include']}
-
-    This info dict can then be used as input to a `Configuration` instance::
-
-      config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info)
-
-    """
-    from numpy.distutils.npy_pkg_config import parse_flags
-    pkg_info = get_pkg_info(pkgname, dirs)
-
-    # Translate LibraryInfo instance into a build_info dict
-    info = parse_flags(pkg_info.cflags())
-    for k, v in parse_flags(pkg_info.libs()).items():
-        info[k].extend(v)
-
-    # add_extension is strict about the exact form of its extra_info argument
-    info['define_macros'] = info['macros']
-    del info['macros']
-    del info['ignored']
-
-    return info
-
-def is_bootstrapping():
-    import builtins
-
-    try:
-        builtins.__NUMPY_SETUP__
-        return True
-    except AttributeError:
-        return False
-
-
-#########################
-
-def default_config_dict(name = None, parent_name = None, local_path=None):
-    """Return a configuration dictionary for usage in
-    configuration() function defined in file setup_<packagename>.py.
- """ - import warnings - warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\ - 'deprecated default_config_dict(%r,%r,%r)' - % (name, parent_name, local_path, - name, parent_name, local_path, - ), stacklevel=2) - c = Configuration(name, parent_name, local_path) - return c.todict() - - -def dict_append(d, **kws): - for k, v in kws.items(): - if k in d: - ov = d[k] - if isinstance(ov, str): - d[k] = v - else: - d[k].extend(v) - else: - d[k] = v - -def appendpath(prefix, path): - if os.path.sep != '/': - prefix = prefix.replace('/', os.path.sep) - path = path.replace('/', os.path.sep) - drive = '' - if os.path.isabs(path): - drive = os.path.splitdrive(prefix)[0] - absprefix = os.path.splitdrive(os.path.abspath(prefix))[1] - pathdrive, path = os.path.splitdrive(path) - d = os.path.commonprefix([absprefix, path]) - if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \ - or os.path.join(path[:len(d)], path[len(d):]) != path: - # Handle invalid paths - d = os.path.dirname(d) - subpath = path[len(d):] - if os.path.isabs(subpath): - subpath = subpath[1:] - else: - subpath = path - return os.path.normpath(njoin(drive + prefix, subpath)) - -def generate_config_py(target): - """Generate config.py file containing system_info information - used during building the package. - - Usage: - config['py_modules'].append((packagename, '__config__',generate_config_py)) - """ - from numpy.distutils.system_info import system_info - from distutils.dir_util import mkpath - mkpath(os.path.dirname(target)) - with open(target, 'w') as f: - f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0]))) - f.write('# It contains system_info results at the time of building this package.\n') - f.write('__all__ = ["get_info","show"]\n\n') - - # For gfortran+msvc combination, extra shared libraries may exist - f.write(textwrap.dedent(""" - import os - import sys - - extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs') - - if sys.platform == 'win32' and os.path.isdir(extra_dll_dir): - os.add_dll_directory(extra_dll_dir) - - """)) - - for k, i in system_info.saved_results.items(): - f.write('%s=%r\n' % (k, i)) - f.write(textwrap.dedent(r''' - def get_info(name): - g = globals() - return g.get(name, g.get(name + "_info", {})) - - def show(): - """ - Show libraries in the system on which NumPy was built. - - Print information about various resources (libraries, library - directories, include directories, etc.) in the system on which - NumPy was built. - - See Also - -------- - get_include : Returns the directory containing NumPy C - header files. - - Notes - ----- - 1. Classes specifying the information to be printed are defined - in the `numpy.distutils.system_info` module. - - Information may include: - - * ``language``: language used to write the libraries (mostly - C or f77) - * ``libraries``: names of libraries found in the system - * ``library_dirs``: directories containing the libraries - * ``include_dirs``: directories containing library header files - * ``src_dirs``: directories containing library source files - * ``define_macros``: preprocessor macros used by - ``distutils.setup`` - * ``baseline``: minimum CPU features required - * ``found``: dispatched features supported in the system - * ``not found``: dispatched features that are not supported - in the system - - 2. 
NumPy BLAS/LAPACK Installation Notes - - Installing a numpy wheel (``pip install numpy`` or force it - via ``pip install numpy --only-binary :numpy: numpy``) includes - an OpenBLAS implementation of the BLAS and LAPACK linear algebra - APIs. In this case, ``library_dirs`` reports the original build - time configuration as compiled with gcc/gfortran; at run time - the OpenBLAS library is in - ``site-packages/numpy.libs/`` (linux), or - ``site-packages/numpy/.dylibs/`` (macOS), or - ``site-packages/numpy/.libs/`` (windows). - - Installing numpy from source - (``pip install numpy --no-binary numpy``) searches for BLAS and - LAPACK dynamic link libraries at build time as influenced by - environment variables NPY_BLAS_LIBS, NPY_CBLAS_LIBS, and - NPY_LAPACK_LIBS; or NPY_BLAS_ORDER and NPY_LAPACK_ORDER; - or the optional file ``~/.numpy-site.cfg``. - NumPy remembers those locations and expects to load the same - libraries at run-time. - In NumPy 1.21+ on macOS, 'accelerate' (Apple's Accelerate BLAS - library) is in the default build-time search order after - 'openblas'. - - Examples - -------- - >>> import numpy as np - >>> np.show_config() - blas_opt_info: - language = c - define_macros = [('HAVE_CBLAS', None)] - libraries = ['openblas', 'openblas'] - library_dirs = ['/usr/local/lib'] - """ - from numpy._core._multiarray_umath import ( - __cpu_features__, __cpu_baseline__, __cpu_dispatch__ - ) - for name,info_dict in globals().items(): - if name[0] == "_" or type(info_dict) is not type({}): continue - print(name + ":") - if not info_dict: - print(" NOT AVAILABLE") - for k,v in info_dict.items(): - v = str(v) - if k == "sources" and len(v) > 200: - v = v[:60] + " ...\n... " + v[-60:] - print(" %s = %s" % (k,v)) - - features_found, features_not_found = [], [] - for feature in __cpu_dispatch__: - if __cpu_features__[feature]: - features_found.append(feature) - else: - features_not_found.append(feature) - - print("Supported SIMD extensions in this NumPy install:") - print(" baseline = %s" % (','.join(__cpu_baseline__))) - print(" found = %s" % (','.join(features_found))) - print(" not found = %s" % (','.join(features_not_found))) - - ''')) - - return target - -def msvc_version(compiler): - """Return version major and minor of compiler instance if it is - MSVC, raise an exception otherwise.""" - if not compiler.compiler_type == "msvc": - raise ValueError("Compiler instance is not msvc (%s)"\ - % compiler.compiler_type) - return compiler._MSVCCompiler__version - -def get_build_architecture(): - # Importing distutils.msvccompiler triggers a warning on non-Windows - # systems, so delay the import to here. - from distutils.msvccompiler import get_build_architecture - return get_build_architecture() - - -_cxx_ignore_flags = {'-Werror=implicit-function-declaration', '-std=c99'} - - -def sanitize_cxx_flags(cxxflags): - ''' - Some flags are valid for C but not C++. Prune them. - ''' - return [flag for flag in cxxflags if flag not in _cxx_ignore_flags] - - -def exec_mod_from_location(modname, modfile): - ''' - Use importlib machinery to import a module `modname` from the file - `modfile`. Depending on the `spec.loader`, the module may not be - registered in sys.modules. 
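-
-    A minimal illustration (module name and file path invented)::
-
-        mod = exec_mod_from_location('foo_version', 'foo/version.py')
-        version = getattr(mod, 'version', None)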
- ''' - spec = importlib.util.spec_from_file_location(modname, modfile) - foo = importlib.util.module_from_spec(spec) - spec.loader.exec_module(foo) - return foo diff --git a/numpy/distutils/msvc9compiler.py b/numpy/distutils/msvc9compiler.py deleted file mode 100644 index 68239495d6c7..000000000000 --- a/numpy/distutils/msvc9compiler.py +++ /dev/null @@ -1,63 +0,0 @@ -import os -from distutils.msvc9compiler import MSVCCompiler as _MSVCCompiler - -from .system_info import platform_bits - - -def _merge(old, new): - """Concatenate two environment paths avoiding repeats. - - Here `old` is the environment string before the base class initialize - function is called and `new` is the string after the call. The new string - will be a fixed string if it is not obtained from the current environment, - or the same as the old string if obtained from the same environment. The aim - here is not to append the new string if it is already contained in the old - string so as to limit the growth of the environment string. - - Parameters - ---------- - old : string - Previous environment string. - new : string - New environment string. - - Returns - ------- - ret : string - Updated environment string. - - """ - if not old: - return new - if new in old: - return old - - # Neither new nor old is empty. Give old priority. - return ';'.join([old, new]) - - -class MSVCCompiler(_MSVCCompiler): - def __init__(self, verbose=0, dry_run=0, force=0): - _MSVCCompiler.__init__(self, verbose, dry_run, force) - - def initialize(self, plat_name=None): - # The 'lib' and 'include' variables may be overwritten - # by MSVCCompiler.initialize, so save them for later merge. - environ_lib = os.getenv('lib') - environ_include = os.getenv('include') - _MSVCCompiler.initialize(self, plat_name) - - # Merge current and previous values of 'lib' and 'include' - os.environ['lib'] = _merge(environ_lib, os.environ['lib']) - os.environ['include'] = _merge(environ_include, os.environ['include']) - - # msvc9 building for 32 bits requires SSE2 to work around a - # compiler bug. - if platform_bits == 32: - self.compile_options += ['/arch:SSE2'] - self.compile_options_debug += ['/arch:SSE2'] - - def manifest_setup_ldargs(self, output_filename, build_temp, ld_args): - ld_args.append('/MANIFEST') - _MSVCCompiler.manifest_setup_ldargs(self, output_filename, - build_temp, ld_args) diff --git a/numpy/distutils/msvccompiler.py b/numpy/distutils/msvccompiler.py deleted file mode 100644 index 2b93221baac8..000000000000 --- a/numpy/distutils/msvccompiler.py +++ /dev/null @@ -1,76 +0,0 @@ -import os -from distutils.msvccompiler import MSVCCompiler as _MSVCCompiler - -from .system_info import platform_bits - - -def _merge(old, new): - """Concatenate two environment paths avoiding repeats. - - Here `old` is the environment string before the base class initialize - function is called and `new` is the string after the call. The new string - will be a fixed string if it is not obtained from the current environment, - or the same as the old string if obtained from the same environment. The aim - here is not to append the new string if it is already contained in the old - string so as to limit the growth of the environment string. - - Parameters - ---------- - old : string - Previous environment string. - new : string - New environment string. - - Returns - ------- - ret : string - Updated environment string. - - """ - if new in old: - return old - if not old: - return new - - # Neither new nor old is empty. Give old priority. 
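-    # Sketch of the expected behaviour (paths invented for illustration):
-    #   _merge('', r'C:\vc\lib')           -> r'C:\vc\lib'
-    #   _merge(r'C:\vc\lib', r'C:\vc\lib') -> r'C:\vc\lib'   (already present)
-    #   _merge(r'C:\a', r'C:\b')           -> r'C:\a;C:\b'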
- return ';'.join([old, new]) - - -class MSVCCompiler(_MSVCCompiler): - def __init__(self, verbose=0, dry_run=0, force=0): - _MSVCCompiler.__init__(self, verbose, dry_run, force) - - def initialize(self): - # The 'lib' and 'include' variables may be overwritten - # by MSVCCompiler.initialize, so save them for later merge. - environ_lib = os.getenv('lib', '') - environ_include = os.getenv('include', '') - _MSVCCompiler.initialize(self) - - # Merge current and previous values of 'lib' and 'include' - os.environ['lib'] = _merge(environ_lib, os.environ['lib']) - os.environ['include'] = _merge(environ_include, os.environ['include']) - - # msvc9 building for 32 bits requires SSE2 to work around a - # compiler bug. - if platform_bits == 32: - self.compile_options += ['/arch:SSE2'] - self.compile_options_debug += ['/arch:SSE2'] - - -def lib_opts_if_msvc(build_cmd): - """ Add flags if we are using MSVC compiler - - We can't see `build_cmd` in our scope, because we have not initialized - the distutils build command, so use this deferred calculation to run - when we are building the library. - """ - if build_cmd.compiler.compiler_type != 'msvc': - return [] - # Explicitly disable whole-program optimization. - flags = ['/GL-'] - # Disable voltbl section for vc142 to allow link using mingw-w64; see: - # https://github.com/matthew-brett/dll_investigation/issues/1#issuecomment-1100468171 - if build_cmd.compiler_opt.cc_test_flags(['-d2VolatileMetadata-']): - flags.append('-d2VolatileMetadata-') - return flags diff --git a/numpy/distutils/npy_pkg_config.py b/numpy/distutils/npy_pkg_config.py deleted file mode 100644 index 14e8791b14cd..000000000000 --- a/numpy/distutils/npy_pkg_config.py +++ /dev/null @@ -1,441 +0,0 @@ -import sys -import re -import os - -from configparser import RawConfigParser - -__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet', - 'read_config', 'parse_flags'] - -_VAR = re.compile(r'\$\{([a-zA-Z0-9_-]+)\}') - -class FormatError(OSError): - """ - Exception thrown when there is a problem parsing a configuration file. - - """ - def __init__(self, msg): - self.msg = msg - - def __str__(self): - return self.msg - -class PkgNotFound(OSError): - """Exception raised when a package can not be located.""" - def __init__(self, msg): - self.msg = msg - - def __str__(self): - return self.msg - -def parse_flags(line): - """ - Parse a line from a config file containing compile flags. - - Parameters - ---------- - line : str - A single line containing one or more compile flags. - - Returns - ------- - d : dict - Dictionary of parsed flags, split into relevant categories. - These categories are the keys of `d`: - - * 'include_dirs' - * 'library_dirs' - * 'libraries' - * 'macros' - * 'ignored' - - """ - d = {'include_dirs': [], 'library_dirs': [], 'libraries': [], - 'macros': [], 'ignored': []} - - flags = (' ' + line).split(' -') - for flag in flags: - flag = '-' + flag - if len(flag) > 0: - if flag.startswith('-I'): - d['include_dirs'].append(flag[2:].strip()) - elif flag.startswith('-L'): - d['library_dirs'].append(flag[2:].strip()) - elif flag.startswith('-l'): - d['libraries'].append(flag[2:].strip()) - elif flag.startswith('-D'): - d['macros'].append(flag[2:].strip()) - else: - d['ignored'].append(flag) - - return d - -def _escape_backslash(val): - return val.replace('\\', '\\\\') - -class LibraryInfo: - """ - Object containing build information about a library. - - Parameters - ---------- - name : str - The library name. - description : str - Description of the library. 
-    version : str
-        Version string.
-    sections : dict
-        The sections of the configuration file for the library. The keys are
-        the section headers, the values the text under each header.
-    vars : class instance
-        A `VariableSet` instance, which contains ``(name, value)`` pairs for
-        variables defined in the configuration file for the library.
-    requires : sequence, optional
-        The required libraries for the library to be installed.
-
-    Notes
-    -----
-    All input parameters (except "sections" which is a method) are available as
-    attributes of the same name.
-
-    """
-    def __init__(self, name, description, version, sections, vars, requires=None):
-        self.name = name
-        self.description = description
-        if requires:
-            self.requires = requires
-        else:
-            self.requires = []
-        self.version = version
-        self._sections = sections
-        self.vars = vars
-
-    def sections(self):
-        """
-        Return the section headers of the config file.
-
-        Parameters
-        ----------
-        None
-
-        Returns
-        -------
-        keys : list of str
-            The list of section headers.
-
-        """
-        return list(self._sections.keys())
-
-    def cflags(self, section="default"):
-        val = self.vars.interpolate(self._sections[section]['cflags'])
-        return _escape_backslash(val)
-
-    def libs(self, section="default"):
-        val = self.vars.interpolate(self._sections[section]['libs'])
-        return _escape_backslash(val)
-
-    def __str__(self):
-        m = ['Name: %s' % self.name, 'Description: %s' % self.description]
-        if self.requires:
-            m.append('Requires: %s' % ",".join(self.requires))
-        else:
-            m.append('Requires:')
-        m.append('Version: %s' % self.version)
-
-        return "\n".join(m)
-
-class VariableSet:
-    """
-    Container object for the variables defined in a config file.
-
-    `VariableSet` can be used as a plain dictionary, with the variable names
-    as keys.
-
-    Parameters
-    ----------
-    d : dict
-        Dict of items in the "variables" section of the configuration file.
-
-    """
-    def __init__(self, d):
-        self._raw_data = dict([(k, v) for k, v in d.items()])
-
-        self._re = {}
-        self._re_sub = {}
-
-        self._init_parse()
-
-    def _init_parse(self):
-        for k, v in self._raw_data.items():
-            self._init_parse_var(k, v)
-
-    def _init_parse_var(self, name, value):
-        self._re[name] = re.compile(r'\$\{%s\}' % name)
-        self._re_sub[name] = value
-
-    def interpolate(self, value):
-        # Brute force: we keep interpolating until there is no '${var}' anymore
-        # or until interpolated string is equal to input string
-        def _interpolate(value):
-            for k in self._re.keys():
-                value = self._re[k].sub(self._re_sub[k], value)
-            return value
-        while _VAR.search(value):
-            nvalue = _interpolate(value)
-            if nvalue == value:
-                break
-            value = nvalue
-
-        return value
-
-    def variables(self):
-        """
-        Return the list of variable names.
-
-        Parameters
-        ----------
-        None
-
-        Returns
-        -------
-        names : list of str
-            The names of all variables in the `VariableSet` instance.
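-
-        Examples
-        --------
-        A minimal sketch (variable values invented for illustration):
-
-        >>> v = VariableSet({'prefix': '/usr', 'libdir': '${prefix}/lib'})
-        >>> v.variables()
-        ['prefix', 'libdir']
-        >>> v.interpolate('-L${libdir} -lnpymath')
-        '-L/usr/lib -lnpymath'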
- - """ - return list(self._raw_data.keys()) - - # Emulate a dict to set/get variables values - def __getitem__(self, name): - return self._raw_data[name] - - def __setitem__(self, name, value): - self._raw_data[name] = value - self._init_parse_var(name, value) - -def parse_meta(config): - if not config.has_section('meta'): - raise FormatError("No meta section found !") - - d = dict(config.items('meta')) - - for k in ['name', 'description', 'version']: - if not k in d: - raise FormatError("Option %s (section [meta]) is mandatory, " - "but not found" % k) - - if not 'requires' in d: - d['requires'] = [] - - return d - -def parse_variables(config): - if not config.has_section('variables'): - raise FormatError("No variables section found !") - - d = {} - - for name, value in config.items("variables"): - d[name] = value - - return VariableSet(d) - -def parse_sections(config): - return meta_d, r - -def pkg_to_filename(pkg_name): - return "%s.ini" % pkg_name - -def parse_config(filename, dirs=None): - if dirs: - filenames = [os.path.join(d, filename) for d in dirs] - else: - filenames = [filename] - - config = RawConfigParser() - - n = config.read(filenames) - if not len(n) >= 1: - raise PkgNotFound("Could not find file(s) %s" % str(filenames)) - - # Parse meta and variables sections - meta = parse_meta(config) - - vars = {} - if config.has_section('variables'): - for name, value in config.items("variables"): - vars[name] = _escape_backslash(value) - - # Parse "normal" sections - secs = [s for s in config.sections() if not s in ['meta', 'variables']] - sections = {} - - requires = {} - for s in secs: - d = {} - if config.has_option(s, "requires"): - requires[s] = config.get(s, 'requires') - - for name, value in config.items(s): - d[name] = value - sections[s] = d - - return meta, vars, sections, requires - -def _read_config_imp(filenames, dirs=None): - def _read_config(f): - meta, vars, sections, reqs = parse_config(f, dirs) - # recursively add sections and variables of required libraries - for rname, rvalue in reqs.items(): - nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue)) - - # Update var dict for variables not in 'top' config file - for k, v in nvars.items(): - if not k in vars: - vars[k] = v - - # Update sec dict - for oname, ovalue in nsections[rname].items(): - if ovalue: - sections[rname][oname] += ' %s' % ovalue - - return meta, vars, sections, reqs - - meta, vars, sections, reqs = _read_config(filenames) - - # FIXME: document this. If pkgname is defined in the variables section, and - # there is no pkgdir variable defined, pkgdir is automatically defined to - # the path of pkgname. This requires the package to be imported to work - if not 'pkgdir' in vars and "pkgname" in vars: - pkgname = vars["pkgname"] - if not pkgname in sys.modules: - raise ValueError("You should import %s to get information on %s" % - (pkgname, meta["name"])) - - mod = sys.modules[pkgname] - vars["pkgdir"] = _escape_backslash(os.path.dirname(mod.__file__)) - - return LibraryInfo(name=meta["name"], description=meta["description"], - version=meta["version"], sections=sections, vars=VariableSet(vars)) - -# Trivial cache to cache LibraryInfo instances creation. To be really -# efficient, the cache should be handled in read_config, since a same file can -# be parsed many time outside LibraryInfo creation, but I doubt this will be a -# problem in practice -_CACHE = {} -def read_config(pkgname, dirs=None): - """ - Return library info for a package from its configuration file. 
-
-    Parameters
-    ----------
-    pkgname : str
-        Name of the package (should match the name of the .ini file, without
-        the extension, e.g. foo for the file foo.ini).
-    dirs : sequence, optional
-        If given, should be a sequence of directories - usually including
-        the NumPy base directory - where to look for npy-pkg-config files.
-
-    Returns
-    -------
-    pkginfo : class instance
-        The `LibraryInfo` instance containing the build information.
-
-    Raises
-    ------
-    PkgNotFound
-        If the package is not found.
-
-    See Also
-    --------
-    misc_util.get_info, misc_util.get_pkg_info
-
-    Examples
-    --------
-    >>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath')
-    >>> type(npymath_info)
-    <class 'numpy.distutils.npy_pkg_config.LibraryInfo'>
-
-    >>> print(npymath_info)
-    Name: npymath
-    Description: Portable, core math library implementing C99 standard
-    Requires:
-    Version: 0.1 #random
-
-    """
-    try:
-        return _CACHE[pkgname]
-    except KeyError:
-        v = _read_config_imp(pkg_to_filename(pkgname), dirs)
-        _CACHE[pkgname] = v
-        return v
-
-# TODO:
-#   - implements version comparison (modversion + atleast)
-
-# pkg-config simple emulator - useful for debugging, and maybe later to query
-# the system
-if __name__ == '__main__':
-    from optparse import OptionParser
-    import glob
-
-    parser = OptionParser()
-    parser.add_option("--cflags", dest="cflags", action="store_true",
-                      help="output all preprocessor and compiler flags")
-    parser.add_option("--libs", dest="libs", action="store_true",
-                      help="output all linker flags")
-    parser.add_option("--use-section", dest="section",
-                      help="use this section instead of default for options")
-    parser.add_option("--version", dest="version", action="store_true",
-                      help="output version")
-    parser.add_option("--atleast-version", dest="min_version",
-                      help="Minimal version")
-    parser.add_option("--list-all", dest="list_all", action="store_true",
-                      help="List all packages")
-    parser.add_option("--define-variable", dest="define_variable",
-                      help="Replace variable with the given value")
-
-    (options, args) = parser.parse_args(sys.argv)
-
-    if len(args) < 2:
-        raise ValueError("Expect package name on the command line:")
-
-    if options.list_all:
-        files = glob.glob("*.ini")
-        for f in files:
-            info = read_config(f)
-            print("%s\t%s - %s" % (info.name, info.name, info.description))
-
-    pkg_name = args[1]
-    d = os.environ.get('NPY_PKG_CONFIG_PATH')
-    if d:
-        info = read_config(
-            pkg_name, ['numpy/_core/lib/npy-pkg-config', '.', d]
-        )
-    else:
-        info = read_config(
-            pkg_name, ['numpy/_core/lib/npy-pkg-config', '.']
-        )
-
-    if options.section:
-        section = options.section
-    else:
-        section = "default"
-
-    if options.define_variable:
-        m = re.search(r'([\S]+)=([\S]+)', options.define_variable)
-        if not m:
-            raise ValueError("--define-variable option should be of "
-                             "the form --define-variable=foo=bar")
-        else:
-            name = m.group(1)
-            value = m.group(2)
-            info.vars[name] = value
-
-    if options.cflags:
-        print(info.cflags(section))
-    if options.libs:
-        print(info.libs(section))
-    if options.version:
-        print(info.version)
-    if options.min_version:
-        print(info.version >= options.min_version)
diff --git a/numpy/distutils/numpy_distribution.py b/numpy/distutils/numpy_distribution.py
deleted file mode 100644
index ea8182659cb1..000000000000
--- a/numpy/distutils/numpy_distribution.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# XXX: Handle setuptools ?
-from distutils.core import Distribution
-
-# This class is used because we add new files (sconscripts, and so on) with the
-# scons command
-class NumpyDistribution(Distribution):
-    def __init__(self, attrs = None):
-        # A list of (sconscripts, pre_hook, post_hook, src, parent_names)
-        self.scons_data = []
-        # A list of installable libraries
-        self.installed_libraries = []
-        # A dict of pkg_config files to generate/install
-        self.installed_pkg_config = {}
-        Distribution.__init__(self, attrs)
-
-    def has_scons_scripts(self):
-        return bool(self.scons_data)
diff --git a/numpy/distutils/pathccompiler.py b/numpy/distutils/pathccompiler.py
deleted file mode 100644
index 1f879edf4d21..000000000000
--- a/numpy/distutils/pathccompiler.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from distutils.unixccompiler import UnixCCompiler
-
-class PathScaleCCompiler(UnixCCompiler):
-
-    """
-    PathScale compiler compatible with a gcc built Python.
-    """
-
-    compiler_type = 'pathcc'
-    cc_exe = 'pathcc'
-    cxx_exe = 'pathCC'
-
-    def __init__ (self, verbose=0, dry_run=0, force=0):
-        UnixCCompiler.__init__ (self, verbose, dry_run, force)
-        cc_compiler = self.cc_exe
-        cxx_compiler = self.cxx_exe
-        self.set_executables(compiler=cc_compiler,
-                             compiler_so=cc_compiler,
-                             compiler_cxx=cxx_compiler,
-                             linker_exe=cc_compiler,
-                             linker_so=cc_compiler + ' -shared')
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
deleted file mode 100644
index e428b47f08d4..000000000000
--- a/numpy/distutils/system_info.py
+++ /dev/null
@@ -1,3267 +0,0 @@
-"""
-This file defines a set of system_info classes for getting
-information about various resources (libraries, library directories,
-include directories, etc.) in the system. Usage:
-    info_dict = get_info(<name>)
-  where <name> is a string 'atlas','x11','fftw','lapack','blas',
-  'lapack_src', 'blas_src', etc. For a complete list of allowed names,
-  see the definition of get_info() function below.
-
-  Returned info_dict is a dictionary which is compatible with
-  distutils.setup keyword arguments. If info_dict == {}, then the
-  asked resource is not available (system_info could not find it).
-
-  Several *_info classes specify an environment variable to specify
-  the locations of software. When setting the corresponding environment
-  variable to 'None' then the software will be ignored, even when it
-  is available in system.
-
-Global parameters:
-  system_info.search_static_first - search static libraries (.a)
-        in precedence to shared ones (.so, .sl) if enabled.
-  system_info.verbosity - output the results to stdout if enabled.
-
-The file 'site.cfg' is looked for in
-
-1) Directory of main setup.py file being run.
-2) Home directory of user running the setup.py file as ~/.numpy-site.cfg
-3) System wide directory (location of this file...)
-
-The first one found is used to get system configuration options. The
-format is that used by ConfigParser (i.e., Windows .INI style). The
-section ALL is not intended for general use.
-
-Appropriate defaults are used if nothing is specified.
-
-The order of finding the locations of resources is the following:
- 1. environment variable
- 2. section in site.cfg
- 3. DEFAULT section in site.cfg
- 4. System default search paths (see ``default_*`` variables below).
-Only the first complete match is returned.
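-
-For example (an illustrative call; the exact dict depends on the system)::
-
-    info_dict = get_info('lapack_opt')
-    # -> {'libraries': [...], 'library_dirs': [...], ...} suitable for
-    #    passing on to distutils.setup keyword arguments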
- -Currently, the following classes are available, along with their section names: - - Numeric_info:Numeric - _numpy_info:Numeric - _pkg_config_info:None - accelerate_info:accelerate - accelerate_lapack_info:accelerate - agg2_info:agg2 - amd_info:amd - atlas_3_10_blas_info:atlas - atlas_3_10_blas_threads_info:atlas - atlas_3_10_info:atlas - atlas_3_10_threads_info:atlas - atlas_blas_info:atlas - atlas_blas_threads_info:atlas - atlas_info:atlas - atlas_threads_info:atlas - blas64__opt_info:ALL # usage recommended (general ILP64 BLAS, 64_ symbol suffix) - blas_ilp64_opt_info:ALL # usage recommended (general ILP64 BLAS) - blas_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 BLAS, no symbol suffix) - blas_info:blas - blas_mkl_info:mkl - blas_ssl2_info:ssl2 - blas_opt_info:ALL # usage recommended - blas_src_info:blas_src - blis_info:blis - boost_python_info:boost_python - dfftw_info:fftw - dfftw_threads_info:fftw - djbfft_info:djbfft - f2py_info:ALL - fft_opt_info:ALL - fftw2_info:fftw - fftw3_info:fftw3 - fftw_info:fftw - fftw_threads_info:fftw - flame_info:flame - freetype2_info:freetype2 - gdk_2_info:gdk_2 - gdk_info:gdk - gdk_pixbuf_2_info:gdk_pixbuf_2 - gdk_pixbuf_xlib_2_info:gdk_pixbuf_xlib_2 - gdk_x11_2_info:gdk_x11_2 - gtkp_2_info:gtkp_2 - gtkp_x11_2_info:gtkp_x11_2 - lapack64__opt_info:ALL # usage recommended (general ILP64 LAPACK, 64_ symbol suffix) - lapack_atlas_3_10_info:atlas - lapack_atlas_3_10_threads_info:atlas - lapack_atlas_info:atlas - lapack_atlas_threads_info:atlas - lapack_ilp64_opt_info:ALL # usage recommended (general ILP64 LAPACK) - lapack_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 LAPACK, no symbol suffix) - lapack_info:lapack - lapack_mkl_info:mkl - lapack_ssl2_info:ssl2 - lapack_opt_info:ALL # usage recommended - lapack_src_info:lapack_src - mkl_info:mkl - ssl2_info:ssl2 - numarray_info:numarray - numerix_info:numerix - numpy_info:numpy - openblas64__info:openblas64_ - openblas64__lapack_info:openblas64_ - openblas_clapack_info:openblas - openblas_ilp64_info:openblas_ilp64 - openblas_ilp64_lapack_info:openblas_ilp64 - openblas_info:openblas - openblas_lapack_info:openblas - sfftw_info:fftw - sfftw_threads_info:fftw - system_info:ALL - umfpack_info:umfpack - wx_info:wx - x11_info:x11 - xft_info:xft - -Note that blas_opt_info and lapack_opt_info honor the NPY_BLAS_ORDER -and NPY_LAPACK_ORDER environment variables to determine the order in which -specific BLAS and LAPACK libraries are searched for. - -This search (or autodetection) can be bypassed by defining the environment -variables NPY_BLAS_LIBS and NPY_LAPACK_LIBS, which should then contain the -exact linker flags to use (language will be set to F77). Building against -Netlib BLAS/LAPACK or stub files, in order to be able to switch BLAS and LAPACK -implementations at runtime. If using this to build NumPy itself, it is -recommended to also define NPY_CBLAS_LIBS (assuming your BLAS library has a -CBLAS interface) to enable CBLAS usage for matrix multiplication (unoptimized -otherwise). 
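-
-For example (library locations invented for illustration)::
-
-    NPY_BLAS_LIBS="-L/opt/netlib/lib -lblas"
-    NPY_CBLAS_LIBS="-lcblas"
-    NPY_LAPACK_LIBS="-L/opt/netlib/lib -llapack"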
-
-Example:
-----------
-[DEFAULT]
-# default section
-library_dirs = /usr/lib:/usr/local/lib:/opt/lib
-include_dirs = /usr/include:/usr/local/include:/opt/include
-src_dirs = /usr/local/src:/opt/src
-# search static libraries (.a) in preference to shared ones (.so)
-search_static_first = 0
-
-[fftw]
-libraries = rfftw, fftw
-
-[atlas]
-library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas
-# for overriding the names of the atlas libraries
-libraries = lapack, f77blas, cblas, atlas
-
-[x11]
-library_dirs = /usr/X11R6/lib
-include_dirs = /usr/X11R6/include
-----------
-
-Note that the ``libraries`` key is the default setting for libraries.
-
-Authors:
-  Pearu Peterson <pearu@cens.ioc.ee>, February 2002
-  David M. Cooke <cookedm@physics.mcmaster.ca>, April 2002
-
-Copyright 2002 Pearu Peterson all rights reserved,
-Pearu Peterson <pearu@cens.ioc.ee>
-Permission to use, modify, and distribute this software is given under the
-terms of the NumPy (BSD style) license. See LICENSE.txt that came with
-this distribution for specifics.
-
-NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
-
-"""
-import sys
-import os
-import re
-import copy
-import warnings
-import subprocess
-import textwrap
-
-from glob import glob
-from functools import reduce
-from configparser import NoOptionError
-from configparser import RawConfigParser as ConfigParser
-# It seems that some people are importing ConfigParser from here so is
-# good to keep its class name. Use of RawConfigParser is needed in
-# order to be able to load path names with percent in them, like
-# `feature%2Fcool` which is common on git flow branch names.
-
-from distutils.errors import DistutilsError
-from distutils.dist import Distribution
-import sysconfig
-from numpy.distutils import log
-from distutils.util import get_platform
-
-from numpy.distutils.exec_command import (
-    find_executable, filepath_from_subprocess_output,
-    )
-from numpy.distutils.misc_util import (is_sequence, is_string,
-                                       get_shared_lib_extension)
-from numpy.distutils.command.config import config as cmd_config
-from numpy.distutils import customized_ccompiler as _customized_ccompiler
-from numpy.distutils import _shell_utils
-import distutils.ccompiler
-import tempfile
-import shutil
-
-__all__ = ['system_info']
-
-# Determine number of bits
-import platform
-_bits = {'32bit': 32, '64bit': 64}
-platform_bits = _bits[platform.architecture()[0]]
-
-
-global_compiler = None
-
-def customized_ccompiler():
-    global global_compiler
-    if not global_compiler:
-        global_compiler = _customized_ccompiler()
-    return global_compiler
-
-
-def _c_string_literal(s):
-    """
-    Convert a python string into a literal suitable for inclusion into C code
-    """
-    # only these three characters are forbidden in C strings
-    s = s.replace('\\', r'\\')
-    s = s.replace('"', r'\"')
-    s = s.replace('\n', r'\n')
-    return '"{}"'.format(s)
-
-
-def libpaths(paths, bits):
-    """Return a list of library paths valid on 32 or 64 bit systems.
-
-    Inputs:
-      paths : sequence
-        A sequence of strings (typically paths)
-      bits : int
-        An integer, the only valid values are 32 or 64. A ValueError exception
-        is raised otherwise.
- - Examples: - - Consider a list of directories - >>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib'] - - For a 32-bit platform, this is already valid: - >>> np.distutils.system_info.libpaths(paths,32) - ['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib'] - - On 64 bits, we prepend the '64' postfix - >>> np.distutils.system_info.libpaths(paths,64) - ['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib', - '/usr/lib64', '/usr/lib'] - """ - if bits not in (32, 64): - raise ValueError("Invalid bit size in libpaths: 32 or 64 only") - - # Handle 32bit case - if bits == 32: - return paths - - # Handle 64bit case - out = [] - for p in paths: - out.extend([p + '64', p]) - - return out - - -if sys.platform == 'win32': - default_lib_dirs = ['C:\\', - os.path.join(sysconfig.get_config_var('exec_prefix'), - 'libs')] - default_runtime_dirs = [] - default_include_dirs = [] - default_src_dirs = ['.'] - default_x11_lib_dirs = [] - default_x11_include_dirs = [] - _include_dirs = [ - 'include', - 'include/suitesparse', - ] - _lib_dirs = [ - 'lib', - ] - - _include_dirs = [d.replace('/', os.sep) for d in _include_dirs] - _lib_dirs = [d.replace('/', os.sep) for d in _lib_dirs] - def add_system_root(library_root): - """Add a package manager root to the include directories""" - global default_lib_dirs - global default_include_dirs - - library_root = os.path.normpath(library_root) - - default_lib_dirs.extend( - os.path.join(library_root, d) for d in _lib_dirs) - default_include_dirs.extend( - os.path.join(library_root, d) for d in _include_dirs) - - # VCpkg is the de-facto package manager on windows for C/C++ - # libraries. If it is on the PATH, then we append its paths here. - vcpkg = shutil.which('vcpkg') - if vcpkg: - vcpkg_dir = os.path.dirname(vcpkg) - if platform.architecture()[0] == '32bit': - specifier = 'x86' - else: - specifier = 'x64' - - vcpkg_installed = os.path.join(vcpkg_dir, 'installed') - for vcpkg_root in [ - os.path.join(vcpkg_installed, specifier + '-windows'), - os.path.join(vcpkg_installed, specifier + '-windows-static'), - ]: - add_system_root(vcpkg_root) - - # Conda is another popular package manager that provides libraries - conda = shutil.which('conda') - if conda: - conda_dir = os.path.dirname(conda) - add_system_root(os.path.join(conda_dir, '..', 'Library')) - add_system_root(os.path.join(conda_dir, 'Library')) - -else: - default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib', - '/opt/local/lib', '/sw/lib'], platform_bits) - default_runtime_dirs = [] - default_include_dirs = ['/usr/local/include', - '/opt/include', - # path of umfpack under macports - '/opt/local/include/ufsparse', - '/opt/local/include', '/sw/include', - '/usr/include/suitesparse'] - default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src'] - - default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib', - '/usr/lib'], platform_bits) - default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include'] - - if os.path.exists('/usr/lib/X11'): - globbed_x11_dir = glob('/usr/lib/*/libX11.so') - if globbed_x11_dir: - x11_so_dir = os.path.split(globbed_x11_dir[0])[0] - default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11']) - default_x11_include_dirs.extend(['/usr/lib/X11/include', - '/usr/include/X11']) - - with open(os.devnull, 'w') as tmp: - try: - p = subprocess.Popen(["gcc", "-print-multiarch"], stdout=subprocess.PIPE, - stderr=tmp) - except (OSError, DistutilsError): - # OSError if gcc is not installed, or SandboxViolation (DistutilsError - # subclass) if an old 
setuptools bug is triggered (see gh-3160). - pass - else: - triplet = str(p.communicate()[0].decode().strip()) - if p.returncode == 0: - # gcc supports the "-print-multiarch" option - default_x11_lib_dirs += [os.path.join("/usr/lib/", triplet)] - default_lib_dirs += [os.path.join("/usr/lib/", triplet)] - - -if os.path.join(sys.prefix, 'lib') not in default_lib_dirs: - default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib')) - default_include_dirs.append(os.path.join(sys.prefix, 'include')) - default_src_dirs.append(os.path.join(sys.prefix, 'src')) - -default_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)] -default_runtime_dirs = [_m for _m in default_runtime_dirs if os.path.isdir(_m)] -default_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)] -default_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)] - -so_ext = get_shared_lib_extension() - - -def get_standard_file(fname): - """Returns a list of files named 'fname' from - 1) System-wide directory (directory-location of this module) - 2) Users HOME directory (os.environ['HOME']) - 3) Local directory - """ - # System-wide file - filenames = [] - try: - f = __file__ - except NameError: - f = sys.argv[0] - sysfile = os.path.join(os.path.split(os.path.abspath(f))[0], - fname) - if os.path.isfile(sysfile): - filenames.append(sysfile) - - # Home directory - # And look for the user config file - try: - f = os.path.expanduser('~') - except KeyError: - pass - else: - user_file = os.path.join(f, fname) - if os.path.isfile(user_file): - filenames.append(user_file) - - # Local file - if os.path.isfile(fname): - filenames.append(os.path.abspath(fname)) - - return filenames - - -def _parse_env_order(base_order, env): - """ Parse an environment variable `env` by splitting with "," and only returning elements from `base_order` - - This method will sequence the environment variable and check for their - individual elements in `base_order`. - - The items in the environment variable may be negated via '^item' or '!itema,itemb'. - It must start with ^/! to negate all options. 
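-
-    For example (illustrative values)::
-
-        os.environ['NPY_BLAS_ORDER'] = '^atlas'
-        _parse_env_order(['mkl', 'openblas', 'atlas'], 'NPY_BLAS_ORDER')
-        # -> (['mkl', 'openblas'], [])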
-
-    Raises
-    ------
-    ValueError: for mixed negated and non-negated orders or multiple negated orders
-
-    Parameters
-    ----------
-    base_order : list of str
-       the base list of orders
-    env : str
-       the environment variable to be parsed; if it is not set, `base_order` is returned
-
-    Returns
-    -------
-    allow_order : list of str
-        allowed orders in lower-case
-    unknown_order : list of str
-        for values not overlapping with `base_order`
-    """
-    order_str = os.environ.get(env, None)
-
-    # ensure all base-orders are lower-case (for easier comparison)
-    base_order = [order.lower() for order in base_order]
-    if order_str is None:
-        return base_order, []
-
-    neg = order_str.startswith(('^', '!'))
-    # Check format
-    order_str_l = list(order_str)
-    sum_neg = order_str_l.count('^') + order_str_l.count('!')
-    if neg:
-        if sum_neg > 1:
-            raise ValueError(f"Environment variable '{env}' may only contain a single (prefixed) negation: {order_str}")
-        # remove prefix
-        order_str = order_str[1:]
-    elif sum_neg > 0:
-        raise ValueError(f"Environment variable '{env}' may not mix negated and non-negated items: {order_str}")
-
-    # Split and lower case
-    orders = order_str.lower().split(',')
-
-    # to inform the caller about non-overlapping elements
-    unknown_order = []
-
-    # if negated, we have to remove from the order
-    if neg:
-        allow_order = base_order.copy()
-
-        for order in orders:
-            if not order:
-                continue
-
-            if order not in base_order:
-                unknown_order.append(order)
-                continue
-
-            if order in allow_order:
-                allow_order.remove(order)
-
-    else:
-        allow_order = []
-
-        for order in orders:
-            if not order:
-                continue
-
-            if order not in base_order:
-                unknown_order.append(order)
-                continue
-
-            if order not in allow_order:
-                allow_order.append(order)
-
-    return allow_order, unknown_order
-
-
-def get_info(name, notfound_action=0):
-    """
-    notfound_action:
-      0 - do nothing
-      1 - display warning message
-      2 - raise error
-    """
-    cl = {'armpl': armpl_info,
-          'blas_armpl': blas_armpl_info,
-          'lapack_armpl': lapack_armpl_info,
-          'fftw3_armpl': fftw3_armpl_info,
-          'atlas': atlas_info,  # use lapack_opt or blas_opt instead
-          'atlas_threads': atlas_threads_info,  # ditto
-          'atlas_blas': atlas_blas_info,
-          'atlas_blas_threads': atlas_blas_threads_info,
-          'lapack_atlas': lapack_atlas_info,  # use lapack_opt instead
-          'lapack_atlas_threads': lapack_atlas_threads_info,  # ditto
-          'atlas_3_10': atlas_3_10_info,  # use lapack_opt or blas_opt instead
-          'atlas_3_10_threads': atlas_3_10_threads_info,  # ditto
-          'atlas_3_10_blas': atlas_3_10_blas_info,
-          'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info,
-          'lapack_atlas_3_10': lapack_atlas_3_10_info,  # use lapack_opt instead
-          'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info,  # ditto
-          'flame': flame_info,  # use lapack_opt instead
-          'mkl': mkl_info,
-          'ssl2': ssl2_info,
-          # openblas which may or may not have embedded lapack
-          'openblas': openblas_info,  # use blas_opt instead
-          # openblas with embedded lapack
-          'openblas_lapack': openblas_lapack_info,  # use blas_opt instead
-          'openblas_clapack': openblas_clapack_info,  # use blas_opt instead
-          'blis': blis_info,  # use blas_opt instead
-          'lapack_mkl': lapack_mkl_info,  # use lapack_opt instead
-          'blas_mkl': blas_mkl_info,  # use blas_opt instead
-          'lapack_ssl2': lapack_ssl2_info,
-          'blas_ssl2': blas_ssl2_info,
-          'accelerate': accelerate_info,  # use blas_opt instead
-          'accelerate_lapack': accelerate_lapack_info,
-          'openblas64_': openblas64__info,
-          'openblas64__lapack': openblas64__lapack_info,
-          'openblas_ilp64': openblas_ilp64_info,
-          'openblas_ilp64_lapack': openblas_ilp64_lapack_info,
-          'x11': x11_info,
-          'fft_opt': fft_opt_info,
-          'fftw': fftw_info,
-          'fftw2': fftw2_info,
-          'fftw3': fftw3_info,
-          'dfftw': dfftw_info,
-          'sfftw': sfftw_info,
-          'fftw_threads': fftw_threads_info,
-          'dfftw_threads': dfftw_threads_info,
-          'sfftw_threads': sfftw_threads_info,
-          'djbfft': djbfft_info,
-          'blas': blas_info,  # use blas_opt instead
-          'lapack': lapack_info,  # use lapack_opt instead
-          'lapack_src': lapack_src_info,
-          'blas_src': blas_src_info,
-          'numpy': numpy_info,
-          'f2py': f2py_info,
-          'Numeric': Numeric_info,
-          'numeric': Numeric_info,
-          'numarray': numarray_info,
-          'numerix': numerix_info,
-          'lapack_opt': lapack_opt_info,
-          'lapack_ilp64_opt': lapack_ilp64_opt_info,
-          'lapack_ilp64_plain_opt': lapack_ilp64_plain_opt_info,
-          'lapack64__opt': lapack64__opt_info,
-          'blas_opt': blas_opt_info,
-          'blas_ilp64_opt': blas_ilp64_opt_info,
-          'blas_ilp64_plain_opt': blas_ilp64_plain_opt_info,
-          'blas64__opt': blas64__opt_info,
-          'boost_python': boost_python_info,
-          'agg2': agg2_info,
-          'wx': wx_info,
-          'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info,
-          'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info,
-          'gdk_pixbuf_2': gdk_pixbuf_2_info,
-          'gdk-pixbuf-2.0': gdk_pixbuf_2_info,
-          'gdk': gdk_info,
-          'gdk_2': gdk_2_info,
-          'gdk-2.0': gdk_2_info,
-          'gdk_x11_2': gdk_x11_2_info,
-          'gdk-x11-2.0': gdk_x11_2_info,
-          'gtkp_x11_2': gtkp_x11_2_info,
-          'gtk+-x11-2.0': gtkp_x11_2_info,
-          'gtkp_2': gtkp_2_info,
-          'gtk+-2.0': gtkp_2_info,
-          'xft': xft_info,
-          'freetype2': freetype2_info,
-          'umfpack': umfpack_info,
-          'amd': amd_info,
-          }.get(name.lower(), system_info)
-    return cl().get_info(notfound_action)
-
-
-class NotFoundError(DistutilsError):
-    """Some third-party program or library is not found."""
-
-
-class AliasedOptionError(DistutilsError):
-    """
-    Aliased options must not both be present in a config file.
-    In section '{section}' we found multiple appearances of options {options}."""
-
-
-class AtlasNotFoundError(NotFoundError):
-    """
-    Atlas (http://github.com/math-atlas/math-atlas) libraries not found.
-    Directories to search for the libraries can be specified in the
-    numpy/distutils/site.cfg file (section [atlas]) or by setting
-    the ATLAS environment variable."""
-
-
-class FlameNotFoundError(NotFoundError):
-    """
-    FLAME (http://www.cs.utexas.edu/~flame/web/) libraries not found.
-    Directories to search for the libraries can be specified in the
-    numpy/distutils/site.cfg file (section [flame])."""
-
-
-class LapackNotFoundError(NotFoundError):
-    """
-    Lapack (http://www.netlib.org/lapack/) libraries not found.
-    Directories to search for the libraries can be specified in the
-    numpy/distutils/site.cfg file (section [lapack]) or by setting
-    the LAPACK environment variable."""
-
-
-class LapackSrcNotFoundError(LapackNotFoundError):
-    """
-    Lapack (http://www.netlib.org/lapack/) sources not found.
-    Directories to search for the sources can be specified in the
-    numpy/distutils/site.cfg file (section [lapack_src]) or by setting
-    the LAPACK_SRC environment variable."""
-
-
-class LapackILP64NotFoundError(NotFoundError):
-    """
-    64-bit Lapack libraries not found.
-    Known libraries in numpy/distutils/site.cfg file are:
-    openblas64_, openblas_ilp64
-    """
-
-class BlasOptNotFoundError(NotFoundError):
-    """
-    Optimized (vendor) Blas libraries are not found.
-    Falls back to the netlib Blas library, which has worse performance.
-    Better performance can often be gained by switching to an
-    optimized Blas library."""
-
-class BlasNotFoundError(NotFoundError):
-    """
-    Blas (http://www.netlib.org/blas/) libraries not found.
-    Directories to search for the libraries can be specified in the
-    numpy/distutils/site.cfg file (section [blas]) or by setting
-    the BLAS environment variable."""
-
-class BlasILP64NotFoundError(NotFoundError):
-    """
-    64-bit Blas libraries not found.
-    Known libraries in numpy/distutils/site.cfg file are:
-    openblas64_, openblas_ilp64
-    """
-
-class BlasSrcNotFoundError(BlasNotFoundError):
-    """
-    Blas (http://www.netlib.org/blas/) sources not found.
-    Directories to search for the sources can be specified in the
-    numpy/distutils/site.cfg file (section [blas_src]) or by setting
-    the BLAS_SRC environment variable."""
-
-
-class FFTWNotFoundError(NotFoundError):
-    """
-    FFTW (http://www.fftw.org/) libraries not found.
-    Directories to search for the libraries can be specified in the
-    numpy/distutils/site.cfg file (section [fftw]) or by setting
-    the FFTW environment variable."""
-
-
-class DJBFFTNotFoundError(NotFoundError):
-    """
-    DJBFFT (https://cr.yp.to/djbfft.html) libraries not found.
-    Directories to search for the libraries can be specified in the
-    numpy/distutils/site.cfg file (section [djbfft]) or by setting
-    the DJBFFT environment variable."""
-
-
-class NumericNotFoundError(NotFoundError):
-    """
-    Numeric (https://www.numpy.org/) module not found.
-    Get it from above location, install it, and retry setup.py."""
-
-
-class X11NotFoundError(NotFoundError):
-    """X11 libraries not found."""
-
-
-class UmfpackNotFoundError(NotFoundError):
-    """
-    UMFPACK sparse solver (https://www.cise.ufl.edu/research/sparse/umfpack/)
-    not found. Directories to search for the libraries can be specified in the
-    numpy/distutils/site.cfg file (section [umfpack]) or by setting
-    the UMFPACK environment variable."""
-
-
-class system_info:
-
-    """ get_info() is the only public method. Don't use others.
-    """
-    dir_env_var = None
-    # XXX: search_static_first is disabled by default, may disappear in
-    # future unless it is proved to be useful.
-    search_static_first = 0
-    # The base-class section name is a random word "ALL" and is not really
-    # intended for general use. It cannot be None nor can it be DEFAULT as
-    # these break the ConfigParser.
See gh-15338 - section = 'ALL' - saved_results = {} - - notfounderror = NotFoundError - - def __init__(self, - default_lib_dirs=default_lib_dirs, - default_include_dirs=default_include_dirs, - ): - self.__class__.info = {} - self.local_prefixes = [] - defaults = {'library_dirs': os.pathsep.join(default_lib_dirs), - 'include_dirs': os.pathsep.join(default_include_dirs), - 'runtime_library_dirs': os.pathsep.join(default_runtime_dirs), - 'rpath': '', - 'src_dirs': os.pathsep.join(default_src_dirs), - 'search_static_first': str(self.search_static_first), - 'extra_compile_args': '', 'extra_link_args': ''} - self.cp = ConfigParser(defaults) - self.files = [] - self.files.extend(get_standard_file('.numpy-site.cfg')) - self.files.extend(get_standard_file('site.cfg')) - self.parse_config_files() - - if self.section is not None: - self.search_static_first = self.cp.getboolean( - self.section, 'search_static_first') - assert isinstance(self.search_static_first, int) - - def parse_config_files(self): - self.cp.read(self.files) - if not self.cp.has_section(self.section): - if self.section is not None: - self.cp.add_section(self.section) - - def calc_libraries_info(self): - libs = self.get_libraries() - dirs = self.get_lib_dirs() - # The extensions use runtime_library_dirs - r_dirs = self.get_runtime_lib_dirs() - # Intrinsic distutils use rpath, we simply append both entries - # as though they were one entry - r_dirs.extend(self.get_runtime_lib_dirs(key='rpath')) - info = {} - for lib in libs: - i = self.check_libs(dirs, [lib]) - if i is not None: - dict_append(info, **i) - else: - log.info('Library %s was not found. Ignoring' % (lib)) - - if r_dirs: - i = self.check_libs(r_dirs, [lib]) - if i is not None: - # Swap library keywords found to runtime_library_dirs - # the libraries are insisting on the user having defined - # them using the library_dirs, and not necessarily by - # runtime_library_dirs - del i['libraries'] - i['runtime_library_dirs'] = i.pop('library_dirs') - dict_append(info, **i) - else: - log.info('Runtime library %s was not found. 
Ignoring' % (lib)) - - return info - - def set_info(self, **info): - if info: - lib_info = self.calc_libraries_info() - dict_append(info, **lib_info) - # Update extra information - extra_info = self.calc_extra_info() - dict_append(info, **extra_info) - self.saved_results[self.__class__.__name__] = info - - def get_option_single(self, *options): - """ Ensure that only one of `options` are found in the section - - Parameters - ---------- - *options : list of str - a list of options to be found in the section (``self.section``) - - Returns - ------- - str : - the option that is uniquely found in the section - - Raises - ------ - AliasedOptionError : - in case more than one of the options are found - """ - found = [self.cp.has_option(self.section, opt) for opt in options] - if sum(found) == 1: - return options[found.index(True)] - elif sum(found) == 0: - # nothing is found anyways - return options[0] - - # Else we have more than 1 key found - if AliasedOptionError.__doc__ is None: - raise AliasedOptionError() - raise AliasedOptionError(AliasedOptionError.__doc__.format( - section=self.section, options='[{}]'.format(', '.join(options)))) - - - def has_info(self): - return self.__class__.__name__ in self.saved_results - - def calc_extra_info(self): - """ Updates the information in the current information with - respect to these flags: - extra_compile_args - extra_link_args - """ - info = {} - for key in ['extra_compile_args', 'extra_link_args']: - # Get values - opt = self.cp.get(self.section, key) - opt = _shell_utils.NativeParser.split(opt) - if opt: - tmp = {key: opt} - dict_append(info, **tmp) - return info - - def get_info(self, notfound_action=0): - """ Return a dictionary with items that are compatible - with numpy.distutils.setup keyword arguments. - """ - flag = 0 - if not self.has_info(): - flag = 1 - log.info(self.__class__.__name__ + ':') - if hasattr(self, 'calc_info'): - self.calc_info() - if notfound_action: - if not self.has_info(): - if notfound_action == 1: - warnings.warn(self.notfounderror.__doc__, stacklevel=2) - elif notfound_action == 2: - raise self.notfounderror(self.notfounderror.__doc__) - else: - raise ValueError(repr(notfound_action)) - - if not self.has_info(): - log.info(' NOT AVAILABLE') - self.set_info() - else: - log.info(' FOUND:') - - res = self.saved_results.get(self.__class__.__name__) - if log.get_threshold() <= log.INFO and flag: - for k, v in res.items(): - v = str(v) - if k in ['sources', 'libraries'] and len(v) > 270: - v = v[:120] + '...\n...\n...' 
+ v[-120:] - log.info(' %s = %s', k, v) - log.info('') - - return copy.deepcopy(res) - - def get_paths(self, section, key): - dirs = self.cp.get(section, key).split(os.pathsep) - env_var = self.dir_env_var - if env_var: - if is_sequence(env_var): - e0 = env_var[-1] - for e in env_var: - if e in os.environ: - e0 = e - break - if not env_var[0] == e0: - log.info('Setting %s=%s' % (env_var[0], e0)) - env_var = e0 - if env_var and env_var in os.environ: - d = os.environ[env_var] - if d == 'None': - log.info('Disabled %s: %s', - self.__class__.__name__, '(%s is None)' - % (env_var,)) - return [] - if os.path.isfile(d): - dirs = [os.path.dirname(d)] + dirs - l = getattr(self, '_lib_names', []) - if len(l) == 1: - b = os.path.basename(d) - b = os.path.splitext(b)[0] - if b[:3] == 'lib': - log.info('Replacing _lib_names[0]==%r with %r' \ - % (self._lib_names[0], b[3:])) - self._lib_names[0] = b[3:] - else: - ds = d.split(os.pathsep) - ds2 = [] - for d in ds: - if os.path.isdir(d): - ds2.append(d) - for dd in ['include', 'lib']: - d1 = os.path.join(d, dd) - if os.path.isdir(d1): - ds2.append(d1) - dirs = ds2 + dirs - default_dirs = self.cp.get(self.section, key).split(os.pathsep) - dirs.extend(default_dirs) - ret = [] - for d in dirs: - if len(d) > 0 and not os.path.isdir(d): - warnings.warn('Specified path %s is invalid.' % d, stacklevel=2) - continue - - if d not in ret: - ret.append(d) - - log.debug('( %s = %s )', key, ':'.join(ret)) - return ret - - def get_lib_dirs(self, key='library_dirs'): - return self.get_paths(self.section, key) - - def get_runtime_lib_dirs(self, key='runtime_library_dirs'): - path = self.get_paths(self.section, key) - if path == ['']: - path = [] - return path - - def get_include_dirs(self, key='include_dirs'): - return self.get_paths(self.section, key) - - def get_src_dirs(self, key='src_dirs'): - return self.get_paths(self.section, key) - - def get_libs(self, key, default): - try: - libs = self.cp.get(self.section, key) - except NoOptionError: - if not default: - return [] - if is_string(default): - return [default] - return default - return [b for b in [a.strip() for a in libs.split(',')] if b] - - def get_libraries(self, key='libraries'): - if hasattr(self, '_lib_names'): - return self.get_libs(key, default=self._lib_names) - else: - return self.get_libs(key, '') - - def library_extensions(self): - c = customized_ccompiler() - static_exts = [] - if c.compiler_type != 'msvc': - # MSVC doesn't understand binutils - static_exts.append('.a') - if sys.platform == 'win32': - static_exts.append('.lib') # .lib is used by MSVC and others - if self.search_static_first: - exts = static_exts + [so_ext] - else: - exts = [so_ext] + static_exts - if sys.platform == 'cygwin': - exts.append('.dll.a') - if sys.platform == 'darwin': - exts.append('.dylib') - return exts - - def check_libs(self, lib_dirs, libs, opt_libs=[]): - """If static or shared libraries are available then return - their info dictionary. - - Checks for all libraries as shared libraries first, then - static (or vice versa if self.search_static_first is True). - """ - exts = self.library_extensions() - info = None - for ext in exts: - info = self._check_libs(lib_dirs, libs, opt_libs, [ext]) - if info is not None: - break - if not info: - log.info(' libraries %s not found in %s', ','.join(libs), - lib_dirs) - return info - - def check_libs2(self, lib_dirs, libs, opt_libs=[]): - """If static or shared libraries are available then return - their info dictionary. - - Checks each library for shared or static. 
- """ - exts = self.library_extensions() - info = self._check_libs(lib_dirs, libs, opt_libs, exts) - if not info: - log.info(' libraries %s not found in %s', ','.join(libs), - lib_dirs) - - return info - - def _find_lib(self, lib_dir, lib, exts): - assert is_string(lib_dir) - # under windows first try without 'lib' prefix - if sys.platform == 'win32': - lib_prefixes = ['', 'lib'] - else: - lib_prefixes = ['lib'] - # for each library name, see if we can find a file for it. - for ext in exts: - for prefix in lib_prefixes: - p = self.combine_paths(lib_dir, prefix + lib + ext) - if p: - break - if p: - assert len(p) == 1 - # ??? splitext on p[0] would do this for cygwin - # doesn't seem correct - if ext == '.dll.a': - lib += '.dll' - if ext == '.lib': - lib = prefix + lib - return lib - - return False - - def _find_libs(self, lib_dirs, libs, exts): - # make sure we preserve the order of libs, as it can be important - found_dirs, found_libs = [], [] - for lib in libs: - for lib_dir in lib_dirs: - found_lib = self._find_lib(lib_dir, lib, exts) - if found_lib: - found_libs.append(found_lib) - if lib_dir not in found_dirs: - found_dirs.append(lib_dir) - break - return found_dirs, found_libs - - def _check_libs(self, lib_dirs, libs, opt_libs, exts): - """Find mandatory and optional libs in expected paths. - - Missing optional libraries are silently forgotten. - """ - if not is_sequence(lib_dirs): - lib_dirs = [lib_dirs] - # First, try to find the mandatory libraries - found_dirs, found_libs = self._find_libs(lib_dirs, libs, exts) - if len(found_libs) > 0 and len(found_libs) == len(libs): - # Now, check for optional libraries - opt_found_dirs, opt_found_libs = self._find_libs(lib_dirs, opt_libs, exts) - found_libs.extend(opt_found_libs) - for lib_dir in opt_found_dirs: - if lib_dir not in found_dirs: - found_dirs.append(lib_dir) - info = {'libraries': found_libs, 'library_dirs': found_dirs} - return info - else: - return None - - def combine_paths(self, *args): - """Return a list of existing paths composed by all combinations - of items from the arguments. 
- """ - return combine_paths(*args) - - -class fft_opt_info(system_info): - - def calc_info(self): - info = {} - fftw_info = get_info('fftw3') or get_info('fftw2') or get_info('dfftw') - djbfft_info = get_info('djbfft') - if fftw_info: - dict_append(info, **fftw_info) - if djbfft_info: - dict_append(info, **djbfft_info) - self.set_info(**info) - return - - -class fftw_info(system_info): - #variables to override - section = 'fftw' - dir_env_var = 'FFTW' - notfounderror = FFTWNotFoundError - ver_info = [{'name':'fftw3', - 'libs':['fftw3'], - 'includes':['fftw3.h'], - 'macros':[('SCIPY_FFTW3_H', None)]}, - {'name':'fftw2', - 'libs':['rfftw', 'fftw'], - 'includes':['fftw.h', 'rfftw.h'], - 'macros':[('SCIPY_FFTW_H', None)]}] - - def calc_ver_info(self, ver_param): - """Returns True on successful version detection, else False""" - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - - opt = self.get_option_single(self.section + '_libs', 'libraries') - libs = self.get_libs(opt, ver_param['libs']) - info = self.check_libs(lib_dirs, libs) - if info is not None: - flag = 0 - for d in incl_dirs: - if len(self.combine_paths(d, ver_param['includes'])) \ - == len(ver_param['includes']): - dict_append(info, include_dirs=[d]) - flag = 1 - break - if flag: - dict_append(info, define_macros=ver_param['macros']) - else: - info = None - if info is not None: - self.set_info(**info) - return True - else: - log.info(' %s not found' % (ver_param['name'])) - return False - - def calc_info(self): - for i in self.ver_info: - if self.calc_ver_info(i): - break - - -class fftw2_info(fftw_info): - #variables to override - section = 'fftw' - dir_env_var = 'FFTW' - notfounderror = FFTWNotFoundError - ver_info = [{'name':'fftw2', - 'libs':['rfftw', 'fftw'], - 'includes':['fftw.h', 'rfftw.h'], - 'macros':[('SCIPY_FFTW_H', None)]} - ] - - -class fftw3_info(fftw_info): - #variables to override - section = 'fftw3' - dir_env_var = 'FFTW3' - notfounderror = FFTWNotFoundError - ver_info = [{'name':'fftw3', - 'libs':['fftw3'], - 'includes':['fftw3.h'], - 'macros':[('SCIPY_FFTW3_H', None)]}, - ] - - -class fftw3_armpl_info(fftw_info): - section = 'fftw3' - dir_env_var = 'ARMPL_DIR' - notfounderror = FFTWNotFoundError - ver_info = [{'name': 'fftw3', - 'libs': ['armpl_lp64_mp'], - 'includes': ['fftw3.h'], - 'macros': [('SCIPY_FFTW3_H', None)]}] - - -class dfftw_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'dfftw', - 'libs':['drfftw', 'dfftw'], - 'includes':['dfftw.h', 'drfftw.h'], - 'macros':[('SCIPY_DFFTW_H', None)]}] - - -class sfftw_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'sfftw', - 'libs':['srfftw', 'sfftw'], - 'includes':['sfftw.h', 'srfftw.h'], - 'macros':[('SCIPY_SFFTW_H', None)]}] - - -class fftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'fftw threads', - 'libs':['rfftw_threads', 'fftw_threads'], - 'includes':['fftw_threads.h', 'rfftw_threads.h'], - 'macros':[('SCIPY_FFTW_THREADS_H', None)]}] - - -class dfftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'dfftw threads', - 'libs':['drfftw_threads', 'dfftw_threads'], - 'includes':['dfftw_threads.h', 'drfftw_threads.h'], - 'macros':[('SCIPY_DFFTW_THREADS_H', None)]}] - - -class sfftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'sfftw threads', - 'libs':['srfftw_threads', 'sfftw_threads'], - 'includes':['sfftw_threads.h', 'srfftw_threads.h'], - 
'macros':[('SCIPY_SFFTW_THREADS_H', None)]}] - - -class djbfft_info(system_info): - section = 'djbfft' - dir_env_var = 'DJBFFT' - notfounderror = DJBFFTNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend(self.combine_paths(d, ['djbfft']) + [d]) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - info = None - for d in lib_dirs: - p = self.combine_paths(d, ['djbfft.a']) - if p: - info = {'extra_objects': p} - break - p = self.combine_paths(d, ['libdjbfft.a', 'libdjbfft' + so_ext]) - if p: - info = {'libraries': ['djbfft'], 'library_dirs': [d]} - break - if info is None: - return - for d in incl_dirs: - if len(self.combine_paths(d, ['fftc8.h', 'fftfreq.h'])) == 2: - dict_append(info, include_dirs=[d], - define_macros=[('SCIPY_DJBFFT_H', None)]) - self.set_info(**info) - return - return - - -class mkl_info(system_info): - section = 'mkl' - dir_env_var = 'MKLROOT' - _lib_mkl = ['mkl_rt'] - - def get_mkl_rootdir(self): - mklroot = os.environ.get('MKLROOT', None) - if mklroot is not None: - return mklroot - paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep) - ld_so_conf = '/etc/ld.so.conf' - if os.path.isfile(ld_so_conf): - with open(ld_so_conf) as f: - for d in f: - d = d.strip() - if d: - paths.append(d) - intel_mkl_dirs = [] - for path in paths: - path_atoms = path.split(os.sep) - for m in path_atoms: - if m.startswith('mkl'): - d = os.sep.join(path_atoms[:path_atoms.index(m) + 2]) - intel_mkl_dirs.append(d) - break - for d in paths: - dirs = glob(os.path.join(d, 'mkl', '*')) - dirs += glob(os.path.join(d, 'mkl*')) - for sub_dir in dirs: - if os.path.isdir(os.path.join(sub_dir, 'lib')): - return sub_dir - return None - - def __init__(self): - mklroot = self.get_mkl_rootdir() - if mklroot is None: - system_info.__init__(self) - else: - from .cpuinfo import cpu - if cpu.is_Itanium(): - plt = '64' - elif cpu.is_Intel() and cpu.is_64bit(): - plt = 'intel64' - else: - plt = '32' - system_info.__init__( - self, - default_lib_dirs=[os.path.join(mklroot, 'lib', plt)], - default_include_dirs=[os.path.join(mklroot, 'include')]) - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - opt = self.get_option_single('mkl_libs', 'libraries') - mkl_libs = self.get_libs(opt, self._lib_mkl) - info = self.check_libs2(lib_dirs, mkl_libs) - if info is None: - return - dict_append(info, - define_macros=[('SCIPY_MKL_H', None), - ('HAVE_CBLAS', None)], - include_dirs=incl_dirs) - if sys.platform == 'win32': - pass # win32 has no pthread library - else: - dict_append(info, libraries=['pthread']) - self.set_info(**info) - - -class lapack_mkl_info(mkl_info): - pass - - -class blas_mkl_info(mkl_info): - pass - - -class ssl2_info(system_info): - section = 'ssl2' - dir_env_var = 'SSL2_DIR' - # Multi-threaded version. Python itself must be built by Fujitsu compiler. 
- _lib_ssl2 = ['fjlapackexsve'] - # Single-threaded version - #_lib_ssl2 = ['fjlapacksve'] - - def get_tcsds_rootdir(self): - tcsdsroot = os.environ.get('TCSDS_PATH', None) - if tcsdsroot is not None: - return tcsdsroot - return None - - def __init__(self): - tcsdsroot = self.get_tcsds_rootdir() - if tcsdsroot is None: - system_info.__init__(self) - else: - system_info.__init__( - self, - default_lib_dirs=[os.path.join(tcsdsroot, 'lib64')], - default_include_dirs=[os.path.join(tcsdsroot, - 'clang-comp/include')]) - - def calc_info(self): - tcsdsroot = self.get_tcsds_rootdir() - - lib_dirs = self.get_lib_dirs() - if lib_dirs is None: - lib_dirs = os.path.join(tcsdsroot, 'lib64') - - incl_dirs = self.get_include_dirs() - if incl_dirs is None: - incl_dirs = os.path.join(tcsdsroot, 'clang-comp/include') - - ssl2_libs = self.get_libs('ssl2_libs', self._lib_ssl2) - - info = self.check_libs2(lib_dirs, ssl2_libs) - if info is None: - return - dict_append(info, - define_macros=[('HAVE_CBLAS', None), - ('HAVE_SSL2', 1)], - include_dirs=incl_dirs,) - self.set_info(**info) - - -class lapack_ssl2_info(ssl2_info): - pass - - -class blas_ssl2_info(ssl2_info): - pass - - - -class armpl_info(system_info): - section = 'armpl' - dir_env_var = 'ARMPL_DIR' - _lib_armpl = ['armpl_lp64_mp'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - armpl_libs = self.get_libs('armpl_libs', self._lib_armpl) - info = self.check_libs2(lib_dirs, armpl_libs) - if info is None: - return - dict_append(info, - define_macros=[('SCIPY_MKL_H', None), - ('HAVE_CBLAS', None)], - include_dirs=incl_dirs) - self.set_info(**info) - -class lapack_armpl_info(armpl_info): - pass - -class blas_armpl_info(armpl_info): - pass - - -class atlas_info(system_info): - section = 'atlas' - dir_env_var = 'ATLAS' - _lib_names = ['f77blas', 'cblas'] - if sys.platform[:7] == 'freebsd': - _lib_atlas = ['atlas_r'] - _lib_lapack = ['alapack_r'] - else: - _lib_atlas = ['atlas'] - _lib_lapack = ['lapack'] - - notfounderror = AtlasNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend(self.combine_paths(d, ['atlas*', 'ATLAS*', - 'sse', '3dnow', 'sse2']) + [d]) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - info = {} - opt = self.get_option_single('atlas_libs', 'libraries') - atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas) - lapack_libs = self.get_libs('lapack_libs', self._lib_lapack) - atlas = None - lapack = None - atlas_1 = None - for d in lib_dirs: - atlas = self.check_libs2(d, atlas_libs, []) - if atlas is not None: - lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*']) - lapack = self.check_libs2(lib_dirs2, lapack_libs, []) - if lapack is not None: - break - if atlas: - atlas_1 = atlas - log.info(self.__class__) - if atlas is None: - atlas = atlas_1 - if atlas is None: - return - include_dirs = self.get_include_dirs() - h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) - h = h[0] - if h: - h = os.path.dirname(h) - dict_append(info, include_dirs=[h]) - info['language'] = 'c' - if lapack is not None: - dict_append(info, **lapack) - dict_append(info, **atlas) - elif 'lapack_atlas' in atlas['libraries']: - dict_append(info, **atlas) - dict_append(info, - define_macros=[('ATLAS_WITH_LAPACK_ATLAS', None)]) - self.set_info(**info) - return - else: - dict_append(info, **atlas) - dict_append(info, 
define_macros=[('ATLAS_WITHOUT_LAPACK', None)]) - message = textwrap.dedent(""" - ********************************************************************* - Could not find lapack library within the ATLAS installation. - ********************************************************************* - """) - warnings.warn(message, stacklevel=2) - self.set_info(**info) - return - - # Check if lapack library is complete, only warn if it is not. - lapack_dir = lapack['library_dirs'][0] - lapack_name = lapack['libraries'][0] - lapack_lib = None - lib_prefixes = ['lib'] - if sys.platform == 'win32': - lib_prefixes.append('') - for e in self.library_extensions(): - for prefix in lib_prefixes: - fn = os.path.join(lapack_dir, prefix + lapack_name + e) - if os.path.exists(fn): - lapack_lib = fn - break - if lapack_lib: - break - if lapack_lib is not None: - sz = os.stat(lapack_lib)[6] - if sz <= 4000 * 1024: - message = textwrap.dedent(""" - ********************************************************************* - Lapack library (from ATLAS) is probably incomplete: - size of %s is %sk (expected >4000k) - - Follow the instructions in the KNOWN PROBLEMS section of the file - numpy/INSTALL.txt. - ********************************************************************* - """) % (lapack_lib, sz / 1024) - warnings.warn(message, stacklevel=2) - else: - info['language'] = 'f77' - - atlas_version, atlas_extra_info = get_atlas_version(**atlas) - dict_append(info, **atlas_extra_info) - - self.set_info(**info) - - -class atlas_blas_info(atlas_info): - _lib_names = ['f77blas', 'cblas'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - info = {} - opt = self.get_option_single('atlas_libs', 'libraries') - atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas) - atlas = self.check_libs2(lib_dirs, atlas_libs, []) - if atlas is None: - return - include_dirs = self.get_include_dirs() - h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) - h = h[0] - if h: - h = os.path.dirname(h) - dict_append(info, include_dirs=[h]) - info['language'] = 'c' - info['define_macros'] = [('HAVE_CBLAS', None)] - - atlas_version, atlas_extra_info = get_atlas_version(**atlas) - dict_append(atlas, **atlas_extra_info) - - dict_append(info, **atlas) - - self.set_info(**info) - return - - -class atlas_threads_info(atlas_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['ptf77blas', 'ptcblas'] - - -class atlas_blas_threads_info(atlas_blas_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['ptf77blas', 'ptcblas'] - - -class lapack_atlas_info(atlas_info): - _lib_names = ['lapack_atlas'] + atlas_info._lib_names - - -class lapack_atlas_threads_info(atlas_threads_info): - _lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names - - -class atlas_3_10_info(atlas_info): - _lib_names = ['satlas'] - _lib_atlas = _lib_names - _lib_lapack = _lib_names - - -class atlas_3_10_blas_info(atlas_3_10_info): - _lib_names = ['satlas'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - info = {} - opt = self.get_option_single('atlas_lib', 'libraries') - atlas_libs = self.get_libs(opt, self._lib_names) - atlas = self.check_libs2(lib_dirs, atlas_libs, []) - if atlas is None: - return - include_dirs = self.get_include_dirs() - h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) - h = h[0] - if h: - h = os.path.dirname(h) - dict_append(info, include_dirs=[h]) - info['language'] = 'c' - info['define_macros'] = [('HAVE_CBLAS', None)] - - atlas_version, atlas_extra_info = get_atlas_version(**atlas) - 
dict_append(atlas, **atlas_extra_info) - - dict_append(info, **atlas) - - self.set_info(**info) - return - - -class atlas_3_10_threads_info(atlas_3_10_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['tatlas'] - _lib_atlas = _lib_names - _lib_lapack = _lib_names - - -class atlas_3_10_blas_threads_info(atlas_3_10_blas_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['tatlas'] - - -class lapack_atlas_3_10_info(atlas_3_10_info): - pass - - -class lapack_atlas_3_10_threads_info(atlas_3_10_threads_info): - pass - - -class lapack_info(system_info): - section = 'lapack' - dir_env_var = 'LAPACK' - _lib_names = ['lapack'] - notfounderror = LapackNotFoundError - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - opt = self.get_option_single('lapack_libs', 'libraries') - lapack_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, lapack_libs, []) - if info is None: - return - info['language'] = 'f77' - self.set_info(**info) - - -class lapack_src_info(system_info): - # LAPACK_SRC is deprecated, please do not use this! - # Build or install a BLAS library via your package manager or from - # source separately. - section = 'lapack_src' - dir_env_var = 'LAPACK_SRC' - notfounderror = LapackSrcNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'dgesv.f')): - src_dir = d - break - if not src_dir: - #XXX: Get sources from netlib. May be ask first. - return - # The following is extracted from LAPACK-3.0/SRC/Makefile. - # Added missing names from lapack-lite-3.1.1/SRC/Makefile - # while keeping removed names for Lapack-3.0 compatibility. 
- allaux = ''' - ilaenv ieeeck lsame lsamen xerbla - iparmq - ''' # *.f - laux = ''' - bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1 - laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2 - lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre - larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4 - lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1 - lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf - stebz stedc steqr sterf - - larra larrc larrd larr larrk larrj larrr laneg laisnan isnan - lazq3 lazq4 - ''' # [s|d]*.f - lasrc = ''' - gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak - gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv - gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2 - geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd - gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal - gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd - ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein - hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0 - lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb - lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp - laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv - lartv larz larzb larzt laswp lasyf latbs latdf latps latrd - latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv - pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2 - potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri - pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs - spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv - sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2 - tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs - trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs - tzrqf tzrzf - - lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5 - ''' # [s|c|d|z]*.f - sd_lasrc = ''' - laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l - org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr - orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3 - ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx - sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd - stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd - sygvx sytd2 sytrd - ''' # [s|d]*.f - cz_lasrc = ''' - bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev - heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv - hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd - hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf - hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7 - laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe - laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv - spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq - ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2 - unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr - ''' # [c|z]*.f - ####### - sclaux = laux + ' econd ' # s*.f - dzlaux = laux + ' secnd ' # d*.f - slasrc = lasrc + sd_lasrc # s*.f - dlasrc = lasrc + sd_lasrc # d*.f - clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f - zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f - oclasrc = ' icmax1 scsum1 ' # *.f - ozlasrc = ' izmax1 dzsum1 ' # *.f - sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \ - + ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \ - + ['c%s.f' % f for f in (clasrc).split()] \ - + ['z%s.f' % f for f in (zlasrc).split()] \ - + ['%s.f' % f for f 
in (allaux + oclasrc + ozlasrc).split()] - sources = [os.path.join(src_dir, f) for f in sources] - # Lapack 3.1: - src_dir2 = os.path.join(src_dir, '..', 'INSTALL') - sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz'] - # Lapack 3.2.1: - sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz'] - sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz'] - sources += [os.path.join(src_dir, 'ila' + p + 'lc.f') for p in 'sdcz'] - # Should we check here actual existence of source files? - # Yes, the file listing is different between 3.0 and 3.1 - # versions. - sources = [f for f in sources if os.path.isfile(f)] - info = {'sources': sources, 'language': 'f77'} - self.set_info(**info) - -atlas_version_c_text = r''' -/* This file is generated from numpy/distutils/system_info.py */ -void ATL_buildinfo(void); -int main(void) { - ATL_buildinfo(); - return 0; -} -''' - -_cached_atlas_version = {} - - -def get_atlas_version(**config): - libraries = config.get('libraries', []) - library_dirs = config.get('library_dirs', []) - key = (tuple(libraries), tuple(library_dirs)) - if key in _cached_atlas_version: - return _cached_atlas_version[key] - c = cmd_config(Distribution()) - atlas_version = None - info = {} - try: - s, o = c.get_output(atlas_version_c_text, - libraries=libraries, library_dirs=library_dirs, - ) - if s and re.search(r'undefined reference to `_gfortran', o, re.M): - s, o = c.get_output(atlas_version_c_text, - libraries=libraries + ['gfortran'], - library_dirs=library_dirs, - ) - if not s: - warnings.warn(textwrap.dedent(""" - ***************************************************** - Linkage with ATLAS requires gfortran. Use - - python setup.py config_fc --fcompiler=gnu95 ... - - when building extension libraries that use ATLAS. - Make sure that -lgfortran is used for C++ extensions. 
-                    *****************************************************
-                    """), stacklevel=2)
-                dict_append(info, language='f90',
-                            define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)])
-    except Exception:  # failed to get version from file -- maybe on Windows
-        # look at directory name
-        for o in library_dirs:
-            m = re.search(r'ATLAS_(?P<version>\d+[.]\d+[.]\d+)_', o)
-            if m:
-                atlas_version = m.group('version')
-            if atlas_version is not None:
-                break
-
-        # final choice --- look at ATLAS_VERSION environment
-        #                  variable
-        if atlas_version is None:
-            atlas_version = os.environ.get('ATLAS_VERSION', None)
-        if atlas_version:
-            dict_append(info, define_macros=[(
-                'ATLAS_INFO', _c_string_literal(atlas_version))
-            ])
-        else:
-            dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)])
-        return atlas_version or '?.?.?', info
-
-    if not s:
-        m = re.search(r'ATLAS version (?P<version>\d+[.]\d+[.]\d+)', o)
-        if m:
-            atlas_version = m.group('version')
-    if atlas_version is None:
-        if re.search(r'undefined symbol: ATL_buildinfo', o, re.M):
-            atlas_version = '3.2.1_pre3.3.6'
-        else:
-            log.info('Status: %d', s)
-            log.info('Output: %s', o)
-
-    elif atlas_version == '3.2.1_pre3.3.6':
-        dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)])
-    else:
-        dict_append(info, define_macros=[(
-            'ATLAS_INFO', _c_string_literal(atlas_version))
-        ])
-    result = _cached_atlas_version[key] = atlas_version, info
-    return result
-
-
-class lapack_opt_info(system_info):
-    notfounderror = LapackNotFoundError
-
-    # List of all known LAPACK libraries, in the default order
-    lapack_order = ['armpl', 'mkl', 'ssl2', 'openblas', 'flame',
-                    'accelerate', 'atlas', 'lapack']
-    order_env_var_name = 'NPY_LAPACK_ORDER'
-
-    def _calc_info_armpl(self):
-        info = get_info('lapack_armpl')
-        if info:
-            self.set_info(**info)
-            return True
-        return False
-
-    def _calc_info_mkl(self):
-        info = get_info('lapack_mkl')
-        if info:
-            self.set_info(**info)
-            return True
-        return False
-
-    def _calc_info_ssl2(self):
-        info = get_info('lapack_ssl2')
-        if info:
-            self.set_info(**info)
-            return True
-        return False
-
-    def _calc_info_openblas(self):
-        info = get_info('openblas_lapack')
-        if info:
-            self.set_info(**info)
-            return True
-        info = get_info('openblas_clapack')
-        if info:
-            self.set_info(**info)
-            return True
-        return False
-
-    def _calc_info_flame(self):
-        info = get_info('flame')
-        if info:
-            self.set_info(**info)
-            return True
-        return False
-
-    def _calc_info_atlas(self):
-        info = get_info('atlas_3_10_threads')
-        if not info:
-            info = get_info('atlas_3_10')
-        if not info:
-            info = get_info('atlas_threads')
-        if not info:
-            info = get_info('atlas')
-        if info:
-            # Figure out if ATLAS has lapack...
-            # If not we need the lapack library, but not BLAS!
-            l = info.get('define_macros', [])
-            if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \
-               or ('ATLAS_WITHOUT_LAPACK', None) in l:
-                # Get LAPACK (with possible warnings)
-                # If not found we don't accept anything
-                # since we can't use ATLAS with LAPACK!
- lapack_info = self._get_info_lapack() - if not lapack_info: - return False - dict_append(info, **lapack_info) - self.set_info(**info) - return True - return False - - def _calc_info_accelerate(self): - info = get_info('accelerate') - if info: - self.set_info(**info) - return True - return False - - def _get_info_blas(self): - # Default to get the optimized BLAS implementation - info = get_info('blas_opt') - if not info: - warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3) - info_src = get_info('blas_src') - if not info_src: - warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3) - return {} - dict_append(info, libraries=[('fblas_src', info_src)]) - return info - - def _get_info_lapack(self): - info = get_info('lapack') - if not info: - warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=3) - info_src = get_info('lapack_src') - if not info_src: - warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=3) - return {} - dict_append(info, libraries=[('flapack_src', info_src)]) - return info - - def _calc_info_lapack(self): - info = self._get_info_lapack() - if info: - info_blas = self._get_info_blas() - dict_append(info, **info_blas) - dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) - self.set_info(**info) - return True - return False - - def _calc_info_from_envvar(self): - info = {} - info['language'] = 'f77' - info['libraries'] = [] - info['include_dirs'] = [] - info['define_macros'] = [] - info['extra_link_args'] = os.environ['NPY_LAPACK_LIBS'].split() - self.set_info(**info) - return True - - def _calc_info(self, name): - return getattr(self, '_calc_info_{}'.format(name))() - - def calc_info(self): - lapack_order, unknown_order = _parse_env_order(self.lapack_order, self.order_env_var_name) - if len(unknown_order) > 0: - raise ValueError("lapack_opt_info user defined " - "LAPACK order has unacceptable " - "values: {}".format(unknown_order)) - - if 'NPY_LAPACK_LIBS' in os.environ: - # Bypass autodetection, set language to F77 and use env var linker - # flags directly - self._calc_info_from_envvar() - return - - for lapack in lapack_order: - if self._calc_info(lapack): - return - - if 'lapack' not in lapack_order: - # Since the user may request *not* to use any library, we still need - # to raise warnings to signal missing packages! 
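-            # ('lapack' is only missing from lapack_order when the user set
-            # NPY_LAPACK_ORDER to a subset or negation that excludes it; see
-            # _parse_env_order above)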
-            warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=2)
-            warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=2)
-
-
-class _ilp64_opt_info_mixin:
-    symbol_suffix = None
-    symbol_prefix = None
-
-    def _check_info(self, info):
-        macros = dict(info.get('define_macros', []))
-        prefix = macros.get('BLAS_SYMBOL_PREFIX', '')
-        suffix = macros.get('BLAS_SYMBOL_SUFFIX', '')
-
-        if self.symbol_prefix not in (None, prefix):
-            return False
-
-        if self.symbol_suffix not in (None, suffix):
-            return False
-
-        return bool(info)
-
-
-class lapack_ilp64_opt_info(lapack_opt_info, _ilp64_opt_info_mixin):
-    notfounderror = LapackILP64NotFoundError
-    lapack_order = ['openblas64_', 'openblas_ilp64', 'accelerate']
-    order_env_var_name = 'NPY_LAPACK_ILP64_ORDER'
-
-    def _calc_info(self, name):
-        log.info('lapack_ilp64_opt_info._calc_info(name=%s)', name)
-        info = get_info(name + '_lapack')
-        if self._check_info(info):
-            self.set_info(**info)
-            return True
-        else:
-            log.info('%s_lapack does not exist', name)
-        return False
-
-
-class lapack_ilp64_plain_opt_info(lapack_ilp64_opt_info):
-    # Same as lapack_ilp64_opt_info, but fix symbol names
-    symbol_prefix = ''
-    symbol_suffix = ''
-
-
-class lapack64__opt_info(lapack_ilp64_opt_info):
-    symbol_prefix = ''
-    symbol_suffix = '64_'
-
-
-class blas_opt_info(system_info):
-    notfounderror = BlasNotFoundError
-    # List of all known BLAS libraries, in the default order
-
-    blas_order = ['armpl', 'mkl', 'ssl2', 'blis', 'openblas',
-                  'accelerate', 'atlas', 'blas']
-    order_env_var_name = 'NPY_BLAS_ORDER'
-
-    def _calc_info_armpl(self):
-        info = get_info('blas_armpl')
-        if info:
-            self.set_info(**info)
-            return True
-        return False
-
-    def _calc_info_mkl(self):
-        info = get_info('blas_mkl')
-        if info:
-            self.set_info(**info)
-            return True
-        return False
-
-    def _calc_info_ssl2(self):
-        info = get_info('blas_ssl2')
-        if info:
-            self.set_info(**info)
-            return True
-        return False
-
-    def _calc_info_blis(self):
-        info = get_info('blis')
-        if info:
-            self.set_info(**info)
-            return True
-        return False
-
-    def _calc_info_openblas(self):
-        info = get_info('openblas')
-        if info:
-            self.set_info(**info)
-            return True
-        return False
-
-    def _calc_info_atlas(self):
-        info = get_info('atlas_3_10_blas_threads')
-        if not info:
-            info = get_info('atlas_3_10_blas')
-        if not info:
-            info = get_info('atlas_blas_threads')
-        if not info:
-            info = get_info('atlas_blas')
-        if info:
-            self.set_info(**info)
-            return True
-        return False
-
-    def _calc_info_accelerate(self):
-        info = get_info('accelerate')
-        if info:
-            self.set_info(**info)
-            return True
-        return False
-
-    def _calc_info_blas(self):
-        # Warn about a non-optimized BLAS library
-        warnings.warn(BlasOptNotFoundError.__doc__ or '', stacklevel=3)
-        info = {}
-        dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])
-
-        blas = get_info('blas')
-        if blas:
-            dict_append(info, **blas)
-        else:
-            # Not even BLAS was found!
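-            # (not fatal yet: the netlib Fortran sources from 'blas_src' are
-            # tried below as a last resort)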
- warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3) - - blas_src = get_info('blas_src') - if not blas_src: - warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3) - return False - dict_append(info, libraries=[('fblas_src', blas_src)]) - - self.set_info(**info) - return True - - def _calc_info_from_envvar(self): - info = {} - info['language'] = 'f77' - info['libraries'] = [] - info['include_dirs'] = [] - info['define_macros'] = [] - info['extra_link_args'] = os.environ['NPY_BLAS_LIBS'].split() - if 'NPY_CBLAS_LIBS' in os.environ: - info['define_macros'].append(('HAVE_CBLAS', None)) - info['extra_link_args'].extend( - os.environ['NPY_CBLAS_LIBS'].split()) - self.set_info(**info) - return True - - def _calc_info(self, name): - return getattr(self, '_calc_info_{}'.format(name))() - - def calc_info(self): - blas_order, unknown_order = _parse_env_order(self.blas_order, self.order_env_var_name) - if len(unknown_order) > 0: - raise ValueError("blas_opt_info user defined BLAS order has unacceptable values: {}".format(unknown_order)) - - if 'NPY_BLAS_LIBS' in os.environ: - # Bypass autodetection, set language to F77 and use env var linker - # flags directly - self._calc_info_from_envvar() - return - - for blas in blas_order: - if self._calc_info(blas): - return - - if 'blas' not in blas_order: - # Since the user may request *not* to use any library, we still need - # to raise warnings to signal missing packages! - warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=2) - warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=2) - - -class blas_ilp64_opt_info(blas_opt_info, _ilp64_opt_info_mixin): - notfounderror = BlasILP64NotFoundError - blas_order = ['openblas64_', 'openblas_ilp64', 'accelerate'] - order_env_var_name = 'NPY_BLAS_ILP64_ORDER' - - def _calc_info(self, name): - info = get_info(name) - if self._check_info(info): - self.set_info(**info) - return True - return False - - -class blas_ilp64_plain_opt_info(blas_ilp64_opt_info): - symbol_prefix = '' - symbol_suffix = '' - - -class blas64__opt_info(blas_ilp64_opt_info): - symbol_prefix = '' - symbol_suffix = '64_' - - -class cblas_info(system_info): - section = 'cblas' - dir_env_var = 'CBLAS' - # No default as it's used only in blas_info - _lib_names = [] - notfounderror = BlasNotFoundError - - -class blas_info(system_info): - section = 'blas' - dir_env_var = 'BLAS' - _lib_names = ['blas'] - notfounderror = BlasNotFoundError - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - opt = self.get_option_single('blas_libs', 'libraries') - blas_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, blas_libs, []) - if info is None: - return - else: - info['include_dirs'] = self.get_include_dirs() - if platform.system() == 'Windows': - # The check for windows is needed because get_cblas_libs uses the - # same compiler that was used to compile Python and msvc is - # often not installed when mingw is being used. This rough - # treatment is not desirable, but windows is tricky. - info['language'] = 'f77' # XXX: is it generally true? 
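The `NPY_BLAS_LIBS`/`NPY_CBLAS_LIBS`/`NPY_LAPACK_LIBS` escape hatch handled by
`_calc_info_from_envvar` above bypasses all of this probing. A minimal sketch of
a build driven that way; the paths are illustrative, not defaults:

    import os

    # The flags are handed to the linker verbatim via extra_link_args:
    os.environ['NPY_BLAS_LIBS'] = '-L/opt/blis/lib -lblis'        # illustrative
    os.environ['NPY_CBLAS_LIBS'] = '-lcblas'   # optional; also defines HAVE_CBLAS
    os.environ['NPY_LAPACK_LIBS'] = '-L/opt/lapack/lib -llapack'  # illustrative

    # blas_opt_info/lapack_opt_info now skip autodetection entirely and
    # report language='f77' plus the extra_link_args set above.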
-        # If cblas is given as an option, use those
-        cblas_info_obj = cblas_info()
-        cblas_opt = cblas_info_obj.get_option_single('cblas_libs', 'libraries')
-        cblas_libs = cblas_info_obj.get_libs(cblas_opt, None)
-        if cblas_libs:
-            info['libraries'] = cblas_libs + blas_libs
-            info['define_macros'] = [('HAVE_CBLAS', None)]
-        else:
-            lib = self.get_cblas_libs(info)
-            if lib is not None:
-                info['language'] = 'c'
-                info['libraries'] = lib
-                info['define_macros'] = [('HAVE_CBLAS', None)]
-        self.set_info(**info)
-
-    def get_cblas_libs(self, info):
-        """ Check whether we can link with CBLAS interface
-
-        This method will search through several combinations of libraries
-        to check whether CBLAS is present:
-
-        1. Libraries in ``info['libraries']``, as is
-        2. As 1. but also explicitly adding ``'cblas'`` as a library
-        3. As 1. but also explicitly adding ``'blas'`` as a library
-        4. Check only library ``'cblas'``
-        5. Check only library ``'blas'``
-
-        Parameters
-        ----------
-        info : dict
-           system information dictionary for compilation and linking
-
-        Returns
-        -------
-        libraries : list of str or None
-            a list of libraries that enables the use of CBLAS interface.
-            Returns None if not found or a compilation error occurs.
-
-            Since 1.17 returns a list.
-        """
-        # primitive cblas check by looking for the header and trying to link
-        # cblas or blas
-        c = customized_ccompiler()
-        tmpdir = tempfile.mkdtemp()
-        s = textwrap.dedent("""\
-            #include <cblas.h>
-            int main(int argc, const char *argv[])
-            {
-                double a[4] = {1,2,3,4};
-                double b[4] = {5,6,7,8};
-                return cblas_ddot(4, a, 1, b, 1) > 10;
-            }""")
-        src = os.path.join(tmpdir, 'source.c')
-        try:
-            with open(src, 'w') as f:
-                f.write(s)
-
-            try:
-                # check we can compile (find headers)
-                obj = c.compile([src], output_dir=tmpdir,
-                                include_dirs=self.get_include_dirs())
-            except (distutils.ccompiler.CompileError, distutils.ccompiler.LinkError):
-                return None
-
-            # check we can link (find library)
-            # some systems have separate cblas and blas libs.
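-            # (netlib-style installs ship a separate libcblas next to libblas,
-            # while OpenBLAS exports the cblas_ symbols from a single library;
-            # hence the five candidate sets tried in order below)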
- for libs in [info['libraries'], ['cblas'] + info['libraries'], - ['blas'] + info['libraries'], ['cblas'], ['blas']]: - try: - c.link_executable(obj, os.path.join(tmpdir, "a.out"), - libraries=libs, - library_dirs=info['library_dirs'], - extra_postargs=info.get('extra_link_args', [])) - return libs - except distutils.ccompiler.LinkError: - pass - finally: - shutil.rmtree(tmpdir) - return None - - -class openblas_info(blas_info): - section = 'openblas' - dir_env_var = 'OPENBLAS' - _lib_names = ['openblas'] - _require_symbols = [] - notfounderror = BlasNotFoundError - - @property - def symbol_prefix(self): - try: - return self.cp.get(self.section, 'symbol_prefix') - except NoOptionError: - return '' - - @property - def symbol_suffix(self): - try: - return self.cp.get(self.section, 'symbol_suffix') - except NoOptionError: - return '' - - def _calc_info(self): - c = customized_ccompiler() - - lib_dirs = self.get_lib_dirs() - - # Prefer to use libraries over openblas_libs - opt = self.get_option_single('openblas_libs', 'libraries') - openblas_libs = self.get_libs(opt, self._lib_names) - - info = self.check_libs(lib_dirs, openblas_libs, []) - - if c.compiler_type == "msvc" and info is None: - from numpy.distutils.fcompiler import new_fcompiler - f = new_fcompiler(c_compiler=c) - if f and f.compiler_type == 'gnu95': - # Try gfortran-compatible library files - info = self.check_msvc_gfortran_libs(lib_dirs, openblas_libs) - # Skip lapack check, we'd need build_ext to do it - skip_symbol_check = True - elif info: - skip_symbol_check = False - info['language'] = 'c' - - if info is None: - return None - - # Add extra info for OpenBLAS - extra_info = self.calc_extra_info() - dict_append(info, **extra_info) - - if not (skip_symbol_check or self.check_symbols(info)): - return None - - info['define_macros'] = [('HAVE_CBLAS', None)] - if self.symbol_prefix: - info['define_macros'] += [('BLAS_SYMBOL_PREFIX', self.symbol_prefix)] - if self.symbol_suffix: - info['define_macros'] += [('BLAS_SYMBOL_SUFFIX', self.symbol_suffix)] - - return info - - def calc_info(self): - info = self._calc_info() - if info is not None: - self.set_info(**info) - - def check_msvc_gfortran_libs(self, library_dirs, libraries): - # First, find the full path to each library directory - library_paths = [] - for library in libraries: - for library_dir in library_dirs: - # MinGW static ext will be .a - fullpath = os.path.join(library_dir, library + '.a') - if os.path.isfile(fullpath): - library_paths.append(fullpath) - break - else: - return None - - # Generate numpy.distutils virtual static library file - basename = self.__class__.__name__ - tmpdir = os.path.join(os.getcwd(), 'build', basename) - if not os.path.isdir(tmpdir): - os.makedirs(tmpdir) - - info = {'library_dirs': [tmpdir], - 'libraries': [basename], - 'language': 'f77'} - - fake_lib_file = os.path.join(tmpdir, basename + '.fobjects') - fake_clib_file = os.path.join(tmpdir, basename + '.cobjects') - with open(fake_lib_file, 'w') as f: - f.write("\n".join(library_paths)) - with open(fake_clib_file, 'w') as f: - pass - - return info - - def check_symbols(self, info): - res = False - c = customized_ccompiler() - - tmpdir = tempfile.mkdtemp() - - prototypes = "\n".join("void %s%s%s();" % (self.symbol_prefix, - symbol_name, - self.symbol_suffix) - for symbol_name in self._require_symbols) - calls = "\n".join("%s%s%s();" % (self.symbol_prefix, - symbol_name, - self.symbol_suffix) - for symbol_name in self._require_symbols) - s = textwrap.dedent("""\ - %(prototypes)s - int main(int 
argc, const char *argv[]) - { - %(calls)s - return 0; - }""") % dict(prototypes=prototypes, calls=calls) - src = os.path.join(tmpdir, 'source.c') - out = os.path.join(tmpdir, 'a.out') - # Add the additional "extra" arguments - try: - extra_args = info['extra_link_args'] - except Exception: - extra_args = [] - try: - with open(src, 'w') as f: - f.write(s) - obj = c.compile([src], output_dir=tmpdir) - try: - c.link_executable(obj, out, libraries=info['libraries'], - library_dirs=info['library_dirs'], - extra_postargs=extra_args) - res = True - except distutils.ccompiler.LinkError: - res = False - finally: - shutil.rmtree(tmpdir) - return res - -class openblas_lapack_info(openblas_info): - section = 'openblas' - dir_env_var = 'OPENBLAS' - _lib_names = ['openblas'] - _require_symbols = ['zungqr_'] - notfounderror = BlasNotFoundError - -class openblas_clapack_info(openblas_lapack_info): - _lib_names = ['openblas', 'lapack'] - -class openblas_ilp64_info(openblas_info): - section = 'openblas_ilp64' - dir_env_var = 'OPENBLAS_ILP64' - _lib_names = ['openblas64'] - _require_symbols = ['dgemm_', 'cblas_dgemm'] - notfounderror = BlasILP64NotFoundError - - def _calc_info(self): - info = super()._calc_info() - if info is not None: - info['define_macros'] += [('HAVE_BLAS_ILP64', None)] - return info - -class openblas_ilp64_lapack_info(openblas_ilp64_info): - _require_symbols = ['dgemm_', 'cblas_dgemm', 'zungqr_', 'LAPACKE_zungqr'] - - def _calc_info(self): - info = super()._calc_info() - if info: - info['define_macros'] += [('HAVE_LAPACKE', None)] - return info - -class openblas64__info(openblas_ilp64_info): - # ILP64 Openblas, with default symbol suffix - section = 'openblas64_' - dir_env_var = 'OPENBLAS64_' - _lib_names = ['openblas64_'] - symbol_suffix = '64_' - symbol_prefix = '' - -class openblas64__lapack_info(openblas_ilp64_lapack_info, openblas64__info): - pass - -class blis_info(blas_info): - section = 'blis' - dir_env_var = 'BLIS' - _lib_names = ['blis'] - notfounderror = BlasNotFoundError - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - opt = self.get_option_single('blis_libs', 'libraries') - blis_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs2(lib_dirs, blis_libs, []) - if info is None: - return - - # Add include dirs - incl_dirs = self.get_include_dirs() - dict_append(info, - language='c', - define_macros=[('HAVE_CBLAS', None)], - include_dirs=incl_dirs) - self.set_info(**info) - - -class flame_info(system_info): - """ Usage of libflame for LAPACK operations - - This requires libflame to be compiled with lapack wrappers: - - ./configure --enable-lapack2flame ... - - Be aware that libflame 5.1.0 has some missing names in the shared library, so - if you have problems, try the static flame library. 
- """ - section = 'flame' - _lib_names = ['flame'] - notfounderror = FlameNotFoundError - - def check_embedded_lapack(self, info): - """ libflame does not necessarily have a wrapper for fortran LAPACK, we need to check """ - c = customized_ccompiler() - - tmpdir = tempfile.mkdtemp() - s = textwrap.dedent("""\ - void zungqr_(); - int main(int argc, const char *argv[]) - { - zungqr_(); - return 0; - }""") - src = os.path.join(tmpdir, 'source.c') - out = os.path.join(tmpdir, 'a.out') - # Add the additional "extra" arguments - extra_args = info.get('extra_link_args', []) - try: - with open(src, 'w') as f: - f.write(s) - obj = c.compile([src], output_dir=tmpdir) - try: - c.link_executable(obj, out, libraries=info['libraries'], - library_dirs=info['library_dirs'], - extra_postargs=extra_args) - return True - except distutils.ccompiler.LinkError: - return False - finally: - shutil.rmtree(tmpdir) - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - flame_libs = self.get_libs('libraries', self._lib_names) - - info = self.check_libs2(lib_dirs, flame_libs, []) - if info is None: - return - - # Add the extra flag args to info - extra_info = self.calc_extra_info() - dict_append(info, **extra_info) - - if self.check_embedded_lapack(info): - # check if the user has supplied all information required - self.set_info(**info) - else: - # Try and get the BLAS lib to see if we can get it to work - blas_info = get_info('blas_opt') - if not blas_info: - # since we already failed once, this ain't going to work either - return - - # Now we need to merge the two dictionaries - for key in blas_info: - if isinstance(blas_info[key], list): - info[key] = info.get(key, []) + blas_info[key] - elif isinstance(blas_info[key], tuple): - info[key] = info.get(key, ()) + blas_info[key] - else: - info[key] = info.get(key, '') + blas_info[key] - - # Now check again - if self.check_embedded_lapack(info): - self.set_info(**info) - - -class accelerate_info(system_info): - section = 'accelerate' - _lib_names = ['accelerate', 'veclib'] - notfounderror = BlasNotFoundError - - def calc_info(self): - # Make possible to enable/disable from config file/env var - libraries = os.environ.get('ACCELERATE') - if libraries: - libraries = [libraries] - else: - libraries = self.get_libs('libraries', self._lib_names) - libraries = [lib.strip().lower() for lib in libraries] - - if (sys.platform == 'darwin' and - not os.getenv('_PYTHON_HOST_PLATFORM', None)): - # Use the system BLAS from Accelerate or vecLib under OSX - args = [] - link_args = [] - if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \ - 'x86_64' in get_platform() or \ - 'i386' in platform.platform(): - intel = 1 - else: - intel = 0 - if (os.path.exists('/System/Library/Frameworks' - '/Accelerate.framework/') and - 'accelerate' in libraries): - if intel: - args.extend(['-msse3']) - args.extend([ - '-I/System/Library/Frameworks/vecLib.framework/Headers']) - link_args.extend(['-Wl,-framework', '-Wl,Accelerate']) - elif (os.path.exists('/System/Library/Frameworks' - '/vecLib.framework/') and - 'veclib' in libraries): - if intel: - args.extend(['-msse3']) - args.extend([ - '-I/System/Library/Frameworks/vecLib.framework/Headers']) - link_args.extend(['-Wl,-framework', '-Wl,vecLib']) - - if args: - macros = [ - ('NO_ATLAS_INFO', 3), - ('HAVE_CBLAS', None), - ('ACCELERATE_NEW_LAPACK', None), - ] - if(os.getenv('NPY_USE_BLAS_ILP64', None)): - print('Setting HAVE_BLAS_ILP64') - macros += [ - ('HAVE_BLAS_ILP64', None), - ('ACCELERATE_LAPACK_ILP64', None), - ] - 
self.set_info(extra_compile_args=args, - extra_link_args=link_args, - define_macros=macros) - - return - -class accelerate_lapack_info(accelerate_info): - def _calc_info(self): - return super()._calc_info() - -class blas_src_info(system_info): - # BLAS_SRC is deprecated, please do not use this! - # Build or install a BLAS library via your package manager or from - # source separately. - section = 'blas_src' - dir_env_var = 'BLAS_SRC' - notfounderror = BlasSrcNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['blas'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'daxpy.f')): - src_dir = d - break - if not src_dir: - #XXX: Get sources from netlib. May be ask first. - return - blas1 = ''' - caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot - dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2 - srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg - dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax - snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap - scabs1 - ''' - blas2 = ''' - cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv - chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv - dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv - sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger - stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc - zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2 - ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv - ''' - blas3 = ''' - cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k - dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm - ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm - ''' - sources = [os.path.join(src_dir, f + '.f') \ - for f in (blas1 + blas2 + blas3).split()] - #XXX: should we check here actual existence of source files? 
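-        # (the isfile() filter below drops any routine that is missing from
-        # this particular BLAS source tree)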
- sources = [f for f in sources if os.path.isfile(f)] - info = {'sources': sources, 'language': 'f77'} - self.set_info(**info) - - -class x11_info(system_info): - section = 'x11' - notfounderror = X11NotFoundError - _lib_names = ['X11'] - - def __init__(self): - system_info.__init__(self, - default_lib_dirs=default_x11_lib_dirs, - default_include_dirs=default_x11_include_dirs) - - def calc_info(self): - if sys.platform in ['win32']: - return - lib_dirs = self.get_lib_dirs() - include_dirs = self.get_include_dirs() - opt = self.get_option_single('x11_libs', 'libraries') - x11_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, x11_libs, []) - if info is None: - return - inc_dir = None - for d in include_dirs: - if self.combine_paths(d, 'X11/X.h'): - inc_dir = d - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir]) - self.set_info(**info) - - -class _numpy_info(system_info): - section = 'Numeric' - modulename = 'Numeric' - notfounderror = NumericNotFoundError - - def __init__(self): - include_dirs = [] - try: - module = __import__(self.modulename) - prefix = [] - for name in module.__file__.split(os.sep): - if name == 'lib': - break - prefix.append(name) - - # Ask numpy for its own include path before attempting - # anything else - try: - include_dirs.append(getattr(module, 'get_include')()) - except AttributeError: - pass - - include_dirs.append(sysconfig.get_path('include')) - except ImportError: - pass - py_incl_dir = sysconfig.get_path('include') - include_dirs.append(py_incl_dir) - py_pincl_dir = sysconfig.get_path('platinclude') - if py_pincl_dir not in include_dirs: - include_dirs.append(py_pincl_dir) - for d in default_include_dirs: - d = os.path.join(d, os.path.basename(py_incl_dir)) - if d not in include_dirs: - include_dirs.append(d) - system_info.__init__(self, - default_lib_dirs=[], - default_include_dirs=include_dirs) - - def calc_info(self): - try: - module = __import__(self.modulename) - except ImportError: - return - info = {} - macros = [] - for v in ['__version__', 'version']: - vrs = getattr(module, v, None) - if vrs is None: - continue - macros = [(self.modulename.upper() + '_VERSION', - _c_string_literal(vrs)), - (self.modulename.upper(), None)] - break - dict_append(info, define_macros=macros) - include_dirs = self.get_include_dirs() - inc_dir = None - for d in include_dirs: - if self.combine_paths(d, - os.path.join(self.modulename, - 'arrayobject.h')): - inc_dir = d - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir]) - if info: - self.set_info(**info) - return - - -class numarray_info(_numpy_info): - section = 'numarray' - modulename = 'numarray' - - -class Numeric_info(_numpy_info): - section = 'Numeric' - modulename = 'Numeric' - - -class numpy_info(_numpy_info): - section = 'numpy' - modulename = 'numpy' - - -class numerix_info(system_info): - section = 'numerix' - - def calc_info(self): - which = None, None - if os.getenv("NUMERIX"): - which = os.getenv("NUMERIX"), "environment var" - # If all the above fail, default to numpy. 
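-        # For example, running  NUMERIX=numarray python setup.py build  selects
-        # the numarray backend here; with the variable unset, the import probes
-        # below try numpy first and fall back to Numeric, then numarray.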
- if which[0] is None: - which = "numpy", "defaulted" - try: - import numpy # noqa: F401 - which = "numpy", "defaulted" - except ImportError as e: - msg1 = str(e) - try: - import Numeric # noqa: F401 - which = "numeric", "defaulted" - except ImportError as e: - msg2 = str(e) - try: - import numarray # noqa: F401 - which = "numarray", "defaulted" - except ImportError as e: - msg3 = str(e) - log.info(msg1) - log.info(msg2) - log.info(msg3) - which = which[0].strip().lower(), which[1] - if which[0] not in ["numeric", "numarray", "numpy"]: - raise ValueError("numerix selector must be either 'Numeric' " - "or 'numarray' or 'numpy' but the value obtained" - " from the %s was '%s'." % (which[1], which[0])) - os.environ['NUMERIX'] = which[0] - self.set_info(**get_info(which[0])) - - -class f2py_info(system_info): - def calc_info(self): - try: - import numpy.f2py as f2py - except ImportError: - return - f2py_dir = os.path.join(os.path.dirname(f2py.__file__), 'src') - self.set_info(sources=[os.path.join(f2py_dir, 'fortranobject.c')], - include_dirs=[f2py_dir]) - return - - -class boost_python_info(system_info): - section = 'boost_python' - dir_env_var = 'BOOST' - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['boost*'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'libs', 'python', 'src', - 'module.cpp')): - src_dir = d - break - if not src_dir: - return - py_incl_dirs = [sysconfig.get_path('include')] - py_pincl_dir = sysconfig.get_path('platinclude') - if py_pincl_dir not in py_incl_dirs: - py_incl_dirs.append(py_pincl_dir) - srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src') - bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp')) - bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp')) - info = {'libraries': [('boost_python_src', - {'include_dirs': [src_dir] + py_incl_dirs, - 'sources':bpl_srcs} - )], - 'include_dirs': [src_dir], - } - if info: - self.set_info(**info) - return - - -class agg2_info(system_info): - section = 'agg2' - dir_env_var = 'AGG2' - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['agg2*'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')): - src_dir = d - break - if not src_dir: - return - if sys.platform == 'win32': - agg2_srcs = glob(os.path.join(src_dir, 'src', 'platform', - 'win32', 'agg_win32_bmp.cpp')) - else: - agg2_srcs = glob(os.path.join(src_dir, 'src', '*.cpp')) - agg2_srcs += [os.path.join(src_dir, 'src', 'platform', - 'X11', - 'agg_platform_support.cpp')] - - info = {'libraries': - [('agg2_src', - {'sources': agg2_srcs, - 'include_dirs': [os.path.join(src_dir, 'include')], - } - )], - 'include_dirs': [os.path.join(src_dir, 'include')], - } - if info: - self.set_info(**info) - return - - -class _pkg_config_info(system_info): - section = None - config_env_var = 'PKG_CONFIG' - default_config_exe = 'pkg-config' - append_config_exe = '' - version_macro_name = None - release_macro_name = None - version_flag = '--modversion' - cflags_flag = '--cflags' - - def get_config_exe(self): - if self.config_env_var in os.environ: - return 
os.environ[self.config_env_var] - return self.default_config_exe - - def get_config_output(self, config_exe, option): - cmd = config_exe + ' ' + self.append_config_exe + ' ' + option - try: - o = subprocess.check_output(cmd) - except (OSError, subprocess.CalledProcessError): - pass - else: - o = filepath_from_subprocess_output(o) - return o - - def calc_info(self): - config_exe = find_executable(self.get_config_exe()) - if not config_exe: - log.warn('File not found: %s. Cannot determine %s info.' \ - % (config_exe, self.section)) - return - info = {} - macros = [] - libraries = [] - library_dirs = [] - include_dirs = [] - extra_link_args = [] - extra_compile_args = [] - version = self.get_config_output(config_exe, self.version_flag) - if version: - macros.append((self.__class__.__name__.split('.')[-1].upper(), - _c_string_literal(version))) - if self.version_macro_name: - macros.append((self.version_macro_name + '_%s' - % (version.replace('.', '_')), None)) - if self.release_macro_name: - release = self.get_config_output(config_exe, '--release') - if release: - macros.append((self.release_macro_name + '_%s' - % (release.replace('.', '_')), None)) - opts = self.get_config_output(config_exe, '--libs') - if opts: - for opt in opts.split(): - if opt[:2] == '-l': - libraries.append(opt[2:]) - elif opt[:2] == '-L': - library_dirs.append(opt[2:]) - else: - extra_link_args.append(opt) - opts = self.get_config_output(config_exe, self.cflags_flag) - if opts: - for opt in opts.split(): - if opt[:2] == '-I': - include_dirs.append(opt[2:]) - elif opt[:2] == '-D': - if '=' in opt: - n, v = opt[2:].split('=') - macros.append((n, v)) - else: - macros.append((opt[2:], None)) - else: - extra_compile_args.append(opt) - if macros: - dict_append(info, define_macros=macros) - if libraries: - dict_append(info, libraries=libraries) - if library_dirs: - dict_append(info, library_dirs=library_dirs) - if include_dirs: - dict_append(info, include_dirs=include_dirs) - if extra_link_args: - dict_append(info, extra_link_args=extra_link_args) - if extra_compile_args: - dict_append(info, extra_compile_args=extra_compile_args) - if info: - self.set_info(**info) - return - - -class wx_info(_pkg_config_info): - section = 'wx' - config_env_var = 'WX_CONFIG' - default_config_exe = 'wx-config' - append_config_exe = '' - version_macro_name = 'WX_VERSION' - release_macro_name = 'WX_RELEASE' - version_flag = '--version' - cflags_flag = '--cxxflags' - - -class gdk_pixbuf_xlib_2_info(_pkg_config_info): - section = 'gdk_pixbuf_xlib_2' - append_config_exe = 'gdk-pixbuf-xlib-2.0' - version_macro_name = 'GDK_PIXBUF_XLIB_VERSION' - - -class gdk_pixbuf_2_info(_pkg_config_info): - section = 'gdk_pixbuf_2' - append_config_exe = 'gdk-pixbuf-2.0' - version_macro_name = 'GDK_PIXBUF_VERSION' - - -class gdk_x11_2_info(_pkg_config_info): - section = 'gdk_x11_2' - append_config_exe = 'gdk-x11-2.0' - version_macro_name = 'GDK_X11_VERSION' - - -class gdk_2_info(_pkg_config_info): - section = 'gdk_2' - append_config_exe = 'gdk-2.0' - version_macro_name = 'GDK_VERSION' - - -class gdk_info(_pkg_config_info): - section = 'gdk' - append_config_exe = 'gdk' - version_macro_name = 'GDK_VERSION' - - -class gtkp_x11_2_info(_pkg_config_info): - section = 'gtkp_x11_2' - append_config_exe = 'gtk+-x11-2.0' - version_macro_name = 'GTK_X11_VERSION' - - -class gtkp_2_info(_pkg_config_info): - section = 'gtkp_2' - append_config_exe = 'gtk+-2.0' - version_macro_name = 'GTK_VERSION' - - -class xft_info(_pkg_config_info): - section = 'xft' - append_config_exe = 'xft' - 
version_macro_name = 'XFT_VERSION' - - -class freetype2_info(_pkg_config_info): - section = 'freetype2' - append_config_exe = 'freetype2' - version_macro_name = 'FREETYPE2_VERSION' - - -class amd_info(system_info): - section = 'amd' - dir_env_var = 'AMD' - _lib_names = ['amd'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - opt = self.get_option_single('amd_libs', 'libraries') - amd_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, amd_libs, []) - if info is None: - return - - include_dirs = self.get_include_dirs() - - inc_dir = None - for d in include_dirs: - p = self.combine_paths(d, 'amd.h') - if p: - inc_dir = os.path.dirname(p[0]) - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir], - define_macros=[('SCIPY_AMD_H', None)], - swig_opts=['-I' + inc_dir]) - - self.set_info(**info) - return - - -class umfpack_info(system_info): - section = 'umfpack' - dir_env_var = 'UMFPACK' - notfounderror = UmfpackNotFoundError - _lib_names = ['umfpack'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - opt = self.get_option_single('umfpack_libs', 'libraries') - umfpack_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, umfpack_libs, []) - if info is None: - return - - include_dirs = self.get_include_dirs() - - inc_dir = None - for d in include_dirs: - p = self.combine_paths(d, ['', 'umfpack'], 'umfpack.h') - if p: - inc_dir = os.path.dirname(p[0]) - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir], - define_macros=[('SCIPY_UMFPACK_H', None)], - swig_opts=['-I' + inc_dir]) - - dict_append(info, **get_info('amd')) - - self.set_info(**info) - return - - -def combine_paths(*args, **kws): - """ Return a list of existing paths composed by all combinations of - items from arguments. 
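-
-    For example, assuming both directories exist (hypothetical layout),
-    combine_paths('/usr', ['lib', 'include']) returns
-    ['/usr/lib', '/usr/include']; combinations that do not exist on disk
-    are dropped.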
- """ - r = [] - for a in args: - if not a: - continue - if is_string(a): - a = [a] - r.append(a) - args = r - if not args: - return [] - if len(args) == 1: - result = reduce(lambda a, b: a + b, map(glob, args[0]), []) - elif len(args) == 2: - result = [] - for a0 in args[0]: - for a1 in args[1]: - result.extend(glob(os.path.join(a0, a1))) - else: - result = combine_paths(*(combine_paths(args[0], args[1]) + args[2:])) - log.debug('(paths: %s)', ','.join(result)) - return result - -language_map = {'c': 0, 'c++': 1, 'f77': 2, 'f90': 3} -inv_language_map = {0: 'c', 1: 'c++', 2: 'f77', 3: 'f90'} - - -def dict_append(d, **kws): - languages = [] - for k, v in kws.items(): - if k == 'language': - languages.append(v) - continue - if k in d: - if k in ['library_dirs', 'include_dirs', - 'extra_compile_args', 'extra_link_args', - 'runtime_library_dirs', 'define_macros']: - [d[k].append(vv) for vv in v if vv not in d[k]] - else: - d[k].extend(v) - else: - d[k] = v - if languages: - l = inv_language_map[max([language_map.get(l, 0) for l in languages])] - d['language'] = l - return - - -def parseCmdLine(argv=(None,)): - import optparse - parser = optparse.OptionParser("usage: %prog [-v] [info objs]") - parser.add_option('-v', '--verbose', action='store_true', dest='verbose', - default=False, - help='be verbose and print more messages') - - opts, args = parser.parse_args(args=argv[1:]) - return opts, args - - -def show_all(argv=None): - import inspect - if argv is None: - argv = sys.argv - opts, args = parseCmdLine(argv) - if opts.verbose: - log.set_threshold(log.DEBUG) - else: - log.set_threshold(log.INFO) - show_only = [] - for n in args: - if n[-5:] != '_info': - n = n + '_info' - show_only.append(n) - show_all = not show_only - _gdict_ = globals().copy() - for name, c in _gdict_.items(): - if not inspect.isclass(c): - continue - if not issubclass(c, system_info) or c is system_info: - continue - if not show_all: - if name not in show_only: - continue - del show_only[show_only.index(name)] - conf = c() - conf.verbosity = 2 - # we don't need the result, but we want - # the side effect of printing diagnostics - conf.get_info() - if show_only: - log.info('Info classes not defined: %s', ','.join(show_only)) - -if __name__ == "__main__": - show_all() diff --git a/numpy/distutils/tests/__init__.py b/numpy/distutils/tests/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/numpy/distutils/tests/test_build_ext.py b/numpy/distutils/tests/test_build_ext.py deleted file mode 100644 index 7124cc407a2f..000000000000 --- a/numpy/distutils/tests/test_build_ext.py +++ /dev/null @@ -1,74 +0,0 @@ -'''Tests for numpy.distutils.build_ext.''' - -import os -import subprocess -import sys -from textwrap import indent, dedent -import pytest -from numpy.testing import IS_WASM - -@pytest.mark.skipif(IS_WASM, reason="cannot start subprocess in wasm") -@pytest.mark.slow -def test_multi_fortran_libs_link(tmp_path): - ''' - Ensures multiple "fake" static libraries are correctly linked. - see gh-18295 - ''' - - # We need to make sure we actually have an f77 compiler. 
- # This is nontrivial, so we'll borrow the utilities - # from f2py tests: - from numpy.distutils.tests.utilities import has_f77_compiler - if not has_f77_compiler(): - pytest.skip('No F77 compiler found') - - # make some dummy sources - with open(tmp_path / '_dummy1.f', 'w') as fid: - fid.write(indent(dedent('''\ - FUNCTION dummy_one() - RETURN - END FUNCTION'''), prefix=' '*6)) - with open(tmp_path / '_dummy2.f', 'w') as fid: - fid.write(indent(dedent('''\ - FUNCTION dummy_two() - RETURN - END FUNCTION'''), prefix=' '*6)) - with open(tmp_path / '_dummy.c', 'w') as fid: - # doesn't need to load - just needs to exist - fid.write('int PyInit_dummyext;') - - # make a setup file - with open(tmp_path / 'setup.py', 'w') as fid: - srctree = os.path.join(os.path.dirname(__file__), '..', '..', '..') - fid.write(dedent(f'''\ - def configuration(parent_package="", top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration("", parent_package, top_path) - config.add_library("dummy1", sources=["_dummy1.f"]) - config.add_library("dummy2", sources=["_dummy2.f"]) - config.add_extension("dummyext", sources=["_dummy.c"], libraries=["dummy1", "dummy2"]) - return config - - - if __name__ == "__main__": - import sys - sys.path.insert(0, r"{srctree}") - from numpy.distutils.core import setup - setup(**configuration(top_path="").todict())''')) - - # build the test extension and "install" into a temporary directory - build_dir = tmp_path - subprocess.check_call([sys.executable, 'setup.py', 'build', 'install', - '--prefix', str(tmp_path / 'installdir'), - '--record', str(tmp_path / 'tmp_install_log.txt'), - ], - cwd=str(build_dir), - ) - # get the path to the so - so = None - with open(tmp_path /'tmp_install_log.txt') as fid: - for line in fid: - if 'dummyext' in line: - so = line.strip() - break - assert so is not None diff --git a/numpy/distutils/tests/test_ccompiler_opt.py b/numpy/distutils/tests/test_ccompiler_opt.py deleted file mode 100644 index 3714aea0e12e..000000000000 --- a/numpy/distutils/tests/test_ccompiler_opt.py +++ /dev/null @@ -1,808 +0,0 @@ -import re, textwrap, os -from os import sys, path -from distutils.errors import DistutilsError - -is_standalone = __name__ == '__main__' and __package__ is None -if is_standalone: - import unittest, contextlib, tempfile, shutil - sys.path.append(path.abspath(path.join(path.dirname(__file__), ".."))) - from ccompiler_opt import CCompilerOpt - - # from numpy/testing/_private/utils.py - @contextlib.contextmanager - def tempdir(*args, **kwargs): - tmpdir = tempfile.mkdtemp(*args, **kwargs) - try: - yield tmpdir - finally: - shutil.rmtree(tmpdir) - - def assert_(expr, msg=''): - if not expr: - raise AssertionError(msg) -else: - from numpy.distutils.ccompiler_opt import CCompilerOpt - from numpy.testing import assert_, tempdir - -# architectures and compilers to test -arch_compilers = dict( - x86 = ("gcc", "clang", "icc", "iccw", "msvc"), - x64 = ("gcc", "clang", "icc", "iccw", "msvc"), - ppc64 = ("gcc", "clang"), - ppc64le = ("gcc", "clang"), - armhf = ("gcc", "clang"), - aarch64 = ("gcc", "clang", "fcc"), - s390x = ("gcc", "clang"), - noarch = ("gcc",) -) - -class FakeCCompilerOpt(CCompilerOpt): - fake_info = "" - def __init__(self, trap_files="", trap_flags="", *args, **kwargs): - self.fake_trap_files = trap_files - self.fake_trap_flags = trap_flags - CCompilerOpt.__init__(self, None, **kwargs) - - def __repr__(self): - return textwrap.dedent("""\ - <<<< - march : {} - compiler : {} - ---------------- - {} - >>>> - 
""").format(self.cc_march, self.cc_name, self.report()) - - def dist_compile(self, sources, flags, **kwargs): - assert(isinstance(sources, list)) - assert(isinstance(flags, list)) - if self.fake_trap_files: - for src in sources: - if re.match(self.fake_trap_files, src): - self.dist_error("source is trapped by a fake interface") - if self.fake_trap_flags: - for f in flags: - if re.match(self.fake_trap_flags, f): - self.dist_error("flag is trapped by a fake interface") - # fake objects - return zip(sources, [' '.join(flags)] * len(sources)) - - def dist_info(self): - return FakeCCompilerOpt.fake_info - - @staticmethod - def dist_log(*args, stderr=False): - pass - -class _Test_CCompilerOpt: - arch = None # x86_64 - cc = None # gcc - - def setup_class(self): - FakeCCompilerOpt.conf_nocache = True - self._opt = None - - def nopt(self, *args, **kwargs): - FakeCCompilerOpt.fake_info = (self.arch, self.cc, "") - return FakeCCompilerOpt(*args, **kwargs) - - def opt(self): - if not self._opt: - self._opt = self.nopt() - return self._opt - - def march(self): - return self.opt().cc_march - - def cc_name(self): - return self.opt().cc_name - - def get_targets(self, targets, groups, **kwargs): - FakeCCompilerOpt.conf_target_groups = groups - opt = self.nopt( - cpu_baseline=kwargs.get("baseline", "min"), - cpu_dispatch=kwargs.get("dispatch", "max"), - trap_files=kwargs.get("trap_files", ""), - trap_flags=kwargs.get("trap_flags", "") - ) - with tempdir() as tmpdir: - file = os.path.join(tmpdir, "test_targets.c") - with open(file, 'w') as f: - f.write(targets) - gtargets = [] - gflags = {} - fake_objects = opt.try_dispatch([file]) - for source, flags in fake_objects: - gtar = path.basename(source).split('.')[1:-1] - glen = len(gtar) - if glen == 0: - gtar = "baseline" - elif glen == 1: - gtar = gtar[0].upper() - else: - # converting multi-target into parentheses str format to be equivalent - # to the configuration statements syntax. 
-                    gtar = ('('+' '.join(gtar)+')').upper()
-                gtargets.append(gtar)
-                gflags[gtar] = flags
-
-            has_baseline, targets = opt.sources_status[file]
-            targets = targets + ["baseline"] if has_baseline else targets
-            # convert tuples that represent multi-targets into the parentheses str format
-            targets = [
-                '('+' '.join(tar)+')' if isinstance(tar, tuple) else tar
-                for tar in targets
-            ]
-            if len(targets) != len(gtargets) or not all(t in gtargets for t in targets):
-                raise AssertionError(
-                    "'sources_status' returns different targets than the compiled targets\n"
-                    "%s != %s" % (targets, gtargets)
-                )
-            # return targets from 'sources_status' since the order matters
-            return targets, gflags
-
-    def arg_regex(self, **kwargs):
-        map2origin = dict(
-            x64 = "x86",
-            ppc64le = "ppc64",
-            aarch64 = "armhf",
-            clang = "gcc",
-        )
-        march = self.march(); cc_name = self.cc_name()
-        map_march = map2origin.get(march, march)
-        map_cc = map2origin.get(cc_name, cc_name)
-        for key in (
-            march, cc_name, map_march, map_cc,
-            march + '_' + cc_name,
-            map_march + '_' + cc_name,
-            march + '_' + map_cc,
-            map_march + '_' + map_cc,
-        ) :
-            regex = kwargs.pop(key, None)
-            if regex is not None:
-                break
-        if regex:
-            if isinstance(regex, dict):
-                for k, v in regex.items():
-                    if v[-1:] not in ')}$?\\.+*':
-                        regex[k] = v + '$'
-            else:
-                assert(isinstance(regex, str))
-                if regex[-1:] not in ')}$?\\.+*':
-                    regex += '$'
-        return regex
-
-    def expect(self, dispatch, baseline="", **kwargs):
-        match = self.arg_regex(**kwargs)
-        if match is None:
-            return
-        opt = self.nopt(
-            cpu_baseline=baseline, cpu_dispatch=dispatch,
-            trap_files=kwargs.get("trap_files", ""),
-            trap_flags=kwargs.get("trap_flags", "")
-        )
-        features = ' '.join(opt.cpu_dispatch_names())
-        if not match:
-            if len(features) != 0:
-                raise AssertionError(
-                    'expected empty features, not "%s"' % features
-                )
-            return
-        if not re.match(match, features, re.IGNORECASE):
-            raise AssertionError(
-                'dispatch features "%s" do not match "%s"' % (features, match)
-            )
-
-    def expect_baseline(self, baseline, dispatch="", **kwargs):
-        match = self.arg_regex(**kwargs)
-        if match is None:
-            return
-        opt = self.nopt(
-            cpu_baseline=baseline, cpu_dispatch=dispatch,
-            trap_files=kwargs.get("trap_files", ""),
-            trap_flags=kwargs.get("trap_flags", "")
-        )
-        features = ' '.join(opt.cpu_baseline_names())
-        if not match:
-            if len(features) != 0:
-                raise AssertionError(
-                    'expected empty features, not "%s"' % features
-                )
-            return
-        if not re.match(match, features, re.IGNORECASE):
-            raise AssertionError(
-                'baseline features "%s" do not match "%s"' % (features, match)
-            )
-
-    def expect_flags(self, baseline, dispatch="", **kwargs):
-        match = self.arg_regex(**kwargs)
-        if match is None:
-            return
-        opt = self.nopt(
-            cpu_baseline=baseline, cpu_dispatch=dispatch,
-            trap_files=kwargs.get("trap_files", ""),
-            trap_flags=kwargs.get("trap_flags", "")
-        )
-        flags = ' '.join(opt.cpu_baseline_flags())
-        if not match:
-            if len(flags) != 0:
-                raise AssertionError(
-                    'expected empty flags, not "%s"' % flags
-                )
-            return
-        if not re.match(match, flags):
-            raise AssertionError(
-                'flags "%s" do not match "%s"' % (flags, match)
-            )
-
-    def expect_targets(self, targets, groups={}, **kwargs):
-        match = self.arg_regex(**kwargs)
-        if match is None:
-            return
-        targets, _ = self.get_targets(targets=targets, groups=groups, **kwargs)
-        targets = ' '.join(targets)
-        if not match:
-            if len(targets) != 0:
-                raise AssertionError(
-                    'expected empty targets, not "%s"' % targets
-                )
-            return
-        if not re.match(match, targets, re.IGNORECASE):
-            raise AssertionError(
-                'targets "%s" do not match "%s"' % (targets, match)
-            )
-
-    def expect_target_flags(self, targets, groups={}, **kwargs):
-        match_dict = self.arg_regex(**kwargs)
-        if match_dict is None:
-            return
-        assert(isinstance(match_dict, dict))
-        _, tar_flags = self.get_targets(targets=targets, groups=groups)
-
-        for match_tar, match_flags in match_dict.items():
-            if match_tar not in tar_flags:
-                raise AssertionError(
-                    'expected to find target "%s"' % match_tar
-                )
-            flags = tar_flags[match_tar]
-            if not match_flags:
-                if len(flags) != 0:
-                    raise AssertionError(
-                        'expected to find empty flags in target "%s"' % match_tar
-                    )
-            if not re.match(match_flags, flags):
-                raise AssertionError(
-                    '"%s" flags "%s" do not match "%s"' % (match_tar, flags, match_flags)
-                )
-
-    def test_interface(self):
-        wrong_arch = "ppc64" if self.arch != "ppc64" else "x86"
-        wrong_cc = "clang" if self.cc != "clang" else "icc"
-        opt = self.opt()
-        assert_(getattr(opt, "cc_on_" + self.arch))
-        assert_(not getattr(opt, "cc_on_" + wrong_arch))
-        assert_(getattr(opt, "cc_is_" + self.cc))
-        assert_(not getattr(opt, "cc_is_" + wrong_cc))
-
-    def test_args_empty(self):
-        for baseline, dispatch in (
-            ("", "none"),
-            (None, ""),
-            ("none +none", "none - none"),
-            ("none -max", "min - max"),
-            ("+vsx2 -VSX2", "vsx avx2 avx512f -max"),
-            ("max -vsx - avx + avx512f neon -MAX ",
-             "min -min + max -max -vsx + avx2 -avx2 +NONE")
-        ) :
-            opt = self.nopt(cpu_baseline=baseline, cpu_dispatch=dispatch)
-            assert(len(opt.cpu_baseline_names()) == 0)
-            assert(len(opt.cpu_dispatch_names()) == 0)
-
-    def test_args_validation(self):
-        if self.march() == "unknown":
-            return
-        # check the sanity of argument validation
-        for baseline, dispatch in (
-            ("unknown_feature - max +min", "unknown max min"), # unknown features
-            ("#avx2", "$vsx") # groups and policies aren't acceptable
-        ) :
-            try:
-                self.nopt(cpu_baseline=baseline, cpu_dispatch=dispatch)
-                raise AssertionError("expected an exception for invalid arguments")
-            except DistutilsError:
-                pass
-
-    def test_skip(self):
-        # only take what the platform supports and skip the others
-        # without raising exceptions
-        self.expect(
-            "sse vsx neon",
-            x86="sse", ppc64="vsx", armhf="neon", unknown=""
-        )
-        self.expect(
-            "sse41 avx avx2 vsx2 vsx3 neon_vfpv4 asimd",
-            x86 = "sse41 avx avx2",
-            ppc64 = "vsx2 vsx3",
-            armhf = "neon_vfpv4 asimd",
-            unknown = ""
-        )
-        # any features in cpu_dispatch must be ignored if they are part of the baseline
-        self.expect(
-            "sse neon vsx", baseline="sse neon vsx",
-            x86="", ppc64="", armhf=""
-        )
-        self.expect(
-            "avx2 vsx3 asimdhp", baseline="avx2 vsx3 asimdhp",
-            x86="", ppc64="", armhf=""
-        )
-
-    def test_implies(self):
-        # the baseline combines implied features, so we count
-        # on it instead of testing 'feature_implies()' directly
-        self.expect_baseline(
-            "fma3 avx2 asimd vsx3",
-            # .* between two spaces can validate features in between
-            x86 = "sse .* sse41 .* fma3.*avx2",
-            ppc64 = "vsx vsx2 vsx3",
-            armhf = "neon neon_fp16 neon_vfpv4 asimd"
-        )
-        """
-        special cases
-        """
-        # in icc and msvc, FMA3 and AVX2 can't be separated
-        # both need to imply each other, same for avx512f & avx512cd
-        for f0, f1 in (
-            ("fma3", "avx2"),
-            ("avx512f", "avx512cd"),
-        ):
-            diff = ".* sse42 .* %s .*%s$" % (f0, f1)
-            self.expect_baseline(f0,
-                x86_gcc=".* sse42 .* %s$" % f0,
-                x86_icc=diff, x86_iccw=diff
-            )
-            self.expect_baseline(f1,
-                x86_gcc=".* avx .* %s$" % f1,
-                x86_icc=diff, x86_iccw=diff
-            )
-        # in msvc, the following features can't be separated either
(("fma3", "avx2"), ("avx512f", "avx512cd", "avx512_skx")): - for ff in f: - self.expect_baseline(ff, - x86_msvc=".*%s" % ' '.join(f) - ) - - # in ppc64le VSX and VSX2 can't be separated - self.expect_baseline("vsx", ppc64le="vsx vsx2") - # in aarch64 following features can't be separated - for f in ("neon", "neon_fp16", "neon_vfpv4", "asimd"): - self.expect_baseline(f, aarch64="neon neon_fp16 neon_vfpv4 asimd") - - def test_args_options(self): - # max & native - for o in ("max", "native"): - if o == "native" and self.cc_name() == "msvc": - continue - self.expect(o, - trap_files=".*cpu_(sse|vsx|neon|vx).c", - x86="", ppc64="", armhf="", s390x="" - ) - self.expect(o, - trap_files=".*cpu_(sse3|vsx2|neon_vfpv4|vxe).c", - x86="sse sse2", ppc64="vsx", armhf="neon neon_fp16", - aarch64="", ppc64le="", s390x="vx" - ) - self.expect(o, - trap_files=".*cpu_(popcnt|vsx3).c", - x86="sse .* sse41", ppc64="vsx vsx2", - armhf="neon neon_fp16 .* asimd .*", - s390x="vx vxe vxe2" - ) - self.expect(o, - x86_gcc=".* xop fma4 .* avx512f .* avx512_knl avx512_knm avx512_skx .*", - # in icc, xop and fam4 aren't supported - x86_icc=".* avx512f .* avx512_knl avx512_knm avx512_skx .*", - x86_iccw=".* avx512f .* avx512_knl avx512_knm avx512_skx .*", - # in msvc, avx512_knl avx512_knm aren't supported - x86_msvc=".* xop fma4 .* avx512f .* avx512_skx .*", - armhf=".* asimd asimdhp asimddp .*", - ppc64="vsx vsx2 vsx3 vsx4.*", - s390x="vx vxe vxe2.*" - ) - # min - self.expect("min", - x86="sse sse2", x64="sse sse2 sse3", - armhf="", aarch64="neon neon_fp16 .* asimd", - ppc64="", ppc64le="vsx vsx2", s390x="" - ) - self.expect( - "min", trap_files=".*cpu_(sse2|vsx2).c", - x86="", ppc64le="" - ) - # an exception must triggered if native flag isn't supported - # when option "native" is activated through the args - try: - self.expect("native", - trap_flags=".*(-march=native|-xHost|/QxHost|-mcpu=a64fx).*", - x86=".*", ppc64=".*", armhf=".*", s390x=".*", aarch64=".*", - ) - if self.march() != "unknown": - raise AssertionError( - "excepted an exception for %s" % self.march() - ) - except DistutilsError: - if self.march() == "unknown": - raise AssertionError("excepted no exceptions") - - def test_flags(self): - self.expect_flags( - "sse sse2 vsx vsx2 neon neon_fp16 vx vxe", - x86_gcc="-msse -msse2", x86_icc="-msse -msse2", - x86_iccw="/arch:SSE2", - x86_msvc="/arch:SSE2" if self.march() == "x86" else "", - ppc64_gcc= "-mcpu=power8", - ppc64_clang="-mcpu=power8", - armhf_gcc="-mfpu=neon-fp16 -mfp16-format=ieee", - aarch64="", - s390x="-mzvector -march=arch12" - ) - # testing normalize -march - self.expect_flags( - "asimd", - aarch64="", - armhf_gcc=r"-mfp16-format=ieee -mfpu=neon-fp-armv8 -march=armv8-a\+simd" - ) - self.expect_flags( - "asimdhp", - aarch64_gcc=r"-march=armv8.2-a\+fp16", - armhf_gcc=r"-mfp16-format=ieee -mfpu=neon-fp-armv8 -march=armv8.2-a\+fp16" - ) - self.expect_flags( - "asimddp", aarch64_gcc=r"-march=armv8.2-a\+dotprod" - ) - self.expect_flags( - # asimdfhm implies asimdhp - "asimdfhm", aarch64_gcc=r"-march=armv8.2-a\+fp16\+fp16fml" - ) - self.expect_flags( - "asimddp asimdhp asimdfhm", - aarch64_gcc=r"-march=armv8.2-a\+dotprod\+fp16\+fp16fml" - ) - self.expect_flags( - "vx vxe vxe2", - s390x=r"-mzvector -march=arch13" - ) - - def test_targets_exceptions(self): - for targets in ( - "bla bla", "/*@targets", - "/*@targets */", - "/*@targets unknown */", - "/*@targets $unknown_policy avx2 */", - "/*@targets #unknown_group avx2 */", - "/*@targets $ */", - "/*@targets # vsx */", - "/*@targets #$ vsx */", - 
"/*@targets vsx avx2 ) */", - "/*@targets vsx avx2 (avx2 */", - "/*@targets vsx avx2 () */", - "/*@targets vsx avx2 ($autovec) */", # no features - "/*@targets vsx avx2 (xxx) */", - "/*@targets vsx avx2 (baseline) */", - ) : - try: - self.expect_targets( - targets, - x86="", armhf="", ppc64="", s390x="" - ) - if self.march() != "unknown": - raise AssertionError( - "excepted an exception for %s" % self.march() - ) - except DistutilsError: - if self.march() == "unknown": - raise AssertionError("excepted no exceptions") - - def test_targets_syntax(self): - for targets in ( - "/*@targets $keep_baseline sse vsx neon vx*/", - "/*@targets,$keep_baseline,sse,vsx,neon vx*/", - "/*@targets*$keep_baseline*sse*vsx*neon*vx*/", - """ - /* - ** @targets - ** $keep_baseline, sse vsx,neon, vx - */ - """, - """ - /* - ************@targets**************** - ** $keep_baseline, sse vsx, neon, vx - ************************************ - */ - """, - """ - /* - /////////////@targets///////////////// - //$keep_baseline//sse//vsx//neon//vx - ///////////////////////////////////// - */ - """, - """ - /* - @targets - $keep_baseline - SSE VSX NEON VX*/ - """ - ) : - self.expect_targets(targets, - x86="sse", ppc64="vsx", armhf="neon", s390x="vx", unknown="" - ) - - def test_targets(self): - # test skipping baseline features - self.expect_targets( - """ - /*@targets - sse sse2 sse41 avx avx2 avx512f - vsx vsx2 vsx3 vsx4 - neon neon_fp16 asimdhp asimddp - vx vxe vxe2 - */ - """, - baseline="avx vsx2 asimd vx vxe", - x86="avx512f avx2", armhf="asimddp asimdhp", ppc64="vsx4 vsx3", - s390x="vxe2" - ) - # test skipping non-dispatch features - self.expect_targets( - """ - /*@targets - sse41 avx avx2 avx512f - vsx2 vsx3 vsx4 - asimd asimdhp asimddp - vx vxe vxe2 - */ - """, - baseline="", dispatch="sse41 avx2 vsx2 asimd asimddp vxe2", - x86="avx2 sse41", armhf="asimddp asimd", ppc64="vsx2", s390x="vxe2" - ) - # test skipping features that not supported - self.expect_targets( - """ - /*@targets - sse2 sse41 avx2 avx512f - vsx2 vsx3 vsx4 - neon asimdhp asimddp - vx vxe vxe2 - */ - """, - baseline="", - trap_files=".*(avx2|avx512f|vsx3|vsx4|asimddp|vxe2).c", - x86="sse41 sse2", ppc64="vsx2", armhf="asimdhp neon", - s390x="vxe vx" - ) - # test skipping features that implies each other - self.expect_targets( - """ - /*@targets - sse sse2 avx fma3 avx2 avx512f avx512cd - vsx vsx2 vsx3 - neon neon_vfpv4 neon_fp16 neon_fp16 asimd asimdhp - asimddp asimdfhm - */ - """, - baseline="", - x86_gcc="avx512cd avx512f avx2 fma3 avx sse2", - x86_msvc="avx512cd avx2 avx sse2", - x86_icc="avx512cd avx2 avx sse2", - x86_iccw="avx512cd avx2 avx sse2", - ppc64="vsx3 vsx2 vsx", - ppc64le="vsx3 vsx2", - armhf="asimdfhm asimddp asimdhp asimd neon_vfpv4 neon_fp16 neon", - aarch64="asimdfhm asimddp asimdhp asimd" - ) - - def test_targets_policies(self): - # 'keep_baseline', generate objects for baseline features - self.expect_targets( - """ - /*@targets - $keep_baseline - sse2 sse42 avx2 avx512f - vsx2 vsx3 - neon neon_vfpv4 asimd asimddp - vx vxe vxe2 - */ - """, - baseline="sse41 avx2 vsx2 asimd vsx3 vxe", - x86="avx512f avx2 sse42 sse2", - ppc64="vsx3 vsx2", - armhf="asimddp asimd neon_vfpv4 neon", - # neon, neon_vfpv4, asimd implies each other - aarch64="asimddp asimd", - s390x="vxe2 vxe vx" - ) - # 'keep_sort', leave the sort as-is - self.expect_targets( - """ - /*@targets - $keep_baseline $keep_sort - avx512f sse42 avx2 sse2 - vsx2 vsx3 - asimd neon neon_vfpv4 asimddp - vxe vxe2 - */ - """, - x86="avx512f sse42 avx2 sse2", - ppc64="vsx2 vsx3", - 
armhf="asimd neon neon_vfpv4 asimddp", - # neon, neon_vfpv4, asimd implies each other - aarch64="asimd asimddp", - s390x="vxe vxe2" - ) - # 'autovec', skipping features that can't be - # vectorized by the compiler - self.expect_targets( - """ - /*@targets - $keep_baseline $keep_sort $autovec - avx512f avx2 sse42 sse41 sse2 - vsx3 vsx2 - asimddp asimd neon_vfpv4 neon - */ - """, - x86_gcc="avx512f avx2 sse42 sse41 sse2", - x86_icc="avx512f avx2 sse42 sse41 sse2", - x86_iccw="avx512f avx2 sse42 sse41 sse2", - x86_msvc="avx512f avx2 sse2" - if self.march() == 'x86' else "avx512f avx2", - ppc64="vsx3 vsx2", - armhf="asimddp asimd neon_vfpv4 neon", - # neon, neon_vfpv4, asimd implies each other - aarch64="asimddp asimd" - ) - for policy in ("$maxopt", "$autovec"): - # 'maxopt' and autovec set the max acceptable optimization flags - self.expect_target_flags( - "/*@targets baseline %s */" % policy, - gcc={"baseline":".*-O3.*"}, icc={"baseline":".*-O3.*"}, - iccw={"baseline":".*/O3.*"}, msvc={"baseline":".*/O2.*"}, - unknown={"baseline":".*"} - ) - - # 'werror', force compilers to treat warnings as errors - self.expect_target_flags( - "/*@targets baseline $werror */", - gcc={"baseline":".*-Werror.*"}, icc={"baseline":".*-Werror.*"}, - iccw={"baseline":".*/Werror.*"}, msvc={"baseline":".*/WX.*"}, - unknown={"baseline":".*"} - ) - - def test_targets_groups(self): - self.expect_targets( - """ - /*@targets $keep_baseline baseline #test_group */ - """, - groups=dict( - test_group=(""" - $keep_baseline - asimddp sse2 vsx2 avx2 vsx3 - avx512f asimdhp - """) - ), - x86="avx512f avx2 sse2 baseline", - ppc64="vsx3 vsx2 baseline", - armhf="asimddp asimdhp baseline" - ) - # test skip duplicating and sorting - self.expect_targets( - """ - /*@targets - * sse42 avx avx512f - * #test_group_1 - * vsx2 - * #test_group_2 - * asimddp asimdfhm - */ - """, - groups=dict( - test_group_1=(""" - VSX2 vsx3 asimd avx2 SSE41 - """), - test_group_2=(""" - vsx2 vsx3 asImd aVx2 sse41 - """) - ), - x86="avx512f avx2 avx sse42 sse41", - ppc64="vsx3 vsx2", - # vsx2 part of the default baseline of ppc64le, option ("min") - ppc64le="vsx3", - armhf="asimdfhm asimddp asimd", - # asimd part of the default baseline of aarch64, option ("min") - aarch64="asimdfhm asimddp" - ) - - def test_targets_multi(self): - self.expect_targets( - """ - /*@targets - (avx512_clx avx512_cnl) (asimdhp asimddp) - */ - """, - x86=r"\(avx512_clx avx512_cnl\)", - armhf=r"\(asimdhp asimddp\)", - ) - # test skipping implied features and auto-sort - self.expect_targets( - """ - /*@targets - f16c (sse41 avx sse42) (sse3 avx2 avx512f) - vsx2 (vsx vsx3 vsx2) - (neon neon_vfpv4 asimd asimdhp asimddp) - */ - """, - x86="avx512f f16c avx", - ppc64="vsx3 vsx2", - ppc64le="vsx3", # vsx2 part of baseline - armhf=r"\(asimdhp asimddp\)", - ) - # test skipping implied features and keep sort - self.expect_targets( - """ - /*@targets $keep_sort - (sse41 avx sse42) (sse3 avx2 avx512f) - (vsx vsx3 vsx2) - (asimddp neon neon_vfpv4 asimd asimdhp) - (vx vxe vxe2) - */ - """, - x86="avx avx512f", - ppc64="vsx3", - armhf=r"\(asimdhp asimddp\)", - s390x="vxe2" - ) - # test compiler variety and avoiding duplicating - self.expect_targets( - """ - /*@targets $keep_sort - fma3 avx2 (fma3 avx2) (avx2 fma3) avx2 fma3 - */ - """, - x86_gcc=r"fma3 avx2 \(fma3 avx2\)", - x86_icc="avx2", x86_iccw="avx2", - x86_msvc="avx2" - ) - -def new_test(arch, cc): - if is_standalone: return textwrap.dedent("""\ - class TestCCompilerOpt_{class_name}(_Test_CCompilerOpt, unittest.TestCase): - arch = '{arch}' 
-        cc = '{cc}'
-        def __init__(self, methodName="runTest"):
-            unittest.TestCase.__init__(self, methodName)
-            self.setup_class()
-    """).format(
-        class_name=arch + '_' + cc, arch=arch, cc=cc
-    )
-    return textwrap.dedent("""\
-    class TestCCompilerOpt_{class_name}(_Test_CCompilerOpt):
-        arch = '{arch}'
-        cc = '{cc}'
-    """).format(
-        class_name=arch + '_' + cc, arch=arch, cc=cc
-    )
-"""
-if 1 and is_standalone:
-    FakeCCompilerOpt.fake_info = "x86_icc"
-    cco = FakeCCompilerOpt(None, cpu_baseline="avx2")
-    print(' '.join(cco.cpu_baseline_names()))
-    print(cco.cpu_baseline_flags())
-    unittest.main()
-    sys.exit()
-"""
-for arch, compilers in arch_compilers.items():
-    for cc in compilers:
-        exec(new_test(arch, cc))
-
-if is_standalone:
-    unittest.main()
diff --git a/numpy/distutils/tests/test_ccompiler_opt_conf.py b/numpy/distutils/tests/test_ccompiler_opt_conf.py
deleted file mode 100644
index d9e8b2b0a834..000000000000
--- a/numpy/distutils/tests/test_ccompiler_opt_conf.py
+++ /dev/null
@@ -1,176 +0,0 @@
-import unittest
-from os import sys, path
-
-is_standalone = __name__ == '__main__' and __package__ is None
-if is_standalone:
-    sys.path.append(path.abspath(path.join(path.dirname(__file__), "..")))
-    from ccompiler_opt import CCompilerOpt
-else:
-    from numpy.distutils.ccompiler_opt import CCompilerOpt
-
-arch_compilers = dict(
-    x86 = ("gcc", "clang", "icc", "iccw", "msvc"),
-    x64 = ("gcc", "clang", "icc", "iccw", "msvc"),
-    ppc64 = ("gcc", "clang"),
-    ppc64le = ("gcc", "clang"),
-    armhf = ("gcc", "clang"),
-    aarch64 = ("gcc", "clang"),
-    noarch = ("gcc",)
-)
-
-class FakeCCompilerOpt(CCompilerOpt):
-    fake_info = ("arch", "compiler", "extra_args")
-    def __init__(self, *args, **kwargs):
-        CCompilerOpt.__init__(self, None, **kwargs)
-    def dist_compile(self, sources, flags, **kwargs):
-        return sources
-    def dist_info(self):
-        return FakeCCompilerOpt.fake_info
-    @staticmethod
-    def dist_log(*args, stderr=False):
-        pass
-
-class _TestConfFeatures(FakeCCompilerOpt):
-    """A hook to check the sanity of configured features
-    before it is called by the abstract class '_Feature'.
-    """
-
-    def conf_features_partial(self):
-        conf_all = self.conf_features
-        for feature_name, feature in conf_all.items():
-            self.test_feature(
-                "attribute conf_features",
-                conf_all, feature_name, feature
-            )
-
-        conf_partial = FakeCCompilerOpt.conf_features_partial(self)
-        for feature_name, feature in conf_partial.items():
-            self.test_feature(
-                "conf_features_partial()",
-                conf_partial, feature_name, feature
-            )
-        return conf_partial
-
-    def test_feature(self, log, search_in, feature_name, feature_dict):
-        error_msg = (
-            "while validating '{}' within feature '{}', "
-            "march '{}' and compiler '{}'\n>> "
-        ).format(log, feature_name, self.cc_march, self.cc_name)
-
-        if not feature_name.isupper():
-            raise AssertionError(error_msg + "feature name must be in uppercase")
-
-        for option, val in feature_dict.items():
-            self.test_option_types(error_msg, option, val)
-            self.test_duplicates(error_msg, option, val)
-
-        self.test_implies(error_msg, search_in, feature_name, feature_dict)
-        self.test_group(error_msg, search_in, feature_name, feature_dict)
-        self.test_extra_checks(error_msg, search_in, feature_name, feature_dict)
-
-    def test_option_types(self, error_msg, option, val):
-        for tp, available in (
-            ((str, list), (
-                "implies", "headers", "flags", "group", "detect", "extra_checks"
-            )),
-            ((str,), ("disable",)),
-            ((int,), ("interest",)),
-            ((bool,), ("implies_detect",)),
-            ((bool, type(None)), ("autovec",)),
-        ) :
-            found_it = 
option in available - if not found_it: - continue - if not isinstance(val, tp): - error_tp = [t.__name__ for t in (*tp,)] - error_tp = ' or '.join(error_tp) - raise AssertionError(error_msg + - "expected '%s' type for option '%s' not '%s'" % ( - error_tp, option, type(val).__name__ - )) - break - - if not found_it: - raise AssertionError(error_msg + "invalid option name '%s'" % option) - - def test_duplicates(self, error_msg, option, val): - if option not in ( - "implies", "headers", "flags", "group", "detect", "extra_checks" - ) : return - - if isinstance(val, str): - val = val.split() - - if len(val) != len(set(val)): - raise AssertionError(error_msg + "duplicated values in option '%s'" % option) - - def test_implies(self, error_msg, search_in, feature_name, feature_dict): - if feature_dict.get("disabled") is not None: - return - implies = feature_dict.get("implies", "") - if not implies: - return - if isinstance(implies, str): - implies = implies.split() - - if feature_name in implies: - raise AssertionError(error_msg + "feature implies itself") - - for impl in implies: - impl_dict = search_in.get(impl) - if impl_dict is not None: - if "disable" in impl_dict: - raise AssertionError(error_msg + "implies disabled feature '%s'" % impl) - continue - raise AssertionError(error_msg + "implies non-exist feature '%s'" % impl) - - def test_group(self, error_msg, search_in, feature_name, feature_dict): - if feature_dict.get("disabled") is not None: - return - group = feature_dict.get("group", "") - if not group: - return - if isinstance(group, str): - group = group.split() - - for f in group: - impl_dict = search_in.get(f) - if not impl_dict or "disable" in impl_dict: - continue - raise AssertionError(error_msg + - "in option 'group', '%s' already exists as a feature name" % f - ) - - def test_extra_checks(self, error_msg, search_in, feature_name, feature_dict): - if feature_dict.get("disabled") is not None: - return - extra_checks = feature_dict.get("extra_checks", "") - if not extra_checks: - return - if isinstance(extra_checks, str): - extra_checks = extra_checks.split() - - for f in extra_checks: - impl_dict = search_in.get(f) - if not impl_dict or "disable" in impl_dict: - continue - raise AssertionError(error_msg + - "in option 'extra_checks', extra test case '%s' already exists as a feature name" % f - ) - -class TestConfFeatures(unittest.TestCase): - def __init__(self, methodName="runTest"): - unittest.TestCase.__init__(self, methodName) - self._setup() - - def _setup(self): - FakeCCompilerOpt.conf_nocache = True - - def test_features(self): - for arch, compilers in arch_compilers.items(): - for cc in compilers: - FakeCCompilerOpt.fake_info = (arch, cc, "") - _TestConfFeatures() - -if is_standalone: - unittest.main() diff --git a/numpy/distutils/tests/test_exec_command.py b/numpy/distutils/tests/test_exec_command.py deleted file mode 100644 index 749523528e63..000000000000 --- a/numpy/distutils/tests/test_exec_command.py +++ /dev/null @@ -1,217 +0,0 @@ -import os -import pytest -import sys -from tempfile import TemporaryFile - -from numpy.distutils import exec_command -from numpy.distutils.exec_command import get_pythonexe -from numpy.testing import tempdir, assert_, IS_WASM - - -# In python 3 stdout, stderr are text (unicode compliant) devices, so to -# emulate them import StringIO from the io module. 
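-#
-# A minimal sketch of the pattern (standard library only): swapping
-# sys.stdout for a StringIO object lets a test capture whatever the
-# exercised code prints:
-#
-#     import io, sys
-#     buf = io.StringIO()
-#     old_stdout, sys.stdout = sys.stdout, buf
-#     try:
-#         print("captured")
-#     finally:
-#         sys.stdout = old_stdout
-#     assert buf.getvalue() == "captured\n"
-#
-# The redirect_stdout/redirect_stderr context managers below wrap exactly
-# this save/replace/restore dance.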
-from io import StringIO - -class redirect_stdout: - """Context manager to redirect stdout for exec_command test.""" - def __init__(self, stdout=None): - self._stdout = stdout or sys.stdout - - def __enter__(self): - self.old_stdout = sys.stdout - sys.stdout = self._stdout - - def __exit__(self, exc_type, exc_value, traceback): - self._stdout.flush() - sys.stdout = self.old_stdout - # note: closing sys.stdout won't close it. - self._stdout.close() - -class redirect_stderr: - """Context manager to redirect stderr for exec_command test.""" - def __init__(self, stderr=None): - self._stderr = stderr or sys.stderr - - def __enter__(self): - self.old_stderr = sys.stderr - sys.stderr = self._stderr - - def __exit__(self, exc_type, exc_value, traceback): - self._stderr.flush() - sys.stderr = self.old_stderr - # note: closing sys.stderr won't close it. - self._stderr.close() - -class emulate_nonposix: - """Context manager to emulate os.name != 'posix' """ - def __init__(self, osname='non-posix'): - self._new_name = osname - - def __enter__(self): - self._old_name = os.name - os.name = self._new_name - - def __exit__(self, exc_type, exc_value, traceback): - os.name = self._old_name - - -def test_exec_command_stdout(): - # Regression test for gh-2999 and gh-2915. - # There are several packages (nose, scipy.weave.inline, Sage inline - # Fortran) that replace stdout, in which case it doesn't have a fileno - # method. This is tested here, with a do-nothing command that fails if the - # presence of fileno() is assumed in exec_command. - - # The code has a special case for posix systems, so if we are on posix test - # both that the special case works and that the generic code works. - - # Test posix version: - with redirect_stdout(StringIO()): - with redirect_stderr(TemporaryFile()): - with pytest.warns(DeprecationWarning): - exec_command.exec_command("cd '.'") - - if os.name == 'posix': - # Test general (non-posix) version: - with emulate_nonposix(): - with redirect_stdout(StringIO()): - with redirect_stderr(TemporaryFile()): - with pytest.warns(DeprecationWarning): - exec_command.exec_command("cd '.'") - -def test_exec_command_stderr(): - # Test posix version: - with redirect_stdout(TemporaryFile(mode='w+')): - with redirect_stderr(StringIO()): - with pytest.warns(DeprecationWarning): - exec_command.exec_command("cd '.'") - - if os.name == 'posix': - # Test general (non-posix) version: - with emulate_nonposix(): - with redirect_stdout(TemporaryFile()): - with redirect_stderr(StringIO()): - with pytest.warns(DeprecationWarning): - exec_command.exec_command("cd '.'") - - -@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") -class TestExecCommand: - def setup_method(self): - self.pyexe = get_pythonexe() - - def check_nt(self, **kws): - s, o = exec_command.exec_command('cmd /C echo path=%path%') - assert_(s == 0) - assert_(o != '') - - s, o = exec_command.exec_command( - '"%s" -c "import sys;sys.stderr.write(sys.platform)"' % self.pyexe) - assert_(s == 0) - assert_(o == 'win32') - - def check_posix(self, **kws): - s, o = exec_command.exec_command("echo Hello", **kws) - assert_(s == 0) - assert_(o == 'Hello') - - s, o = exec_command.exec_command('echo $AAA', **kws) - assert_(s == 0) - assert_(o == '') - - s, o = exec_command.exec_command('echo "$AAA"', AAA='Tere', **kws) - assert_(s == 0) - assert_(o == 'Tere') - - s, o = exec_command.exec_command('echo "$AAA"', **kws) - assert_(s == 0) - assert_(o == '') - - if 'BBB' not in os.environ: - os.environ['BBB'] = 'Hi' - s, o = 
exec_command.exec_command('echo "$BBB"', **kws) - assert_(s == 0) - assert_(o == 'Hi') - - s, o = exec_command.exec_command('echo "$BBB"', BBB='Hey', **kws) - assert_(s == 0) - assert_(o == 'Hey') - - s, o = exec_command.exec_command('echo "$BBB"', **kws) - assert_(s == 0) - assert_(o == 'Hi') - - del os.environ['BBB'] - - s, o = exec_command.exec_command('echo "$BBB"', **kws) - assert_(s == 0) - assert_(o == '') - - - s, o = exec_command.exec_command('this_is_not_a_command', **kws) - assert_(s != 0) - assert_(o != '') - - s, o = exec_command.exec_command('echo path=$PATH', **kws) - assert_(s == 0) - assert_(o != '') - - s, o = exec_command.exec_command( - '"%s" -c "import sys,os;sys.stderr.write(os.name)"' % - self.pyexe, **kws) - assert_(s == 0) - assert_(o == 'posix') - - def check_basic(self, *kws): - s, o = exec_command.exec_command( - '"%s" -c "raise \'Ignore me.\'"' % self.pyexe, **kws) - assert_(s != 0) - assert_(o != '') - - s, o = exec_command.exec_command( - '"%s" -c "import sys;sys.stderr.write(\'0\');' - 'sys.stderr.write(\'1\');sys.stderr.write(\'2\')"' % - self.pyexe, **kws) - assert_(s == 0) - assert_(o == '012') - - s, o = exec_command.exec_command( - '"%s" -c "import sys;sys.exit(15)"' % self.pyexe, **kws) - assert_(s == 15) - assert_(o == '') - - s, o = exec_command.exec_command( - '"%s" -c "print(\'Heipa\'")' % self.pyexe, **kws) - assert_(s == 0) - assert_(o == 'Heipa') - - def check_execute_in(self, **kws): - with tempdir() as tmpdir: - fn = "file" - tmpfile = os.path.join(tmpdir, fn) - with open(tmpfile, 'w') as f: - f.write('Hello') - - s, o = exec_command.exec_command( - '"%s" -c "f = open(\'%s\', \'r\'); f.close()"' % - (self.pyexe, fn), **kws) - assert_(s != 0) - assert_(o != '') - s, o = exec_command.exec_command( - '"%s" -c "f = open(\'%s\', \'r\'); print(f.read()); ' - 'f.close()"' % (self.pyexe, fn), execute_in=tmpdir, **kws) - assert_(s == 0) - assert_(o == 'Hello') - - def test_basic(self): - with redirect_stdout(StringIO()): - with redirect_stderr(StringIO()): - with pytest.warns(DeprecationWarning): - if os.name == "posix": - self.check_posix(use_tee=0) - self.check_posix(use_tee=1) - elif os.name == "nt": - self.check_nt(use_tee=0) - self.check_nt(use_tee=1) - self.check_execute_in(use_tee=0) - self.check_execute_in(use_tee=1) diff --git a/numpy/distutils/tests/test_fcompiler.py b/numpy/distutils/tests/test_fcompiler.py deleted file mode 100644 index dd97f1e72afc..000000000000 --- a/numpy/distutils/tests/test_fcompiler.py +++ /dev/null @@ -1,43 +0,0 @@ -from numpy.testing import assert_ -import numpy.distutils.fcompiler - -customizable_flags = [ - ('f77', 'F77FLAGS'), - ('f90', 'F90FLAGS'), - ('free', 'FREEFLAGS'), - ('arch', 'FARCH'), - ('debug', 'FDEBUG'), - ('flags', 'FFLAGS'), - ('linker_so', 'LDFLAGS'), -] - - -def test_fcompiler_flags(monkeypatch): - monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '0') - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='none') - flag_vars = fc.flag_vars.clone(lambda *args, **kwargs: None) - - for opt, envvar in customizable_flags: - new_flag = '-dummy-{}-flag'.format(opt) - prev_flags = getattr(flag_vars, opt) - - monkeypatch.setenv(envvar, new_flag) - new_flags = getattr(flag_vars, opt) - - monkeypatch.delenv(envvar) - assert_(new_flags == [new_flag]) - - monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '1') - - for opt, envvar in customizable_flags: - new_flag = '-dummy-{}-flag'.format(opt) - prev_flags = getattr(flag_vars, opt) - monkeypatch.setenv(envvar, new_flag) - new_flags = getattr(flag_vars, opt) - 
- monkeypatch.delenv(envvar) - if prev_flags is None: - assert_(new_flags == [new_flag]) - else: - assert_(new_flags == prev_flags + [new_flag]) - diff --git a/numpy/distutils/tests/test_fcompiler_gnu.py b/numpy/distutils/tests/test_fcompiler_gnu.py deleted file mode 100644 index 0817ae58c214..000000000000 --- a/numpy/distutils/tests/test_fcompiler_gnu.py +++ /dev/null @@ -1,55 +0,0 @@ -from numpy.testing import assert_ - -import numpy.distutils.fcompiler - -g77_version_strings = [ - ('GNU Fortran 0.5.25 20010319 (prerelease)', '0.5.25'), - ('GNU Fortran (GCC 3.2) 3.2 20020814 (release)', '3.2'), - ('GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian)', '3.3.3'), - ('GNU Fortran (GCC) 3.3.3 (Debian 20040401)', '3.3.3'), - ('GNU Fortran (GCC 3.2.2 20030222 (Red Hat Linux 3.2.2-5)) 3.2.2' - ' 20030222 (Red Hat Linux 3.2.2-5)', '3.2.2'), -] - -gfortran_version_strings = [ - ('GNU Fortran 95 (GCC 4.0.3 20051023 (prerelease) (Debian 4.0.2-3))', - '4.0.3'), - ('GNU Fortran 95 (GCC) 4.1.0', '4.1.0'), - ('GNU Fortran 95 (GCC) 4.2.0 20060218 (experimental)', '4.2.0'), - ('GNU Fortran (GCC) 4.3.0 20070316 (experimental)', '4.3.0'), - ('GNU Fortran (rubenvb-4.8.0) 4.8.0', '4.8.0'), - ('4.8.0', '4.8.0'), - ('4.0.3-7', '4.0.3'), - ("gfortran: warning: couldn't understand kern.osversion '14.1.0\n4.9.1", - '4.9.1'), - ("gfortran: warning: couldn't understand kern.osversion '14.1.0\n" - "gfortran: warning: yet another warning\n4.9.1", - '4.9.1'), - ('GNU Fortran (crosstool-NG 8a21ab48) 7.2.0', '7.2.0') -] - -class TestG77Versions: - def test_g77_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') - for vs, version in g77_version_strings: - v = fc.version_match(vs) - assert_(v == version, (vs, v)) - - def test_not_g77(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') - for vs, _ in gfortran_version_strings: - v = fc.version_match(vs) - assert_(v is None, (vs, v)) - -class TestGFortranVersions: - def test_gfortran_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') - for vs, version in gfortran_version_strings: - v = fc.version_match(vs) - assert_(v == version, (vs, v)) - - def test_not_gfortran(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') - for vs, _ in g77_version_strings: - v = fc.version_match(vs) - assert_(v is None, (vs, v)) diff --git a/numpy/distutils/tests/test_fcompiler_intel.py b/numpy/distutils/tests/test_fcompiler_intel.py deleted file mode 100644 index 45c9cdac1910..000000000000 --- a/numpy/distutils/tests/test_fcompiler_intel.py +++ /dev/null @@ -1,30 +0,0 @@ -import numpy.distutils.fcompiler -from numpy.testing import assert_ - - -intel_32bit_version_strings = [ - ("Intel(R) Fortran Intel(R) 32-bit Compiler Professional for applications" - "running on Intel(R) 32, Version 11.1", '11.1'), -] - -intel_64bit_version_strings = [ - ("Intel(R) Fortran IA-64 Compiler Professional for applications" - "running on IA-64, Version 11.0", '11.0'), - ("Intel(R) Fortran Intel(R) 64 Compiler Professional for applications" - "running on Intel(R) 64, Version 11.1", '11.1') -] - -class TestIntelFCompilerVersions: - def test_32bit_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intel') - for vs, version in intel_32bit_version_strings: - v = fc.version_match(vs) - assert_(v == version) - - -class TestIntelEM64TFCompilerVersions: - def test_64bit_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intelem') - for vs, version in intel_64bit_version_strings: - v = 
fc.version_match(vs) - assert_(v == version) diff --git a/numpy/distutils/tests/test_fcompiler_nagfor.py b/numpy/distutils/tests/test_fcompiler_nagfor.py deleted file mode 100644 index 2e04f5266dc1..000000000000 --- a/numpy/distutils/tests/test_fcompiler_nagfor.py +++ /dev/null @@ -1,22 +0,0 @@ -from numpy.testing import assert_ -import numpy.distutils.fcompiler - -nag_version_strings = [('nagfor', 'NAG Fortran Compiler Release ' - '6.2(Chiyoda) Build 6200', '6.2'), - ('nagfor', 'NAG Fortran Compiler Release ' - '6.1(Tozai) Build 6136', '6.1'), - ('nagfor', 'NAG Fortran Compiler Release ' - '6.0(Hibiya) Build 1021', '6.0'), - ('nagfor', 'NAG Fortran Compiler Release ' - '5.3.2(971)', '5.3.2'), - ('nag', 'NAGWare Fortran 95 compiler Release 5.1' - '(347,355-367,375,380-383,389,394,399,401-402,407,' - '431,435,437,446,459-460,463,472,494,496,503,508,' - '511,517,529,555,557,565)', '5.1')] - -class TestNagFCompilerVersions: - def test_version_match(self): - for comp, vs, version in nag_version_strings: - fc = numpy.distutils.fcompiler.new_fcompiler(compiler=comp) - v = fc.version_match(vs) - assert_(v == version) diff --git a/numpy/distutils/tests/test_from_template.py b/numpy/distutils/tests/test_from_template.py deleted file mode 100644 index 588175496299..000000000000 --- a/numpy/distutils/tests/test_from_template.py +++ /dev/null @@ -1,44 +0,0 @@ - -from numpy.distutils.from_template import process_str -from numpy.testing import assert_equal - - -pyf_src = """ -python module foo - <_rd=real,double precision> - interface - subroutine foosub(tol) - <_rd>, intent(in,out) :: tol - end subroutine foosub - end interface -end python module foo -""" - -expected_pyf = """ -python module foo - interface - subroutine sfoosub(tol) - real, intent(in,out) :: tol - end subroutine sfoosub - subroutine dfoosub(tol) - double precision, intent(in,out) :: tol - end subroutine dfoosub - end interface -end python module foo -""" - - -def normalize_whitespace(s): - """ - Remove leading and trailing whitespace, and convert internal - stretches of whitespace to a single space. - """ - return ' '.join(s.split()) - - -def test_from_template(): - """Regression test for gh-10712.""" - pyf = process_str(pyf_src) - normalized_pyf = normalize_whitespace(pyf) - normalized_expected_pyf = normalize_whitespace(expected_pyf) - assert_equal(normalized_pyf, normalized_expected_pyf) diff --git a/numpy/distutils/tests/test_log.py b/numpy/distutils/tests/test_log.py deleted file mode 100644 index 72fddf37370f..000000000000 --- a/numpy/distutils/tests/test_log.py +++ /dev/null @@ -1,34 +0,0 @@ -import io -import re -from contextlib import redirect_stdout - -import pytest - -from numpy.distutils import log - - -def setup_module(): - f = io.StringIO() # changing verbosity also logs here, capture that - with redirect_stdout(f): - log.set_verbosity(2, force=True) # i.e. 
DEBUG - - -def teardown_module(): - log.set_verbosity(0, force=True) # the default - - -r_ansi = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])") - - -@pytest.mark.parametrize("func_name", ["error", "warn", "info", "debug"]) -def test_log_prefix(func_name): - func = getattr(log, func_name) - msg = f"{func_name} message" - f = io.StringIO() - with redirect_stdout(f): - func(msg) - out = f.getvalue() - assert out # sanity check - clean_out = r_ansi.sub("", out) - line = next(line for line in clean_out.splitlines()) - assert line == f"{func_name.upper()}: {msg}" diff --git a/numpy/distutils/tests/test_mingw32ccompiler.py b/numpy/distutils/tests/test_mingw32ccompiler.py deleted file mode 100644 index c4eac7b72de1..000000000000 --- a/numpy/distutils/tests/test_mingw32ccompiler.py +++ /dev/null @@ -1,47 +0,0 @@ -import shutil -import subprocess -import sys -import pytest -import os -import sysconfig - -from numpy.distutils import mingw32ccompiler - - -@pytest.mark.skipif(sys.platform != 'win32', reason='win32 only test') -@pytest.mark.skipif(not os.path.exists(os.path.join(sys.prefix, 'libs')), - reason="test requires mingw library layout") -@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', reason='mingw GNU objdump does not understand arm64 binary format yet') -def test_build_import(): - '''Test the mingw32ccompiler.build_import_library, which builds a - `python.a` from the MSVC `python.lib` - ''' - - # make sure `nm.exe` exists and supports the current python version. This - # can get mixed up when the PATH has a 64-bit nm but the python is 32-bit - try: - out = subprocess.check_output(['nm.exe', '--help']) - except FileNotFoundError: - pytest.skip("'nm.exe' not on path, is mingw installed?") - supported = out[out.find(b'supported targets:'):] - if sys.maxsize < 2**32: - if b'pe-i386' not in supported: - raise ValueError("'nm.exe' found but it does not support 32-bit " - "dlls when using 32-bit python. Supported " - "formats: '%s'" % supported) - elif b'pe-x86-64' not in supported: - raise ValueError("'nm.exe' found but it does not support 64-bit " - "dlls when using 64-bit python. 
Supported " - "formats: '%s'" % supported) - # Hide the import library to force a build - has_import_lib, fullpath = mingw32ccompiler._check_for_import_lib() - if has_import_lib: - shutil.move(fullpath, fullpath + '.bak') - - try: - # Whew, now we can actually test the function - mingw32ccompiler.build_import_library() - - finally: - if has_import_lib: - shutil.move(fullpath + '.bak', fullpath) diff --git a/numpy/distutils/tests/test_misc_util.py b/numpy/distutils/tests/test_misc_util.py deleted file mode 100644 index 40e7606eeb76..000000000000 --- a/numpy/distutils/tests/test_misc_util.py +++ /dev/null @@ -1,88 +0,0 @@ -from os.path import join, sep, dirname - -import pytest - -from numpy.distutils.misc_util import ( - appendpath, minrelpath, gpaths, get_shared_lib_extension, get_info - ) -from numpy.testing import ( - assert_, assert_equal, IS_EDITABLE - ) - -ajoin = lambda *paths: join(*((sep,)+paths)) - -class TestAppendpath: - - def test_1(self): - assert_equal(appendpath('prefix', 'name'), join('prefix', 'name')) - assert_equal(appendpath('/prefix', 'name'), ajoin('prefix', 'name')) - assert_equal(appendpath('/prefix', '/name'), ajoin('prefix', 'name')) - assert_equal(appendpath('prefix', '/name'), join('prefix', 'name')) - - def test_2(self): - assert_equal(appendpath('prefix/sub', 'name'), - join('prefix', 'sub', 'name')) - assert_equal(appendpath('prefix/sub', 'sup/name'), - join('prefix', 'sub', 'sup', 'name')) - assert_equal(appendpath('/prefix/sub', '/prefix/name'), - ajoin('prefix', 'sub', 'name')) - - def test_3(self): - assert_equal(appendpath('/prefix/sub', '/prefix/sup/name'), - ajoin('prefix', 'sub', 'sup', 'name')) - assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sup/sup2/name'), - ajoin('prefix', 'sub', 'sub2', 'sup', 'sup2', 'name')) - assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'), - ajoin('prefix', 'sub', 'sub2', 'sup', 'name')) - -class TestMinrelpath: - - def test_1(self): - n = lambda path: path.replace('/', sep) - assert_equal(minrelpath(n('aa/bb')), n('aa/bb')) - assert_equal(minrelpath('..'), '..') - assert_equal(minrelpath(n('aa/..')), '') - assert_equal(minrelpath(n('aa/../bb')), 'bb') - assert_equal(minrelpath(n('aa/bb/..')), 'aa') - assert_equal(minrelpath(n('aa/bb/../..')), '') - assert_equal(minrelpath(n('aa/bb/../cc/../dd')), n('aa/dd')) - assert_equal(minrelpath(n('.././..')), n('../..')) - assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd')) - -class TestGpaths: - - def test_gpaths(self): - local_path = minrelpath(join(dirname(__file__), '..')) - ls = gpaths('command/*.py', local_path) - assert_(join(local_path, 'command', 'build_src.py') in ls, repr(ls)) - f = gpaths('system_info.py', local_path) - assert_(join(local_path, 'system_info.py') == f[0], repr(f)) - -class TestSharedExtension: - - def test_get_shared_lib_extension(self): - import sys - ext = get_shared_lib_extension(is_python_ext=False) - if sys.platform.startswith('linux'): - assert_equal(ext, '.so') - elif sys.platform.startswith('gnukfreebsd'): - assert_equal(ext, '.so') - elif sys.platform.startswith('darwin'): - assert_equal(ext, '.dylib') - elif sys.platform.startswith('win'): - assert_equal(ext, '.dll') - # just check for no crash - assert_(get_shared_lib_extension(is_python_ext=True)) - - -@pytest.mark.skipif( - IS_EDITABLE, - reason="`get_info` .ini lookup method incompatible with editable install" -) -def test_installed_npymath_ini(): - # Regression test for gh-7707. If npymath.ini wasn't installed, then this - # will give an error. 
- info = get_info('npymath') - - assert isinstance(info, dict) - assert "define_macros" in info diff --git a/numpy/distutils/tests/test_npy_pkg_config.py b/numpy/distutils/tests/test_npy_pkg_config.py deleted file mode 100644 index b287ebe2e832..000000000000 --- a/numpy/distutils/tests/test_npy_pkg_config.py +++ /dev/null @@ -1,84 +0,0 @@ -import os - -from numpy.distutils.npy_pkg_config import read_config, parse_flags -from numpy.testing import temppath, assert_ - -simple = """\ -[meta] -Name = foo -Description = foo lib -Version = 0.1 - -[default] -cflags = -I/usr/include -libs = -L/usr/lib -""" -simple_d = {'cflags': '-I/usr/include', 'libflags': '-L/usr/lib', - 'version': '0.1', 'name': 'foo'} - -simple_variable = """\ -[meta] -Name = foo -Description = foo lib -Version = 0.1 - -[variables] -prefix = /foo/bar -libdir = ${prefix}/lib -includedir = ${prefix}/include - -[default] -cflags = -I${includedir} -libs = -L${libdir} -""" -simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib', - 'version': '0.1', 'name': 'foo'} - -class TestLibraryInfo: - def test_simple(self): - with temppath('foo.ini') as path: - with open(path, 'w') as f: - f.write(simple) - pkg = os.path.splitext(path)[0] - out = read_config(pkg) - - assert_(out.cflags() == simple_d['cflags']) - assert_(out.libs() == simple_d['libflags']) - assert_(out.name == simple_d['name']) - assert_(out.version == simple_d['version']) - - def test_simple_variable(self): - with temppath('foo.ini') as path: - with open(path, 'w') as f: - f.write(simple_variable) - pkg = os.path.splitext(path)[0] - out = read_config(pkg) - - assert_(out.cflags() == simple_variable_d['cflags']) - assert_(out.libs() == simple_variable_d['libflags']) - assert_(out.name == simple_variable_d['name']) - assert_(out.version == simple_variable_d['version']) - out.vars['prefix'] = '/Users/david' - assert_(out.cflags() == '-I/Users/david/include') - -class TestParseFlags: - def test_simple_cflags(self): - d = parse_flags("-I/usr/include") - assert_(d['include_dirs'] == ['/usr/include']) - - d = parse_flags("-I/usr/include -DFOO") - assert_(d['include_dirs'] == ['/usr/include']) - assert_(d['macros'] == ['FOO']) - - d = parse_flags("-I /usr/include -DFOO") - assert_(d['include_dirs'] == ['/usr/include']) - assert_(d['macros'] == ['FOO']) - - def test_simple_lflags(self): - d = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar") - assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib']) - assert_(d['libraries'] == ['foo', 'bar']) - - d = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar") - assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib']) - assert_(d['libraries'] == ['foo', 'bar']) diff --git a/numpy/distutils/tests/test_shell_utils.py b/numpy/distutils/tests/test_shell_utils.py deleted file mode 100644 index 696d38ddd66a..000000000000 --- a/numpy/distutils/tests/test_shell_utils.py +++ /dev/null @@ -1,79 +0,0 @@ -import pytest -import subprocess -import json -import sys - -from numpy.distutils import _shell_utils -from numpy.testing import IS_WASM - -argv_cases = [ - [r'exe'], - [r'path/exe'], - [r'path\exe'], - [r'\\server\path\exe'], - [r'path to/exe'], - [r'path to\exe'], - - [r'exe', '--flag'], - [r'path/exe', '--flag'], - [r'path\exe', '--flag'], - [r'path to/exe', '--flag'], - [r'path to\exe', '--flag'], - - # flags containing literal quotes in their name - [r'path to/exe', '--flag-"quoted"'], - [r'path to\exe', '--flag-"quoted"'], - [r'path to/exe', '"--flag-quoted"'], - [r'path to\exe', '"--flag-quoted"'], -] - - 
-@pytest.fixture(params=[ - _shell_utils.WindowsParser, - _shell_utils.PosixParser -]) -def Parser(request): - return request.param - - -@pytest.fixture -def runner(Parser): - if Parser != _shell_utils.NativeParser: - pytest.skip('Unable to run with non-native parser') - - if Parser == _shell_utils.WindowsParser: - return lambda cmd: subprocess.check_output(cmd) - elif Parser == _shell_utils.PosixParser: - # posix has no non-shell string parsing - return lambda cmd: subprocess.check_output(cmd, shell=True) - else: - raise NotImplementedError - - -@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") -@pytest.mark.parametrize('argv', argv_cases) -def test_join_matches_subprocess(Parser, runner, argv): - """ - Test that join produces strings understood by subprocess - """ - # invoke python to return its arguments as json - cmd = [ - sys.executable, '-c', - 'import json, sys; print(json.dumps(sys.argv[1:]))' - ] - joined = Parser.join(cmd + argv) - json_out = runner(joined).decode() - assert json.loads(json_out) == argv - - -@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") -@pytest.mark.parametrize('argv', argv_cases) -def test_roundtrip(Parser, argv): - """ - Test that split is the inverse operation of join - """ - try: - joined = Parser.join(argv) - assert argv == Parser.split(joined) - except NotImplementedError: - pytest.skip("Not implemented") diff --git a/numpy/distutils/tests/test_system_info.py b/numpy/distutils/tests/test_system_info.py deleted file mode 100644 index 5887abea76bd..000000000000 --- a/numpy/distutils/tests/test_system_info.py +++ /dev/null @@ -1,334 +0,0 @@ -import os -import shutil -import pytest -from tempfile import mkstemp, mkdtemp -from subprocess import Popen, PIPE -import importlib.metadata -from distutils.errors import DistutilsError - -from numpy.testing import assert_, assert_equal, assert_raises -from numpy.distutils import ccompiler, customized_ccompiler -from numpy.distutils.system_info import system_info, ConfigParser, mkl_info -from numpy.distutils.system_info import AliasedOptionError -from numpy.distutils.system_info import default_lib_dirs, default_include_dirs -from numpy.distutils import _shell_utils - - -try: - if importlib.metadata.version('setuptools') >= '60': - # pkg-resources gives deprecation warnings, and there may be more - # issues. 
We only support setuptools <60 - pytest.skip("setuptools is too new", allow_module_level=True) -except importlib.metadata.PackageNotFoundError: - # we don't require `setuptools`; if it is not found, continue - pass - - -def get_class(name, notfound_action=1): - """ - notfound_action: - 0 - do nothing - 1 - display warning message - 2 - raise error - """ - cl = {'temp1': Temp1Info, - 'temp2': Temp2Info, - 'duplicate_options': DuplicateOptionInfo, - }.get(name.lower(), _system_info) - return cl() - -simple_site = """ -[ALL] -library_dirs = {dir1:s}{pathsep:s}{dir2:s} -libraries = {lib1:s},{lib2:s} -extra_compile_args = -I/fake/directory -I"/path with/spaces" -Os -runtime_library_dirs = {dir1:s} - -[temp1] -library_dirs = {dir1:s} -libraries = {lib1:s} -runtime_library_dirs = {dir1:s} - -[temp2] -library_dirs = {dir2:s} -libraries = {lib2:s} -extra_link_args = -Wl,-rpath={lib2_escaped:s} -rpath = {dir2:s} - -[duplicate_options] -mylib_libs = {lib1:s} -libraries = {lib2:s} -""" -site_cfg = simple_site - -fakelib_c_text = """ -/* This file is generated from numpy/distutils/testing/test_system_info.py */ -#include -void foo(void) { - printf("Hello foo"); -} -void bar(void) { - printf("Hello bar"); -} -""" - -def have_compiler(): - """ Return True if there appears to be an executable compiler - """ - compiler = customized_ccompiler() - try: - cmd = compiler.compiler # Unix compilers - except AttributeError: - try: - if not compiler.initialized: - compiler.initialize() # MSVC is different - except (DistutilsError, ValueError): - return False - cmd = [compiler.cc] - try: - p = Popen(cmd, stdout=PIPE, stderr=PIPE) - p.stdout.close() - p.stderr.close() - p.wait() - except OSError: - return False - return True - - -HAVE_COMPILER = have_compiler() - - -class _system_info(system_info): - - def __init__(self, - default_lib_dirs=default_lib_dirs, - default_include_dirs=default_include_dirs, - verbosity=1, - ): - self.__class__.info = {} - self.local_prefixes = [] - defaults = {'library_dirs': '', - 'include_dirs': '', - 'runtime_library_dirs': '', - 'rpath': '', - 'src_dirs': '', - 'search_static_first': "0", - 'extra_compile_args': '', - 'extra_link_args': ''} - self.cp = ConfigParser(defaults) - # We have to parse the config files afterwards - # to have a consistent temporary filepath - - def _check_libs(self, lib_dirs, libs, opt_libs, exts): - """Override _check_libs to return with all dirs """ - info = {'libraries': libs, 'library_dirs': lib_dirs} - return info - - -class Temp1Info(_system_info): - """For testing purposes""" - section = 'temp1' - - -class Temp2Info(_system_info): - """For testing purposes""" - section = 'temp2' - -class DuplicateOptionInfo(_system_info): - """For testing purposes""" - section = 'duplicate_options' - - -class TestSystemInfoReading: - - def setup_method(self): - """ Create the libraries """ - # Create 2 sources and 2 libraries - self._dir1 = mkdtemp() - self._src1 = os.path.join(self._dir1, 'foo.c') - self._lib1 = os.path.join(self._dir1, 'libfoo.so') - self._dir2 = mkdtemp() - self._src2 = os.path.join(self._dir2, 'bar.c') - self._lib2 = os.path.join(self._dir2, 'libbar.so') - # Update local site.cfg - global simple_site, site_cfg - site_cfg = simple_site.format( - dir1=self._dir1, - lib1=self._lib1, - dir2=self._dir2, - lib2=self._lib2, - pathsep=os.pathsep, - lib2_escaped=_shell_utils.NativeParser.join([self._lib2]) - ) - # Write site.cfg - fd, self._sitecfg = mkstemp() - os.close(fd) - with open(self._sitecfg, 'w') as fd: - fd.write(site_cfg) - # Write the sources - 
with open(self._src1, 'w') as fd: - fd.write(fakelib_c_text) - with open(self._src2, 'w') as fd: - fd.write(fakelib_c_text) - # We create all class-instances - - def site_and_parse(c, site_cfg): - c.files = [site_cfg] - c.parse_config_files() - return c - self.c_default = site_and_parse(get_class('default'), self._sitecfg) - self.c_temp1 = site_and_parse(get_class('temp1'), self._sitecfg) - self.c_temp2 = site_and_parse(get_class('temp2'), self._sitecfg) - self.c_dup_options = site_and_parse(get_class('duplicate_options'), - self._sitecfg) - - def teardown_method(self): - # Do each removal separately - try: - shutil.rmtree(self._dir1) - except Exception: - pass - try: - shutil.rmtree(self._dir2) - except Exception: - pass - try: - os.remove(self._sitecfg) - except Exception: - pass - - def test_all(self): - # Read in all information in the ALL block - tsi = self.c_default - assert_equal(tsi.get_lib_dirs(), [self._dir1, self._dir2]) - assert_equal(tsi.get_libraries(), [self._lib1, self._lib2]) - assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1]) - extra = tsi.calc_extra_info() - assert_equal(extra['extra_compile_args'], ['-I/fake/directory', '-I/path with/spaces', '-Os']) - - def test_temp1(self): - # Read in all information in the temp1 block - tsi = self.c_temp1 - assert_equal(tsi.get_lib_dirs(), [self._dir1]) - assert_equal(tsi.get_libraries(), [self._lib1]) - assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1]) - - def test_temp2(self): - # Read in all information in the temp2 block - tsi = self.c_temp2 - assert_equal(tsi.get_lib_dirs(), [self._dir2]) - assert_equal(tsi.get_libraries(), [self._lib2]) - # Now from rpath and not runtime_library_dirs - assert_equal(tsi.get_runtime_lib_dirs(key='rpath'), [self._dir2]) - extra = tsi.calc_extra_info() - assert_equal(extra['extra_link_args'], ['-Wl,-rpath=' + self._lib2]) - - def test_duplicate_options(self): - # Ensure that duplicates are raising an AliasedOptionError - tsi = self.c_dup_options - assert_raises(AliasedOptionError, tsi.get_option_single, "mylib_libs", "libraries") - assert_equal(tsi.get_libs("mylib_libs", [self._lib1]), [self._lib1]) - assert_equal(tsi.get_libs("libraries", [self._lib2]), [self._lib2]) - - @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler") - def test_compile1(self): - # Compile source and link the first source - c = customized_ccompiler() - previousDir = os.getcwd() - try: - # Change directory to not screw up directories - os.chdir(self._dir1) - c.compile([os.path.basename(self._src1)], output_dir=self._dir1) - # Ensure that the object exists - assert_(os.path.isfile(self._src1.replace('.c', '.o')) or - os.path.isfile(self._src1.replace('.c', '.obj'))) - finally: - os.chdir(previousDir) - - @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler") - @pytest.mark.skipif('msvc' in repr(ccompiler.new_compiler()), - reason="Fails with MSVC compiler ") - def test_compile2(self): - # Compile source and link the second source - tsi = self.c_temp2 - c = customized_ccompiler() - extra_link_args = tsi.calc_extra_info()['extra_link_args'] - previousDir = os.getcwd() - try: - # Change directory to not screw up directories - os.chdir(self._dir2) - c.compile([os.path.basename(self._src2)], output_dir=self._dir2, - extra_postargs=extra_link_args) - # Ensure that the object exists - assert_(os.path.isfile(self._src2.replace('.c', '.o'))) - finally: - os.chdir(previousDir) - - HAS_MKL = "mkl_rt" in mkl_info().calc_libraries_info().get("libraries", []) - - @pytest.mark.xfail(HAS_MKL, 
reason=("`[DEFAULT]` override doesn't work if " - "numpy is built with MKL support")) - def test_overrides(self): - previousDir = os.getcwd() - cfg = os.path.join(self._dir1, 'site.cfg') - shutil.copy(self._sitecfg, cfg) - try: - os.chdir(self._dir1) - # Check that the '[ALL]' section does not override - # missing values from other sections - info = mkl_info() - lib_dirs = info.cp['ALL']['library_dirs'].split(os.pathsep) - assert info.get_lib_dirs() != lib_dirs - - # But if we copy the values to a '[mkl]' section the value - # is correct - with open(cfg) as fid: - mkl = fid.read().replace('[ALL]', '[mkl]', 1) - with open(cfg, 'w') as fid: - fid.write(mkl) - info = mkl_info() - assert info.get_lib_dirs() == lib_dirs - - # Also, the values will be taken from a section named '[DEFAULT]' - with open(cfg) as fid: - dflt = fid.read().replace('[mkl]', '[DEFAULT]', 1) - with open(cfg, 'w') as fid: - fid.write(dflt) - info = mkl_info() - assert info.get_lib_dirs() == lib_dirs - finally: - os.chdir(previousDir) - - -def test_distutils_parse_env_order(monkeypatch): - from numpy.distutils.system_info import _parse_env_order - env = 'NPY_TESTS_DISTUTILS_PARSE_ENV_ORDER' - - base_order = list('abcdef') - - monkeypatch.setenv(env, 'b,i,e,f') - order, unknown = _parse_env_order(base_order, env) - assert len(order) == 3 - assert order == list('bef') - assert len(unknown) == 1 - - # For when LAPACK/BLAS optimization is disabled - monkeypatch.setenv(env, '') - order, unknown = _parse_env_order(base_order, env) - assert len(order) == 0 - assert len(unknown) == 0 - - for prefix in '^!': - monkeypatch.setenv(env, f'{prefix}b,i,e') - order, unknown = _parse_env_order(base_order, env) - assert len(order) == 4 - assert order == list('acdf') - assert len(unknown) == 1 - - with pytest.raises(ValueError): - monkeypatch.setenv(env, 'b,^e,i') - _parse_env_order(base_order, env) - - with pytest.raises(ValueError): - monkeypatch.setenv(env, '!b,^e,i') - _parse_env_order(base_order, env) diff --git a/numpy/distutils/tests/utilities.py b/numpy/distutils/tests/utilities.py deleted file mode 100644 index 5016a83d2164..000000000000 --- a/numpy/distutils/tests/utilities.py +++ /dev/null @@ -1,90 +0,0 @@ -# Kanged out of numpy.f2py.tests.util for test_build_ext -from numpy.testing import IS_WASM -import textwrap -import shutil -import tempfile -import os -import re -import subprocess -import sys - -# -# Check if compilers are available at all... -# - -_compiler_status = None - - -def _get_compiler_status(): - global _compiler_status - if _compiler_status is not None: - return _compiler_status - - _compiler_status = (False, False, False) - if IS_WASM: - # Can't run compiler from inside WASM. - return _compiler_status - - # XXX: this is really ugly. But I don't know how to invoke Distutils - # in a safer way... 
- code = textwrap.dedent( - f"""\ - import os - import sys - sys.path = {repr(sys.path)} - - def configuration(parent_name='',top_path=None): - global config - from numpy.distutils.misc_util import Configuration - config = Configuration('', parent_name, top_path) - return config - - from numpy.distutils.core import setup - setup(configuration=configuration) - - config_cmd = config.get_config_cmd() - have_c = config_cmd.try_compile('void foo() {{}}') - print('COMPILERS:%%d,%%d,%%d' %% (have_c, - config.have_f77c(), - config.have_f90c())) - sys.exit(99) - """ - ) - code = code % dict(syspath=repr(sys.path)) - - tmpdir = tempfile.mkdtemp() - try: - script = os.path.join(tmpdir, "setup.py") - - with open(script, "w") as f: - f.write(code) - - cmd = [sys.executable, "setup.py", "config"] - p = subprocess.Popen( - cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=tmpdir - ) - out, err = p.communicate() - finally: - shutil.rmtree(tmpdir) - - m = re.search(rb"COMPILERS:(\d+),(\d+),(\d+)", out) - if m: - _compiler_status = ( - bool(int(m.group(1))), - bool(int(m.group(2))), - bool(int(m.group(3))), - ) - # Finished - return _compiler_status - - -def has_c_compiler(): - return _get_compiler_status()[0] - - -def has_f77_compiler(): - return _get_compiler_status()[1] - - -def has_f90_compiler(): - return _get_compiler_status()[2] diff --git a/numpy/distutils/unixccompiler.py b/numpy/distutils/unixccompiler.py deleted file mode 100644 index 4884960fdf22..000000000000 --- a/numpy/distutils/unixccompiler.py +++ /dev/null @@ -1,141 +0,0 @@ -""" -unixccompiler - can handle very long argument lists for ar. - -""" -import os -import sys -import subprocess -import shlex - -from distutils.errors import CompileError, DistutilsExecError, LibError -from distutils.unixccompiler import UnixCCompiler -from numpy.distutils.ccompiler import replace_method -from numpy.distutils.misc_util import _commandline_dep_string -from numpy.distutils import log - -# Note that UnixCCompiler._compile appeared in Python 2.3 -def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): - """Compile a single source files with a Unix-style compiler.""" - # HP ad-hoc fix, see ticket 1383 - ccomp = self.compiler_so - if ccomp[0] == 'aCC': - # remove flags that will trigger ANSI-C mode for aCC - if '-Ae' in ccomp: - ccomp.remove('-Ae') - if '-Aa' in ccomp: - ccomp.remove('-Aa') - # add flags for (almost) sane C++ handling - ccomp += ['-AA'] - self.compiler_so = ccomp - # ensure OPT environment variable is read - if 'OPT' in os.environ: - # XXX who uses this? 
- from sysconfig import get_config_vars - opt = shlex.join(shlex.split(os.environ['OPT'])) - gcv_opt = shlex.join(shlex.split(get_config_vars('OPT')[0])) - ccomp_s = shlex.join(self.compiler_so) - if opt not in ccomp_s: - ccomp_s = ccomp_s.replace(gcv_opt, opt) - self.compiler_so = shlex.split(ccomp_s) - llink_s = shlex.join(self.linker_so) - if opt not in llink_s: - self.linker_so = self.linker_so + shlex.split(opt) - - display = '%s: %s' % (os.path.basename(self.compiler_so[0]), src) - - # gcc style automatic dependencies, outputs a makefile (-MF) that lists - # all headers needed by a c file as a side effect of compilation (-MMD) - if getattr(self, '_auto_depends', False): - deps = ['-MMD', '-MF', obj + '.d'] - else: - deps = [] - - try: - self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + deps + - extra_postargs, display = display) - except DistutilsExecError as e: - msg = str(e) - raise CompileError(msg) from None - - # add commandline flags to dependency file - if deps: - # After running the compiler, the file created will be in EBCDIC - # but will not be tagged as such. This tags it so the file does not - # have multiple different encodings being written to it - if sys.platform == 'zos': - subprocess.check_output(['chtag', '-tc', 'IBM1047', obj + '.d']) - with open(obj + '.d', 'a') as f: - f.write(_commandline_dep_string(cc_args, extra_postargs, pp_opts)) - -replace_method(UnixCCompiler, '_compile', UnixCCompiler__compile) - - -def UnixCCompiler_create_static_lib(self, objects, output_libname, - output_dir=None, debug=0, target_lang=None): - """ - Build a static library in a separate sub-process. - - Parameters - ---------- - objects : list or tuple of str - List of paths to object files used to build the static library. - output_libname : str - The library name as an absolute or relative (if `output_dir` is used) - path. - output_dir : str, optional - The path to the output directory. Default is None, in which case - the ``output_dir`` attribute of the UnixCCompiler instance. - debug : bool, optional - This parameter is not used. - target_lang : str, optional - This parameter is not used. - - Returns - ------- - None - - """ - objects, output_dir = self._fix_object_args(objects, output_dir) - - output_filename = \ - self.library_filename(output_libname, output_dir=output_dir) - - if self._need_link(objects, output_filename): - try: - # previous .a may be screwed up; best to remove it first - # and recreate. - # Also, ar on OS X doesn't handle updating universal archives - os.unlink(output_filename) - except OSError: - pass - self.mkpath(os.path.dirname(output_filename)) - tmp_objects = objects + self.objects - while tmp_objects: - objects = tmp_objects[:50] - tmp_objects = tmp_objects[50:] - display = '%s: adding %d object files to %s' % ( - os.path.basename(self.archiver[0]), - len(objects), output_filename) - self.spawn(self.archiver + [output_filename] + objects, - display = display) - - # Not many Unices required ranlib anymore -- SunOS 4.x is, I - # think the only major Unix that does. Maybe we need some - # platform intelligence here to skip ranlib if it's not - # needed -- or maybe Python's configure script took care of - # it for us, hence the check for leading colon. 
- if self.ranlib: - display = '%s:@ %s' % (os.path.basename(self.ranlib[0]), - output_filename) - try: - self.spawn(self.ranlib + [output_filename], - display = display) - except DistutilsExecError as e: - msg = str(e) - raise LibError(msg) from None - else: - log.debug("skipping %s (up-to-date)", output_filename) - return - -replace_method(UnixCCompiler, 'create_static_lib', - UnixCCompiler_create_static_lib) diff --git a/numpy/f2py/_backends/__init__.py b/numpy/f2py/_backends/__init__.py index e91393c14be3..beb2bab2384d 100644 --- a/numpy/f2py/_backends/__init__.py +++ b/numpy/f2py/_backends/__init__.py @@ -2,8 +2,5 @@ def f2py_build_generator(name): if name == "meson": from ._meson import MesonBackend return MesonBackend - elif name == "distutils": - from ._distutils import DistutilsBackend - return DistutilsBackend else: raise ValueError(f"Unknown backend: {name}") diff --git a/numpy/f2py/_backends/_distutils.py b/numpy/f2py/_backends/_distutils.py deleted file mode 100644 index 5c8f1092b568..000000000000 --- a/numpy/f2py/_backends/_distutils.py +++ /dev/null @@ -1,76 +0,0 @@ -import os -import shutil -import sys -import warnings - -from numpy.distutils.core import Extension, setup -from numpy.distutils.misc_util import dict_append -from numpy.distutils.system_info import get_info -from numpy.exceptions import VisibleDeprecationWarning - -from ._backend import Backend - - -class DistutilsBackend(Backend): - def __init__(sef, *args, **kwargs): - warnings.warn( - "\ndistutils has been deprecated since NumPy 1.26.x\n" - "Use the Meson backend instead, or generate wrappers" - " without -c and use a custom build script", - VisibleDeprecationWarning, - stacklevel=2, - ) - super().__init__(*args, **kwargs) - - def compile(self): - num_info = {} - if num_info: - self.include_dirs.extend(num_info.get("include_dirs", [])) - ext_args = { - "name": self.modulename, - "sources": self.sources, - "include_dirs": self.include_dirs, - "library_dirs": self.library_dirs, - "libraries": self.libraries, - "define_macros": self.define_macros, - "undef_macros": self.undef_macros, - "extra_objects": self.extra_objects, - "f2py_options": self.f2py_flags, - } - - if self.sysinfo_flags: - for n in self.sysinfo_flags: - i = get_info(n) - if not i: - print( - f"No {n!r} resources found" - "in system (try `f2py --help-link`)" - ) - dict_append(ext_args, **i) - - ext = Extension(**ext_args) - - sys.argv = [sys.argv[0]] + self.setup_flags - sys.argv.extend( - [ - "build", - "--build-temp", - self.build_dir, - "--build-base", - self.build_dir, - "--build-platlib", - ".", - "--disable-optimization", - ] - ) - - if self.fc_flags: - sys.argv.extend(["config_fc"] + self.fc_flags) - if self.flib_flags: - sys.argv.extend(["build_ext"] + self.flib_flags) - - setup(ext_modules=[ext]) - - if self.remove_build_dir and os.path.exists(self.build_dir): - print(f"Removing build directory {self.build_dir}") - shutil.rmtree(self.build_dir) diff --git a/numpy/f2py/diagnose.py b/numpy/f2py/diagnose.py index 7eb1697cc787..865cf4a3e6cd 100644 --- a/numpy/f2py/diagnose.py +++ b/numpy/f2py/diagnose.py @@ -36,15 +36,11 @@ def run(): has_f2py2e = 0 try: - import numpy.distutils - has_numpy_distutils = 2 - except ImportError: - try: - import numpy_distutils - has_numpy_distutils = 1 - except ImportError as e: - print('Failed to import numpy_distutils:', e) - has_numpy_distutils = 0 + import numpy_distutils + has_numpy_distutils = 1 + except ImportError as e: + print('Failed to import numpy_distutils:', e) + has_numpy_distutils = 0 if 
has_newnumpy: try: @@ -63,14 +59,9 @@ def run(): if has_numpy_distutils: try: - if has_numpy_distutils == 2: - print('Found numpy.distutils version %r in %r' % ( - numpy.distutils.__version__, - numpy.distutils.__file__)) - else: - print('Found numpy_distutils version %r in %r' % ( - numpy_distutils.numpy_distutils_version.numpy_distutils_version, - numpy_distutils.__file__)) + print('Found numpy_distutils version %r in %r' % ( + numpy_distutils.numpy_distutils_version.numpy_distutils_version, + numpy_distutils.__file__)) print('------') except Exception as msg: print('error:', msg) @@ -96,12 +87,8 @@ def run(): 'error:', msg, '(ignore it, build_flib is obsolete for numpy.distutils 0.2.2 and up)') print('------') try: - if has_numpy_distutils == 2: - print('Importing numpy.distutils.fcompiler ...', end=' ') - import numpy.distutils.fcompiler as fcompiler - else: - print('Importing numpy_distutils.fcompiler ...', end=' ') - import numpy_distutils.fcompiler as fcompiler + print('Importing numpy_distutils.fcompiler ...', end=' ') + import numpy_distutils.fcompiler as fcompiler print('ok') print('------') try: @@ -115,24 +102,18 @@ def run(): print('error:', msg) print('------') try: - if has_numpy_distutils == 2: - print('Importing numpy.distutils.cpuinfo ...', end=' ') - from numpy.distutils.cpuinfo import cpuinfo + try: + print( + 'Importing numpy_distutils.command.cpuinfo ...', end=' ') + from numpy_distutils.command.cpuinfo import cpuinfo + print('ok') + print('------') + except Exception as msg: + print('error:', msg, '(ignore it)') + print('Importing numpy_distutils.cpuinfo ...', end=' ') + from numpy_distutils.cpuinfo import cpuinfo print('ok') print('------') - else: - try: - print( - 'Importing numpy_distutils.command.cpuinfo ...', end=' ') - from numpy_distutils.command.cpuinfo import cpuinfo - print('ok') - print('------') - except Exception as msg: - print('error:', msg, '(ignore it)') - print('Importing numpy_distutils.cpuinfo ...', end=' ') - from numpy_distutils.cpuinfo import cpuinfo - print('ok') - print('------') cpu = cpuinfo() print('CPU information:', end=' ') for name in dir(cpuinfo): diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index 84a5aa3c20a6..a1cce0fa9a74 100644 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -36,7 +36,6 @@ # outmess=sys.stdout.write show = pprint.pprint outmess = auxfuncs.outmess -MESON_ONLY_VER = (sys.version_info >= (3, 12)) __usage__ =\ f"""Usage: @@ -583,7 +582,7 @@ def preparse_sysargv(): sys.argv = [sys.argv[0]] + remaining_argv backend_key = args.backend - if MESON_ONLY_VER and backend_key == 'distutils': + if backend_key == 'distutils': outmess("Cannot use distutils backend with Python>=3.12," " using meson backend instead.\n") backend_key = "meson" @@ -656,36 +655,16 @@ def run_compile(): reg_f77_f90_flags = re.compile(r'--f(77|90)flags=') reg_distutils_flags = re.compile(r'--((f(77|90)exec|opt|arch)=|(debug|noopt|noarch|help-fcompiler))') fc_flags = [_m for _m in sys.argv[1:] if reg_f77_f90_flags.match(_m)] - distutils_flags = [_m for _m in sys.argv[1:] if reg_distutils_flags.match(_m)] - if not (MESON_ONLY_VER or backend_key == 'meson'): - fc_flags.extend(distutils_flags) sys.argv = [_m for _m in sys.argv if _m not in (fc_flags + distutils_flags)] del_list = [] for s in flib_flags: v = '--fcompiler=' if s[:len(v)] == v: - if MESON_ONLY_VER or backend_key == 'meson': - outmess( - "--fcompiler cannot be used with meson," - "set compiler with the FC environment variable\n" - ) - else: - from numpy.distutils import 
fcompiler - fcompiler.load_all_fcompiler_classes() - allowed_keys = list(fcompiler.fcompiler_class.keys()) - nv = ov = s[len(v):].lower() - if ov not in allowed_keys: - vmap = {} # XXX - try: - nv = vmap[ov] - except KeyError: - if ov not in vmap.values(): - print(f'Unknown vendor: "{s[len(v):]}"') - nv = ov - i = flib_flags.index(s) - flib_flags[i] = '--fcompiler=' + nv # noqa: B909 - continue + outmess( + "--fcompiler cannot be used with meson," + "set compiler with the FC environment variable\n" + ) for s in del_list: i = flib_flags.index(s) del flib_flags[i] @@ -775,11 +754,7 @@ def validate_modulename(pyf_files, modulename='untitled'): def main(): if '--help-link' in sys.argv[1:]: sys.argv.remove('--help-link') - if MESON_ONLY_VER: - outmess("Use --dep for meson builds\n") - else: - from numpy.distutils.system_info import show_all - show_all() + outmess("Use --dep for meson builds\n") return if '-c' in sys.argv[1:]: diff --git a/numpy/f2py/tests/test_f2py2e.py b/numpy/f2py/tests/test_f2py2e.py index 959e1527c482..65d33dded209 100644 --- a/numpy/f2py/tests/test_f2py2e.py +++ b/numpy/f2py/tests/test_f2py2e.py @@ -233,38 +233,6 @@ def test_untitled_cli(capfd, hello_world_f90, monkeypatch): assert "untitledmodule.c" in out -@pytest.mark.skipif((platform.system() != 'Linux') or (sys.version_info <= (3, 12)), reason='Compiler and 3.12 required') -def test_no_py312_distutils_fcompiler(capfd, hello_world_f90, monkeypatch): - """Check that no distutils imports are performed on 3.12 - CLI :: --fcompiler --help-link --backend distutils - """ - MNAME = "hi" - foutl = get_io_paths(hello_world_f90, mname=MNAME) - ipath = foutl.f90inp - monkeypatch.setattr( - sys, "argv", f"f2py {ipath} -c --fcompiler=gfortran -m {MNAME}".split() - ) - with util.switchdir(ipath.parent): - compiler_check_f2pycli() - out, _ = capfd.readouterr() - assert "--fcompiler cannot be used with meson" in out - monkeypatch.setattr( - sys, "argv", ["f2py", "--help-link"] - ) - with util.switchdir(ipath.parent): - f2pycli() - out, _ = capfd.readouterr() - assert "Use --dep for meson builds" in out - MNAME = "hi2" # Needs to be different for a new -c - monkeypatch.setattr( - sys, "argv", f"f2py {ipath} -c -m {MNAME} --backend distutils".split() - ) - with util.switchdir(ipath.parent): - f2pycli() - out, _ = capfd.readouterr() - assert "Cannot use distutils backend with Python>=3.12" in out - - @pytest.mark.xfail def test_f2py_skip(capfd, retreal_f77, monkeypatch): """Tests that functions can be skipped diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index ed928a5ec7b4..8a89c464b49d 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -6,6 +6,7 @@ import contextlib import gc import importlib.metadata +import importlib.util import operator import os import pathlib @@ -1441,12 +1442,13 @@ def rundocs(filename=None, raise_on_error=True): """ import doctest - from numpy.distutils.misc_util import exec_mod_from_location if filename is None: f = sys._getframe(1) filename = f.f_globals['__file__'] name = os.path.splitext(os.path.basename(filename))[0] - m = exec_mod_from_location(name, filename) + spec = importlib.util.spec_from_file_location(name, filename) + m = importlib.util.module_from_spec(spec) + spec.loader.exec_module(m) tests = doctest.DocTestFinder().find(m) runner = doctest.DocTestRunner(verbose=False) diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index f6fa8611e181..1e26cb18b60f 100644 --- a/numpy/tests/test_public_api.py +++ 
b/numpy/tests/test_public_api.py @@ -141,18 +141,6 @@ def test_NPY_NO_EXPORT(): "typing.mypy_plugin", "version", ]] -if sys.version_info < (3, 12): - PUBLIC_MODULES += [ - 'numpy.' + s for s in [ - "distutils", - "distutils.cpuinfo", - "distutils.exec_command", - "distutils.misc_util", - "distutils.log", - "distutils.system_info", - ] - ] - PUBLIC_ALIASED_MODULES = [ "numpy.char", @@ -202,67 +190,6 @@ def test_NPY_NO_EXPORT(): "random.bit_generator", "testing.print_coercion_tables", ]] -if sys.version_info < (3, 12): - PRIVATE_BUT_PRESENT_MODULES += [ - 'numpy.' + s for s in [ - "distutils.armccompiler", - "distutils.fujitsuccompiler", - "distutils.ccompiler", - 'distutils.ccompiler_opt', - "distutils.command", - "distutils.command.autodist", - "distutils.command.bdist_rpm", - "distutils.command.build", - "distutils.command.build_clib", - "distutils.command.build_ext", - "distutils.command.build_py", - "distutils.command.build_scripts", - "distutils.command.build_src", - "distutils.command.config", - "distutils.command.config_compiler", - "distutils.command.develop", - "distutils.command.egg_info", - "distutils.command.install", - "distutils.command.install_clib", - "distutils.command.install_data", - "distutils.command.install_headers", - "distutils.command.sdist", - "distutils.conv_template", - "distutils.core", - "distutils.extension", - "distutils.fcompiler", - "distutils.fcompiler.absoft", - "distutils.fcompiler.arm", - "distutils.fcompiler.compaq", - "distutils.fcompiler.environment", - "distutils.fcompiler.g95", - "distutils.fcompiler.gnu", - "distutils.fcompiler.hpux", - "distutils.fcompiler.ibm", - "distutils.fcompiler.intel", - "distutils.fcompiler.lahey", - "distutils.fcompiler.mips", - "distutils.fcompiler.nag", - "distutils.fcompiler.none", - "distutils.fcompiler.pathf95", - "distutils.fcompiler.pg", - "distutils.fcompiler.nv", - "distutils.fcompiler.sun", - "distutils.fcompiler.vast", - "distutils.fcompiler.fujitsu", - "distutils.from_template", - "distutils.intelccompiler", - "distutils.lib2def", - "distutils.line_endings", - "distutils.mingw32ccompiler", - "distutils.msvccompiler", - "distutils.npy_pkg_config", - "distutils.numpy_distribution", - "distutils.pathccompiler", - "distutils.unixccompiler", - ] - ] - def is_unexpected(name): """Check if this needs to be considered.""" @@ -274,10 +201,7 @@ def is_unexpected(name): ) -if sys.version_info >= (3, 12): - SKIP_LIST = [] -else: - SKIP_LIST = ["numpy.distutils.msvc9compiler"] +SKIP_LIST = [] def test_all_modules_are_expected(): @@ -315,12 +239,6 @@ def test_all_modules_are_expected(): 'numpy.matlib.ctypeslib', 'numpy.matlib.ma', ] -if sys.version_info < (3, 12): - SKIP_LIST_2 += [ - 'numpy.distutils.log.sys', - 'numpy.distutils.log.logging', - 'numpy.distutils.log.warnings', - ] def test_all_modules_are_expected_2(): diff --git a/numpy/typing/tests/test_isfile.py b/numpy/typing/tests/test_isfile.py index 0e3157a1e54d..1d91f908f021 100644 --- a/numpy/typing/tests/test_isfile.py +++ b/numpy/typing/tests/test_isfile.py @@ -23,8 +23,6 @@ ROOT / "random" / "__init__.pyi", ROOT / "testing" / "__init__.pyi", ] -if sys.version_info < (3, 12): - FILES += [ROOT / "distutils" / "__init__.pyi"] @pytest.mark.thread_unsafe( diff --git a/pytest.ini b/pytest.ini index b8a1da2b4ec6..532095ab9aa7 100644 --- a/pytest.ini +++ b/pytest.ini @@ -22,11 +22,8 @@ filterwarnings = ignore:The numpy.array_api submodule is still experimental. See NEP 47. 
# ignore matplotlib headless warning for pyplot ignore:Matplotlib is currently using agg, which is a non-GUI backend, so cannot show the figure.:UserWarning -# Ignore DeprecationWarnings from distutils - ignore::DeprecationWarning:.*distutils - ignore:\n\n `numpy.distutils`:DeprecationWarning # Ignore DeprecationWarning from typing.mypy_plugin ignore:`numpy.typing.mypy_plugin` is deprecated:DeprecationWarning # Ignore DeprecationWarning from struct module # see https://github.com/numpy/numpy/issues/28926 - ignore:Due to \'_pack_\', the \ No newline at end of file + ignore:Due to \'_pack_\', the diff --git a/ruff.toml b/ruff.toml index b25a34d45984..4f87ca65c1a1 100644 --- a/ruff.toml +++ b/ruff.toml @@ -1,6 +1,5 @@ extend-exclude = [ "numpy/__config__.py", - "numpy/distutils", "numpy/typing/_char_codes.py", "spin/cmds.py", # Submodules. diff --git a/tools/check_installed_files.py b/tools/check_installed_files.py index 61bc49197d79..fd66b68a43fc 100644 --- a/tools/check_installed_files.py +++ b/tools/check_installed_files.py @@ -84,11 +84,6 @@ def get_files(dir_to_check, kind='test'): relpath = os.path.relpath(path, dir_to_check) files[relpath] = path - if sys.version_info >= (3, 12): - files = { - k: v for k, v in files.items() if not k.startswith('distutils') - } - # ignore python files in vendored pythoncapi-compat submodule files = { k: v for k, v in files.items() if 'pythoncapi-compat' not in k diff --git a/tools/stubtest/allowlist_py311.txt b/tools/stubtest/allowlist_py311.txt index 4413f164f582..e6b2e364230e 100644 --- a/tools/stubtest/allowlist_py311.txt +++ b/tools/stubtest/allowlist_py311.txt @@ -1,3 +1,2 @@ # python == 3.11.* -numpy\.distutils\..* diff --git a/tools/stubtest/allowlist_py312.txt b/tools/stubtest/allowlist_py312.txt index 867b2f1870a3..a0ee0edf2be2 100644 --- a/tools/stubtest/allowlist_py312.txt +++ b/tools/stubtest/allowlist_py312.txt @@ -4,5 +4,3 @@ numpy\.typing\.ArrayLike numpy\.typing\.DTypeLike -# only exists before Python 3.12 -numpy\.f2py\._backends\._distutils diff --git a/tools/stubtest/mypy.ini b/tools/stubtest/mypy.ini index ded91d7d56f3..0a3bb07b1b94 100644 --- a/tools/stubtest/mypy.ini +++ b/tools/stubtest/mypy.ini @@ -5,7 +5,6 @@ exclude = (?x)( .+\.py$ | _build_utils/ | _core/code_generators/ - | distutils/ ) ) namespace_packages = False From 9ae90b664a2e95dbf6bc0f2d3b2cde256783379e Mon Sep 17 00:00:00 2001 From: mattip Date: Mon, 1 Dec 2025 14:18:49 +0200 Subject: [PATCH 0939/1018] DOC, MAINT: fixes, add release note --- .../upcoming_changes/30340.expired.rst | 1 + doc/source/f2py/buildtools/distutils.rst | 2 +- .../reference/distutils_status_migration.rst | 4 +- doc/source/release/1.16.0-notes.rst | 2 +- doc/source/release/1.18.0-notes.rst | 2 +- doc/source/release/1.20.0-notes.rst | 2 +- numpy/_build_utils/conv_template.py | 44 +++++++++++-------- numpy/_pytesttester.py | 1 - numpy/f2py/__init__.py | 6 --- numpy/f2py/f2py2e.py | 2 +- numpy/typing/tests/test_isfile.py | 1 - 11 files changed, 33 insertions(+), 34 deletions(-) create mode 100644 doc/release/upcoming_changes/30340.expired.rst diff --git a/doc/release/upcoming_changes/30340.expired.rst b/doc/release/upcoming_changes/30340.expired.rst new file mode 100644 index 000000000000..79dd57dde737 --- /dev/null +++ b/doc/release/upcoming_changes/30340.expired.rst @@ -0,0 +1 @@ +* ``numpy.distutils`` has been removed diff --git a/doc/source/f2py/buildtools/distutils.rst b/doc/source/f2py/buildtools/distutils.rst index 98416c8cdef9..bdc94ea8ae53 100644 --- 
a/doc/source/f2py/buildtools/distutils.rst +++ b/doc/source/f2py/buildtools/distutils.rst @@ -1,7 +1,7 @@ .. _f2py-distutils: ============================= -Using via `numpy.distutils` +Using via ``numpy.distutils`` ============================= .. legacy:: diff --git a/doc/source/reference/distutils_status_migration.rst b/doc/source/reference/distutils_status_migration.rst index 2fea390a7a8b..eff13439cf52 100644 --- a/doc/source/reference/distutils_status_migration.rst +++ b/doc/source/reference/distutils_status_migration.rst @@ -3,7 +3,7 @@ Status of ``numpy.distutils`` and migration advice ================================================== -`numpy.distutils` has been removed in NumPy ``1.25.0``. +``numpy.distutils`` was removed in NumPy ``2.5.0``. Migration advice ---------------- @@ -18,7 +18,7 @@ using a well-designed, modern and reliable build system, we recommend: If you have modest needs (only simple Cython/C extensions; no need for Fortran, BLAS/LAPACK, nested ``setup.py`` files, or other features of -``numpy.distutils``) and have been happy with ``numpy.distutils`` so far, you +``numpy.distutils``) and have been happy with ``numpy.distutils``, you can also consider switching to ``setuptools``. Note that most functionality of ``numpy.distutils`` is unlikely to be ported to ``setuptools``. diff --git a/doc/source/release/1.16.0-notes.rst b/doc/source/release/1.16.0-notes.rst index 07e06ca6e043..7a387629fe46 100644 --- a/doc/source/release/1.16.0-notes.rst +++ b/doc/source/release/1.16.0-notes.rst @@ -271,7 +271,7 @@ via the services of shippable.com. Appending to build flags ------------------------ -`numpy.distutils` has always overridden rather than appended to `LDFLAGS` and +``numpy.distutils`` has always overridden rather than appended to `LDFLAGS` and other similar such environment variables for compiling Fortran extensions. Now, if the `NPY_DISTUTILS_APPEND_FLAGS` environment variable is set to 1, the behavior will be appending. This applied to: `LDFLAGS`, `F77FLAGS`, diff --git a/doc/source/release/1.18.0-notes.rst b/doc/source/release/1.18.0-notes.rst index a90dbb7a67d9..43d2cdedf4b6 100644 --- a/doc/source/release/1.18.0-notes.rst +++ b/doc/source/release/1.18.0-notes.rst @@ -350,7 +350,7 @@ and load will be addressed in a future release. ``numpy.distutils`` append behavior changed for LDFLAGS and similar ------------------------------------------------------------------- -`numpy.distutils` has always overridden rather than appended to ``LDFLAGS`` and +``numpy.distutils`` has always overridden rather than appended to ``LDFLAGS`` and other similar such environment variables for compiling Fortran extensions. Now the default behavior has changed to appending - which is the expected behavior in most situations. To preserve the old (overwriting) behavior, set the diff --git a/doc/source/release/1.20.0-notes.rst b/doc/source/release/1.20.0-notes.rst index a2276ac5016d..298d417bb0c2 100644 --- a/doc/source/release/1.20.0-notes.rst +++ b/doc/source/release/1.20.0-notes.rst @@ -735,7 +735,7 @@ checking. Negation of user defined BLAS/LAPACK detection order ---------------------------------------------------- -`~numpy.distutils` allows negation of libraries when determining BLAS/LAPACK +``~numpy.distutils`` allows negation of libraries when determining BLAS/LAPACK libraries. This may be used to remove an item from the library resolution phase, i.e. 
to disallow NetLIB libraries one could do: diff --git a/numpy/_build_utils/conv_template.py b/numpy/_build_utils/conv_template.py index c8933d1d4286..fb57abdf1587 100644 --- a/numpy/_build_utils/conv_template.py +++ b/numpy/_build_utils/conv_template.py @@ -82,8 +82,8 @@ __all__ = ['process_str', 'process_file'] import os -import sys import re +import sys # names for replacement that are already global. global_names = {} @@ -106,10 +106,10 @@ def parse_structure(astr, level): at zero. Returns an empty list if no loops found. """ - if level == 0 : + if level == 0: loopbeg = "/**begin repeat" loopend = "/**end repeat**/" - else : + else: loopbeg = "/**begin repeat%d" % level loopend = "/**end repeat%d**/" % level @@ -124,9 +124,9 @@ def parse_structure(astr, level): start2 = astr.find("\n", start2) fini1 = astr.find(loopend, start2) fini2 = astr.find("\n", fini1) - line += astr.count("\n", ind, start2+1) - spanlist.append((start, start2+1, fini1, fini2+1, line)) - line += astr.count("\n", start2+1, fini2) + line += astr.count("\n", ind, start2 + 1) + spanlist.append((start, start2 + 1, fini1, fini2 + 1, line)) + line += astr.count("\n", start2 + 1, fini2) ind = fini2 spanlist.sort() return spanlist @@ -135,10 +135,13 @@ def parse_structure(astr, level): def paren_repl(obj): torep = obj.group(1) numrep = obj.group(2) - return ','.join([torep]*int(numrep)) + return ','.join([torep] * int(numrep)) + parenrep = re.compile(r"\(([^)]*)\)\*(\d+)") plainrep = re.compile(r"([^*]+)\*(\d+)") + + def parse_values(astr): # replaces all occurrences of '(a,b,c)*4' in astr # with 'a,b,c,a,b,c,a,b,c,a,b,c'. Empty braces generate @@ -155,7 +158,7 @@ def parse_values(astr): named_re = re.compile(r"#\s*(\w*)\s*=([^#]*)#") exclude_vars_re = re.compile(r"(\w*)=(\w*)") exclude_re = re.compile(":exclude:") -def parse_loop_header(loophead) : +def parse_loop_header(loophead): """Find all named replacements in the header Returns a list of dictionaries, one for each loop iteration, @@ -179,14 +182,13 @@ def parse_loop_header(loophead) : name = rep[0] vals = parse_values(rep[1]) size = len(vals) - if nsub is None : + if nsub is None: nsub = size - elif nsub != size : + elif nsub != size: msg = "Mismatch in number of values, %d != %d\n%s = %s" raise ValueError(msg % (nsub, size, name, vals)) names.append((name, vals)) - # Find any exclude variables excludes = [] @@ -200,30 +202,33 @@ def parse_loop_header(loophead) : # generate list of dictionaries, one for each template iteration dlist = [] - if nsub is None : + if nsub is None: raise ValueError("No substitution variables found") for i in range(nsub): tmp = {name: vals[i] for name, vals in names} dlist.append(tmp) return dlist + replace_re = re.compile(r"@(\w+)@") -def parse_string(astr, env, level, line) : + + +def parse_string(astr, env, level, line): lineno = "#line %d\n" % line # local function for string replacement, uses env def replace(match): name = match.group(1) - try : + try: val = env[name] except KeyError: - msg = 'line %d: no definition of key "%s"'%(line, name) + msg = f'line {line}: no definition of key "{name}"' raise ValueError(msg) from None return val code = [lineno] struct = parse_structure(astr, level) - if struct : + if struct: # recurse over inner loops oldend = 0 newlevel = level + 1 @@ -234,18 +239,18 @@ def replace(match): oldend = sub[3] newline = line + sub[4] code.append(replace_re.sub(replace, pref)) - try : + try: envlist = parse_loop_header(head) except ValueError as e: msg = "line %d: %s" % (newline, e) raise ValueError(msg) - for 
newenv in envlist : + for newenv in envlist: newenv.update(env) newcode = parse_string(text, newenv, newlevel, newline) code.extend(newcode) suff = astr[oldend:] code.append(replace_re.sub(replace, suff)) - else : + else: # replace keys code.append(replace_re.sub(replace, astr)) code.append('\n') @@ -325,5 +330,6 @@ def main(): outfile.write(writestr) + if __name__ == "__main__": main() diff --git a/numpy/_pytesttester.py b/numpy/_pytesttester.py index ecc6a1cda738..cbb4191047ba 100644 --- a/numpy/_pytesttester.py +++ b/numpy/_pytesttester.py @@ -123,7 +123,6 @@ def __call__(self, label='fast', verbose=1, extra_argv=None, True """ - import warnings import pytest diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py index e34dd99aec1c..f545c9c5fd84 100644 --- a/numpy/f2py/__init__.py +++ b/numpy/f2py/__init__.py @@ -26,12 +26,6 @@ def get_include(): """ Return the directory that contains the ``fortranobject.c`` and ``.h`` files. - .. note:: - - This function is not needed when building an extension with - `numpy.distutils` directly from ``.f`` and/or ``.pyf`` files - in one go. - Python extension modules built with f2py-generated code need to use ``fortranobject.c`` as a source file, and include the ``fortranobject.h`` header. This function can be used to obtain the directory containing diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index a1cce0fa9a74..23682957c19a 100644 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -655,7 +655,7 @@ def run_compile(): reg_f77_f90_flags = re.compile(r'--f(77|90)flags=') reg_distutils_flags = re.compile(r'--((f(77|90)exec|opt|arch)=|(debug|noopt|noarch|help-fcompiler))') fc_flags = [_m for _m in sys.argv[1:] if reg_f77_f90_flags.match(_m)] - sys.argv = [_m for _m in sys.argv if _m not in (fc_flags + distutils_flags)] + sys.argv = [_m for _m in sys.argv if _m not in (fc_flags + reg_distutils_flags)] del_list = [] for s in flib_flags: diff --git a/numpy/typing/tests/test_isfile.py b/numpy/typing/tests/test_isfile.py index 1d91f908f021..250686a98ee8 100644 --- a/numpy/typing/tests/test_isfile.py +++ b/numpy/typing/tests/test_isfile.py @@ -1,5 +1,4 @@ import os -import sys from pathlib import Path import pytest From e4eced3246450cbff86ac745f1a456563fe6373f Mon Sep 17 00:00:00 2001 From: mattip Date: Mon, 1 Dec 2025 14:20:32 +0200 Subject: [PATCH 0940/1018] TYPE: remove stubs for f2py._backend._distutils DOC, MAINT: fix doc and f2py --- .../f2py/buildtools/distutils-to-meson.rst | 109 ++++-------------- doc/source/f2py/buildtools/distutils.rst | 11 -- doc/source/f2py/buildtools/index.rst | 3 +- numpy/f2py/_backends/__init__.pyi | 2 +- numpy/f2py/_backends/_distutils.pyi | 13 --- numpy/f2py/f2py2e.py | 3 +- numpy/f2py/f2py2e.pyi | 1 - 7 files changed, 25 insertions(+), 117 deletions(-) delete mode 100644 doc/source/f2py/buildtools/distutils.rst delete mode 100644 numpy/f2py/_backends/_distutils.pyi diff --git a/doc/source/f2py/buildtools/distutils-to-meson.rst b/doc/source/f2py/buildtools/distutils-to-meson.rst index 585bfba57246..5248ab3355d6 100644 --- a/doc/source/f2py/buildtools/distutils-to-meson.rst +++ b/doc/source/f2py/buildtools/distutils-to-meson.rst @@ -57,46 +57,21 @@ This is unchanged: 1.2.2 Specify the backend ^^^^^^^^^^^^^^^^^^^^^^^^^ -.. tab-set:: +.. code-block:: bash - .. tab-item:: Distutils - :sync: distutils + python -m numpy.f2py -c fib.f90 -m fib --backend meson - .. code-block:: bash - - python -m numpy.f2py -c fib.f90 -m fib --backend distutils - - This is the default for Python versions before 3.12. - - .. 
tab-item:: Meson
-        :sync: meson
-
-        .. code-block:: bash
-
-            python -m numpy.f2py -c fib.f90 -m fib --backend meson
-
-        This is the only option for Python versions after 3.12.
+This is the only option. There used to be a ``distutils`` backend but it was
+removed in NumPy 2.5.
 
 1.2.3 Pass a compiler name
 ^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-.. tab-set::
-
-    .. tab-item:: Distutils
-        :sync: distutils
-
-        .. code-block:: bash
+.. code-block:: bash
 
-            python -m numpy.f2py -c fib.f90 -m fib --backend distutils --fcompiler=gfortran
+    FC="gfortran" python -m numpy.f2py -c fib.f90 -m fib
 
-    .. tab-item:: Meson
-        :sync: meson
-
-        .. code-block:: bash
-
-            FC="gfortran" python -m numpy.f2py -c fib.f90 -m fib --backend meson
-
-        Native files can also be used.
+Native files can also be used.
 
 Similarly, ``CC`` can be used in both cases to set the ``C`` compiler. Since the
 environment variables are generally pretty common across both, so a small
@@ -137,73 +112,31 @@ sample is included below.
 
 1.2.4 Dependencies
 ^^^^^^^^^^^^^^^^^^
 
-Here, ``meson`` can actually be used to set dependencies more robustly.
-
-.. tab-set::
-
-    .. tab-item:: Distutils
-        :sync: distutils
-
-        .. code-block:: bash
-
-            python -m numpy.f2py -c fib.f90 -m fib --backend distutils -llapack
+.. code-block:: bash
 
-        Note that this approach in practice is error prone.
+    python -m numpy.f2py -c fib.f90 -m fib --dep lapack
 
-    .. tab-item:: Meson
-        :sync: meson
-
-        .. code-block:: bash
-
-            python -m numpy.f2py -c fib.f90 -m fib --backend meson --dep lapack
-
-        This maps to ``dependency("lapack")`` and so can be used for a wide variety
-        of dependencies. They can be `customized further `_
-        to use CMake or other systems to resolve dependencies.
+This maps to ``dependency("lapack")`` and so can be used for a wide variety
+of dependencies. They can be `customized further `_
+to use CMake or other systems to resolve dependencies.
 
 1.2.5 Libraries
 ^^^^^^^^^^^^^^^
 
-Both ``meson`` and ``distutils`` are capable of linking against libraries.
-
-.. tab-set::
-
-    .. tab-item:: Distutils
-        :sync: distutils
-
-        .. code-block:: bash
-
-            python -m numpy.f2py -c fib.f90 -m fib --backend distutils -lmylib -L/path/to/mylib
+``meson`` is capable of linking against libraries.
 
-    .. tab-item:: Meson
-        :sync: meson
+.. code-block:: bash
 
-        .. code-block:: bash
-
-            python -m numpy.f2py -c fib.f90 -m fib --backend meson -lmylib -L/path/to/mylib
+    python -m numpy.f2py -c fib.f90 -m fib -lmylib -L/path/to/mylib
 
 1.3 Customizing builds
 ~~~~~~~~~~~~~~~~~~~~~~
 
-.. tab-set::
-
-    .. tab-item:: Distutils
-        :sync: distutils
-
-        .. code-block:: bash
-
-            python -m numpy.f2py -c fib.f90 -m fib --backend distutils --build-dir blah
-
-        This can be technically integrated with other codes, see :ref:`f2py-distutils`.
-
-    .. tab-item:: Meson
-        :sync: meson
-
-        .. code-block:: bash
+.. code-block:: bash
 
-            python -m numpy.f2py -c fib.f90 -m fib --backend meson --build-dir blah
+    python -m numpy.f2py -c fib.f90 -m fib --build-dir blah
 
-        The resulting build can be customized via the
-        `Meson Build How-To Guide `_.
-        In fact, the resulting set of files can even be committed directly and used
-        as a meson subproject in a separate codebase.
+The resulting build can be customized via the
+`Meson Build How-To Guide `_.
+In fact, the resulting set of files can even be committed directly and used
+as a meson subproject in a separate codebase.
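
The migration examples above stay in ``bash``; the same meson-backed workflow
can also be driven from a script. The sketch below is illustrative only: it
assumes ``gfortran`` and a system ``lapack`` are installed, and
``fib.f90``/``fib`` are the placeholder names used throughout this page, not
anything introduced by this patch.

.. code-block:: python

    # Hypothetical helper, mirroring the bash examples above: pin the Fortran
    # compiler via FC and request a LAPACK dependency via --dep.
    import os
    import subprocess
    import sys

    env = dict(os.environ, FC="gfortran")
    subprocess.check_call(
        [sys.executable, "-m", "numpy.f2py",
         "-c", "fib.f90", "-m", "fib", "--dep", "lapack"],
        env=env,
    )
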
diff --git a/doc/source/f2py/buildtools/distutils.rst b/doc/source/f2py/buildtools/distutils.rst deleted file mode 100644 index bdc94ea8ae53..000000000000 --- a/doc/source/f2py/buildtools/distutils.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. _f2py-distutils: - -============================= -Using via ``numpy.distutils`` -============================= - -.. legacy:: - - ``distutils`` has been removed in favor of ``meson`` see - :ref:`distutils-status-migration`. - diff --git a/doc/source/f2py/buildtools/index.rst b/doc/source/f2py/buildtools/index.rst index d5f3876e6dd5..946f82eb57dd 100644 --- a/doc/source/f2py/buildtools/index.rst +++ b/doc/source/f2py/buildtools/index.rst @@ -11,7 +11,7 @@ with ``f2py``. The default build system for ``f2py`` has traditionally been through the enhanced ``numpy.distutils`` module. This module is based on ``distutils`` - which was removed in ``Python 3.12.0`` in **October 2025**. Like the rest of + which was removed in ``NumPy2.5`` in **June 2026**. Like the rest of NumPy and SciPy, ``f2py`` uses ``meson`` now, see :ref:`distutils-status-migration` for some more details. @@ -107,7 +107,6 @@ Build systems .. toctree:: :maxdepth: 2 - distutils meson cmake skbuild diff --git a/numpy/f2py/_backends/__init__.pyi b/numpy/f2py/_backends/__init__.pyi index 43625c68061f..11e3743be541 100644 --- a/numpy/f2py/_backends/__init__.pyi +++ b/numpy/f2py/_backends/__init__.pyi @@ -2,4 +2,4 @@ from typing import Literal as L from ._backend import Backend -def f2py_build_generator(name: L["distutils", "meson"]) -> Backend: ... +def f2py_build_generator(name: L["meson"]) -> Backend: ... diff --git a/numpy/f2py/_backends/_distutils.pyi b/numpy/f2py/_backends/_distutils.pyi deleted file mode 100644 index 56bbf7e5b49a..000000000000 --- a/numpy/f2py/_backends/_distutils.pyi +++ /dev/null @@ -1,13 +0,0 @@ -from typing_extensions import deprecated, override - -from ._backend import Backend - -class DistutilsBackend(Backend): - @deprecated( - "distutils has been deprecated since NumPy 1.26.x. Use the Meson backend instead, or generate wrappers without -c and " - "use a custom build script" - ) - # NOTE: the `sef` typo matches runtime - def __init__(sef, *args: object, **kwargs: object) -> None: ... - @override - def compile(self) -> None: ... 
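
(Editor's note: the user-visible effect of removing the ``distutils`` backend and its stub is exercised by ``test_no_distutils_backend`` later in this series; a short sketch of the CLI behaviour it asserts.)

.. code-block:: bash

   # Works: meson is now the only, and therefore the default, backend
   python -m numpy.f2py -c fib.f90 -m fib
   # Fails with an error noting that the 'distutils' backend was removed
   python -m numpy.f2py -c fib.f90 -m fib --backend distutils
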
diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index 23682957c19a..2f73b1924f6d 100644 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -655,7 +655,8 @@ def run_compile(): reg_f77_f90_flags = re.compile(r'--f(77|90)flags=') reg_distutils_flags = re.compile(r'--((f(77|90)exec|opt|arch)=|(debug|noopt|noarch|help-fcompiler))') fc_flags = [_m for _m in sys.argv[1:] if reg_f77_f90_flags.match(_m)] - sys.argv = [_m for _m in sys.argv if _m not in (fc_flags + reg_distutils_flags)] + distutils_flags = [_m for _m in sys.argv[1:] if reg_distutils_flags.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in (fc_flags + distutils_flags)] del_list = [] for s in flib_flags: diff --git a/numpy/f2py/f2py2e.pyi b/numpy/f2py/f2py2e.pyi index 46794e552b41..686898041b9d 100644 --- a/numpy/f2py/f2py2e.pyi +++ b/numpy/f2py/f2py2e.pyi @@ -28,7 +28,6 @@ class _PreparseResult(TypedDict): ### -MESON_ONLY_VER: Final[bool] f2py_version: Final = version numpy_version: Final = version __usage__: Final[str] From 47b8b3ca327ab1c797052ee1c27ee555a1870082 Mon Sep 17 00:00:00 2001 From: mattip Date: Mon, 1 Dec 2025 19:26:24 +0200 Subject: [PATCH 0941/1018] DOC: remove comparison of meson and distutils, remove warning exclusion --- doc/source/conf.py | 16 ---------------- .../f2py/buildtools/distutils-to-meson.rst | 5 +---- doc/source/f2py/buildtools/meson.rst | 5 ----- 3 files changed, 1 insertion(+), 25 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index e26719b05cb0..302f1fd3731c 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -145,24 +145,8 @@ class PyTypeObject(ctypes.Structure): # The reST default role (used for this markup: `text`) to use for all documents. default_role = "autolink" -# List of directories, relative to source directories, that shouldn't be searched -# for source files. -exclude_dirs = [] - -exclude_patterns = [] -suppress_warnings = [] nitpick_ignore = [] -if sys.version_info[:2] >= (3, 12): - suppress_warnings += [ - 'toc.excluded', # Suppress warnings about excluded toctree entries - ] - nitpicky = True - nitpick_ignore += [ - # The first ignore is not captured without nitpicky = True. - ('py:class', 'Extension'), - ] - # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = False diff --git a/doc/source/f2py/buildtools/distutils-to-meson.rst b/doc/source/f2py/buildtools/distutils-to-meson.rst index 5248ab3355d6..8dd504dd9c8f 100644 --- a/doc/source/f2py/buildtools/distutils-to-meson.rst +++ b/doc/source/f2py/buildtools/distutils-to-meson.rst @@ -5,8 +5,7 @@ ------------------------ As per the timeline laid out in :ref:`distutils-status-migration`, -``distutils`` has ceased to be the default build backend for ``f2py``. This page -collects common workflows in both formats. +``distutils`` has been removed. This page collects common workflows. .. note:: @@ -44,8 +43,6 @@ This will not win any awards, but can be a reasonable starting point. 1.2.1 Basic Usage ^^^^^^^^^^^^^^^^^ -This is unchanged: - .. code:: bash python -m numpy.f2py -c fib.f90 -m fib diff --git a/doc/source/f2py/buildtools/meson.rst b/doc/source/f2py/buildtools/meson.rst index c17c5d2ddc87..44560bef8c5f 100644 --- a/doc/source/f2py/buildtools/meson.rst +++ b/doc/source/f2py/buildtools/meson.rst @@ -15,11 +15,6 @@ Using via ``meson`` The default build system for ``f2py`` is now ``meson``, see :ref:`distutils-status-migration` for some more details.. 
-The key advantage gained by leveraging ``meson`` over the techniques described -in :ref:`f2py-distutils` is that this feeds into existing systems and larger -projects with ease. ``meson`` has a rather pythonic syntax which makes it more -comfortable and amenable to extension for ``python`` users. - Fibonacci walkthrough (F77) =========================== From 0248909815caa83878128a7305b78dccb4f4a84e Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Tue, 2 Dec 2025 12:49:22 +0200 Subject: [PATCH 0942/1018] BUG: raise BufferError when creating dlpack with wrong device tuple (#30351) --- numpy/_core/src/multiarray/dlpack.c | 3 ++- numpy/_core/tests/test_dlpack.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/multiarray/dlpack.c b/numpy/_core/src/multiarray/dlpack.c index 62cd137daa7c..29e5aecec5d5 100644 --- a/numpy/_core/src/multiarray/dlpack.c +++ b/numpy/_core/src/multiarray/dlpack.c @@ -392,7 +392,8 @@ device_converter(PyObject *obj, DLDevice *result_device) return NPY_SUCCEED; } - PyErr_SetString(PyExc_ValueError, "unsupported device requested"); + /* Must be a BufferError */ + PyErr_SetString(PyExc_BufferError, "unsupported device requested"); return NPY_FAIL; } diff --git a/numpy/_core/tests/test_dlpack.py b/numpy/_core/tests/test_dlpack.py index 89c24032b6c1..e8198ac1823e 100644 --- a/numpy/_core/tests/test_dlpack.py +++ b/numpy/_core/tests/test_dlpack.py @@ -184,7 +184,7 @@ def test_device(self): np.from_dlpack(x, device="cpu") np.from_dlpack(x, device=None) - with pytest.raises(ValueError): + with pytest.raises(BufferError): x.__dlpack__(dl_device=(10, 0)) with pytest.raises(ValueError): np.from_dlpack(x, device="gpu") From 69e28adc9a2b379fc7c5d025f797e0803b63bdff Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 2 Dec 2025 13:42:39 +0200 Subject: [PATCH 0943/1018] fixes from review --- doc/neps/scope.rst | 2 +- doc/source/dev/depending_on_numpy.rst | 3 +- .../f2py/buildtools/distutils-to-meson.rst | 6 +- doc/source/f2py/buildtools/index.rst | 2 +- .../reference/distutils_status_migration.rst | 11 --- numpy/__init__.py | 7 -- numpy/f2py/diagnose.py | 83 ++----------------- numpy/f2py/f2py2e.py | 9 -- numpy/f2py/tests/test_f2py2e.py | 31 +++++++ 9 files changed, 42 insertions(+), 112 deletions(-) diff --git a/doc/neps/scope.rst b/doc/neps/scope.rst index 1d5722700a79..ffa3d8655ad8 100644 --- a/doc/neps/scope.rst +++ b/doc/neps/scope.rst @@ -36,7 +36,7 @@ Here, we describe aspects of N-d array computation that are within scope for Num - NumPy provides some **infrastructure for other packages in the scientific Python ecosystem**: - - numpy.distutils (deprecated and removed, build support for C++, Fortran, + - numpy.distutils (removed in NumPy 2.5.0, was providing build support for C++, Fortran, BLAS/LAPACK, and other relevant libraries for scientific computing) - f2py (generating bindings for Fortran code) - testing utilities (mostly deprecated, pytest does a good job) diff --git a/doc/source/dev/depending_on_numpy.rst b/doc/source/dev/depending_on_numpy.rst index fcdae806fad3..98dc552a779e 100644 --- a/doc/source/dev/depending_on_numpy.rst +++ b/doc/source/dev/depending_on_numpy.rst @@ -141,8 +141,7 @@ for dropping support for old Python and NumPy versions: :ref:`NEP29`. We recommend all packages depending on NumPy to follow the recommendations in NEP 29. -For *run-time dependencies*, specify version bounds using -``install_requires`` in ``setup.py`` (assuming you use ``setuptools`` to build). 
+For *run-time dependencies*, specify version bounds in ``pyproject.toml``.
 
 Most libraries that rely on NumPy will not need to set an upper
 version bound: NumPy is careful to preserve backward-compatibility.
diff --git a/doc/source/f2py/buildtools/distutils-to-meson.rst b/doc/source/f2py/buildtools/distutils-to-meson.rst
index 8dd504dd9c8f..920848b9d0d3 100644
--- a/doc/source/f2py/buildtools/distutils-to-meson.rst
+++ b/doc/source/f2py/buildtools/distutils-to-meson.rst
@@ -56,17 +56,17 @@ This will not win any awards, but can be a reasonable starting point.
 
 .. code-block:: bash
 
-   python -m numpy.f2py -c fib.f90 -m fib --backend meson
+   python -m numpy.f2py -c fib.f90 -m fib
 
 This is the only option. There used to be a ``distutils`` backend but it was
 removed in NumPy 2.5.0.
 
 1.2.3 Pass a compiler name
 ^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 .. code-block:: bash
 
-   FC="gfortran" python -m numpy.f2py -c fib.f90 -m fib
+   FC=gfortran python -m numpy.f2py -c fib.f90 -m fib
 
 Native files can also be used.
diff --git a/doc/source/f2py/buildtools/index.rst b/doc/source/f2py/buildtools/index.rst
index 946f82eb57dd..671fd5b6d2cf 100644
--- a/doc/source/f2py/buildtools/index.rst
+++ b/doc/source/f2py/buildtools/index.rst
@@ -11,7 +11,7 @@ with ``f2py``.
 
    The default build system for ``f2py`` has traditionally been through the
    enhanced ``numpy.distutils`` module. This module is based on ``distutils``
-   which was removed in ``NumPy2.5`` in **June 2026**. Like the rest of
+   which was removed in ``NumPy 2.5.0`` in **June 2026**. Like the rest of
   NumPy and SciPy, ``f2py`` uses ``meson`` now, see
   :ref:`distutils-status-migration` for some more details.
 
diff --git a/doc/source/reference/distutils_status_migration.rst b/doc/source/reference/distutils_status_migration.rst
index eff13439cf52..e4ca4fedcf81 100644
--- a/doc/source/reference/distutils_status_migration.rst
+++ b/doc/source/reference/distutils_status_migration.rst
@@ -88,17 +88,6 @@ E.g.,::
 
 For more details, see the
 `setuptools documentation `__
 
-
-.. _numpy-setuptools-interaction:
-
-Versioning ``setuptools``
-------------------------------------------------------
-
-It is recommended to put an upper bound on your ``setuptools``
-build requirement in ``pyproject.toml`` to avoid future breakage - see
-:ref:`for-downstream-package-authors`.
-
-
 .. _CMake: https://cmake.org/
 .. _Meson: https://mesonbuild.com/
 .. 
_meson-python: https://meson-python.readthedocs.io diff --git a/numpy/__init__.py b/numpy/__init__.py index 42a2e8896d68..5012decc43ab 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -744,19 +744,12 @@ def __getattr__(attr): elif attr == "char": import numpy.char as char return char - elif attr == "array_api": - raise AttributeError("`numpy.array_api` is not available from " - "numpy 2.0 onwards", name=None) elif attr == "core": import numpy.core as core return core elif attr == "strings": import numpy.strings as strings return strings - elif attr == "distutils": - raise AttributeError("`numpy.distutils` is not available from " - "numpy 2.5 onwards", name=None) - if attr in __future_scalars__: # And future warnings for those that will change, but also give # the AttributeError diff --git a/numpy/f2py/diagnose.py b/numpy/f2py/diagnose.py index 865cf4a3e6cd..3e2c53b0ec1d 100644 --- a/numpy/f2py/diagnose.py +++ b/numpy/f2py/diagnose.py @@ -23,10 +23,10 @@ def run(): try: import numpy - has_newnumpy = 1 + has_numpy = 1 except ImportError as e: - print('Failed to import new numpy:', e) - has_newnumpy = 0 + print('Failed to import numpy:', e) + has_numpy = 0 try: from numpy.f2py import f2py2e @@ -35,16 +35,9 @@ def run(): print('Failed to import f2py2e:', e) has_f2py2e = 0 - try: - import numpy_distutils - has_numpy_distutils = 1 - except ImportError as e: - print('Failed to import numpy_distutils:', e) - has_numpy_distutils = 0 - - if has_newnumpy: + if has_numpy: try: - print(f'Found new numpy version {numpy.__version__!r} in {numpy.__file__}') + print(f'Found numpy version {numpy.__version__!r} in {numpy.__file__}') except Exception as msg: print('error:', msg) print('------') @@ -57,72 +50,6 @@ def run(): print('error:', msg) print('------') - if has_numpy_distutils: - try: - print('Found numpy_distutils version %r in %r' % ( - numpy_distutils.numpy_distutils_version.numpy_distutils_version, - numpy_distutils.__file__)) - print('------') - except Exception as msg: - print('error:', msg) - print('------') - try: - if has_numpy_distutils == 1: - print( - 'Importing numpy_distutils.command.build_flib ...', end=' ') - import numpy_distutils.command.build_flib as build_flib - print('ok') - print('------') - try: - print( - 'Checking availability of supported Fortran compilers:') - for compiler_class in build_flib.all_compilers: - compiler_class(verbose=1).is_available() - print('------') - except Exception as msg: - print('error:', msg) - print('------') - except Exception as msg: - print( - 'error:', msg, '(ignore it, build_flib is obsolete for numpy.distutils 0.2.2 and up)') - print('------') - try: - print('Importing numpy_distutils.fcompiler ...', end=' ') - import numpy_distutils.fcompiler as fcompiler - print('ok') - print('------') - try: - print('Checking availability of supported Fortran compilers:') - fcompiler.show_fcompilers() - print('------') - except Exception as msg: - print('error:', msg) - print('------') - except Exception as msg: - print('error:', msg) - print('------') - try: - try: - print( - 'Importing numpy_distutils.command.cpuinfo ...', end=' ') - from numpy_distutils.command.cpuinfo import cpuinfo - print('ok') - print('------') - except Exception as msg: - print('error:', msg, '(ignore it)') - print('Importing numpy_distutils.cpuinfo ...', end=' ') - from numpy_distutils.cpuinfo import cpuinfo - print('ok') - print('------') - cpu = cpuinfo() - print('CPU information:', end=' ') - for name in dir(cpuinfo): - if name[0] == '_' and name[1] != '_' and 
getattr(cpu, name[1:])(): - print(name[1:], end=' ') - print('------') - except Exception as msg: - print('error:', msg) - print('------') os.chdir(_path) diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index 2f73b1924f6d..eb5a39e088ff 100644 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -116,10 +116,6 @@ --include-paths ::... Search include files from the given directories. - --help-link [..] List system resources found by system_info.py. See also - --link- switch below. [..] is optional list - of resources names. E.g. try 'f2py --help-link lapack_opt'. - --f2cmap Load Fortran-to-Python KIND specification from the given file. Default: .f2py_f2cmap in current directory. @@ -753,11 +749,6 @@ def validate_modulename(pyf_files, modulename='untitled'): return modulename def main(): - if '--help-link' in sys.argv[1:]: - sys.argv.remove('--help-link') - outmess("Use --dep for meson builds\n") - return - if '-c' in sys.argv[1:]: run_compile() else: diff --git a/numpy/f2py/tests/test_f2py2e.py b/numpy/f2py/tests/test_f2py2e.py index 65d33dded209..bd7064fd348a 100644 --- a/numpy/f2py/tests/test_f2py2e.py +++ b/numpy/f2py/tests/test_f2py2e.py @@ -232,6 +232,37 @@ def test_untitled_cli(capfd, hello_world_f90, monkeypatch): out, _ = capfd.readouterr() assert "untitledmodule.c" in out +def test_no_distutils_backend(capfd, hello_world_f90, monkeypatch): + """Check that distutils backend and related options fail + CLI :: --fcompiler --help-link --backend distutils + """ + MNAME = "hi" + foutl = get_io_paths(hello_world_f90, mname=MNAME) + ipath = foutl.f90inp + monkeypatch.setattr( + sys, "argv", f"f2py {ipath} -c --fcompiler=gfortran -m {MNAME}".split() + ) + with util.switchdir(ipath.parent): + compiler_check_f2pycli() + out, _ = capfd.readouterr() + assert "--fcompiler cannot be used with meson" in out + + monkeypatch.setattr( + sys, "argv", ["f2py", "--help-link"] + ) + with pytest.raises(SystemExit): + f2pycli() + out, _ = capfd.readouterr() + assert "Unknown option --help-link" in out + + monkeypatch.setattr( + sys, "argv", ["f2py", "--backend", "distutils"] + ) + with pytest.raises(SystemExit): + compiler_check_f2pycli() + f2pycli() + out, _ = capfd.readouterr() + assert "'distutils' backend was removed" in out @pytest.mark.xfail def test_f2py_skip(capfd, retreal_f77, monkeypatch): From 54ab2c25b244f87803b7dbe065056e2b04597bfa Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 1 Dec 2025 07:56:43 +0100 Subject: [PATCH 0944/1018] MAINT: require python >= 3.12 --- .../check-warnings/msvc-allowed-warnings.txt | 9 ++-- .github/workflows/linux.yml | 41 +++++-------------- .github/workflows/linux_blas.yml | 12 +++--- .github/workflows/linux_simd.yml | 14 +++---- .github/workflows/macos.yml | 2 +- .github/workflows/mypy.yml | 4 +- .github/workflows/stubtest.yml | 2 +- .github/workflows/wheels.yml | 2 +- .github/workflows/windows.yml | 4 +- INSTALL.rst | 4 +- doc/source/building/understanding_meson.rst | 4 +- numpy/_core/tests/examples/cython/setup.py | 2 +- numpy/_core/tests/test_arrayprint.py | 2 - pyproject.toml | 2 +- ruff.toml | 3 +- tools/ci/cirrus_arm.yml | 12 +++--- tools/swig/test/testFarray.py | 1 + 17 files changed, 50 insertions(+), 70 deletions(-) diff --git a/.github/check-warnings/msvc-allowed-warnings.txt b/.github/check-warnings/msvc-allowed-warnings.txt index 7d2c149629ec..3f728ae29cd8 100644 --- a/.github/check-warnings/msvc-allowed-warnings.txt +++ b/.github/check-warnings/msvc-allowed-warnings.txt @@ -17,10 +17,10 @@ 
../numpy/linalg/lapack_lite/f2c_s_lapack.c(2865): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data ../numpy/linalg/lapack_lite/f2c_s_lapack.c(2882): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data ../numpy/linalg/lapack_lite/f2c_s_lapack.c(2894): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -numpy/random/_generator.cp311-win_arm64.pyd.p/numpy/random/_generator.pyx.c(26966): warning C4244: 'function': conversion from 'int64_t' to 'double' -numpy/random/_generator.cp311-win_arm64.pyd.p/numpy/random/_generator.pyx.c(39070): warning C4244: 'function': conversion from 'int64_t' to 'double' -numpy/random/_generator.cp311-win32.pyd.p/numpy/random/_generator.pyx.c(26966): warning C4244: 'function': conversion from 'int64_t' to 'double' -numpy/random/_generator.cp311-win32.pyd.p/numpy/random/_generator.pyx.c(39070): warning C4244: 'function': conversion from 'int64_t' to 'double' +numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(26966): warning C4244: 'function': conversion from 'int64_t' to 'double' +numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(39070): warning C4244: 'function': conversion from 'int64_t' to 'double' +numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(26966): warning C4244: 'function': conversion from 'int64_t' to 'double' +numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(39070): warning C4244: 'function': conversion from 'int64_t' to 'double' cl : Command line warning D9025 : overriding '/arch:SSE2' with '/arch:AVX2' cl : Command line warning D9025 : overriding '/arch:SSE2' with '/arch:AVX512' cl : Command line warning D9025 : overriding '/arch:SSE2' with '/arch:AVX2' @@ -28,4 +28,3 @@ D:\a\numpy\numpy\numpy\_core\src\npysort\x86-simd-sort/src/avx512-16bit-qsort.hp D:\a\numpy\numpy\numpy\_core\src\npysort\x86-simd-sort/src/avx512-16bit-qsort.hpp(53): warning C4309: 'argument': truncation of constant value D:\a\numpy\numpy\numpy\_core\src\npysort\x86-simd-sort/src/avx512-16bit-qsort.hpp(65): warning C4309: 'argument': truncation of constant value D:\a\numpy\numpy\numpy\_core\src\npysort\x86-simd-sort/src/avx512-16bit-qsort.hpp(139): warning C4556: - diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index a23f1f7649d8..157f2c2054e6 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -44,7 +44,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install linter requirements run: python -m pip install -r requirements/linter_requirements.txt @@ -56,7 +56,7 @@ jobs: - name: Check Python.h is first file included run: | python tools/check_python_h_first.py - + smoke_test: # To enable this job on a fork, comment out: @@ -66,7 +66,7 @@ jobs: MESON_ARGS: "-Dallow-noblas=true -Dcpu-baseline=none -Dcpu-dispatch=none" strategy: matrix: - version: ["3.11", "3.12", "3.13", "3.14", "3.14t"] + version: ["3.12", "3.13", "3.14", "3.14t"] steps: - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: @@ -78,25 +78,6 @@ jobs: python-version: ${{ matrix.version }} - uses: ./.github/meson_actions - pypy: - needs: [smoke_test] - runs-on: ubuntu-latest - if: github.event_name != 'push' - steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - with: - submodules: recursive - fetch-tags: 
true - persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 - with: - python-version: 'pypy3.11-v7.3.20' - - name: Setup using scipy-openblas - run: | - python -m pip install -r requirements/ci_requirements.txt - spin config-openblas --with-scipy-openblas=32 - - uses: ./.github/meson_actions - debug: needs: [smoke_test] runs-on: ubuntu-24.04 @@ -139,7 +120,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install build and test dependencies from PyPI run: | pip install -r requirements/build_requirements.txt @@ -224,7 +205,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install build and benchmarking dependencies run: | sudo apt-get update @@ -263,7 +244,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install gfortran and setup OpenBLAS (sdist build) run: | set -xe @@ -312,7 +293,7 @@ jobs: - name: Set up Python uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install build and test dependencies from PyPI run: | python -m pip install -r requirements/build_requirements.txt @@ -341,7 +322,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: repository: numpy/numpy-release @@ -376,8 +357,8 @@ jobs: python tools/check_installed_files.py $(find ./build-install -path '*/site-packages/numpy') --no-tests - Linux_Python_311_32bit_full: - name: i686, cp311, full + Linux_Python_312_32bit_full: + name: i686, cp312, full needs: [smoke_test] runs-on: ubuntu-latest container: @@ -403,7 +384,7 @@ jobs: - name: build run: | - python3.11 -m venv venv + python3.12 -m venv venv source venv/bin/activate pip install --upgrade pip pip install -r requirements/ci32_requirements.txt diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index 77cb9aaf91fe..6d0ab1a93777 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -76,7 +76,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | @@ -199,7 +199,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | @@ -227,7 +227,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | @@ -290,7 +290,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | @@ -354,7 +354,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 
with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | @@ -391,7 +391,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index dcc483eaf6df..a94eca64b59f 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -65,7 +65,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - uses: ./.github/meson_actions name: Build/Test @@ -83,7 +83,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install GCC9/10 run: | @@ -129,7 +129,7 @@ jobs: fetch-tags: true - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | python -m pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt @@ -157,12 +157,12 @@ jobs: - [ "native", "-Dallow-noblas=true -Dcpu-baseline=native -Dcpu-dispatch=none", - "3.11" + "3.12" ] - [ "without avx512", "-Dallow-noblas=true -Dcpu-dispatch=max-x86_v4", - "3.11" + "3.12" ] env: @@ -192,7 +192,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install Intel SDE run: | @@ -242,7 +242,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install Intel SDE run: | diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 2787a7c81d3a..c13df7226a1c 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -118,7 +118,7 @@ jobs: build_runner: - [ macos-15-intel, "macos_x86_64" ] - [ macos-14, "macos_arm64" ] - version: ["3.11", "3.14t"] + version: ["3.12", "3.14t"] steps: - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index e3c6e7beba13..d8a1e3dbee97 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -53,8 +53,8 @@ jobs: matrix: os_python: - [macos-latest, '3.14'] - - [ubuntu-latest, '3.12'] - - [windows-latest, '3.11'] + - [ubuntu-latest, '3.13'] + - [windows-latest, '3.12'] steps: - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml index 731d9091ec69..5d2ca7385ed5 100644 --- a/.github/workflows/stubtest.yml +++ b/.github/workflows/stubtest.yml @@ -34,7 +34,7 @@ jobs: matrix: # TODO: consider including macos and windows os: [ubuntu] - py: ["3.11", "3.14"] + py: ["3.12", "3.14"] steps: - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 24061ae7b014..86d4c35ca8a4 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -42,7 +42,7 @@ jobs: - [macos-14, macosx_arm64, openblas] - [windows-2022, win_amd64, ""] - [windows-11-arm, win_arm64, ""] - python: ["cp311"] 
+ python: ["cp312"] env: IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} # used in cibw_test_command.sh diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index a0604fa87aa9..3b6f7dd6ff73 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -92,7 +92,7 @@ jobs: - name: Setup Python uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' architecture: ${{ matrix.architecture }} - name: Setup MSVC @@ -133,7 +133,7 @@ jobs: pyver: '3.14' - BLAS: 32 TEST_MODE: fast - pyver: '3.11' + pyver: '3.12' # To enable this job on a fork, comment out: if: github.repository == 'numpy/numpy' diff --git a/INSTALL.rst b/INSTALL.rst index 6e9d2cd242f5..72caf98380b7 100644 --- a/INSTALL.rst +++ b/INSTALL.rst @@ -14,7 +14,7 @@ Prerequisites Building NumPy requires the following installed software: -1) Python__ 3.11.x or newer. +1) Python__ 3.12.x or newer. Please note that the Python development headers also need to be installed, e.g., on Debian/Ubuntu one needs to install both `python3` and @@ -82,7 +82,7 @@ Choosing compilers NumPy needs C and C++ compilers, and for development versions also needs Cython. A Fortran compiler isn't needed to build NumPy itself; the ``numpy.f2py`` tests will be skipped when running the test suite if no Fortran -compiler is available. +compiler is available. For more options including selecting compilers, setting custom compiler flags and controlling parallelism, see diff --git a/doc/source/building/understanding_meson.rst b/doc/source/building/understanding_meson.rst index b990ff283271..0c29302c9abb 100644 --- a/doc/source/building/understanding_meson.rst +++ b/doc/source/building/understanding_meson.rst @@ -87,11 +87,11 @@ that's just an arbitrary name we picked here):: meson install -C build -It will then install to ``build-install/lib/python3.11/site-packages/numpy``, +It will then install to ``build-install/lib/python3.12/site-packages/numpy``, which is not on your Python path, so to add it do (*again, this is for learning purposes, using ``PYTHONPATH`` explicitly is typically not the best idea*):: - export PYTHONPATH=$PWD/build-install/lib/python3.11/site-packages/ + export PYTHONPATH=$PWD/build-install/lib/python3.12/site-packages/ Now we should be able to import ``numpy`` and run the tests. 
Remembering that we need to move out of the root of the repo to ensure we pick up the package diff --git a/numpy/_core/tests/examples/cython/setup.py b/numpy/_core/tests/examples/cython/setup.py index eb57477fc2a1..ba0639ebcf1c 100644 --- a/numpy/_core/tests/examples/cython/setup.py +++ b/numpy/_core/tests/examples/cython/setup.py @@ -4,10 +4,10 @@ """ import os -from distutils.core import setup import Cython from Cython.Build import cythonize +from distutils.core import setup from setuptools.extension import Extension import numpy as np diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py index 06d1306dd408..3777981724dc 100644 --- a/numpy/_core/tests/test_arrayprint.py +++ b/numpy/_core/tests/test_arrayprint.py @@ -1267,8 +1267,6 @@ def test_scalar_void_float_str(): assert str(scalar) == "(1.0, 2.0)" @pytest.mark.skipif(IS_WASM, reason="wasm doesn't support asyncio") -@pytest.mark.skipif(sys.version_info < (3, 11), - reason="asyncio.barrier was added in Python 3.11") def test_printoptions_asyncio_safe(): asyncio = pytest.importorskip("asyncio") diff --git a/pyproject.toml b/pyproject.toml index e7830c5248a1..a459ea2cc2ee 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,7 +13,7 @@ authors = [{name = "Travis E. Oliphant et al."}] maintainers = [ {name = "NumPy Developers", email="numpy-discussion@python.org"}, ] -requires-python = ">=3.11" +requires-python = ">=3.12" readme = "README.md" classifiers = [ 'Development Status :: 5 - Production/Stable', diff --git a/ruff.toml b/ruff.toml index 4f87ca65c1a1..8e0081e34622 100644 --- a/ruff.toml +++ b/ruff.toml @@ -36,7 +36,8 @@ extend-select = [ "W", # pycodestyle/warning "PGH", # pygrep-hooks "PLE", # pylint/error - "UP", # pyupgrade + # TODO: re-enable after merging https://github.com/numpy/numpy/pull/30319 + # "UP", # pyupgrade ] ignore = [ # flake8-bugbear diff --git a/tools/ci/cirrus_arm.yml b/tools/ci/cirrus_arm.yml index 81a342f20e4e..026c467f2e09 100644 --- a/tools/ci/cirrus_arm.yml +++ b/tools/ci/cirrus_arm.yml @@ -9,7 +9,7 @@ modified_clone: &MODIFIED_CLONE # it's a PR so clone the main branch then merge the changes from the PR git clone https://x-access-token:${CIRRUS_REPO_CLONE_TOKEN}@github.com/${CIRRUS_REPO_FULL_NAME}.git $CIRRUS_WORKING_DIR git fetch origin pull/$CIRRUS_PR/head:pull/$CIRRUS_PR - + # CIRRUS_BASE_BRANCH will probably be `main` for the majority of the time # However, if you do a PR against a maintenance branch we will want to # merge the PR into the maintenance branch, not main @@ -45,22 +45,22 @@ freebsd_test_task: prepare_env_script: | # Create a venv (the `source` command needs bash, not the default sh shell) chsh -s /usr/local/bin/bash - python3.11 -m venv .venv + python3.12 -m venv .venv source .venv/bin/activate # Minimal build and test requirements - python3.11 -m pip install -U pip - python3.11 -m pip install meson-python Cython pytest hypothesis + python3.12 -m pip install -U pip + python3.12 -m pip install meson-python Cython pytest hypothesis build_script: | chsh -s /usr/local/bin/bash source .venv/bin/activate - python3.11 -m pip install . --no-build-isolation -v -Csetup-args="-Dallow-noblas=false" + python3.12 -m pip install . 
--no-build-isolation -v -Csetup-args="-Dallow-noblas=false" test_script: | chsh -s /usr/local/bin/bash source .venv/bin/activate cd tools - python3.11 -m pytest --pyargs numpy -m "not slow" + python3.12 -m pytest --pyargs numpy -m "not slow" ccache -s on_failure: diff --git a/tools/swig/test/testFarray.py b/tools/swig/test/testFarray.py index 75bf99c054cb..3798029dbe4b 100755 --- a/tools/swig/test/testFarray.py +++ b/tools/swig/test/testFarray.py @@ -2,6 +2,7 @@ import os import sys import unittest + from distutils.util import get_platform import numpy as np From 2582fb09ba0ec0f92efb0b138a48bb9a8643bba8 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 1 Dec 2025 08:24:59 +0100 Subject: [PATCH 0945/1018] MAINT: update allowed MSVC warnings --- .../check-warnings/msvc-allowed-warnings.txt | 33 +++++++------------ 1 file changed, 12 insertions(+), 21 deletions(-) diff --git a/.github/check-warnings/msvc-allowed-warnings.txt b/.github/check-warnings/msvc-allowed-warnings.txt index 3f728ae29cd8..fa5597b18216 100644 --- a/.github/check-warnings/msvc-allowed-warnings.txt +++ b/.github/check-warnings/msvc-allowed-warnings.txt @@ -1,30 +1,21 @@ -../numpy/linalg/lapack_lite/f2c.c(194): warning C4244: 'return': conversion from 'double' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c_c_lapack.c(230): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c_c_lapack.c(250): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/_core/src/common/npy_cpu_features.c(451): warning C4098: 'npy__cpu_cpuid': 'void' function returning a value ../numpy/linalg/lapack_lite/f2c_c_lapack.c(1530): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c_s_lapack.c(1625): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c_s_lapack.c(1645): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c_s_lapack.c(2865): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c_s_lapack.c(2882): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c_s_lapack.c(2894): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c_config.c(1368): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c.c(194): warning C4244: 'return': conversion from 'double' to 'integer', possible loss of data ../numpy/linalg/lapack_lite/f2c_c_lapack.c(230): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data ../numpy/linalg/lapack_lite/f2c_c_lapack.c(250): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c_c_lapack.c(1530): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_config.c(1368): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data ../numpy/linalg/lapack_lite/f2c_s_lapack.c(1625): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data ../numpy/linalg/lapack_lite/f2c_s_lapack.c(1645): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data ../numpy/linalg/lapack_lite/f2c_s_lapack.c(2865): warning C4244: '=': conversion from 
'real' to 'integer', possible loss of data ../numpy/linalg/lapack_lite/f2c_s_lapack.c(2882): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data ../numpy/linalg/lapack_lite/f2c_s_lapack.c(2894): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(26966): warning C4244: 'function': conversion from 'int64_t' to 'double' -numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(39070): warning C4244: 'function': conversion from 'int64_t' to 'double' -numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(26966): warning C4244: 'function': conversion from 'int64_t' to 'double' -numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(39070): warning C4244: 'function': conversion from 'int64_t' to 'double' -cl : Command line warning D9025 : overriding '/arch:SSE2' with '/arch:AVX2' -cl : Command line warning D9025 : overriding '/arch:SSE2' with '/arch:AVX512' +../numpy/random/src/mt19937/mt19937.c(88): warning C4146: unary minus operator applied to unsigned type, result still unsigned +../numpy/random/src/mt19937/mt19937.c(92): warning C4146: unary minus operator applied to unsigned type, result still unsigned +../numpy/random/src/mt19937/mt19937.c(95): warning C4146: unary minus operator applied to unsigned type, result still unsigned +..\numpy\random\src/pcg64/pcg64.h(342): warning C4146: unary minus operator applied to unsigned type, result still unsigned +C:\a\numpy\numpy\numpy\random\src\pcg64\pcg64.h(342): warning C4146: unary minus operator applied to unsigned type, result still unsigned +D:\a\numpy\numpy\numpy\random\src\pcg64\pcg64.h(342): warning C4146: unary minus operator applied to unsigned type, result still unsigned cl : Command line warning D9025 : overriding '/arch:SSE2' with '/arch:AVX2' -D:\a\numpy\numpy\numpy\_core\src\npysort\x86-simd-sort/src/avx512-16bit-qsort.hpp(52): warning C4309: 'argument': truncation of constant value -D:\a\numpy\numpy\numpy\_core\src\npysort\x86-simd-sort/src/avx512-16bit-qsort.hpp(53): warning C4309: 'argument': truncation of constant value -D:\a\numpy\numpy\numpy\_core\src\npysort\x86-simd-sort/src/avx512-16bit-qsort.hpp(65): warning C4309: 'argument': truncation of constant value -D:\a\numpy\numpy\numpy\_core\src\npysort\x86-simd-sort/src/avx512-16bit-qsort.hpp(139): warning C4556: +numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(26290): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data +numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(38314): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data +numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(26290): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data +numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(38314): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data From f7a42fb820da4b5c51a3c4d5394cea305a5cfa68 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 1 Dec 2025 08:30:35 +0100 Subject: [PATCH 0946/1018] CI: install the correct python version on freebsd --- tools/ci/cirrus_arm.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/ci/cirrus_arm.yml b/tools/ci/cirrus_arm.yml index 026c467f2e09..119575b2a7be 100644 --- a/tools/ci/cirrus_arm.yml +++ 
b/tools/ci/cirrus_arm.yml @@ -32,7 +32,7 @@ freebsd_test_task: install_devtools_script: | pkg install -y git bash ninja ccache blas cblas lapack pkgconf - pkg install -y python311 + pkg install -y python312 <<: *MODIFIED_CLONE From e88edc879bc020e2d99e4a2cce7dadc0a3047a16 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Dec 2025 17:39:03 +0000 Subject: [PATCH 0947/1018] MAINT: Bump actions/checkout from 6.0.0 to 6.0.1 Bumps [actions/checkout](https://github.com/actions/checkout) from 6.0.0 to 6.0.1. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/1af3b93b6815bc44a9784bd300feb67ff0d1eeb3...8e8c483db84b4bee98b60c0593521ed34d9990e8) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: 6.0.1 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 2 +- .github/workflows/compiler_sanitizers.yml | 4 ++-- .github/workflows/cygwin.yml | 2 +- .github/workflows/dependency-review.yml | 2 +- .github/workflows/emscripten.yml | 2 +- .github/workflows/linux-ppc64le.yml | 2 +- .github/workflows/linux.yml | 22 +++++++++++----------- .github/workflows/linux_blas.yml | 18 +++++++++--------- .github/workflows/linux_qemu.yml | 4 ++-- .github/workflows/linux_simd.yml | 12 ++++++------ .github/workflows/macos.yml | 4 ++-- .github/workflows/mypy.yml | 2 +- .github/workflows/mypy_primer.yml | 2 +- .github/workflows/scorecards.yml | 2 +- .github/workflows/stubtest.yml | 2 +- .github/workflows/wheels.yml | 2 +- .github/workflows/windows.yml | 6 +++--- 17 files changed, 45 insertions(+), 45 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index cbeb3ca07349..2b124d115710 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -41,7 +41,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: persist-credentials: false diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index bb185282b083..8ad89759c906 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -30,7 +30,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: macos-latest steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -86,7 +86,7 @@ jobs: options: --shm-size=2g # increase memory for large matrix ops steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: Trust working directory and initialize submodules run: | git config --global --add safe.directory /__w/numpy/numpy diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index 03dc0b41f987..e8f7083336e6 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -22,7 +22,7 @@ jobs: # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: 
actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 28fe31642faa..a4bec8af6d82 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - name: 'Checkout Repository' - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: persist-credentials: false - name: 'Dependency Review' diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 2b7eb24f9812..099f5f67336b 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -29,7 +29,7 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout NumPy - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux-ppc64le.yml b/.github/workflows/linux-ppc64le.yml index 3817f3ebc0d8..cff7fb1d5b89 100644 --- a/.github/workflows/linux-ppc64le.yml +++ b/.github/workflows/linux-ppc64le.yml @@ -27,7 +27,7 @@ jobs: runs-on: ubuntu-24.04-ppc64le-p10 name: "Native PPC64LE" steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 157f2c2054e6..516f0538572b 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -37,7 +37,7 @@ jobs: runs-on: ubuntu-latest continue-on-error: true steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-depth: 0 @@ -68,7 +68,7 @@ jobs: matrix: version: ["3.12", "3.13", "3.14", "3.14t"] steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -83,7 +83,7 @@ jobs: runs-on: ubuntu-24.04 if: github.event_name != 'push' steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -113,7 +113,7 @@ jobs: needs: [smoke_test] runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -153,7 +153,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: ubuntu-24.04-arm steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -198,7 +198,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -237,7 +237,7 @@ jobs: runs-on: ubuntu-latest if: 
github.event_name != 'push' steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -277,13 +277,13 @@ jobs: if: github.event_name != 'push' steps: - name: Checkout NumPy - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true persist-credentials: false - name: Checkout array-api-tests - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: repository: data-apis/array-api-tests ref: '3c273cd34d51c64ed893737306d36adab23a94a1' # v2025.05.23 @@ -315,7 +315,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -323,7 +323,7 @@ jobs: - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.12' - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: repository: numpy/numpy-release path: numpy-release diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index 6d0ab1a93777..c4e2d55330c4 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -69,7 +69,7 @@ jobs: USE_NIGHTLY_OPENBLAS: ${{ matrix.USE_NIGHTLY_OPENBLAS }} name: "Test Linux (${{ matrix.USE_NIGHTLY_OPENBLAS && 'nightly' || 'stable' }} OpenBLAS)" steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -127,7 +127,7 @@ jobs: run: | dnf install git gcc-gfortran g++ python3-devel openblas-devel -y - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -162,7 +162,7 @@ jobs: run: | dnf install git gcc-gfortran g++ python3-devel flexiblas-devel -y - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -192,7 +192,7 @@ jobs: runs-on: ubuntu-latest name: "OpenBLAS with CMake" steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -220,7 +220,7 @@ jobs: runs-on: ubuntu-latest name: "Debian libblas/liblapack" steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -256,7 +256,7 @@ jobs: # If it is needed in the future, use install name `pkgconf-pkg-config` zypper install -y git gcc-c++ python3-pip python3-devel blas cblas lapack - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: 
recursive
           fetch-tags: true
@@ -283,7 +283,7 @@ jobs:
     runs-on: ubuntu-latest
     name: "MKL (LP64, ILP64, SDL)"
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           submodules: recursive
           fetch-tags: true
@@ -347,7 +347,7 @@ jobs:
     runs-on: ubuntu-latest
     name: "BLIS"
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           submodules: recursive
           fetch-tags: true
@@ -384,7 +384,7 @@ jobs:
     runs-on: ubuntu-latest
     name: "ATLAS"
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           submodules: recursive
           fetch-tags: true
diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml
index e4688e50e5bc..268e1d916d1c 100644
--- a/.github/workflows/linux_qemu.yml
+++ b/.github/workflows/linux_qemu.yml
@@ -95,7 +95,7 @@ jobs:
     name: "${{ matrix.BUILD_PROP[0] }}"

     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           submodules: recursive
           fetch-tags: true
@@ -202,7 +202,7 @@ jobs:
     name: "${{ matrix.BUILD_PROP[0] }}"

     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           submodules: recursive
           fetch-tags: true
diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml
index a94eca64b59f..92e46c8053b8 100644
--- a/.github/workflows/linux_simd.yml
+++ b/.github/workflows/linux_simd.yml
@@ -58,7 +58,7 @@ jobs:
     env:
       MESON_ARGS: "-Dallow-noblas=true -Dcpu-dispatch=none"
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           submodules: recursive
           fetch-tags: true
@@ -76,7 +76,7 @@ jobs:
     env:
       MESON_ARGS: "-Dallow-noblas=true"
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           submodules: recursive
           fetch-tags: true
@@ -123,7 +123,7 @@ jobs:
         args: "-Dallow-noblas=true -Dcpu-baseline=native -Dcpu-dispatch=none"
     name: "ARM64 SIMD - ${{ matrix.config.name }}"
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           submodules: recursive
           fetch-tags: true
@@ -170,7 +170,7 @@ jobs:
     name: "${{ matrix.BUILD_PROP[0] }}"

     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           submodules: recursive
           fetch-tags: true
@@ -185,7 +185,7 @@ jobs:
     needs: [baseline_only]
     runs-on: ubuntu-24.04
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           submodules: recursive
           fetch-tags: true
@@ -235,7 +235,7 @@ jobs:
     needs: [baseline_only]
     runs-on: ubuntu-24.04
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           submodules: recursive
           fetch-tags: true
diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml
index c13df7226a1c..f2af68370b99 100644
--- a/.github/workflows/macos.yml
+++ b/.github/workflows/macos.yml
@@ -32,7 +32,7 @@ jobs:
         python-version: ["3.12"]

     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           submodules: recursive
           fetch-tags: true
@@ -121,7 +121,7 @@ jobs:
         version: ["3.12", "3.14t"]

     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           submodules: recursive
           fetch-tags: true
diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml
index d8a1e3dbee97..1deb5ab82815 100644
--- a/.github/workflows/mypy.yml
+++ b/.github/workflows/mypy.yml
@@ -56,7 +56,7 @@ jobs:
           - [ubuntu-latest, '3.13']
           - [windows-latest, '3.12']
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           submodules: recursive
           fetch-tags: true
diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml
index 03b471b10c63..040a154d2895 100644
--- a/.github/workflows/mypy_primer.yml
+++ b/.github/workflows/mypy_primer.yml
@@ -28,7 +28,7 @@ jobs:
         shard-index: [0] # e.g. change this to [0, 1, 2] and --num-shards below to 3
       fail-fast: false
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           path: numpy_to_test
           fetch-depth: 0
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index 4f9bf10985a5..9303e511dde8 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -25,7 +25,7 @@ jobs:
     steps:
       - name: "Checkout code"
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v3.1.0
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           persist-credentials: false
diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml
index 5d2ca7385ed5..dcac63be8a0a 100644
--- a/.github/workflows/stubtest.yml
+++ b/.github/workflows/stubtest.yml
@@ -37,7 +37,7 @@ jobs:
         py: ["3.12", "3.14"]

     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           submodules: recursive
           fetch-tags: true
diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml
index 86d4c35ca8a4..436e929fe636 100644
--- a/.github/workflows/wheels.yml
+++ b/.github/workflows/wheels.yml
@@ -48,7 +48,7 @@ jobs:
       IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} # used in cibw_test_command.sh
     steps:
       - name: Checkout numpy
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           submodules: true
           persist-credentials: false
diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml
index 3b6f7dd6ff73..3ef295cc8f5b 100644
--- a/.github/workflows/windows.yml
+++ b/.github/workflows/windows.yml
@@ -25,7 +25,7 @@ jobs:
     if: github.repository == 'numpy/numpy'
     steps:
       - name: Checkout
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           submodules: recursive
           fetch-tags: true
@@ -83,7 +83,7 @@ jobs:
     if: github.repository == 'numpy/numpy'
     steps:
       - name: Checkout
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           submodules: recursive
           fetch-tags: true
@@ -139,7 +139,7 @@ jobs:
     if: github.repository == 'numpy/numpy'
     steps:
       - name: Checkout
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           submodules: recursive
           fetch-tags: true

From db92fe804e4b567200de7141bb171604f6883d57 Mon Sep 17 00:00:00 2001
From: jorenham
Date: Tue, 2 Dec 2025 23:24:11 +0100
Subject: [PATCH 0948/1018] MAINT: bump ``ruff`` to ``0.14.7``

---
 environment.yml                      | 2 +-
 requirements/linter_requirements.txt | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/environment.yml b/environment.yml
index c5ee0c381bb3..e27172e8b7b2 100644
--- a/environment.yml
+++ b/environment.yml
@@ -46,7 +46,7 @@ dependencies:
   - breathe>4.33.0
   # For linting
   - cython-lint
-  - ruff=0.14.0
+  - ruff=0.14.7
   - gitpython
   # Used in some tests
   - cffi
diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt
index da6bac6f7b84..73eafbaf52a1 100644
--- a/requirements/linter_requirements.txt
+++ b/requirements/linter_requirements.txt
@@ -1,4 +1,4 @@
 # keep in sync with `environment.yml`
 cython-lint
-ruff==0.14.0
+ruff==0.14.7
 GitPython>=3.1.30
From 95e36ee856d93a39ab76e675aa5b8a3c78be6447 Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Tue, 2 Dec 2025 15:48:42 -0700
Subject: [PATCH 0949/1018] BUG: fix free-threaded races in RandomState

---
 numpy/_core/tests/test_multithreading.py | 13 ++++++++++++-
 numpy/random/mtrand.pyx                  | 15 ++++++++++-----
 2 files changed, 22 insertions(+), 6 deletions(-)

diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py
index 9b10974839a4..44b2c34cd68b 100644
--- a/numpy/_core/tests/test_multithreading.py
+++ b/numpy/_core/tests/test_multithreading.py
@@ -16,7 +16,7 @@
     reason="tests in this module are already explicitly multi-threaded"
 )

-def test_parallel_randomstate_creation():
+def test_parallel_randomstate():
     # if the coercion cache is enabled and not thread-safe, creating
     # RandomState instances simultaneously leads to a data race
     def func(seed):
@@ -24,6 +24,17 @@ def func(seed):

     run_threaded(func, 500, pass_count=True)

+    # seeding and setting state shouldn't race with generating RNG samples
+    rng = np.random.RandomState()
+
+    def func(seed):
+        base_rng = np.random.RandomState(seed)
+        state = base_rng.get_state()
+        rng.seed(seed)
+        rng.random()
+        rng.set_state(state)
+
+    run_threaded(func, 8, pass_count=True)

 def test_parallel_ufunc_execution():
     # if the loop data cache or dispatch cache are not thread-safe
diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx
index 8e7f437641ca..213d227cdeeb 100644
--- a/numpy/random/mtrand.pyx
+++ b/numpy/random/mtrand.pyx
@@ -251,8 +251,9 @@ cdef class RandomState:
         """
         if not isinstance(self._bit_generator, _MT19937):
             raise TypeError('can only re-seed a MT19937 BitGenerator')
-        self._bit_generator._legacy_seeding(seed)
-        self._reset_gauss()
+        with cython.critical_section(self):
+            self._bit_generator._legacy_seeding(seed)
+            self._reset_gauss()

     def get_state(self, legacy=True):
         """
@@ -381,9 +382,13 @@ cdef class RandomState:
             st['has_gauss'] = state[3]
             st['gauss'] = state[4]

-        self._aug_state.gauss = st.get('gauss', 0.0)
-        self._aug_state.has_gauss = st.get('has_gauss', 0)
-        self._bit_generator.state = st
+        cdef double gauss = st.get('gauss', 0.0)
+        cdef int has_gauss = st.get('has_gauss', 0)
+
+        with cython.critical_section(self):
+            self._aug_state.gauss = gauss
+            self._aug_state.has_gauss = has_gauss
+            self._bit_generator.state = st

     def random_sample(self, size=None):
         """
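Note on PATCH 0949: the new test drives seed(), sampling, and set_state()
from several threads against one shared RandomState. A minimal standalone
sketch of the same access pattern (illustrative only: plain threading
instead of numpy's run_threaded test helper; not part of the patch):

    import threading
    import numpy as np

    shared = np.random.RandomState()

    def worker(seed):
        # each thread mixes writes (seed/set_state) with reads (sampling)
        local = np.random.RandomState(seed)
        state = local.get_state()
        shared.seed(seed)        # rewrites bit-generator state + gauss cache
        shared.random_sample()   # samples concurrently with other threads
        shared.set_state(state)  # rewrites the state again

    threads = [threading.Thread(target=worker, args=(i,)) for i in range(8)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

Without the critical sections added above, those state writes can interleave
with in-progress sampling on free-threaded builds.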
From 6c14af1a934723d6a8c624f1cbd7481d8928e51b Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Tue, 2 Dec 2025 16:07:11 -0700
Subject: [PATCH 0950/1018] MAINT: bump minimum required Cython version

---
 INSTALL.rst                                        | 2 +-
 meson.build                                        | 4 ++--
 numpy/_core/tests/examples/cython/meson.build      | 9 +++------
 numpy/_core/tests/examples/limited_api/meson.build | 4 ++--
 numpy/_core/tests/test_cython.py                   | 2 +-
 numpy/_core/tests/test_limited_api.py              | 2 +-
 numpy/random/_examples/cython/meson.build          | 9 +++------
 numpy/random/tests/test_extending.py               | 2 +-
 pyproject.toml                                     | 2 +-
 requirements/build_requirements.txt                | 2 +-
 10 files changed, 16 insertions(+), 22 deletions(-)

diff --git a/INSTALL.rst b/INSTALL.rst
index 6e9d2cd242f5..214e2785eb33 100644
--- a/INSTALL.rst
+++ b/INSTALL.rst
@@ -20,7 +20,7 @@ Building NumPy requires the following installed software:
    e.g., on Debian/Ubuntu one needs to install both `python3` and
    `python3-dev`. On Windows and macOS this is normally not an issue.

-2) Cython >= 3.0.6
+2) Cython >= 3.1.0

 3) pytest__ (optional)
diff --git a/meson.build b/meson.build
index 2cb7ce987ad5..9d398dbd180b 100644
--- a/meson.build
+++ b/meson.build
@@ -30,8 +30,8 @@ elif cc.get_id() == 'msvc'
           'when building with MSVC')
   endif
 endif
-if not cy.version().version_compare('>=3.0.6')
-  error('NumPy requires Cython >= 3.0.6')
+if not cy.version().version_compare('>=3.1.0')
+  error('NumPy requires Cython >= 3.1.0')
 endif

 py = import('python').find_installation(pure: false)
diff --git a/numpy/_core/tests/examples/cython/meson.build b/numpy/_core/tests/examples/cython/meson.build
index 8362c339ae73..9afe90103665 100644
--- a/numpy/_core/tests/examples/cython/meson.build
+++ b/numpy/_core/tests/examples/cython/meson.build
@@ -6,14 +6,11 @@ cc = meson.get_compiler('c')
 cy = meson.get_compiler('cython')

 # Keep synced with pyproject.toml
-if not cy.version().version_compare('>=3.0.6')
-  error('tests requires Cython >= 3.0.6')
+if not cy.version().version_compare('>=3.1.0')
+  error('tests requires Cython >= 3.1.0')
 endif

-cython_args = []
-if cy.version().version_compare('>=3.1.0')
-  cython_args += ['-Xfreethreading_compatible=True']
-endif
+cython_args = ['-Xfreethreading_compatible=True']

 npy_include_path = run_command(py, [
     '-c',
diff --git a/numpy/_core/tests/examples/limited_api/meson.build b/numpy/_core/tests/examples/limited_api/meson.build
index 65287d8654f5..02496acd1f84 100644
--- a/numpy/_core/tests/examples/limited_api/meson.build
+++ b/numpy/_core/tests/examples/limited_api/meson.build
@@ -6,8 +6,8 @@ cc = meson.get_compiler('c')
 cy = meson.get_compiler('cython')

 # Keep synced with pyproject.toml
-if not cy.version().version_compare('>=3.0.6')
-  error('tests requires Cython >= 3.0.6')
+if not cy.version().version_compare('>=3.1.0')
+  error('tests requires Cython >= 3.1.0')
 endif

 npy_include_path = run_command(py, [
diff --git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py
index c405a59e535e..0c00bab0333c 100644
--- a/numpy/_core/tests/test_cython.py
+++ b/numpy/_core/tests/test_cython.py
@@ -19,7 +19,7 @@
     from numpy._utils import _pep440

     # Note: keep in sync with the one in pyproject.toml
-    required_version = "3.0.6"
+    required_version = "3.1.0"
     if _pep440.parse(cython_version) < _pep440.Version(required_version):
         # too old or wrong cython, skip the test
         cython = None
diff --git a/numpy/_core/tests/test_limited_api.py b/numpy/_core/tests/test_limited_api.py
index 984210e53af7..9aa469cd83c8 100644
--- a/numpy/_core/tests/test_limited_api.py
+++ b/numpy/_core/tests/test_limited_api.py
@@ -17,7 +17,7 @@
     from numpy._utils import _pep440

     # Note: keep in sync with the one in pyproject.toml
-    required_version = "3.0.6"
+    required_version = "3.1.0"
     if _pep440.parse(cython_version) < _pep440.Version(required_version):
         # too old or wrong cython, skip the test
         cython = None
diff --git a/numpy/random/_examples/cython/meson.build b/numpy/random/_examples/cython/meson.build
index 7aa367d13787..68f30827442a 100644
--- a/numpy/random/_examples/cython/meson.build
+++ b/numpy/random/_examples/cython/meson.build
@@ -7,14 +7,11 @@ cc = meson.get_compiler('c')
 cy = meson.get_compiler('cython')

 # Keep synced with pyproject.toml
-if not cy.version().version_compare('>=3.0.6')
-  error('tests requires Cython >= 3.0.6')
+if not cy.version().version_compare('>=3.1.0')
+  error('tests requires Cython >= 3.1.0')
 endif

-base_cython_args = []
-if cy.version().version_compare('>=3.1.0')
-  base_cython_args += ['-Xfreethreading_compatible=True']
-endif
+base_cython_args = ['-Xfreethreading_compatible=True']

 _numpy_abs = run_command(py3, ['-c',
     'import os; os.chdir(".."); import numpy; print(os.path.abspath(numpy.get_include() + "../../.."))'],
diff --git a/numpy/random/tests/test_extending.py b/numpy/random/tests/test_extending.py
index a1e64ecbe343..19f56d9e3a83 100644
--- a/numpy/random/tests/test_extending.py
+++ b/numpy/random/tests/test_extending.py
@@ -38,7 +38,7 @@
 else:
     from numpy._utils import _pep440
     # Note: keep in sync with the one in pyproject.toml
-    required_version = '3.0.6'
+    required_version = '3.1.0'
     if _pep440.parse(cython_version) < _pep440.Version(required_version):
         # too old or wrong cython, skip the test
         cython = None
diff --git a/pyproject.toml b/pyproject.toml
index e7830c5248a1..861d40ea1836 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -2,7 +2,7 @@
 build-backend = "mesonpy"
 requires = [
     "meson-python>=0.18.0",
-    "Cython>=3.0.6", # keep in sync with version check in meson.build
+    "Cython>=3.1.0", # keep in sync with version check in meson.build
 ]

 [project]
diff --git a/requirements/build_requirements.txt b/requirements/build_requirements.txt
index 1f6eb1435cfc..52a062c92ebd 100644
--- a/requirements/build_requirements.txt
+++ b/requirements/build_requirements.txt
@@ -1,5 +1,5 @@
 meson-python>=0.13.1
-Cython>=3.0.6
+Cython>=3.1.0
 ninja
 spin==0.15
 build
From 258a202c126a9d2a5e34fc5770618b148d93feae Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Tue, 2 Dec 2025 16:47:20 -0700
Subject: [PATCH 0951/1018] MAINT: try with an RLock instead

---
 numpy/random/bit_generator.pyx | 4 ++--
 numpy/random/mtrand.pyx        | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/numpy/random/bit_generator.pyx b/numpy/random/bit_generator.pyx
index 0bb9552a86ce..01b35a7a621a 100644
--- a/numpy/random/bit_generator.pyx
+++ b/numpy/random/bit_generator.pyx
@@ -38,7 +38,7 @@
 from itertools import cycle
 import re
 from secrets import randbits

-from threading import Lock
+from threading import RLock

 from cpython.pycapsule cimport PyCapsule_New
@@ -522,7 +522,7 @@ cdef class BitGenerator:
     """

     def __init__(self, seed=None):
-        self.lock = Lock()
+        self.lock = RLock()
         self._bitgen.state = 0
         if type(self) is BitGenerator:
             raise NotImplementedError('BitGenerator is a base class and cannot be instantized')
diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx
index 213d227cdeeb..ae1706eb884b 100644
--- a/numpy/random/mtrand.pyx
+++ b/numpy/random/mtrand.pyx
@@ -251,7 +251,7 @@ cdef class RandomState:
         """
         if not isinstance(self._bit_generator, _MT19937):
             raise TypeError('can only re-seed a MT19937 BitGenerator')
-        with cython.critical_section(self):
+        with self.lock:
             self._bit_generator._legacy_seeding(seed)
             self._reset_gauss()

@@ -385,7 +385,7 @@
         cdef double gauss = st.get('gauss', 0.0)
         cdef int has_gauss = st.get('has_gauss', 0)

-        with cython.critical_section(self):
+        with self.lock:
             self._aug_state.gauss = gauss
             self._aug_state.has_gauss = has_gauss
             self._bit_generator.state = st
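Note on PATCH 0951: the commit message gives no rationale, but the likely
reason for RLock over Lock is reentrancy: seed() and set_state() now take
the lock themselves, and they can be reached from code paths that already
hold the same bit-generator lock. A plain Lock deadlocks when one thread
acquires it twice; an RLock does not. Illustrative sketch (the helper name
is hypothetical, not numpy code):

    import threading

    def seed_like(lock):
        with lock:      # outer acquisition, as in RandomState.seed
            with lock:  # nested acquisition inside a locked helper
                pass

    seed_like(threading.RLock())   # returns: RLock is reentrant per-thread
    # seed_like(threading.Lock())  # would block forever on the inner acquire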
From 638b4fb4c7678ea2e9dbec7d8d4cf05cc46dfa7b Mon Sep 17 00:00:00 2001
From: Nathan Goldbaum
Date: Tue, 2 Dec 2025 18:38:12 -0700
Subject: [PATCH 0952/1018] Revert "MAINT: bump minimum required Cython
 version"

This reverts commit 6c14af1a934723d6a8c624f1cbd7481d8928e51b.
---
 INSTALL.rst                                        | 2 +-
 meson.build                                        | 4 ++--
 numpy/_core/tests/examples/cython/meson.build      | 9 ++++++---
 numpy/_core/tests/examples/limited_api/meson.build | 4 ++--
 numpy/_core/tests/test_cython.py                   | 2 +-
 numpy/_core/tests/test_limited_api.py              | 2 +-
 numpy/random/_examples/cython/meson.build          | 9 ++++++---
 numpy/random/tests/test_extending.py               | 2 +-
 pyproject.toml                                     | 2 +-
 requirements/build_requirements.txt                | 2 +-
 10 files changed, 22 insertions(+), 16 deletions(-)

diff --git a/INSTALL.rst b/INSTALL.rst
index 214e2785eb33..6e9d2cd242f5 100644
--- a/INSTALL.rst
+++ b/INSTALL.rst
@@ -20,7 +20,7 @@ Building NumPy requires the following installed software:
    e.g., on Debian/Ubuntu one needs to install both `python3` and
    `python3-dev`. On Windows and macOS this is normally not an issue.

-2) Cython >= 3.1.0
+2) Cython >= 3.0.6

 3) pytest__ (optional)
diff --git a/meson.build b/meson.build
index 9d398dbd180b..2cb7ce987ad5 100644
--- a/meson.build
+++ b/meson.build
@@ -30,8 +30,8 @@ elif cc.get_id() == 'msvc'
           'when building with MSVC')
   endif
 endif
-if not cy.version().version_compare('>=3.1.0')
-  error('NumPy requires Cython >= 3.1.0')
+if not cy.version().version_compare('>=3.0.6')
+  error('NumPy requires Cython >= 3.0.6')
 endif

 py = import('python').find_installation(pure: false)
diff --git a/numpy/_core/tests/examples/cython/meson.build b/numpy/_core/tests/examples/cython/meson.build
index 9afe90103665..8362c339ae73 100644
--- a/numpy/_core/tests/examples/cython/meson.build
+++ b/numpy/_core/tests/examples/cython/meson.build
@@ -6,11 +6,14 @@ cc = meson.get_compiler('c')
 cy = meson.get_compiler('cython')

 # Keep synced with pyproject.toml
-if not cy.version().version_compare('>=3.1.0')
-  error('tests requires Cython >= 3.1.0')
+if not cy.version().version_compare('>=3.0.6')
+  error('tests requires Cython >= 3.0.6')
 endif

-cython_args = ['-Xfreethreading_compatible=True']
+cython_args = []
+if cy.version().version_compare('>=3.1.0')
+  cython_args += ['-Xfreethreading_compatible=True']
+endif

 npy_include_path = run_command(py, [
     '-c',
diff --git a/numpy/_core/tests/examples/limited_api/meson.build b/numpy/_core/tests/examples/limited_api/meson.build
index 02496acd1f84..65287d8654f5 100644
--- a/numpy/_core/tests/examples/limited_api/meson.build
+++ b/numpy/_core/tests/examples/limited_api/meson.build
@@ -6,8 +6,8 @@ cc = meson.get_compiler('c')
 cy = meson.get_compiler('cython')

 # Keep synced with pyproject.toml
-if not cy.version().version_compare('>=3.1.0')
-  error('tests requires Cython >= 3.1.0')
+if not cy.version().version_compare('>=3.0.6')
+  error('tests requires Cython >= 3.0.6')
 endif

 npy_include_path = run_command(py, [
diff --git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py
index 0c00bab0333c..c405a59e535e 100644
--- a/numpy/_core/tests/test_cython.py
+++ b/numpy/_core/tests/test_cython.py
@@ -19,7 +19,7 @@
     from numpy._utils import _pep440

     # Note: keep in sync with the one in pyproject.toml
-    required_version = "3.1.0"
+    required_version = "3.0.6"
     if _pep440.parse(cython_version) < _pep440.Version(required_version):
         # too old or wrong cython, skip the test
         cython = None
diff --git a/numpy/_core/tests/test_limited_api.py b/numpy/_core/tests/test_limited_api.py
index 9aa469cd83c8..984210e53af7 100644
--- a/numpy/_core/tests/test_limited_api.py
+++ b/numpy/_core/tests/test_limited_api.py
@@ -17,7 +17,7 @@
     from numpy._utils import _pep440

     # Note: keep in sync with the one in pyproject.toml
-    required_version = "3.1.0"
+    required_version = "3.0.6"
     if _pep440.parse(cython_version) < _pep440.Version(required_version):
         # too old or wrong cython, skip the test
         cython = None
diff --git a/numpy/random/_examples/cython/meson.build b/numpy/random/_examples/cython/meson.build
index 68f30827442a..7aa367d13787 100644
--- a/numpy/random/_examples/cython/meson.build
+++ b/numpy/random/_examples/cython/meson.build
@@ -7,11 +7,14 @@ cc = meson.get_compiler('c')
 cy = meson.get_compiler('cython')

 # Keep synced with pyproject.toml
-if not cy.version().version_compare('>=3.1.0')
-  error('tests requires Cython >= 3.1.0')
+if not cy.version().version_compare('>=3.0.6')
+  error('tests requires Cython >= 3.0.6')
 endif

-base_cython_args = ['-Xfreethreading_compatible=True']
+base_cython_args = []
+if cy.version().version_compare('>=3.1.0')
+  base_cython_args += ['-Xfreethreading_compatible=True']
+endif

 _numpy_abs = run_command(py3, ['-c',
     'import os; os.chdir(".."); import numpy; print(os.path.abspath(numpy.get_include() + "../../.."))'],
diff --git a/numpy/random/tests/test_extending.py b/numpy/random/tests/test_extending.py
index 19f56d9e3a83..a1e64ecbe343 100644
--- a/numpy/random/tests/test_extending.py
+++ b/numpy/random/tests/test_extending.py
@@ -38,7 +38,7 @@
 else:
     from numpy._utils import _pep440
     # Note: keep in sync with the one in pyproject.toml
-    required_version = '3.1.0'
+    required_version = '3.0.6'
     if _pep440.parse(cython_version) < _pep440.Version(required_version):
         # too old or wrong cython, skip the test
         cython = None
diff --git a/pyproject.toml b/pyproject.toml
index 861d40ea1836..e7830c5248a1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -2,7 +2,7 @@
 build-backend = "mesonpy"
 requires = [
     "meson-python>=0.18.0",
-    "Cython>=3.1.0", # keep in sync with version check in meson.build
+    "Cython>=3.0.6", # keep in sync with version check in meson.build
 ]

 [project]
diff --git a/requirements/build_requirements.txt b/requirements/build_requirements.txt
index 52a062c92ebd..1f6eb1435cfc 100644
--- a/requirements/build_requirements.txt
+++ b/requirements/build_requirements.txt
@@ -1,5 +1,5 @@
 meson-python>=0.13.1
-Cython>=3.1.0
+Cython>=3.0.6
 ninja
 spin==0.15
 build
From 4a6bb2d843ba271f09a4115fd2c2c9dc82c78820 Mon Sep 17 00:00:00 2001
From: Kader Miyanyedi <48386782+Kadermiyanyedi@users.noreply.github.com>
Date: Thu, 4 Dec 2025 00:35:19 +0300
Subject: [PATCH 0953/1018] MAINT: Enable linting with ruff E501 (#30356)

---
 numpy/_core/tests/test_arrayprint.py     |  7 +++---
 numpy/_core/tests/test_cpu_dispatcher.py |  6 ++---
 numpy/_core/tests/test_dtype.py          | 32 ++++++++++++++++++++----
 ruff.toml                                |  3 ---
 4 files changed, 34 insertions(+), 14 deletions(-)

diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py
index 3777981724dc..e6cbb6f72229 100644
--- a/numpy/_core/tests/test_arrayprint.py
+++ b/numpy/_core/tests/test_arrayprint.py
@@ -277,11 +277,12 @@ def test_structure_format_mixed(self):
             # for issue #5692
             A = np.zeros(shape=10, dtype=[("A", "M8[s]")])
             A[5:].fill(np.datetime64('NaT'))
+            date_string = '1970-01-01T00:00:00'
             assert_equal(
                 np.array2string(A),
-                textwrap.dedent("""\
-                [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
-                 ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',)
+                textwrap.dedent(f"""\
+                [('{date_string}',) ('{date_string}',) ('{date_string}',)
+                 ('{date_string}',) ('{date_string}',) ('NaT',) ('NaT',)
                  ('NaT',) ('NaT',) ('NaT',)]""")
             )
         finally:
diff --git a/numpy/_core/tests/test_cpu_dispatcher.py b/numpy/_core/tests/test_cpu_dispatcher.py
index 01fd53a3dbd0..04acf13c228d 100644
--- a/numpy/_core/tests/test_cpu_dispatcher.py
+++ b/numpy/_core/tests/test_cpu_dispatcher.py
@@ -20,9 +20,9 @@ def test_dispatcher():
     highest_sfx = "" # no suffix for the baseline
     all_sfx = []
     for feature in reversed(targets):
-        # skip baseline features, by the default `CCompilerOpt` do not generate separated objects
-        # for the baseline, just one object combined all of them via 'baseline' option
-        # within the configuration statements.
+        # skip baseline features, by the default `CCompilerOpt` do not generate
+        # separated objects for the baseline, just one object combined all of them
+        # via 'baseline' option within the configuration statements.
         if feature in __cpu_baseline__:
             continue
         # check compiler and running machine support
diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py
index 8819262c4e21..13cc84d61d1e 100644
--- a/numpy/_core/tests/test_dtype.py
+++ b/numpy/_core/tests/test_dtype.py
@@ -1836,7 +1836,12 @@ class Union(ctypes.Union):
         ]
         expected = np.dtype({
             "names": ['a', 'b', 'c', 'd'],
-            "formats": ['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],
+            "formats": [
+                'u1',
+                np.uint16,
+                np.uint32,
+                [('one', 'u1'), ('two', np.uint32)],
+            ],
             "offsets": [0, 0, 0, 0],
             "itemsize": ctypes.sizeof(Union)
         })
@@ -1860,7 +1865,12 @@ class Union(ctypes.Union):
         ]
         expected = np.dtype({
             "names": ['a', 'b', 'c', 'd'],
-            "formats": ['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],
+            "formats": [
+                'u1',
+                np.uint16,
+                np.uint32,
+                [('one', 'u1'), ('two', np.uint32)],
+            ],
             "offsets": [0, 0, 0, 0],
             "itemsize": ctypes.sizeof(Union)
         })
@@ -1892,7 +1902,15 @@ class PackedStructure(ctypes.Structure):
             ('g', ctypes.c_uint8)
         ]
         expected = np.dtype({
-            "formats": [np.uint8, np.uint16, np.uint8, np.uint16, np.uint32, np.uint32, np.uint8],
+            "formats": [
+                np.uint8,
+                np.uint16,
+                np.uint8,
+                np.uint16,
+                np.uint32,
+                np.uint32,
+                np.uint8,
+            ],
             "offsets": [0, 2, 4, 6, 8, 12, 16],
             "names": ['a', 'b', 'c', 'd', 'e', 'f', 'g'],
             "itemsize": 18})
@@ -1965,7 +1983,9 @@ def test_pairs(self, pair):

 class TestUserDType:
     @pytest.mark.leaks_references(reason="dynamically creates custom dtype.")
-    @pytest.mark.thread_unsafe(reason="crashes when GIL disabled, dtype setup is thread-unsafe")
+    @pytest.mark.thread_unsafe(
+        reason="crashes when GIL disabled, dtype setup is thread-unsafe",
+    )
     def test_custom_structured_dtype(self):
         class mytype:
             pass
@@ -1986,7 +2006,9 @@ class mytype:
         del a
         assert sys.getrefcount(o) == startcount

-    @pytest.mark.thread_unsafe(reason="crashes when GIL disabled, dtype setup is thread-unsafe")
+    @pytest.mark.thread_unsafe(
+        reason="crashes when GIL disabled, dtype setup is thread-unsafe",
+    )
     def test_custom_structured_dtype_errors(self):
         class mytype:
             pass
diff --git a/ruff.toml b/ruff.toml
index 8e0081e34622..c02a90e610e6 100644
--- a/ruff.toml
+++ b/ruff.toml
@@ -81,10 +81,7 @@ ignore = [
 "bench_*.py" = ["B015", "B018"]
 "test*.py" = ["B015", "B018", "E201", "E714"]

-"numpy/_core/tests/test_arrayprint.py" = ["E501"]
-"numpy/_core/tests/test_cpu_dispatcher.py" = ["E501"]
 "numpy/_core/tests/test_cpu_features.py" = ["E501"]
-"numpy/_core/tests/test_dtype.py" = ["E501"]
 "numpy/_core/tests/test_defchararray.py" = ["E501"]
 "numpy/_core/tests/test_einsum.py" = ["E501"]
 "numpy/_core/tests/test_multiarray.py" = ["E501"]
From e0e4a764f7a329a821f222e8934dc11ec794387e Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Thu, 4 Dec 2025 01:11:04 -0700
Subject: [PATCH 0954/1018] MAINT: Try pinning to Python 3.14.0t in macos
 tests. (#30369)

---
 .github/workflows/macos.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml
index f2af68370b99..4fe1576c86e0 100644
--- a/.github/workflows/macos.yml
+++ b/.github/workflows/macos.yml
@@ -118,7 +118,7 @@ jobs:
         build_runner:
           - [ macos-15-intel, "macos_x86_64" ]
           - [ macos-14, "macos_arm64" ]
-        version: ["3.12", "3.14t"]
+        version: ["3.12", "3.14.0t"]

     steps:
       - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

From c6fce1a5d2cfa2e7072696d441469daf46f36203 Mon Sep 17 00:00:00 2001
From: jorenham
Date: Thu, 4 Dec 2025 09:17:25 +0100
Subject: [PATCH 0955/1018] CI: bump FreeBSD from 14.2 to 14.3

---
 tools/ci/cirrus_arm.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/ci/cirrus_arm.yml b/tools/ci/cirrus_arm.yml
index 119575b2a7be..977921d8236d 100644
--- a/tools/ci/cirrus_arm.yml
+++ b/tools/ci/cirrus_arm.yml
@@ -25,7 +25,7 @@ freebsd_test_task:
   use_compute_credits: $CIRRUS_USER_COLLABORATOR == 'true'
   compute_engine_instance:
     image_project: freebsd-org-cloud-dev
-    image: family/freebsd-14-2
+    image: family/freebsd-14-3
     platform: freebsd
     cpu: 1
     memory: 4G
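Note on PATCH 0956 (below): the removed comments are ones mypy no longer
needs, and they can be found mechanically rather than by hand. Illustrative
sketch, assuming mypy's --warn-unused-ignores option:

    # demo.py -- check with: mypy --warn-unused-ignores demo.py
    x: int = "1"  # type: ignore[assignment]  # needed: real type error
    y: int = 1  # type: ignore[assignment]  # unused: mypy reports this one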
From c1ab2e375c3e6ddbde7fcf61f18ca6a8bc3cf437 Mon Sep 17 00:00:00 2001
From: jorenham
Date: Wed, 3 Dec 2025 22:20:00 +0100
Subject: [PATCH 0956/1018] TYP: remove unused ``type: ignore`` comments

---
 numpy/__init__.pyi               | 216 +++++++++++++++----------------
 numpy/_core/multiarray.pyi       |  12 +-
 numpy/_typing/_nbit_base.pyi     |   2 +-
 numpy/lib/_arrayterator_impl.pyi |   2 +-
 numpy/lib/_shape_base_impl.pyi   |   8 +-
 numpy/lib/_type_check_impl.pyi   |  24 ++--
 numpy/lib/_ufunclike_impl.pyi    |   4 +-
 numpy/lib/_user_array_impl.pyi   |  10 +-
 numpy/lib/user_array.pyi         |   2 +-
 numpy/linalg/_linalg.pyi         |   6 +-
 numpy/ma/core.pyi                | 141 ++++++++++----------
 numpy/ma/extras.pyi              |   1 +
 numpy/ma/mrecords.pyi            |   7 +-
 numpy/matlib.pyi                 |   2 +-
 numpy/random/_generator.pyi      |  74 +++++------
 numpy/random/mtrand.pyi          | 130 +++++++++----------
 numpy/testing/__init__.pyi       |   2 +-
 numpy/typing/__init__.pyi        |   7 +-
 tools/stubtest/mypy.ini          |   2 +-
 19 files changed, 331 insertions(+), 321 deletions(-)

diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index 3c459952d6b4..8ef7c321fa76 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -430,7 +430,7 @@ from numpy.lib._arraysetops_impl import (
     unique_values,
 )

-from numpy.lib._function_base_impl import ( # type: ignore[deprecated]
+from numpy.lib._function_base_impl import (
     select,
     piecewise,
     trim_zeros,
@@ -2560,7 +2560,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload  # (None)
     def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: builtins.bool | None = None) -> Self: ...
     @overload  # (empty_sequence)
-    def reshape(  # type: ignore[overload-overlap]  # mypy false positive
+    def reshape(  # mypy false positive
         self,
         shape: Sequence[Never],
        /,
@@ -2799,9 +2799,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __matmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ...
     @overload
-    def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap]
+    def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...
     @overload
-    def __matmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+    def __matmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ...
     @overload
     def __matmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
     @overload
@@ -2811,11 +2811,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __matmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
     @overload
-    def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+    def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...
     @overload
-    def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+    def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
     @overload
-    def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap]
+    def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...
     @overload
     def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ...
     @overload
@@ -2828,9 +2828,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload  # signature equivalent to __matmul__
     def __rmatmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ...
     @overload
-    def __rmatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap]
+    def __rmatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...
     @overload
-    def __rmatmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+    def __rmatmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ...
     @overload
     def __rmatmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
     @overload
@@ -2840,11 +2840,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __rmatmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
     @overload
-    def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+    def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...
     @overload
-    def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+    def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
     @overload
-    def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap]
+    def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...
     @overload
     def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ...
     @overload
@@ -2857,19 +2857,19 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __mod__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ...
     @overload
-    def __mod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap]
+    def __mod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ...
     @overload
-    def __mod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap]
+    def __mod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ...
     @overload
-    def __mod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap]
+    def __mod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ...
     @overload
     def __mod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
     @overload
     def __mod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
     @overload
-    def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+    def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...
     @overload
-    def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+    def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
     @overload
     def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...
     @overload
@@ -2882,19 +2882,19 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload  # signature equivalent to __mod__
     def __rmod__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ...
     @overload
-    def __rmod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap]
+    def __rmod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ...
     @overload
-    def __rmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap]
+    def __rmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ...
     @overload
-    def __rmod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap]
+    def __rmod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ...
     @overload
     def __rmod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
     @overload
     def __rmod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
     @overload
-    def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+    def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...
     @overload
-    def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+    def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
     @overload
     def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...
     @overload
@@ -2907,19 +2907,19 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __divmod__(self: NDArray[_RealNumberT], rhs: int | np.bool, /) -> _2Tuple[ndarray[_ShapeT_co, dtype[_RealNumberT]]]: ...
     @overload
-    def __divmod__(self: NDArray[_RealNumberT], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap]
+    def __divmod__(self: NDArray[_RealNumberT], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ...
     @overload
-    def __divmod__(self: NDArray[np.bool], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... # type: ignore[overload-overlap]
+    def __divmod__(self: NDArray[np.bool], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ...
     @overload
-    def __divmod__(self: NDArray[np.bool], rhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap]
+    def __divmod__(self: NDArray[np.bool], rhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ...
     @overload
     def __divmod__(self: NDArray[float64], rhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ...
     @overload
     def __divmod__(self: _ArrayFloat64_co, rhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ...
     @overload
-    def __divmod__(self: _ArrayUInt_co, rhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... # type: ignore[overload-overlap]
+    def __divmod__(self: _ArrayUInt_co, rhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ...
     @overload
-    def __divmod__(self: _ArrayInt_co, rhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... # type: ignore[overload-overlap]
+    def __divmod__(self: _ArrayInt_co, rhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ...
     @overload
     def __divmod__(self: _ArrayFloat_co, rhs: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating]]: ...
     @overload
@@ -2928,19 +2928,19 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload  # signature equivalent to __divmod__
     def __rdivmod__(self: NDArray[_RealNumberT], lhs: int | np.bool, /) -> _2Tuple[ndarray[_ShapeT_co, dtype[_RealNumberT]]]: ...
     @overload
-    def __rdivmod__(self: NDArray[_RealNumberT], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap]
+    def __rdivmod__(self: NDArray[_RealNumberT], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ...
     @overload
-    def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... # type: ignore[overload-overlap]
+    def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ...
     @overload
-    def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap]
+    def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ...
     @overload
     def __rdivmod__(self: NDArray[float64], lhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ...
     @overload
     def __rdivmod__(self: _ArrayFloat64_co, lhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ...
     @overload
-    def __rdivmod__(self: _ArrayUInt_co, lhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... # type: ignore[overload-overlap]
+    def __rdivmod__(self: _ArrayUInt_co, lhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ...
     @overload
-    def __rdivmod__(self: _ArrayInt_co, lhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... # type: ignore[overload-overlap]
+    def __rdivmod__(self: _ArrayInt_co, lhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ...
     @overload
     def __rdivmod__(self: _ArrayFloat_co, lhs: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating]]: ...
     @overload
@@ -2950,11 +2950,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __add__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ...
     @overload
-    def __add__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+    def __add__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ...
     @overload
-    def __add__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap]
+    def __add__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...
     @overload
-    def __add__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+    def __add__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ...
     @overload
     def __add__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
     @overload
@@ -2964,15 +2964,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __add__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
     @overload
-    def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+    def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...
     @overload
-    def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+    def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
     @overload
-    def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap]
+    def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...
     @overload
-    def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap]
+    def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ...
     @overload
-    def __add__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap]
+    def __add__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ...
     @overload
     def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ...
     @overload
@@ -2998,11 +2998,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload  # signature equivalent to __add__
     def __radd__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ...
     @overload
-    def __radd__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+    def __radd__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ...
     @overload
-    def __radd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap]
+    def __radd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...
     @overload
-    def __radd__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+    def __radd__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ...
     @overload
     def __radd__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
     @overload
@@ -3012,15 +3012,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __radd__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
     @overload
-    def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+    def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...
     @overload
-    def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+    def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
     @overload
-    def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap]
+    def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...
     @overload
-    def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap]
+    def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ...
     @overload
-    def __radd__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap]
+    def __radd__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ...
     @overload
     def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ...
     @overload
@@ -3046,11 +3046,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __sub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ...
     @overload
-    def __sub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+    def __sub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ...
     @overload
     def __sub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ...
     @overload
-    def __sub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+    def __sub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ...
     @overload
     def __sub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
     @overload
@@ -3060,15 +3060,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __sub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
     @overload
-    def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+    def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...
     @overload
-    def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+    def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
     @overload
-    def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap]
+    def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...
     @overload
-    def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap]
+    def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ...
     @overload
-    def __sub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap]
+    def __sub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ...
     @overload
     def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ...
     @overload
@@ -3084,11 +3084,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __rsub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ...
     @overload
-    def __rsub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+    def __rsub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ...
     @overload
     def __rsub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ...
     @overload
-    def __rsub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+    def __rsub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ...
     @overload
     def __rsub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
     @overload
@@ -3098,15 +3098,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __rsub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
     @overload
-    def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+    def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...
     @overload
-    def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+    def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
     @overload
-    def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap]
+    def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...
     @overload
-    def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap]
+    def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ...
     @overload
-    def __rsub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap]
+    def __rsub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ...
     @overload
     def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ...
     @overload
@@ -3122,11 +3122,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __mul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ...
     @overload
-    def __mul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+    def __mul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ...
     @overload
-    def __mul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap]
+    def __mul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...
     @overload
-    def __mul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+    def __mul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ...
     @overload
     def __mul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
     @overload
@@ -3136,11 +3136,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __mul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
     @overload
-    def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+    def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...
     @overload
-    def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+    def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
     @overload
-    def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap]
+    def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...
     @overload
     def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ...
     @overload
@@ -3164,11 +3164,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload  # signature equivalent to __mul__
     def __rmul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ...
     @overload
-    def __rmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+    def __rmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ...
     @overload
-    def __rmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap]
+    def __rmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...
     @overload
-    def __rmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+    def __rmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ...
     @overload
     def __rmul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
     @overload
@@ -3178,11 +3178,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __rmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
     @overload
-    def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+    def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...
     @overload
-    def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+    def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
     @overload
-    def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap]
+    def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...
     @overload
     def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ...
     @overload
@@ -3268,19 +3268,19 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __floordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ...
     @overload
-    def __floordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap]
+    def __floordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ...
     @overload
-    def __floordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap]
+    def __floordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ...
     @overload
-    def __floordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap]
+    def __floordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ...
     @overload
     def __floordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
     @overload
     def __floordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
     @overload
-    def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+    def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...
     @overload
-    def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+    def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
     @overload
     def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...
     @overload
@@ -3298,19 +3298,19 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __rfloordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ...
     @overload
-    def __rfloordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap]
+    def __rfloordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ...
     @overload
-    def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap]
+    def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ...
     @overload
-    def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap]
+    def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ...
     @overload
     def __rfloordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
     @overload
     def __rfloordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
     @overload
-    def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+    def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...
     @overload
-    def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+    def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
     @overload
     def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...
     @overload
@@ -3326,11 +3326,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __pow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ...
     @overload
-    def __pow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+    def __pow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ...
     @overload
-    def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... # type: ignore[overload-overlap]
+    def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ...
     @overload
-    def __pow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+    def __pow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ...
     @overload
     def __pow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ...
     @overload
@@ -3342,11 +3342,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
         self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, /
     ) -> NDArray[complex128]: ...
     @overload
-    def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+    def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ...
     @overload
-    def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+    def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ...
     @overload
-    def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... # type: ignore[overload-overlap]
+    def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ...
     @overload
     def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> NDArray[complexfloating]: ...
     @overload
@@ -3360,11 +3360,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __rpow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ...
     @overload
-    def __rpow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+    def __rpow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ...
     @overload
-    def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... # type: ignore[overload-overlap]
+    def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ...
     @overload
-    def __rpow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+    def __rpow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ...
     @overload
     def __rpow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ...
     @overload
@@ -3376,11 +3376,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
         self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, /
     ) -> NDArray[complex128]: ...
     @overload
-    def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+    def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ...
     @overload
-    def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+    def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ...
     @overload
-    def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... # type: ignore[overload-overlap]
+    def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ...
     @overload
     def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> NDArray[complexfloating]: ...
     @overload
@@ -3393,7 +3393,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __lshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ...
     @overload
-    def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+    def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...
     @overload
     def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
     @overload
@@ -3404,7 +3404,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __rlshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ...
     @overload
-    def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+    def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...
     @overload
     def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
     @overload
@@ -3415,7 +3415,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __rshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ...
     @overload
-    def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+    def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...
     @overload
     def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
     @overload
@@ -3426,7 +3426,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __rrshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ...
     @overload
-    def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+    def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...
     @overload
     def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
     @overload
@@ -3437,7 +3437,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __and__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...
     @overload
-    def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+    def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...
     @overload
     def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
     @overload
@@ -3448,7 +3448,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __rand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...
     @overload
-    def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+    def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...
     @overload
     def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
     @overload
@@ -3459,7 +3459,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __xor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...
     @overload
-    def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+    def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...
     @overload
     def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
     @overload
@@ -3470,7 +3470,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __rxor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...
     @overload
-    def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+    def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...
     @overload
     def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
     @overload
@@ -3481,7 +3481,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __or__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...
     @overload
-    def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+    def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...
     @overload
     def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
     @overload
@@ -3492,7 +3492,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
     @overload
     def __ror__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...
     @overload
-    def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+    def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...
     @overload
     def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
     @overload
@@ -5492,7 +5492,7 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co]
     @overload
     def __rdivmod__(self, x: timedelta64[None], /) -> tuple[int64, timedelta64[None]]: ...
# type: ignore[misc] @overload - def __rdivmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... # type: ignore[misc] + def __rdivmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... @overload def __rdivmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... # type: ignore[misc] @overload @@ -5686,7 +5686,7 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __rsub__(self: datetime64[dt.datetime], x: datetime64[int], /) -> timedelta64[int]: ... # type: ignore[misc] @overload - def __rsub__(self: datetime64[dt.datetime], x: datetime64[dt.date], /) -> timedelta64[dt.timedelta]: ... # type: ignore[misc] + def __rsub__(self: datetime64[dt.datetime], x: datetime64[dt.date], /) -> timedelta64[dt.timedelta]: ... @overload def __rsub__(self, x: datetime64[None], /) -> timedelta64[None]: ... # type: ignore[misc] @overload diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 0293d193cbc4..812e0b562b9d 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -811,7 +811,7 @@ def fromstring( ) -> NDArray[Any]: ... @overload -def frompyfunc( # type: ignore[overload-overlap] +def frompyfunc( func: Callable[[Any], _ReturnType], /, nin: L[1], nout: L[1], @@ -819,7 +819,7 @@ def frompyfunc( # type: ignore[overload-overlap] identity: None = None, ) -> _PyFunc_Nin1_Nout1[_ReturnType, None]: ... @overload -def frompyfunc( # type: ignore[overload-overlap] +def frompyfunc( func: Callable[[Any], _ReturnType], /, nin: L[1], nout: L[1], @@ -827,7 +827,7 @@ def frompyfunc( # type: ignore[overload-overlap] identity: _IDType, ) -> _PyFunc_Nin1_Nout1[_ReturnType, _IDType]: ... @overload -def frompyfunc( # type: ignore[overload-overlap] +def frompyfunc( func: Callable[[Any, Any], _ReturnType], /, nin: L[2], nout: L[1], @@ -835,7 +835,7 @@ def frompyfunc( # type: ignore[overload-overlap] identity: None = None, ) -> _PyFunc_Nin2_Nout1[_ReturnType, None]: ... @overload -def frompyfunc( # type: ignore[overload-overlap] +def frompyfunc( func: Callable[[Any, Any], _ReturnType], /, nin: L[2], nout: L[1], @@ -843,7 +843,7 @@ def frompyfunc( # type: ignore[overload-overlap] identity: _IDType, ) -> _PyFunc_Nin2_Nout1[_ReturnType, _IDType]: ... @overload -def frompyfunc( # type: ignore[overload-overlap] +def frompyfunc( func: Callable[..., _ReturnType], /, nin: _Nin, nout: L[1], @@ -851,7 +851,7 @@ def frompyfunc( # type: ignore[overload-overlap] identity: None = None, ) -> _PyFunc_Nin3P_Nout1[_ReturnType, None, _Nin]: ... @overload -def frompyfunc( # type: ignore[overload-overlap] +def frompyfunc( func: Callable[..., _ReturnType], /, nin: _Nin, nout: L[1], diff --git a/numpy/_typing/_nbit_base.pyi b/numpy/_typing/_nbit_base.pyi index d88c9f4d9fd9..81810a2caa3b 100644 --- a/numpy/_typing/_nbit_base.pyi +++ b/numpy/_typing/_nbit_base.pyi @@ -15,7 +15,7 @@ from typing_extensions import deprecated class NBitBase: ... @final -class _256Bit(NBitBase): ... +class _256Bit(NBitBase): ... # type: ignore[deprecated] @final class _128Bit(_256Bit): ... diff --git a/numpy/lib/_arrayterator_impl.pyi b/numpy/lib/_arrayterator_impl.pyi index 5fd589a3ac36..c70872f6e753 100644 --- a/numpy/lib/_arrayterator_impl.pyi +++ b/numpy/lib/_arrayterator_impl.pyi @@ -39,7 +39,7 @@ class Arrayterator(np.ndarray[_ShapeT_co, _DTypeT_co]): def __iter__(self) -> Generator[np.ndarray[_AnyShape, _DTypeT_co]]: ... 
# - @overload # type: ignore[override] + @overload def __array__(self, /, dtype: None = None, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __array__(self, /, dtype: _DTypeT, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT]: ... diff --git a/numpy/lib/_shape_base_impl.pyi b/numpy/lib/_shape_base_impl.pyi index 352f57dd810a..ec4cd2c595ac 100644 --- a/numpy/lib/_shape_base_impl.pyi +++ b/numpy/lib/_shape_base_impl.pyi @@ -210,13 +210,13 @@ def get_array_wrap(*args: _SupportsArrayWrap) -> _ArrayWrap: ... def get_array_wrap(*args: object) -> _ArrayWrap | None: ... @overload -def kron(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] +def kron(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> NDArray[np.bool]: ... @overload -def kron(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co) -> NDArray[unsignedinteger]: ... # type: ignore[misc] +def kron(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co) -> NDArray[unsignedinteger]: ... @overload -def kron(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[signedinteger]: ... # type: ignore[misc] +def kron(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[signedinteger]: ... @overload -def kron(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[floating]: ... # type: ignore[misc] +def kron(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[floating]: ... @overload def kron(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... @overload diff --git a/numpy/lib/_type_check_impl.pyi b/numpy/lib/_type_check_impl.pyi index 8b665cd9a400..dec5fc1dfaa4 100644 --- a/numpy/lib/_type_check_impl.pyi +++ b/numpy/lib/_type_check_impl.pyi @@ -64,7 +64,7 @@ def mintypecode(typechars: Iterable[str | ArrayLike], typeset: str | Container[s # @overload -def real(val: _HasReal[_T]) -> _T: ... # type: ignore[overload-overlap] +def real(val: _HasReal[_T]) -> _T: ... @overload def real(val: _ArrayLike[_RealT]) -> NDArray[_RealT]: ... @overload @@ -72,7 +72,7 @@ def real(val: ArrayLike) -> NDArray[Any]: ... # @overload -def imag(val: _HasImag[_T]) -> _T: ... # type: ignore[overload-overlap] +def imag(val: _HasImag[_T]) -> _T: ... @overload def imag(val: _ArrayLike[_RealT]) -> NDArray[_RealT]: ... @overload @@ -142,7 +142,7 @@ def nan_to_num( -# NOTE: The [overload-overlap] mypy error is a false positive +# @overload -def real_if_close(a: _ArrayLike[np.complex64], tol: float = 100) -> NDArray[np.float32 | np.complex64]: ... # type: ignore[overload-overlap] +def real_if_close(a: _ArrayLike[np.complex64], tol: float = 100) -> NDArray[np.float32 | np.complex64]: ... @overload def real_if_close(a: _ArrayLike[np.complex128], tol: float = 100) -> NDArray[np.float64 | np.complex128]: ... @overload @@ -202,41 +202,41 @@ def typename(char: L["O"]) -> L["object"]: ... @overload def common_type() -> type[np.float16]: ... @overload -def common_type(a0: _HasDType[np.float16], /, *ai: _HasDType[np.float16]) -> type[np.float16]: ... # type: ignore[overload-overlap] +def common_type(a0: _HasDType[np.float16], /, *ai: _HasDType[np.float16]) -> type[np.float16]: ... @overload -def common_type(a0: _HasDType[np.float32], /, *ai: _HasDType[_FloatMax32]) -> type[np.float32]: ... # type: ignore[overload-overlap] +def common_type(a0: _HasDType[np.float32], /, *ai: _HasDType[_FloatMax32]) -> type[np.float32]: ... @overload -def common_type( # type: ignore[overload-overlap] a0: _HasDType[np.float64 | np.integer], /, *ai: _HasDType[_RealMax64], ) -> type[np.float64]: ...
@overload -def common_type( # type: ignore[overload-overlap] +def common_type( a0: _HasDType[np.longdouble], /, *ai: _HasDType[_Real], ) -> type[np.longdouble]: ... @overload -def common_type( # type: ignore[overload-overlap] +def common_type( a0: _HasDType[np.complex64], /, *ai: _HasDType[_InexactMax32], ) -> type[np.complex64]: ... @overload -def common_type( # type: ignore[overload-overlap] +def common_type( a0: _HasDType[np.complex128], /, *ai: _HasDType[_NumberMax64], ) -> type[np.complex128]: ... @overload -def common_type( # type: ignore[overload-overlap] +def common_type( a0: _HasDType[np.clongdouble], /, *ai: _HasDType[np.number], ) -> type[np.clongdouble]: ... @overload -def common_type( # type: ignore[overload-overlap] +def common_type( a0: _HasDType[_FloatMax32], array1: _HasDType[np.float32], /, @@ -257,7 +257,7 @@ def common_type( *ai: _HasDType[_Real], ) -> type[np.longdouble]: ... @overload -def common_type( # type: ignore[overload-overlap] +def common_type( a0: _HasDType[_InexactMax32], array1: _HasDType[np.complex64], /, diff --git a/numpy/lib/_ufunclike_impl.pyi b/numpy/lib/_ufunclike_impl.pyi index 0f00767356e0..c16ce811e28a 100644 --- a/numpy/lib/_ufunclike_impl.pyi +++ b/numpy/lib/_ufunclike_impl.pyi @@ -28,7 +28,7 @@ def fix(x: _ArrayLikeObject_co, out: None = None) -> NDArray[object_]: ... def fix(x: _ArrayLikeFloat_co | _ArrayLikeObject_co, out: _ArrayT) -> _ArrayT: ... @overload -def isposinf( # type: ignore[misc] +def isposinf( x: _FloatLike_co, out: None = None, ) -> np.bool: ... @@ -44,7 +44,7 @@ def isposinf( ) -> _ArrayT: ... @overload -def isneginf( # type: ignore[misc] +def isneginf( x: _FloatLike_co, out: None = None, ) -> np.bool: ... diff --git a/numpy/lib/_user_array_impl.pyi b/numpy/lib/_user_array_impl.pyi index 7f364b495450..f2e34eacc5a6 100644 --- a/numpy/lib/_user_array_impl.pyi +++ b/numpy/lib/_user_array_impl.pyi @@ -21,13 +21,13 @@ _ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], default=_AnyShape, cov _DTypeT = TypeVar("_DTypeT", bound=np.dtype) _DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) -_BoolArrayT = TypeVar("_BoolArrayT", bound=container[Any, np.dtype[np.bool]]) -_IntegralArrayT = TypeVar("_IntegralArrayT", bound=container[Any, np.dtype[np.bool | np.integer | np.object_]]) +_BoolArrayT = TypeVar("_BoolArrayT", bound=container[Any, np.dtype[np.bool]]) # type: ignore[deprecated] +_IntegralArrayT = TypeVar("_IntegralArrayT", bound=container[Any, np.dtype[np.bool | np.integer | np.object_]]) # type: ignore[deprecated] _RealContainerT = TypeVar( "_RealContainerT", - bound=container[Any, np.dtype[np.bool | np.integer | np.floating | np.timedelta64 | np.object_]], + bound=container[Any, np.dtype[np.bool | np.integer | np.floating | np.timedelta64 | np.object_]], # type: ignore[deprecated] ) -_NumericContainerT = TypeVar("_NumericContainerT", bound=container[Any, np.dtype[np.number | np.timedelta64 | np.object_]]) +_NumericContainerT = TypeVar("_NumericContainerT", bound=container[Any, np.dtype[np.number | np.timedelta64 | np.object_]]) # type: ignore[deprecated] _ArrayInt_co: TypeAlias = npt.NDArray[np.integer | np.bool] @@ -112,7 +112,7 @@ class container(Generic[_ShapeT_co, _DTypeT_co]): # keep in sync with np.ndarray @overload - def __abs__(self: container[_ShapeT, np.dtype[np.complex64]], /) -> container[_ShapeT, np.dtype[np.float32]]: ... # type: ignore[overload-overlap] + def __abs__(self: container[_ShapeT, np.dtype[np.complex64]], /) -> container[_ShapeT, np.dtype[np.float32]]: ... 
@overload def __abs__(self: container[_ShapeT, np.dtype[np.complex128]], /) -> container[_ShapeT, np.dtype[np.float64]]: ... @overload diff --git a/numpy/lib/user_array.pyi b/numpy/lib/user_array.pyi index 9b90d893326b..af90126ad6c9 100644 --- a/numpy/lib/user_array.pyi +++ b/numpy/lib/user_array.pyi @@ -1 +1 @@ -from ._user_array_impl import container as container +from ._user_array_impl import container as container # type: ignore[deprecated] diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 60320b021c71..b2b8ada44419 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -302,9 +302,9 @@ def svd( hermitian: bool = False, ) -> NDArray[floating]: ... -# the ignored `overload-overlap` mypy error below is a false-positive +# @overload -def svdvals( # type: ignore[overload-overlap] +def svdvals( x: _ArrayLike[np.float64 | np.complex128 | np.integer | np.bool] | _NestedSequence[complex], / ) -> NDArray[np.float64]: ... @overload @@ -313,7 +313,7 @@ def svdvals(x: _ArrayLike[np.float32 | np.complex64], /) -> NDArray[np.float32]: def svdvals(x: _ArrayLikeNumber_co, /) -> NDArray[floating]: ... # TODO: Returns a scalar for 2D arrays and -# a `(x.ndim - 2)`` dimensionl array otherwise +# a `(x.ndim - 2)` dimensional array otherwise def cond(x: _ArrayLikeComplex_co, p: float | L["fro", "nuc"] | None = None) -> Any: ... # TODO: Returns `int` for <2D arrays and `intp` otherwise diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 62dc32c13d97..67d6b3591a4b 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1,3 +1,4 @@ +# mypy: disable-error-code=no-untyped-def # pyright: reportIncompatibleMethodOverride=false import datetime as dt @@ -1087,7 +1088,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @property def shape(self) -> _ShapeT_co: ... - @shape.setter # type: ignore[override] + @shape.setter def shape(self: MaskedArray[_ShapeT, Any], shape: _ShapeT, /) -> None: ... def __setmask__(self, mask: _ArrayLikeBool_co, copy: bool = False) -> None: ... @@ -1177,11 +1178,11 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload # type: ignore[override] def __add__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __add__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __add__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... @overload - def __add__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + def __add__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... @overload - def __add__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __add__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... @overload def __add__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1191,15 +1192,15 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __add__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... @overload - def __add__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ...
# type: ignore[overload-overlap] + def __add__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __add__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __add__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload - def __add__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __add__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload - def __add__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + def __add__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... @overload - def __add__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + def __add__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... @overload def __add__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... @overload @@ -1225,11 +1226,11 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload # type: ignore[override] # signature equivalent to __add__ def __radd__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __radd__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __radd__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... @overload - def __radd__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + def __radd__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... @overload - def __radd__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __radd__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... @overload def __radd__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1239,15 +1240,15 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __radd__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... @overload - def __radd__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __radd__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __radd__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __radd__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload - def __radd__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __radd__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload - def __radd__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... 
# type: ignore[overload-overlap] + def __radd__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... @overload - def __radd__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + def __radd__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... @overload def __radd__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... @overload @@ -1273,11 +1274,11 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload # type: ignore[override] def __sub__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __sub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __sub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... @overload def __sub__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __sub__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __sub__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... @overload def __sub__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1287,15 +1288,15 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __sub__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... @overload - def __sub__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __sub__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __sub__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __sub__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload - def __sub__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __sub__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload - def __sub__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + def __sub__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... @overload - def __sub__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + def __sub__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... @overload def __sub__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... @overload @@ -1311,11 +1312,11 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload # type: ignore[override] def __rsub__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __rsub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... 
# type: ignore[overload-overlap] + def __rsub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... @overload def __rsub__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __rsub__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rsub__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... @overload def __rsub__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1325,15 +1326,15 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __rsub__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... @overload - def __rsub__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rsub__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __rsub__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rsub__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload - def __rsub__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __rsub__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload - def __rsub__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + def __rsub__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... @overload - def __rsub__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + def __rsub__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... @overload def __rsub__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... @overload @@ -1349,11 +1350,11 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload # type: ignore[override] def __mul__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __mul__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __mul__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... @overload - def __mul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + def __mul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... @overload - def __mul__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __mul__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... @overload def __mul__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1363,13 +1364,13 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __mul__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... 
@overload - def __mul__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __mul__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __mul__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __mul__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload - def __mul__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __mul__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload - def __mul__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + def __mul__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... @overload def __mul__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... @overload @@ -1391,11 +1392,11 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload # type: ignore[override] # signature equivalent to __mul__ def __rmul__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __rmul__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rmul__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... @overload - def __rmul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + def __rmul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... @overload - def __rmul__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rmul__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... @overload def __rmul__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1405,13 +1406,13 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __rmul__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... @overload - def __rmul__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rmul__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __rmul__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rmul__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload - def __rmul__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __rmul__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload - def __rmul__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + def __rmul__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... 
@overload def __rmul__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... @overload @@ -1495,19 +1496,19 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload # type: ignore[override] def __floordiv__(self: _MaskedArray[_RealNumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_RealNumberT]]: ... @overload - def __floordiv__(self: _MaskedArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __floordiv__(self: _MaskedArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_RealNumberT]: ... @overload - def __floordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + def __floordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... @overload - def __floordiv__(self: _MaskedArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __floordiv__(self: _MaskedArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> _MaskedArray[_RealNumberT]: ... @overload def __floordiv__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload def __floordiv__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... @overload - def __floordiv__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __floordiv__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __floordiv__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __floordiv__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload def __floordiv__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload @@ -1525,21 +1526,21 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload # type: ignore[override] def __rfloordiv__(self: _MaskedArray[_RealNumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_RealNumberT]]: ... @overload - def __rfloordiv__(self: _MaskedArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __rfloordiv__(self: _MaskedArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_RealNumberT]: ... @overload - def __rfloordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + def __rfloordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... @overload - def __rfloordiv__(self: _MaskedArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __rfloordiv__(self: _MaskedArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> _MaskedArray[_RealNumberT]: ... @overload def __rfloordiv__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload def __rfloordiv__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... @overload - def __rfloordiv__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... 
# type: ignore[overload-overlap] + def __rfloordiv__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __rfloordiv__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rfloordiv__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload - def __rfloordiv__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __rfloordiv__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload def __rfloordiv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[int64]: ... @overload @@ -1553,11 +1554,11 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload # type: ignore[override] def __pow__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __pow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __pow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... @overload - def __pow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + def __pow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... @overload - def __pow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __pow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... @overload def __pow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1567,11 +1568,11 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __pow__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... @overload - def __pow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __pow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __pow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __pow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload - def __pow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __pow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload def __pow__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... @overload @@ -1585,11 +1586,11 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload # type: ignore[override] def __rpow__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __rpow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rpow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... 
@overload - def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... @overload - def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... @overload def __rpow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1599,11 +1600,11 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __rpow__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... @overload - def __rpow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rpow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __rpow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rpow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload - def __rpow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __rpow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload def __rpow__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... @overload @@ -1638,7 +1639,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload # (None) def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: bool | None = None) -> Self: ... @overload # (empty_sequence) - def reshape( # type: ignore[overload-overlap] # mypy false positive + def reshape( # mypy false positive self, shape: Sequence[Never], /, @@ -2284,7 +2285,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in-sync with np.ma.take @overload # type: ignore[override] - def take( # type: ignore[overload-overlap] + def take( self: _MaskedArray[_ScalarT], indices: _IntLike_co, axis: None = None, diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 70881bd15c8a..580dcd486c28 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -243,6 +243,7 @@ def diagflat(v: _ArrayLike[_ScalarT], k: int = 0) -> _MArray[_ScalarT]: ... def diagflat(v: ArrayLike, k: int = 0) -> _MArray[Incomplete]: ... # TODO: everything below +# mypy: disable-error-code=no-untyped-def def count_masked(arr, axis=None): ... def masked_all(shape, dtype=float): ... # noqa: PYI014 diff --git a/numpy/ma/mrecords.pyi b/numpy/ma/mrecords.pyi index 737a34ebdb70..f1319d4bf69d 100644 --- a/numpy/ma/mrecords.pyi +++ b/numpy/ma/mrecords.pyi @@ -18,6 +18,9 @@ __all__ = [ _ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], default=_AnyShape, covariant=True) _DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) +### +# mypy: disable-error-code=no-untyped-def + class MaskedRecords(MaskedArray[_ShapeT_co, _DTypeT_co], Generic[_ShapeT_co, _DTypeT_co]): def __new__( cls, @@ -50,10 +53,10 @@ class MaskedRecords(MaskedArray[_ShapeT_co, _DTypeT_co], Generic[_ShapeT_co, _DT def __setattr__(self, attr, val): ... def __getitem__(self, indx): ... 
def __setitem__(self, indx, value): ... - def view(self, dtype=None, type=None): ... + def view(self, dtype=None, type=None): ... # type: ignore[override] def harden_mask(self): ... def soften_mask(self): ... - def copy(self): ... + def copy(self): ... # type: ignore[override] def tolist(self, fill_value=None): ... def __reduce__(self): ... diff --git a/numpy/matlib.pyi b/numpy/matlib.pyi index d653a5a6cc98..53ac58c85d2e 100644 --- a/numpy/matlib.pyi +++ b/numpy/matlib.pyi @@ -2,7 +2,7 @@ from typing import Any, Literal, TypeAlias, TypeVar, overload import numpy as np import numpy.typing as npt -from numpy import ( # noqa: F401 +from numpy import ( # type: ignore[deprecated] # noqa: F401 False_, ScalarType, True_, diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index 1f7c342394e1..9e857fb000ef 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -66,32 +66,32 @@ class Generator: def spawn(self, n_children: int) -> list[Generator]: ... def bytes(self, length: int) -> bytes: ... @overload - def standard_normal( # type: ignore[misc] + def standard_normal( self, size: None = None, dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., out: None = None, ) -> float: ... @overload - def standard_normal( # type: ignore[misc] + def standard_normal( self, size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def standard_normal( # type: ignore[misc] + def standard_normal( self, *, out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload - def standard_normal( # type: ignore[misc] + def standard_normal( self, size: _ShapeLike | None = None, dtype: _DTypeLikeFloat32 = ..., out: NDArray[float32] | None = None, ) -> NDArray[float32]: ... @overload - def standard_normal( # type: ignore[misc] + def standard_normal( self, size: _ShapeLike | None = None, dtype: _DTypeLikeFloat64 = ..., @@ -102,7 +102,7 @@ class Generator: @overload def permutation(self, x: ArrayLike, axis: int = 0) -> NDArray[Any]: ... @overload - def standard_exponential( # type: ignore[misc] + def standard_exponential( self, size: None = None, dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., @@ -145,7 +145,7 @@ class Generator: out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload - def random( # type: ignore[misc] + def random( self, size: None = None, dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., @@ -184,7 +184,7 @@ class Generator: a: _FloatLike_co, b: _FloatLike_co, size: None = None, - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def beta( self, @@ -193,7 +193,7 @@ class Generator: size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def exponential(self, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... # type: ignore[misc] + def exponential(self, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... @overload def exponential(self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None) -> NDArray[float64]: ... @@ -523,7 +523,7 @@ class Generator: low: _FloatLike_co = 0.0, high: _FloatLike_co = 1.0, size: None = None, - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def uniform( self, @@ -537,7 +537,7 @@ class Generator: loc: _FloatLike_co = 0.0, scale: _FloatLike_co = 1.0, size: None = None, - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def normal( self, @@ -546,7 +546,7 @@ class Generator: size: _ShapeLike | None = None, ) -> NDArray[float64]: ... 
@overload - def standard_gamma( # type: ignore[misc] + def standard_gamma( self, shape: _FloatLike_co, size: None = None, @@ -585,7 +585,7 @@ class Generator: @overload def gamma( self, shape: _FloatLike_co, scale: _FloatLike_co = 1.0, size: None = None - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def gamma( self, @@ -596,7 +596,7 @@ class Generator: @overload def f( self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = None - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def f( self, @@ -611,7 +611,7 @@ class Generator: dfden: _FloatLike_co, nonc: _FloatLike_co, size: None = None, - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def noncentral_f( self, @@ -621,7 +621,7 @@ class Generator: size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def chisquare(self, df: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] + def chisquare(self, df: _FloatLike_co, size: None = None) -> float: ... @overload def chisquare( self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None @@ -629,7 +629,7 @@ class Generator: @overload def noncentral_chisquare( self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = None - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def noncentral_chisquare( self, @@ -638,7 +638,7 @@ class Generator: size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def standard_t(self, df: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] + def standard_t(self, df: _FloatLike_co, size: None = None) -> float: ... @overload def standard_t( self, df: _ArrayLikeFloat_co, size: None = None @@ -650,7 +650,7 @@ class Generator: @overload def vonmises( self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = None - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def vonmises( self, @@ -659,25 +659,25 @@ class Generator: size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def pareto(self, a: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] + def pareto(self, a: _FloatLike_co, size: None = None) -> float: ... @overload def pareto( self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def weibull(self, a: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] + def weibull(self, a: _FloatLike_co, size: None = None) -> float: ... @overload def weibull( self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def power(self, a: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] + def power(self, a: _FloatLike_co, size: None = None) -> float: ... @overload def power( self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def standard_cauchy(self, size: None = None) -> float: ... # type: ignore[misc] + def standard_cauchy(self, size: None = None) -> float: ... @overload def standard_cauchy(self, size: _ShapeLike | None = None) -> NDArray[float64]: ... @overload @@ -686,7 +686,7 @@ class Generator: loc: _FloatLike_co = 0.0, scale: _FloatLike_co = 1.0, size: None = None, - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def laplace( self, @@ -700,7 +700,7 @@ class Generator: loc: _FloatLike_co = 0.0, scale: _FloatLike_co = 1.0, size: None = None, - ) -> float: ... # type: ignore[misc] + ) -> float: ... 
@overload def gumbel( self, @@ -714,7 +714,7 @@ class Generator: loc: _FloatLike_co = 0.0, scale: _FloatLike_co = 1.0, size: None = None, - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def logistic( self, @@ -728,7 +728,7 @@ class Generator: mean: _FloatLike_co = 0.0, sigma: _FloatLike_co = 1.0, size: None = None, - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def lognormal( self, @@ -737,7 +737,7 @@ class Generator: size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def rayleigh(self, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... # type: ignore[misc] + def rayleigh(self, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... @overload def rayleigh( self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None @@ -745,7 +745,7 @@ class Generator: @overload def wald( self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = None - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def wald( self, @@ -760,7 +760,7 @@ class Generator: mode: _FloatLike_co, right: _FloatLike_co, size: None = None, - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def triangular( self, @@ -770,7 +770,7 @@ class Generator: size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def binomial(self, n: int, p: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc] + def binomial(self, n: int, p: _FloatLike_co, size: None = None) -> int: ... @overload def binomial( self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None @@ -778,7 +778,7 @@ class Generator: @overload def negative_binomial( self, n: _FloatLike_co, p: _FloatLike_co, size: None = None - ) -> int: ... # type: ignore[misc] + ) -> int: ... @overload def negative_binomial( self, @@ -787,19 +787,19 @@ class Generator: size: _ShapeLike | None = None ) -> NDArray[int64]: ... @overload - def poisson(self, lam: _FloatLike_co = 1.0, size: None = None) -> int: ... # type: ignore[misc] + def poisson(self, lam: _FloatLike_co = 1.0, size: None = None) -> int: ... @overload def poisson( self, lam: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None ) -> NDArray[int64]: ... @overload - def zipf(self, a: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc] + def zipf(self, a: _FloatLike_co, size: None = None) -> int: ... @overload def zipf( self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[int64]: ... @overload - def geometric(self, p: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc] + def geometric(self, p: _FloatLike_co, size: None = None) -> int: ... @overload def geometric( self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None @@ -807,7 +807,7 @@ class Generator: @overload def hypergeometric( self, ngood: int, nbad: int, nsample: int, size: None = None - ) -> int: ... # type: ignore[misc] + ) -> int: ... @overload def hypergeometric( self, @@ -817,7 +817,7 @@ class Generator: size: _ShapeLike | None = None, ) -> NDArray[int64]: ... @overload - def logseries(self, p: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc] + def logseries(self, p: _FloatLike_co, size: None = None) -> int: ... 
@overload def logseries( self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index c20d35193d45..c2cbb17aa98d 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -117,15 +117,15 @@ class RandomState: self, state: dict[str, Any] | tuple[str, NDArray[uint32], int, int, float] ) -> None: ... @overload - def random_sample(self, size: None = None) -> float: ... # type: ignore[misc] + def random_sample(self, size: None = None) -> float: ... @overload def random_sample(self, size: _ShapeLike) -> NDArray[float64]: ... @overload - def random(self, size: None = None) -> float: ... # type: ignore[misc] + def random(self, size: None = None) -> float: ... @overload def random(self, size: _ShapeLike) -> NDArray[float64]: ... @overload - def beta(self, a: float, b: float, size: None = None) -> float: ... # type: ignore[misc] + def beta(self, a: float, b: float, size: None = None) -> float: ... @overload def beta( self, @@ -134,29 +134,29 @@ class RandomState: size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def exponential(self, scale: float = 1.0, size: None = None) -> float: ... # type: ignore[misc] + def exponential(self, scale: float = 1.0, size: None = None) -> float: ... @overload def exponential( self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def standard_exponential(self, size: None = None) -> float: ... # type: ignore[misc] + def standard_exponential(self, size: None = None) -> float: ... @overload def standard_exponential(self, size: _ShapeLike) -> NDArray[float64]: ... @overload - def tomaxint(self, size: None = None) -> int: ... # type: ignore[misc] + def tomaxint(self, size: None = None) -> int: ... @overload # Generates long values, but stores it in a 64bit int: def tomaxint(self, size: _ShapeLike) -> NDArray[int64]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, size: None = None, ) -> int: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -164,7 +164,7 @@ class RandomState: dtype: type[bool] = ..., ) -> bool: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -172,7 +172,7 @@ class RandomState: dtype: type[np.bool] = ..., ) -> np.bool: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -180,7 +180,7 @@ class RandomState: dtype: type[int] = ..., ) -> int: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -188,7 +188,7 @@ class RandomState: dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., # noqa: E501 ) -> uint8: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -196,7 +196,7 @@ class RandomState: dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., # noqa: E501 ) -> uint16: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -204,7 +204,7 @@ class RandomState: dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., # noqa: E501 ) -> uint32: ... 
@overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -212,7 +212,7 @@ class RandomState: dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., # noqa: E501 ) -> uint: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -220,7 +220,7 @@ class RandomState: dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., # noqa: E501 ) -> ulong: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -228,7 +228,7 @@ class RandomState: dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., # noqa: E501 ) -> uint64: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -236,7 +236,7 @@ class RandomState: dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., # noqa: E501 ) -> int8: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -244,7 +244,7 @@ class RandomState: dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., # noqa: E501 ) -> int16: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -252,7 +252,7 @@ class RandomState: dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., # noqa: E501 ) -> int32: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -260,7 +260,7 @@ class RandomState: dtype: dtype[int_] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., # noqa: E501 ) -> int_: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -268,7 +268,7 @@ class RandomState: dtype: dtype[long] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., # noqa: E501 ) -> long: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -276,14 +276,14 @@ class RandomState: dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., # noqa: E501 ) -> int64: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, ) -> NDArray[long]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, @@ -291,7 +291,7 @@ class RandomState: dtype: _DTypeLikeBool = ..., ) -> NDArray[np.bool]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, @@ -299,7 +299,7 @@ class RandomState: dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., # noqa: E501 ) -> NDArray[int8]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, @@ -307,7 +307,7 @@ class RandomState: dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., # noqa: E501 ) -> NDArray[int16]: ... 
@overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, @@ -315,7 +315,7 @@ class RandomState: dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., # noqa: E501 ) -> NDArray[int32]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, @@ -323,7 +323,7 @@ class RandomState: dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] | None = ..., # noqa: E501 ) -> NDArray[int64]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, @@ -331,7 +331,7 @@ class RandomState: dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., # noqa: E501 ) -> NDArray[uint8]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, @@ -339,7 +339,7 @@ class RandomState: dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., # noqa: E501 ) -> NDArray[uint16]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, @@ -347,7 +347,7 @@ class RandomState: dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., # noqa: E501 ) -> NDArray[uint32]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, @@ -355,7 +355,7 @@ class RandomState: dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., # noqa: E501 ) -> NDArray[uint64]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, @@ -363,7 +363,7 @@ class RandomState: dtype: dtype[long] | type[int] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., # noqa: E501 ) -> NDArray[long]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, @@ -406,7 +406,7 @@ class RandomState: @overload def uniform( self, low: float = 0.0, high: float = 1.0, size: None = None - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def uniform( self, @@ -425,7 +425,7 @@ class RandomState: @overload def random_integers( self, low: int, high: int | None = None, size: None = None - ) -> int: ... # type: ignore[misc] + ) -> int: ... @overload def random_integers( self, @@ -434,15 +434,15 @@ class RandomState: size: _ShapeLike | None = None, ) -> NDArray[long]: ... @overload - def standard_normal(self, size: None = None) -> float: ... # type: ignore[misc] + def standard_normal(self, size: None = None) -> float: ... @overload - def standard_normal( # type: ignore[misc] + def standard_normal( self, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload def normal( self, loc: float = 0.0, scale: float = 1.0, size: None = None - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def normal( self, @@ -451,7 +451,7 @@ class RandomState: size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def standard_gamma( # type: ignore[misc] + def standard_gamma( self, shape: float, size: None = None, @@ -463,7 +463,7 @@ class RandomState: size: _ShapeLike | None = None, ) -> NDArray[float64]: ... 
@overload - def gamma(self, shape: float, scale: float = 1.0, size: None = None) -> float: ... # type: ignore[misc] + def gamma(self, shape: float, scale: float = 1.0, size: None = None) -> float: ... @overload def gamma( self, @@ -472,7 +472,7 @@ class RandomState: size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def f(self, dfnum: float, dfden: float, size: None = None) -> float: ... # type: ignore[misc] + def f(self, dfnum: float, dfden: float, size: None = None) -> float: ... @overload def f( self, @@ -483,7 +483,7 @@ class RandomState: @overload def noncentral_f( self, dfnum: float, dfden: float, nonc: float, size: None = None - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def noncentral_f( self, @@ -493,7 +493,7 @@ class RandomState: size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def chisquare(self, df: float, size: None = None) -> float: ... # type: ignore[misc] + def chisquare(self, df: float, size: None = None) -> float: ... @overload def chisquare( self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None @@ -501,7 +501,7 @@ class RandomState: @overload def noncentral_chisquare( self, df: float, nonc: float, size: None = None - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def noncentral_chisquare( self, @@ -510,7 +510,7 @@ class RandomState: size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def standard_t(self, df: float, size: None = None) -> float: ... # type: ignore[misc] + def standard_t(self, df: float, size: None = None) -> float: ... @overload def standard_t( self, df: _ArrayLikeFloat_co, size: None = None @@ -520,7 +520,7 @@ class RandomState: self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def vonmises(self, mu: float, kappa: float, size: None = None) -> float: ... # type: ignore[misc] + def vonmises(self, mu: float, kappa: float, size: None = None) -> float: ... @overload def vonmises( self, @@ -529,31 +529,31 @@ class RandomState: size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def pareto(self, a: float, size: None = None) -> float: ... # type: ignore[misc] + def pareto(self, a: float, size: None = None) -> float: ... @overload def pareto( self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def weibull(self, a: float, size: None = None) -> float: ... # type: ignore[misc] + def weibull(self, a: float, size: None = None) -> float: ... @overload def weibull( self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def power(self, a: float, size: None = None) -> float: ... # type: ignore[misc] + def power(self, a: float, size: None = None) -> float: ... @overload def power( self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def standard_cauchy(self, size: None = None) -> float: ... # type: ignore[misc] + def standard_cauchy(self, size: None = None) -> float: ... @overload def standard_cauchy(self, size: _ShapeLike | None = None) -> NDArray[float64]: ... @overload def laplace( self, loc: float = 0.0, scale: float = 1.0, size: None = None - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def laplace( self, @@ -564,7 +564,7 @@ class RandomState: @overload def gumbel( self, loc: float = 0.0, scale: float = 1.0, size: None = None - ) -> float: ... # type: ignore[misc] + ) -> float: ... 
@overload def gumbel( self, @@ -575,7 +575,7 @@ class RandomState: @overload def logistic( self, loc: float = 0.0, scale: float = 1.0, size: None = None - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def logistic( self, @@ -586,7 +586,7 @@ class RandomState: @overload def lognormal( self, mean: float = 0.0, sigma: float = 1.0, size: None = None - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def lognormal( self, @@ -595,13 +595,13 @@ class RandomState: size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def rayleigh(self, scale: float = 1.0, size: None = None) -> float: ... # type: ignore[misc] + def rayleigh(self, scale: float = 1.0, size: None = None) -> float: ... @overload def rayleigh( self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def wald(self, mean: float, scale: float, size: None = None) -> float: ... # type: ignore[misc] + def wald(self, mean: float, scale: float, size: None = None) -> float: ... @overload def wald( self, @@ -612,7 +612,7 @@ class RandomState: @overload def triangular( self, left: float, mode: float, right: float, size: None = None - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def triangular( self, @@ -624,7 +624,7 @@ class RandomState: @overload def binomial( self, n: int, p: float, size: None = None - ) -> int: ... # type: ignore[misc] + ) -> int: ... @overload def binomial( self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None @@ -632,7 +632,7 @@ class RandomState: @overload def negative_binomial( self, n: float, p: float, size: None = None - ) -> int: ... # type: ignore[misc] + ) -> int: ... @overload def negative_binomial( self, @@ -643,19 +643,19 @@ class RandomState: @overload def poisson( self, lam: float = 1.0, size: None = None - ) -> int: ... # type: ignore[misc] + ) -> int: ... @overload def poisson( self, lam: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None ) -> NDArray[long]: ... @overload - def zipf(self, a: float, size: None = None) -> int: ... # type: ignore[misc] + def zipf(self, a: float, size: None = None) -> int: ... @overload def zipf( self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[long]: ... @overload - def geometric(self, p: float, size: None = None) -> int: ... # type: ignore[misc] + def geometric(self, p: float, size: None = None) -> int: ... @overload def geometric( self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None @@ -663,7 +663,7 @@ class RandomState: @overload def hypergeometric( self, ngood: int, nbad: int, nsample: int, size: None = None - ) -> int: ... # type: ignore[misc] + ) -> int: ... @overload def hypergeometric( self, @@ -673,7 +673,7 @@ class RandomState: size: _ShapeLike | None = None, ) -> NDArray[long]: ... @overload - def logseries(self, p: float, size: None = None) -> int: ... # type: ignore[misc] + def logseries(self, p: float, size: None = None) -> int: ... @overload def logseries( self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None diff --git a/numpy/testing/__init__.pyi b/numpy/testing/__init__.pyi index 684a7c36adec..95db8728ef70 100644 --- a/numpy/testing/__init__.pyi +++ b/numpy/testing/__init__.pyi @@ -2,7 +2,7 @@ from unittest import TestCase from . 
import _private as _private, overrides from ._private import extbuild as extbuild -from ._private.utils import ( +from ._private.utils import ( # type: ignore[deprecated] BLAS_SUPPORTS_FPE, HAS_LAPACK64, HAS_REFCOUNT, diff --git a/numpy/typing/__init__.pyi b/numpy/typing/__init__.pyi index 7a4c7b41079c..5af10da218d9 100644 --- a/numpy/typing/__init__.pyi +++ b/numpy/typing/__init__.pyi @@ -1,3 +1,8 @@ -from numpy._typing import ArrayLike, DTypeLike, NBitBase, NDArray +from numpy._typing import ( # type: ignore[deprecated] + ArrayLike, + DTypeLike, + NBitBase, + NDArray, +) __all__ = ["ArrayLike", "DTypeLike", "NBitBase", "NDArray"] diff --git a/tools/stubtest/mypy.ini b/tools/stubtest/mypy.ini index 0a3bb07b1b94..4c75171acffe 100644 --- a/tools/stubtest/mypy.ini +++ b/tools/stubtest/mypy.ini @@ -9,7 +9,7 @@ exclude = (?x)( ) namespace_packages = False -enable_error_code = ignore-without-code, redundant-expr, truthy-bool +enable_error_code = deprecated, ignore-without-code, redundant-expr, truthy-bool warn_unreachable = False strict = True strict_bytes = True From 1bd4db906b1d05d5f5920330f8a47f3b0eb736d8 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 4 Dec 2025 14:48:30 +0100 Subject: [PATCH 0957/1018] CI: use Python 3.12 for Qemu tests (#30357) * CI: use Python 3.12 for Qemu tests * CI: use the loong64 uv fork for loongarch64 * CI: remove the ppc64le qemu jobs * CI: `--break-system-packages` when installing uv on loongarch64 * CI: attempt to fix compiling `ninja` in the loongarch64 qemu job * CI: avoid building ninja in the loongarch64 qemu job --- .github/workflows/linux_qemu.yml | 45 ++++++++++++++------------------ 1 file changed, 20 insertions(+), 25 deletions(-) diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index 268e1d916d1c..916417ebc513 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -42,22 +42,6 @@ jobs: fail-fast: false matrix: BUILD_PROP: - - [ - "ppc64le", - "powerpc64le-linux-gnu", - "ppc64le/ubuntu:22.04", - "-Dallow-noblas=true", - "test_kind or test_multiarray or test_simd or test_umath or test_ufunc", - "ppc64le" - ] - - [ - "ppc64le - baseline(Power9)", - "powerpc64le-linux-gnu", - "ppc64le/ubuntu:22.04", - "-Dallow-noblas=true -Dcpu-baseline=vsx3", - "test_kind or test_multiarray or test_simd or test_umath or test_ufunc", - "ppc64le" - ] - [ "s390x", "s390x-linux-gnu", @@ -124,7 +108,9 @@ jobs: docker run --platform=linux/${ARCH} --name the_container --interactive \ -v /:/host -v $(pwd):/numpy ${DOCKER_CONTAINER} /bin/bash -c " apt update && - apt install -y cmake git python3 python-is-python3 python3-dev python3-pip && + apt install -y cmake git curl ca-certificates && + curl -LsSf https://astral.sh/uv/install.sh | sh && + export PATH="/root/.local/bin:$PATH" && mkdir -p /lib64 && ln -s /host/lib64/ld-* /lib64/ && ln -s /host/lib/x86_64-linux-gnu /lib/x86_64-linux-gnu && rm -rf /usr/${TOOLCHAIN_NAME} && ln -s /host/usr/${TOOLCHAIN_NAME} /usr/${TOOLCHAIN_NAME} && @@ -140,8 +126,9 @@ jobs: git config --global --add safe.directory /numpy && # No need to build ninja from source, the host ninja is used for the build grep -v ninja /numpy/requirements/build_requirements.txt > /tmp/build_requirements.txt && - python -m pip install -r /tmp/build_requirements.txt && - python -m pip install pytest pytest-xdist hypothesis typing_extensions pytest-timeout && + uv venv --python 3.12 .venv && + source .venv/bin/activate && + uv pip install -r /tmp/build_requirements.txt pytest pytest-xdist 
hypothesis pytest-timeout rm -f /usr/local/bin/ninja && mkdir -p /usr/local/bin && ln -s /host/usr/bin/ninja /usr/local/bin/ninja " docker commit the_container the_container @@ -157,7 +144,7 @@ jobs: docker run --rm --platform=linux/${ARCH} -e "TERM=xterm-256color" \ -v $(pwd):/numpy -v /:/host the_container \ /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' - cd /numpy && spin build --clean -- ${MESON_OPTIONS} + source .venv/bin/activate && cd /numpy && spin build --clean -- ${MESON_OPTIONS} '" - name: Meson Log @@ -170,7 +157,7 @@ jobs: -v $(pwd):/numpy -v /:/host the_container \ /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' export F90=/usr/bin/gfortran - cd /numpy && spin test -- --timeout=600 --durations=10 -k \"${RUNTIME_TEST_FILTER}\" + source .venv/bin/activate && cd /numpy && spin test -- --timeout=600 --durations=10 -k \"${RUNTIME_TEST_FILTER}\" '" @@ -232,6 +219,8 @@ jobs: ln -s /host/lib/x86_64-linux-gnu /lib/x86_64-linux-gnu && ln -s /host/usr/${TOOLCHAIN_NAME} /usr/${TOOLCHAIN_NAME} && ln -s /host/usr/lib/gcc-cross/${TOOLCHAIN_NAME} /usr/lib/gcc/${TOOLCHAIN_NAME} && + mkdir -p /usr/libexec/gcc && + rm -rf /usr/libexec/gcc/${TOOLCHAIN_NAME} && ln -s /host/usr/libexec/gcc/${TOOLCHAIN_NAME} /usr/libexec/gcc/${TOOLCHAIN_NAME} && rm -f /usr/bin/gcc && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-gcc-14 /usr/bin/gcc && rm -f /usr/bin/g++ && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-g++-14 /usr/bin/g++ && rm -f /usr/bin/gfortran && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-gfortran-14 /usr/bin/gfortran && @@ -241,8 +230,14 @@ jobs: rm -f /usr/bin/ld.bfd && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-ld.bfd /usr/bin/ld.bfd && rm -f /usr/bin/ninja && ln -s /host/usr/bin/ninja /usr/bin/ninja && git config --global --add safe.directory /numpy && - python -m pip install --break-system-packages -r /numpy/requirements/build_requirements.txt && - python -m pip install --break-system-packages pytest pytest-xdist hypothesis typing_extensions + # No need to build ninja from source, the host ninja is used for the build + grep -v ninja /numpy/requirements/build_requirements.txt > /tmp/build_requirements.txt && + python -m pip install --break-system-packages uv --extra-index-url https://mirrors.loong64.com/pypi/simple && + export PATH="/root/.local/bin:$PATH" && + uv venv --python 3.12 .venv && + source .venv/bin/activate && + uv pip install -r /tmp/build_requirements.txt pytest pytest-xdist hypothesis && + rm -f /usr/local/bin/ninja && mkdir -p /usr/local/bin && ln -s /host/usr/bin/ninja /usr/local/bin/ninja " docker commit the_container the_container mkdir -p "~/docker_${TOOLCHAIN_NAME}" @@ -257,7 +252,7 @@ jobs: docker run --rm --platform=linux/${ARCH} -e "TERM=xterm-256color" \ -v $(pwd):/numpy -v /:/host the_container \ /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' - cd /numpy/ && spin build --clean -- ${MESON_OPTIONS} + source .venv/bin/activate && cd /numpy/ && spin build --clean -- ${MESON_OPTIONS} '" - name: Meson Log @@ -269,5 +264,5 @@ jobs: docker run --rm --platform=linux/${ARCH} -e "TERM=xterm-256color" \ -v $(pwd):/numpy -v /:/host the_container \ /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' - cd /numpy && spin test -- -k \"${RUNTIME_TEST_FILTER}\" + source .venv/bin/activate && cd /numpy && spin test -- -k \"${RUNTIME_TEST_FILTER}\" '" From eeaf04662e07cc8e2041f3e25bbd3698949a0c02 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 4 Dec 2025 14:43:24 -0700 Subject: [PATCH 0958/1018] MAINT: don't assert 
RecursionError in monster dtype test (#30375) * MAINT: don't assert RecursionError in monster dtype test * MAINT: use contextlib --- .github/workflows/macos.yml | 2 +- numpy/_core/tests/test_dtype.py | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 4fe1576c86e0..f2af68370b99 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -118,7 +118,7 @@ jobs: build_runner: - [ macos-15-intel, "macos_x86_64" ] - [ macos-14, "macos_arm64" ] - version: ["3.12", "3.14.0t"] + version: ["3.12", "3.14t"] steps: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 13cc84d61d1e..72bcc3e22962 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -1,3 +1,4 @@ +import contextlib import ctypes import gc import inspect @@ -991,7 +992,9 @@ def test_tuple_recursion(self): d = np.int32 for i in range(100000): d = (d, (1,)) - with pytest.raises(RecursionError): + # depending on OS and Python version, this might succeed + # see gh-30370 and cpython issue #142253 + with contextlib.suppress(RecursionError): np.dtype(d) @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") From 8f9a6b2ebe6038bb866ca4a0c0cf4f31729cb50f Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Fri, 5 Dec 2025 01:17:54 +0100 Subject: [PATCH 0959/1018] MAINT, TYP: bump `mypy` to `1.19.0` (#30323) * MAINT, TYP: bump `mypy` to `1.19.0` * TYP: adjust type-tests for mypy 1.19 --- environment.yml | 2 +- numpy/typing/tests/data/reveal/ufunc_config.pyi | 13 +++++++------ requirements/test_requirements.txt | 2 +- 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/environment.yml b/environment.yml index e27172e8b7b2..67b7cec26754 100644 --- a/environment.yml +++ b/environment.yml @@ -25,7 +25,7 @@ dependencies: - hypothesis # For type annotations - typing_extensions>=4.5.0 - - mypy=1.18.2 + - mypy=1.19.0 - orjson # makes mypy faster # For building docs - sphinx>=4.5.0 diff --git a/numpy/typing/tests/data/reveal/ufunc_config.pyi b/numpy/typing/tests/data/reveal/ufunc_config.pyi index 77c27eb3b4ca..f205b82b4f75 100644 --- a/numpy/typing/tests/data/reveal/ufunc_config.pyi +++ b/numpy/typing/tests/data/reveal/ufunc_config.pyi @@ -5,18 +5,19 @@ from collections.abc import Callable from typing import Any, assert_type import numpy as np +from numpy._core._ufunc_config import _ErrDict def func(a: str, b: int) -> None: ... class Write: def write(self, value: str) -> None: ... 
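# Editorial note, not part of the patch: `_ErrDict` is the dict of
# error-handling settings that `np.seterr` returns and `np.geterr` reports,
# which is what the assertions below type-check. A minimal runtime round
# trip (illustrative names; relies on the `import numpy as np` in this
# file's header):
#
#     old = np.seterr(divide="ignore")   # previous settings, an _ErrDict
#     assert np.geterr()["divide"] == "ignore"
#     np.seterr(**old)                   # restore the saved settings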
-assert_type(np.seterr(all=None), np._core._ufunc_config._ErrDict) -assert_type(np.seterr(divide="ignore"), np._core._ufunc_config._ErrDict) -assert_type(np.seterr(over="warn"), np._core._ufunc_config._ErrDict) -assert_type(np.seterr(under="call"), np._core._ufunc_config._ErrDict) -assert_type(np.seterr(invalid="raise"), np._core._ufunc_config._ErrDict) -assert_type(np.geterr(), np._core._ufunc_config._ErrDict) +assert_type(np.seterr(all=None), _ErrDict) +assert_type(np.seterr(divide="ignore"), _ErrDict) +assert_type(np.seterr(over="warn"), _ErrDict) +assert_type(np.seterr(under="call"), _ErrDict) +assert_type(np.seterr(invalid="raise"), _ErrDict) +assert_type(np.geterr(), _ErrDict) assert_type(np.setbufsize(4096), int) assert_type(np.getbufsize(), int) diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 33b5756b7362..66537dafdade 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -12,7 +12,7 @@ pytest-timeout # For testing types. Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # NOTE: Keep mypy in sync with environment.yml -mypy==1.18.2; platform_python_implementation != "PyPy" +mypy==1.19.0; platform_python_implementation != "PyPy" typing_extensions>=4.5.0 # for optional f2py encoding detection charset-normalizer From b9e3ed2e41f99ce146b16d8d1041842c38685430 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Fri, 5 Dec 2025 14:32:11 +0100 Subject: [PATCH 0960/1018] TYP: default to ``datetime64[Any]`` and ``timedelta64[Any]`` (#30364) * TYP: ``timedelta64[Any]`` by default * TYP: ``datetime64[Any]`` by default * TYP: ignore mypy deprecation warnings --- numpy/__init__.pyi | 183 ++++++++++-------- numpy/typing/tests/data/reveal/arithmetic.pyi | 21 +- .../tests/data/reveal/array_constructors.pyi | 10 +- .../typing/tests/data/reveal/arraysetops.pyi | 12 +- .../tests/data/reveal/lib_function_base.pyi | 6 +- numpy/typing/tests/data/reveal/linalg.pyi | 3 +- numpy/typing/tests/data/reveal/mod.pyi | 37 ++-- numpy/typing/tests/data/reveal/multiarray.pyi | 8 +- numpy/typing/tests/data/reveal/numeric.pyi | 8 +- 9 files changed, 163 insertions(+), 125 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 8ef7c321fa76..d837c5b960d6 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -744,7 +744,6 @@ _AnyShapeT = TypeVar( _AnyTD64Item = TypeVar("_AnyTD64Item", dt.timedelta, int, None, dt.timedelta | int | None) _AnyDT64Arg = TypeVar("_AnyDT64Arg", dt.datetime, dt.date, None) _AnyDT64Item = TypeVar("_AnyDT64Item", dt.datetime, dt.date, int, None, dt.date, int | None) -_AnyDate = TypeVar("_AnyDate", dt.date, dt.datetime) _AnyDateOrTime = TypeVar("_AnyDateOrTime", dt.date, dt.datetime, dt.timedelta) ### Type parameters (for internal use only) @@ -809,8 +808,8 @@ _FlexibleItemT_co = TypeVar( covariant=True, ) _CharacterItemT_co = TypeVar("_CharacterItemT_co", bound=_CharLike_co, default=_CharLike_co, covariant=True) -_TD64ItemT_co = TypeVar("_TD64ItemT_co", bound=dt.timedelta | int | None, default=dt.timedelta | int | None, covariant=True) -_DT64ItemT_co = TypeVar("_DT64ItemT_co", bound=dt.date | int | None, default=dt.date | int | None, covariant=True) +_TD64ItemT_co = TypeVar("_TD64ItemT_co", bound=dt.timedelta | int | None, default=Any, covariant=True) +_DT64ItemT_co = TypeVar("_DT64ItemT_co", bound=dt.date | int | None, default=Any, covariant=True) _TD64UnitT = TypeVar("_TD64UnitT", bound=_TD64Unit, default=_TD64Unit) _BoolOrIntArrayT = 
TypeVar("_BoolOrIntArrayT", bound=NDArray[integer | np.bool]) @@ -5402,7 +5401,7 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] /, ) -> timedelta64[dt.timedelta]: ... @overload - def __new__(cls, value: _ConvertibleToTD64, format: _TimeUnitSpec = ..., /) -> Self: ... + def __new__(cls, value: _ConvertibleToTD64, format: _TimeUnitSpec = ..., /) -> timedelta64: ... # inherited at runtime from `signedinteger` def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... @@ -5416,9 +5415,16 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] def __pos__(self, /) -> Self: ... def __abs__(self, /) -> Self: ... + # + @overload + def __add__(self: timedelta64[Never], x: timedelta64[int | dt.timedelta] | _IntLike_co, /) -> timedelta64: ... @overload def __add__(self: timedelta64[None], x: _TD64Like_co, /) -> timedelta64[None]: ... @overload + def __add__(self: timedelta64[int | dt.timedelta], x: timedelta64[Never], /) -> timedelta64: ... + @overload + def __add__(self, x: timedelta64[None], /) -> timedelta64[None]: ... + @overload def __add__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ... @overload def __add__(self: timedelta64[int], x: timedelta64, /) -> timedelta64[int | None]: ... @@ -5426,118 +5432,115 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] def __add__(self: timedelta64[dt.timedelta], x: _AnyDateOrTime, /) -> _AnyDateOrTime: ... @overload def __add__(self: timedelta64[_AnyTD64Item], x: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... - @overload - def __add__(self, x: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[overload-cannot-match] __radd__ = __add__ + # @overload - def __mul__(self: timedelta64[_AnyTD64Item], x: int | np.integer | np.bool, /) -> timedelta64[_AnyTD64Item]: ... - @overload - def __mul__(self: timedelta64[_AnyTD64Item], x: float | np.floating, /) -> timedelta64[_AnyTD64Item | None]: ... + def __sub__(self: timedelta64[Never], b: timedelta64[int | dt.timedelta] | _IntLike_co, /) -> timedelta64: ... @overload - def __mul__(self, x: float | np.floating | np.integer | np.bool, /) -> timedelta64: ... - __rmul__ = __mul__ - + def __sub__(self: timedelta64[None], b: _TD64Like_co, /) -> timedelta64[None]: ... @overload - def __mod__(self, x: timedelta64[L[0] | None], /) -> timedelta64[None]: ... + def __sub__(self: timedelta64[int | dt.timedelta], b: timedelta64[Never], /) -> timedelta64: ... @overload - def __mod__(self: timedelta64[None], x: timedelta64, /) -> timedelta64[None]: ... + def __sub__(self, b: timedelta64[None], /) -> timedelta64[None]: ... @overload - def __mod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... - @overload - def __mod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ... + def __sub__(self: timedelta64[int], b: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ... @overload - def __mod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... + def __sub__(self: timedelta64[int], b: timedelta64, /) -> timedelta64[int | None]: ... @overload - def __mod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... + def __sub__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> dt.timedelta: ... @overload - def __mod__(self, x: timedelta64, /) -> timedelta64: ... 
+ def __sub__(self: timedelta64[_AnyTD64Item], b: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... - # NOTE: The L[0] makes __mod__ non-commutative, which the first two overloads - # reflect. However, mypy does not seem to like this, so we ignore the errors. + # NOTE: subtraction is not commutative, so __rsub__ differs from __sub__. + # This confuses mypy, so we ignore the [misc] errors it reports. @overload - def __rmod__(self, x: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[misc] + def __rsub__(self: timedelta64[Never], a: timedelta64[int | dt.timedelta] | _IntLike_co, /) -> timedelta64: ... @overload - def __rmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> timedelta64[None]: ... + def __rsub__(self: timedelta64[None], a: _TD64Like_co, /) -> timedelta64[None]: ... @overload - def __rmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... # type: ignore[misc] + def __rsub__(self: timedelta64[dt.timedelta], a: _AnyDateOrTime, /) -> _AnyDateOrTime: ... @overload - def __rmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ... # type: ignore[misc] + def __rsub__(self: timedelta64[dt.timedelta], a: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item]: ... @overload - def __rmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... + def __rsub__(self: timedelta64[_AnyTD64Item], a: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... # type: ignore[misc] @overload - def __rmod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... # type: ignore[misc] + def __rsub__(self, a: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[overload-cannot-match] @overload - def __rmod__(self, x: timedelta64, /) -> timedelta64: ... # type: ignore[misc] + def __rsub__(self, a: datetime64[None], /) -> datetime64[None]: ... - # keep in sync with __mod__ @overload - def __divmod__(self, x: timedelta64[L[0] | None], /) -> tuple[int64, timedelta64[None]]: ... + def __mul__(self: timedelta64[Never], x: _FloatLike_co, /) -> timedelta64: ... @overload - def __divmod__(self: timedelta64[None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... + def __mul__(self: timedelta64[None], x: _FloatLike_co, /) -> timedelta64[None]: ... @overload - def __divmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... + def __mul__(self, x: _IntLike_co, /) -> Self: ... @overload - def __divmod__( - self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], / - ) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... - @overload - def __divmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... + def __mul__(self, x: float | np.floating, /) -> timedelta64[_TD64ItemT_co | None]: ... @overload - def __divmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... - @overload - def __divmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... + def __mul__(self, x: _FloatLike_co, /) -> timedelta64: ... + __rmul__ = __mul__ - # keep in sync with __rmod__ + # keep in sync with __divmod__ @overload - def __rdivmod__(self, x: timedelta64[None], /) -> tuple[int64, timedelta64[None]]: ... # type: ignore[misc] + def __mod__(self: timedelta64[Never], x: timedelta64[dt.timedelta], /) -> timedelta64: ... 
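    # Editorial sketch, not part of the patch: the runtime behaviour these
    # `__mod__` overloads encode. NaT operands propagate through the
    # remainder, which is why the NaT-typed overloads return
    # `timedelta64[None]`:
    #
    #     >>> import numpy as np
    #     >>> np.timedelta64(7, "s") % np.timedelta64(3, "s")
    #     np.timedelta64(1,'s')
    #     >>> np.timedelta64("NaT", "s") % np.timedelta64(3, "s")
    #     np.timedelta64('NaT','s')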
@overload - def __rdivmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... + def __mod__(self: timedelta64[int | dt.timedelta], x: timedelta64[Never], /) -> timedelta64: ... @overload - def __rdivmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... # type: ignore[misc] + def __mod__(self, x: timedelta64[L[0] | None], /) -> timedelta64[None]: ... @overload - def __rdivmod__( # type: ignore[misc] - self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], / - ) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... + def __mod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... @overload - def __rdivmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... + def __mod__(self: timedelta64[None], x: timedelta64, /) -> timedelta64[None]: ... @overload - def __rdivmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... # type: ignore[misc] + def __mod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... @overload - def __rdivmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... # type: ignore[misc] - + def __mod__(self: timedelta64[dt.timedelta], x: timedelta64[int], /) -> timedelta64[int | None]: ... @overload - def __sub__(self: timedelta64[None], b: _TD64Like_co, /) -> timedelta64[None]: ... + def __mod__(self: timedelta64[dt.timedelta], x: timedelta64[dt.timedelta], /) -> timedelta64[dt.timedelta | None]: ... @overload - def __sub__(self: timedelta64[int], b: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ... + def __mod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... @overload - def __sub__(self: timedelta64[int], b: timedelta64, /) -> timedelta64[int | None]: ... + def __mod__(self, x: timedelta64, /) -> timedelta64: ... + + # keep in sync with __rdivmod__ + def __rmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... + + # keep in sync with __mod__ @overload - def __sub__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> dt.timedelta: ... + def __divmod__( + self: timedelta64[Never], x: timedelta64[Never] | timedelta64[dt.timedelta], / + ) -> tuple[int64, timedelta64]: ... @overload - def __sub__(self: timedelta64[_AnyTD64Item], b: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... + def __divmod__(self: timedelta64[int | dt.timedelta], x: timedelta64[Never], /) -> tuple[int64, timedelta64]: ... @overload - def __sub__(self, b: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[overload-cannot-match] - - # NOTE: subtraction is not commutative, so __rsub__ differs from __sub__. - # This confuses mypy, so we ignore the [misc] errors it reports. + def __divmod__(self, x: timedelta64[L[0] | None], /) -> tuple[int64, timedelta64[None]]: ... @overload - def __rsub__(self: timedelta64[None], a: _TD64Like_co, /) -> timedelta64[None]: ... + def __divmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... @overload - def __rsub__(self: timedelta64[dt.timedelta], a: _AnyDateOrTime, /) -> _AnyDateOrTime: ... + def __divmod__(self: timedelta64[None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... @overload - def __rsub__(self: timedelta64[dt.timedelta], a: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item]: ... 
# type: ignore[misc] + def __divmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... @overload - def __rsub__(self: timedelta64[_AnyTD64Item], a: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... # type: ignore[misc] + def __divmod__(self: timedelta64[dt.timedelta], x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... @overload - def __rsub__(self, a: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[overload-cannot-match] + def __divmod__( + self: timedelta64[dt.timedelta], x: timedelta64[dt.timedelta], / + ) -> tuple[int64, timedelta64[dt.timedelta | None]]: ... + @overload + def __divmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... @overload - def __rsub__(self, a: datetime64[None], /) -> datetime64[None]: ... # type: ignore[misc] + def __divmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... + # keep in sync with __rmod__ + def __rdivmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... + + @overload + def __truediv__(self, b: timedelta64, /) -> float64: ... @overload def __truediv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> float: ... @overload - def __truediv__(self, b: timedelta64, /) -> float64: ... + def __truediv__(self: timedelta64[Never], b: float | floating | integer, /) -> timedelta64: ... @overload def __truediv__(self: timedelta64[_AnyTD64Item], b: int | integer, /) -> timedelta64[_AnyTD64Item]: ... @overload @@ -5545,24 +5548,26 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __truediv__(self, b: float | floating | integer, /) -> timedelta64: ... - @overload - def __rtruediv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> float: ... @overload def __rtruediv__(self, a: timedelta64, /) -> float64: ... + @overload + def __rtruediv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> float: ... + @overload + def __floordiv__(self, b: timedelta64, /) -> int64: ... @overload def __floordiv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> int: ... @overload - def __floordiv__(self, b: timedelta64, /) -> int64: ... + def __floordiv__(self: timedelta64[Never], b: float | floating | integer, /) -> timedelta64: ... @overload def __floordiv__(self: timedelta64[_AnyTD64Item], b: int | integer, /) -> timedelta64[_AnyTD64Item]: ... @overload def __floordiv__(self: timedelta64[_AnyTD64Item], b: float | floating, /) -> timedelta64[_AnyTD64Item | None]: ... - @overload - def __rfloordiv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> int: ... @overload def __rfloordiv__(self, a: timedelta64, /) -> int64: ... + @overload + def __rfloordiv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> int: ... # comparison ops @@ -5621,13 +5626,19 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __new__(cls, value: bytes | str | dt.date | None, format: _TimeUnitSpec = ..., /) -> Self: ... + # def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... + # + @overload + def __add__(self: datetime64[Never], x: _TD64Like_co, /) -> datetime64: ... @overload - def __add__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ... + def __add__(self: datetime64[_AnyDT64Item], x: _IntLike_co, /) -> datetime64[_AnyDT64Item]: ... 
@overload def __add__(self: datetime64[None], x: _TD64Like_co, /) -> datetime64[None]: ... @overload + def __add__(self: datetime64[int | dt.datetime], x: timedelta64[Never], /) -> datetime64: ... + @overload def __add__(self: datetime64[int], x: timedelta64[int | dt.timedelta], /) -> datetime64[int]: ... @overload def __add__(self: datetime64[dt.datetime], x: timedelta64[dt.timedelta], /) -> datetime64[dt.datetime]: ... @@ -5636,15 +5647,20 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __add__(self: datetime64[dt.date], x: timedelta64[int], /) -> datetime64[int]: ... @overload - def __add__(self, x: datetime64[None], /) -> datetime64[None]: ... + def __add__(self, x: timedelta64[None], /) -> datetime64[None]: ... @overload def __add__(self, x: _TD64Like_co, /) -> datetime64: ... __radd__ = __add__ + # + @overload + def __sub__(self: datetime64[Never], x: _TD64Like_co, /) -> datetime64: ... @overload - def __sub__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ... + def __sub__(self: datetime64[Never], x: datetime64, /) -> timedelta64: ... @overload - def __sub__(self: datetime64[_AnyDate], x: _AnyDate, /) -> dt.timedelta: ... + def __sub__(self: datetime64[_AnyDT64Item], x: _IntLike_co, /) -> datetime64[_AnyDT64Item]: ... + @overload + def __sub__(self: datetime64[dt.date], x: dt.date, /) -> dt.timedelta: ... @overload def __sub__(self: datetime64[None], x: timedelta64, /) -> datetime64[None]: ... @overload @@ -5676,22 +5692,25 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): # NOTE: mypy gets confused by the non-commutativity of subtraction here @overload - def __rsub__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ... + def __rsub__(self: datetime64[Never], x: datetime64, /) -> timedelta64: ... + @overload + def __rsub__(self: datetime64[_AnyDT64Item], x: _IntLike_co, /) -> datetime64[_AnyDT64Item]: ... @overload - def __rsub__(self: datetime64[_AnyDate], x: _AnyDate, /) -> dt.timedelta: ... + def __rsub__(self: datetime64[dt.date], x: dt.date, /) -> dt.timedelta: ... @overload def __rsub__(self: datetime64[None], x: datetime64, /) -> timedelta64[None]: ... @overload def __rsub__(self: datetime64[int], x: datetime64, /) -> timedelta64[int]: ... @overload - def __rsub__(self: datetime64[dt.datetime], x: datetime64[int], /) -> timedelta64[int]: ... # type: ignore[misc] + def __rsub__(self: datetime64[dt.datetime], x: datetime64[int], /) -> timedelta64[int]: ... @overload def __rsub__(self: datetime64[dt.datetime], x: datetime64[dt.date], /) -> timedelta64[dt.timedelta]: ... @overload - def __rsub__(self, x: datetime64[None], /) -> timedelta64[None]: ... # type: ignore[misc] + def __rsub__(self, x: datetime64[None], /) -> timedelta64[None]: ... @overload - def __rsub__(self, x: datetime64, /) -> timedelta64: ... # type: ignore[misc] + def __rsub__(self, x: datetime64, /) -> timedelta64: ... + # @overload def __lt__(self, other: datetime64, /) -> bool_: ... 
@overload diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index 491bce43fdae..68fa5b5230a6 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -406,14 +406,18 @@ assert_type(M8 - M8, np.timedelta64) assert_type(M8 - i, np.datetime64) assert_type(M8 - i8, np.datetime64) -assert_type(M8_none + m8, np.datetime64[None]) assert_type(M8_none + i, np.datetime64[None]) -assert_type(M8_none + i8, np.datetime64[None]) -assert_type(M8_none - M8, np.timedelta64[None]) -assert_type(M8_none - m8, np.datetime64[None]) assert_type(M8_none - i, np.datetime64[None]) + +assert_type(M8_none + i8, np.datetime64[None]) assert_type(M8_none - i8, np.datetime64[None]) +# NOTE: Mypy incorrectly infers `timedelta64[Any]`, but pyright behaves correctly. +assert_type(M8_none + m8, np.datetime64[None]) # type: ignore[assert-type] +assert_type(M8_none - M8, np.timedelta64[None]) # type: ignore[assert-type] +# NOTE: Mypy incorrectly infers `datetime64[Any]`, but pyright behaves correctly. +assert_type(M8_none - m8, np.datetime64[None]) # type: ignore[assert-type] + assert_type(m8 + m8, np.timedelta64) assert_type(m8 + i, np.timedelta64) assert_type(m8 + i8, np.timedelta64) @@ -428,7 +432,8 @@ assert_type(m8 / f4, np.timedelta64) assert_type(m8 / m8, np.float64) assert_type(m8 // m8, np.int64) assert_type(m8 % m8, np.timedelta64) -assert_type(divmod(m8, m8), tuple[np.int64, np.timedelta64]) +# NOTE: Mypy incorrectly infers `tuple[Any, ...]`, but pyright behaves correctly. +assert_type(divmod(m8, m8), tuple[np.int64, np.timedelta64]) # type: ignore[assert-type] assert_type(m8_none + m8, np.timedelta64[None]) assert_type(m8_none + i, np.timedelta64[None]) @@ -438,10 +443,12 @@ assert_type(m8_none - i8, np.timedelta64[None]) assert_type(m8_int + i, np.timedelta64[int]) assert_type(m8_int + m8_delta, np.timedelta64[int]) -assert_type(m8_int + m8, np.timedelta64[int | None]) +assert_type(m8_int + m8, np.timedelta64) assert_type(m8_int - i, np.timedelta64[int]) assert_type(m8_int - m8_delta, np.timedelta64[int]) -assert_type(m8_int - m8, np.timedelta64[int | None]) +assert_type(m8_int - m8_int, np.timedelta64[int]) +assert_type(m8_int - m8_none, np.timedelta64[None]) +assert_type(m8_int - m8, np.timedelta64) assert_type(m8_delta + date, dt.date) assert_type(m8_delta + time, dt.datetime) diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index ba8fc4db23c9..b7daf3397f9d 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -117,17 +117,17 @@ assert_type(np.frombuffer(A, dtype="c16"), npt.NDArray[Any]) _x_bool: bool _x_int: int _x_float: float -_x_timedelta: np.timedelta64 -_x_datetime: np.datetime64 +_x_timedelta: np.timedelta64[int] +_x_datetime: np.datetime64[int] assert_type(np.arange(False, True), np.ndarray[tuple[int], np.dtype[np.int_]]) assert_type(np.arange(10), np.ndarray[tuple[int], np.dtype[np.int_]]) assert_type(np.arange(0, 10, step=2), np.ndarray[tuple[int], np.dtype[np.int_]]) assert_type(np.arange(10.0), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) assert_type(np.arange(0, stop=10.0), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) -assert_type(np.arange(_x_timedelta), np.ndarray[tuple[int], np.dtype[np.timedelta64[Any]]]) -assert_type(np.arange(0, _x_timedelta), np.ndarray[tuple[int], np.dtype[np.timedelta64[Any]]]) 
-assert_type(np.arange(_x_datetime, _x_datetime), np.ndarray[tuple[int], np.dtype[np.datetime64[Any]]]) +assert_type(np.arange(_x_timedelta), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) +assert_type(np.arange(0, _x_timedelta), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) +assert_type(np.arange(_x_datetime, _x_datetime), np.ndarray[tuple[int], np.dtype[np.datetime64]]) assert_type(np.arange(10, dtype=np.float64), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.arange(0, 10, step=2, dtype=np.int16), np.ndarray[tuple[int], np.dtype[np.int16]]) assert_type(np.arange(10, dtype=int), np.ndarray[tuple[int], np.dtype[np.int_]]) diff --git a/numpy/typing/tests/data/reveal/arraysetops.pyi b/numpy/typing/tests/data/reveal/arraysetops.pyi index 7e5ca5c5717b..36fc0603dcfd 100644 --- a/numpy/typing/tests/data/reveal/arraysetops.pyi +++ b/numpy/typing/tests/data/reveal/arraysetops.pyi @@ -23,7 +23,8 @@ assert_type(np.ediff1d(AR_O), npt.NDArray[np.object_]) assert_type(np.ediff1d(AR_LIKE_f8, to_begin=[1, 1.5]), npt.NDArray[Any]) assert_type(np.intersect1d(AR_i8, AR_i8), npt.NDArray[np.int64]) -assert_type(np.intersect1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) +# NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. +assert_type(np.intersect1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) # type: ignore[assert-type] assert_type(np.intersect1d(AR_f8, AR_i8), npt.NDArray[Any]) assert_type( np.intersect1d(AR_f8, AR_f8, return_indices=True), @@ -31,7 +32,8 @@ assert_type( ) assert_type(np.setxor1d(AR_i8, AR_i8), npt.NDArray[np.int64]) -assert_type(np.setxor1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) +# NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. +assert_type(np.setxor1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) # type: ignore[assert-type] assert_type(np.setxor1d(AR_f8, AR_i8), npt.NDArray[Any]) assert_type(np.isin(AR_i8, AR_i8), npt.NDArray[np.bool]) @@ -40,11 +42,13 @@ assert_type(np.isin(AR_f8, AR_i8), npt.NDArray[np.bool]) assert_type(np.isin(AR_f8, AR_LIKE_f8, invert=True), npt.NDArray[np.bool]) assert_type(np.union1d(AR_i8, AR_i8), npt.NDArray[np.int64]) -assert_type(np.union1d(AR_M, AR_M), npt.NDArray[np.datetime64]) +# NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. +assert_type(np.union1d(AR_M, AR_M), npt.NDArray[np.datetime64]) # type: ignore[assert-type] assert_type(np.union1d(AR_f8, AR_i8), npt.NDArray[Any]) assert_type(np.setdiff1d(AR_i8, AR_i8), npt.NDArray[np.int64]) -assert_type(np.setdiff1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) +# NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. 
+assert_type(np.setdiff1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) # type: ignore[assert-type] assert_type(np.setdiff1d(AR_f8, AR_i8), npt.NDArray[Any]) assert_type(np.unique(AR_f8), npt.NDArray[np.float64]) diff --git a/numpy/typing/tests/data/reveal/lib_function_base.pyi b/numpy/typing/tests/data/reveal/lib_function_base.pyi index 345635d06327..090af934a411 100644 --- a/numpy/typing/tests/data/reveal/lib_function_base.pyi +++ b/numpy/typing/tests/data/reveal/lib_function_base.pyi @@ -270,7 +270,8 @@ assert_type(np.sinc(AR_LIKE_c16), np.ndarray[tuple[int], np.dtype[np.complex128] # median assert_type(np.median(AR_f8, keepdims=False), np.float64) assert_type(np.median(AR_c16, overwrite_input=True), np.complex128) -assert_type(np.median(AR_m), np.timedelta64) +# NOTE: Mypy incorrectly infers `Any`, but pyright behaves correctly. +assert_type(np.median(AR_m), np.timedelta64) # type: ignore[assert-type] assert_type(np.median(AR_O), Any) assert_type(np.median(AR_f8, keepdims=True), npt.NDArray[np.float64]) assert_type(np.median(AR_f8, axis=0), npt.NDArray[np.float64]) @@ -332,7 +333,8 @@ assert_type(np.trapezoid(AR_f8), np.float64 | npt.NDArray[np.float64]) assert_type(np.trapezoid(AR_f8, AR_f8), np.float64 | npt.NDArray[np.float64]) assert_type(np.trapezoid(AR_c16), np.complex128 | npt.NDArray[np.complex128]) assert_type(np.trapezoid(AR_c16, AR_c16), np.complex128 | npt.NDArray[np.complex128]) -assert_type(np.trapezoid(AR_m), np.timedelta64 | npt.NDArray[np.timedelta64]) +# NOTE: Mypy incorrectly infers `Any`, but pyright behaves correctly. +assert_type(np.trapezoid(AR_m), np.timedelta64 | npt.NDArray[np.timedelta64]) # type: ignore[assert-type] assert_type(np.trapezoid(AR_O), npt.NDArray[np.object_] | Any) assert_type(np.trapezoid(AR_O, AR_LIKE_f8), npt.NDArray[np.object_] | Any) diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 60056516def0..3f73e842aa6c 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -51,7 +51,8 @@ assert_type(np.linalg.outer(AR_f8, AR_f8), npt.NDArray[np.float64]) assert_type(np.linalg.outer(AR_c16, AR_c16), npt.NDArray[np.complex128]) assert_type(np.linalg.outer(AR_b, AR_b), npt.NDArray[np.bool]) assert_type(np.linalg.outer(AR_O, AR_O), npt.NDArray[np.object_]) -assert_type(np.linalg.outer(AR_i8, AR_m), npt.NDArray[np.timedelta64]) +# NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. +assert_type(np.linalg.outer(AR_i8, AR_m), npt.NDArray[np.timedelta64]) # type: ignore[assert-type] assert_type(np.linalg.qr(AR_i8), QRResult) assert_type(np.linalg.qr(AR_f8), QRResult) diff --git a/numpy/typing/tests/data/reveal/mod.pyi b/numpy/typing/tests/data/reveal/mod.pyi index ef07dc0c8c8a..131e9259b6b5 100644 --- a/numpy/typing/tests/data/reveal/mod.pyi +++ b/numpy/typing/tests/data/reveal/mod.pyi @@ -27,13 +27,16 @@ f: float AR_b: npt.NDArray[np.bool] AR_m: npt.NDArray[np.timedelta64] +# NOTE: the __divmod__ calls are workarounds for https://github.com/microsoft/pyright/issues/9663 + # Time structures assert_type(m % m, np.timedelta64) assert_type(m % m_nat, np.timedelta64[None]) assert_type(m % m_int0, np.timedelta64[None]) assert_type(m % m_int, np.timedelta64[int | None]) -assert_type(m_nat % m, np.timedelta64[None]) +# NOTE: Mypy incorrectly infers `timedelta64[Any]`, but pyright behaves correctly. 
+assert_type(m_nat % m, np.timedelta64[None]) # type: ignore[assert-type] assert_type(m_int % m_nat, np.timedelta64[None]) assert_type(m_int % m_int0, np.timedelta64[None]) assert_type(m_int % m_int, np.timedelta64[int | None]) @@ -46,20 +49,22 @@ assert_type(m_td % m_td, np.timedelta64[dt.timedelta | None]) assert_type(AR_m % m, npt.NDArray[np.timedelta64]) assert_type(m % AR_m, npt.NDArray[np.timedelta64]) -assert_type(divmod(m, m), tuple[np.int64, np.timedelta64]) -assert_type(divmod(m, m_nat), tuple[np.int64, np.timedelta64[None]]) -assert_type(divmod(m, m_int0), tuple[np.int64, np.timedelta64[None]]) -# workarounds for https://github.com/microsoft/pyright/issues/9663 +# +# NOTE: Mypy incorrectly infers `tuple[Any, ...]`, but pyright behaves correctly. +assert_type(m.__divmod__(m), tuple[np.int64, np.timedelta64]) # type: ignore[assert-type] +assert_type(m.__divmod__(m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(m.__divmod__(m_int0), tuple[np.int64, np.timedelta64[None]]) assert_type(m.__divmod__(m_int), tuple[np.int64, np.timedelta64[int | None]]) -assert_type(divmod(m_nat, m), tuple[np.int64, np.timedelta64[None]]) -assert_type(divmod(m_int, m_nat), tuple[np.int64, np.timedelta64[None]]) -assert_type(divmod(m_int, m_int0), tuple[np.int64, np.timedelta64[None]]) -assert_type(divmod(m_int, m_int), tuple[np.int64, np.timedelta64[int | None]]) -assert_type(divmod(m_int, m_td), tuple[np.int64, np.timedelta64[int | None]]) -assert_type(divmod(m_td, m_nat), tuple[np.int64, np.timedelta64[None]]) -assert_type(divmod(m_td, m_int0), tuple[np.int64, np.timedelta64[None]]) -assert_type(divmod(m_td, m_int), tuple[np.int64, np.timedelta64[int | None]]) -assert_type(divmod(m_td, m_td), tuple[np.int64, np.timedelta64[dt.timedelta | None]]) +# NOTE: Mypy incorrectly infers `tuple[Any, ...]`, but pyright behaves correctly. 
+assert_type(m_nat.__divmod__(m), tuple[np.int64, np.timedelta64[None]]) # type: ignore[assert-type] +assert_type(m_int.__divmod__(m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(m_int.__divmod__(m_int0), tuple[np.int64, np.timedelta64[None]]) +assert_type(m_int.__divmod__(m_int), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(m_int.__divmod__(m_td), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(m_td.__divmod__(m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(m_td.__divmod__(m_int0), tuple[np.int64, np.timedelta64[None]]) +assert_type(m_td.__divmod__(m_int), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(m_td.__divmod__(m_td), tuple[np.int64, np.timedelta64[dt.timedelta | None]]) assert_type(divmod(AR_m, m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) assert_type(divmod(m, AR_m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) @@ -77,7 +82,6 @@ assert_type(b_ % AR_b, npt.NDArray[np.int8]) assert_type(divmod(b_, b), tuple[np.int8, np.int8]) assert_type(divmod(b_, b_), tuple[np.int8, np.int8]) -# workarounds for https://github.com/microsoft/pyright/issues/9663 assert_type(b_.__divmod__(i), tuple[np.int_, np.int_]) assert_type(b_.__divmod__(f), tuple[np.float64, np.float64]) assert_type(b_.__divmod__(i8), tuple[np.int64, np.int64]) @@ -118,7 +122,6 @@ assert_type(i8 % AR_b, npt.NDArray[np.int64]) assert_type(divmod(i8, b), tuple[np.int64, np.int64]) assert_type(divmod(i8, i4), tuple[np.signedinteger, np.signedinteger]) assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) -# workarounds for https://github.com/microsoft/pyright/issues/9663 assert_type(i8.__divmod__(f), tuple[np.float64, np.float64]) assert_type(i8.__divmod__(f8), tuple[np.float64, np.float64]) assert_type(divmod(i8, f4), tuple[np.floating, np.floating]) @@ -142,7 +145,6 @@ assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) assert_type(divmod(f8, i8), tuple[np.float64, np.float64]) assert_type(divmod(i4, i8), tuple[np.signedinteger, np.signedinteger]) assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) -# workarounds for https://github.com/microsoft/pyright/issues/9663 assert_type(f4.__divmod__(i8), tuple[np.floating, np.floating]) assert_type(f4.__divmod__(i4), tuple[np.floating, np.floating]) assert_type(AR_b.__divmod__(i8), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) @@ -172,7 +174,6 @@ assert_type(AR_b % f8, npt.NDArray[np.float64]) assert_type(divmod(b, f8), tuple[np.float64, np.float64]) assert_type(divmod(f8, f8), tuple[np.float64, np.float64]) assert_type(divmod(f4, f4), tuple[np.float32, np.float32]) -# workarounds for https://github.com/microsoft/pyright/issues/9663 assert_type(f8.__rdivmod__(f), tuple[np.float64, np.float64]) assert_type(f8.__rdivmod__(f4), tuple[np.float64, np.float64]) assert_type(AR_b.__divmod__(f8), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) diff --git a/numpy/typing/tests/data/reveal/multiarray.pyi b/numpy/typing/tests/data/reveal/multiarray.pyi index 424f60df27e7..0fe907c0006b 100644 --- a/numpy/typing/tests/data/reveal/multiarray.pyi +++ b/numpy/typing/tests/data/reveal/multiarray.pyi @@ -170,9 +170,10 @@ assert_type(np.busday_count("2011-01", "2011-02"), np.int_) assert_type(np.busday_count(["2011-01"], "2011-02"), npt.NDArray[np.int_]) assert_type(np.busday_count(["2011-01"], date_scalar), npt.NDArray[np.int_]) -assert_type(np.busday_offset(M, m), np.datetime64) +# NOTE: Mypy incorrectly infers `Any`, but pyright behaves correctly. 
+assert_type(np.busday_offset(M, m), np.datetime64) # type: ignore[assert-type] +assert_type(np.busday_offset(M, 5), np.datetime64) # type: ignore[assert-type] assert_type(np.busday_offset(date_scalar, m), np.datetime64) -assert_type(np.busday_offset(M, 5), np.datetime64) assert_type(np.busday_offset(AR_M, m), npt.NDArray[np.datetime64]) assert_type(np.busday_offset(M, timedelta_seq), npt.NDArray[np.datetime64]) assert_type(np.busday_offset("2011-01", "2011-02", roll="forward"), np.datetime64) @@ -182,7 +183,8 @@ assert_type(np.is_busday("2012"), np.bool) assert_type(np.is_busday(date_scalar), np.bool) assert_type(np.is_busday(["2012"]), npt.NDArray[np.bool]) -assert_type(np.datetime_as_string(M), np.str_) +# NOTE: Mypy incorrectly infers `Any`, but pyright behaves correctly. +assert_type(np.datetime_as_string(M), np.str_) # type: ignore[assert-type] assert_type(np.datetime_as_string(AR_M), npt.NDArray[np.str_]) assert_type(np.busdaycalendar(holidays=date_seq), np.busdaycalendar) diff --git a/numpy/typing/tests/data/reveal/numeric.pyi b/numpy/typing/tests/data/reveal/numeric.pyi index 24f97d2d0784..44192fd64331 100644 --- a/numpy/typing/tests/data/reveal/numeric.pyi +++ b/numpy/typing/tests/data/reveal/numeric.pyi @@ -46,6 +46,8 @@ assert_type(np.argwhere(AR_i8), np.ndarray[tuple[int, int], np.dtype[np.intp]]) assert_type(np.flatnonzero(i8), np.ndarray[tuple[int], np.dtype[np.intp]]) assert_type(np.flatnonzero(AR_i8), np.ndarray[tuple[int], np.dtype[np.intp]]) +# NOTE: Mypy incorrectly infers `np.ndarray[Any, Any]` for timedelta64 + # correlate assert_type(np.correlate(AR_i8, AR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) assert_type(np.correlate(AR_b, AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) @@ -55,7 +57,7 @@ assert_type(np.correlate(AR_f8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float assert_type(np.correlate(AR_f8, AR_i8), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) assert_type(np.correlate(AR_c16, AR_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) assert_type(np.correlate(AR_c16, AR_f8), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) -assert_type(np.correlate(AR_m, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) +assert_type(np.correlate(AR_m, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) # type: ignore[assert-type] assert_type(np.correlate(AR_i8, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64 | Any]]) assert_type(np.correlate(AR_O, AR_O), np.ndarray[tuple[int], np.dtype[np.object_]]) assert_type(np.correlate(_to_1d_bool, _to_1d_bool), np.ndarray[tuple[int], np.dtype[np.bool]]) @@ -72,7 +74,7 @@ assert_type(np.convolve(AR_f8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float6 assert_type(np.convolve(AR_f8, AR_i8), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) assert_type(np.convolve(AR_c16, AR_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) assert_type(np.convolve(AR_c16, AR_f8), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) -assert_type(np.convolve(AR_m, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) +assert_type(np.convolve(AR_m, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) # type: ignore[assert-type] assert_type(np.convolve(AR_i8, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64 | Any]]) assert_type(np.convolve(AR_O, AR_O), np.ndarray[tuple[int], np.dtype[np.object_]]) assert_type(np.convolve(_to_1d_bool, _to_1d_bool), np.ndarray[tuple[int], np.dtype[np.bool]]) @@ -89,7 +91,7 @@ assert_type(np.outer(AR_f8, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.floa 
assert_type(np.outer(AR_f8, AR_i8), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) assert_type(np.outer(AR_c16, AR_c16), np.ndarray[tuple[int, int], np.dtype[np.complex128]]) assert_type(np.outer(AR_c16, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) -assert_type(np.outer(AR_m, AR_m), np.ndarray[tuple[int, int], np.dtype[np.timedelta64]]) +assert_type(np.outer(AR_m, AR_m), np.ndarray[tuple[int, int], np.dtype[np.timedelta64]]) # type: ignore[assert-type] assert_type(np.outer(AR_i8, AR_m), np.ndarray[tuple[int, int], np.dtype[np.timedelta64 | Any]]) assert_type(np.outer(AR_O, AR_O), np.ndarray[tuple[int, int], np.dtype[np.object_]]) assert_type(np.outer(AR_i8, AR_i8, out=_sub_nd_i8), SubClass) From 46600888986c7278189b987c974e73c2ce441c22 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 5 Dec 2025 11:53:34 -0700 Subject: [PATCH 0961/1018] MAINT: Bump github/codeql-action from 4.31.6 to 4.31.7 (#30383) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 4.31.6 to 4.31.7. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/fe4161a26a8629af62121b670040955b330f9af2...cf1bb45a277cb3c205638b2cd5c984db1c46a412) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.31.7 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 2b124d115710..d7622b4d6dff 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@fe4161a26a8629af62121b670040955b330f9af2 # v4.31.6 + uses: github/codeql-action/init@cf1bb45a277cb3c205638b2cd5c984db1c46a412 # v4.31.7 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@fe4161a26a8629af62121b670040955b330f9af2 # v4.31.6 + uses: github/codeql-action/autobuild@cf1bb45a277cb3c205638b2cd5c984db1c46a412 # v4.31.7 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@fe4161a26a8629af62121b670040955b330f9af2 # v4.31.6 + uses: github/codeql-action/analyze@cf1bb45a277cb3c205638b2cd5c984db1c46a412 # v4.31.7 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 9303e511dde8..8488e97ef116 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@fe4161a26a8629af62121b670040955b330f9af2 # v2.1.27 + uses: github/codeql-action/upload-sarif@cf1bb45a277cb3c205638b2cd5c984db1c46a412 # v2.1.27 with: sarif_file: results.sarif From 6a7996c2025b8a191f9f5777a96443d955fab6fb Mon Sep 17 00:00:00 2001 From: partev Date: Fri, 5 Dec 2025 14:26:52 -0500 Subject: [PATCH 0962/1018] DOC: make descriptions of hyperbolic functions consistent (#29297) --- numpy/_core/code_generators/ufunc_docstrings.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index e976be723287..d4707da906b2 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -332,7 +332,7 @@ def add_newdoc(place, name, doc): add_newdoc('numpy._core.umath', 'arcsinh', """ - Inverse hyperbolic sine element-wise. + Inverse hyperbolic sine, element-wise. Parameters ---------- @@ -534,7 +534,7 @@ def add_newdoc(place, name, doc): add_newdoc('numpy._core.umath', 'arctanh', """ - Inverse hyperbolic tangent element-wise. + Inverse hyperbolic tangent, element-wise. Parameters ---------- @@ -4271,7 +4271,7 @@ def add_newdoc(place, name, doc): add_newdoc('numpy._core.umath', 'tanh', """ - Compute hyperbolic tangent element-wise. + Hyperbolic tangent, element-wise. Equivalent to ``np.sinh(x)/np.cosh(x)`` or ``-1j * np.tan(1j*x)``. From eade98761b21776b191643994dea21411598f486 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 27 Nov 2025 23:00:27 +0100 Subject: [PATCH 0963/1018] TYP: PEP 695 Python 3.12 syntax --- numpy/__init__.pyi | 1230 +++++++------- numpy/_array_api_info.pyi | 76 +- numpy/_core/_asarray.pyi | 16 +- numpy/_core/_dtype.pyi | 10 +- numpy/_core/_exceptions.pyi | 14 +- numpy/_core/_internal.pyi | 17 +- numpy/_core/_methods.pyi | 4 +- numpy/_core/_type_aliases.pyi | 4 +- numpy/_core/_ufunc_config.pyi | 10 +- numpy/_core/arrayprint.pyi | 22 +- numpy/_core/defchararray.pyi | 22 +- numpy/_core/einsumfunc.pyi | 39 +- numpy/_core/fromnumeric.pyi | 595 ++++--- numpy/_core/function_base.pyi | 48 +- numpy/_core/multiarray.pyi | 384 +++-- numpy/_core/numeric.pyi | 242 ++- numpy/_core/overrides.pyi | 25 +- numpy/_core/records.pyi | 18 +- numpy/_core/shape_base.pyi | 101 +- numpy/_core/strings.pyi | 8 +- numpy/_core/umath.pyi | 6 +- numpy/_typing/__init__.py | 18 +- numpy/_typing/_array_like.py | 106 +- numpy/_typing/_char_codes.py | 122 +- numpy/_typing/_dtype_like.py | 71 +- numpy/_typing/_nbit.py | 24 +- numpy/_typing/_nbit_base.py | 9 +- numpy/_typing/_nbit_base.pyi | 2 +- numpy/_typing/_nested_sequence.py | 12 +- numpy/_typing/_scalars.py | 22 +- numpy/_typing/_shape.py | 8 +- numpy/_typing/_ufunc.pyi | 173 +- numpy/_utils/__init__.pyi | 11 +- numpy/_utils/_inspect.pyi | 15 +- numpy/_utils/_pep440.pyi | 11 +- numpy/ctypeslib/_ctypeslib.pyi | 55 +- numpy/dtypes.pyi | 63 +- numpy/f2py/_backends/_meson.pyi | 3 +- numpy/f2py/auxfuncs.pyi | 21 +- numpy/f2py/cfuncs.pyi | 6 +- numpy/f2py/crackfortran.pyi | 28 +- numpy/f2py/f2py2e.pyi | 8 +- numpy/f2py/rules.pyi | 23 +- numpy/f2py/symbolic.pyi | 39 +- numpy/fft/_helper.pyi | 8 +- numpy/fft/_pocketfft.pyi | 4 +- numpy/lib/_arraypad_impl.pyi | 39 +- numpy/lib/_arraysetops_impl.pyi | 184 +-- numpy/lib/_arrayterator_impl.pyi | 11 +- numpy/lib/_datasource.pyi | 4 +- numpy/lib/_format_impl.pyi | 4 +- numpy/lib/_function_base_impl.pyi | 699 ++++---- numpy/lib/_histograms_impl.pyi | 4 +- 
numpy/lib/_index_tricks_impl.pyi | 45 +- numpy/lib/_iotools.pyi | 5 +- numpy/lib/_npyio_impl.pyi | 44 +- numpy/lib/_polynomial_impl.pyi | 23 +- numpy/lib/_shape_base_impl.pyi | 99 +- numpy/lib/_stride_tricks_impl.pyi | 24 +- numpy/lib/_twodim_base_impl.pyi | 170 +- numpy/lib/_type_check_impl.pyi | 63 +- numpy/lib/_ufunclike_impl.pyi | 45 +- numpy/lib/_user_array_impl.pyi | 91 +- numpy/lib/_utils_impl.pyi | 12 +- numpy/lib/recfunctions.pyi | 186 +-- numpy/linalg/_linalg.pyi | 25 +- numpy/ma/core.pyi | 1437 ++++++++++------- numpy/ma/extras.pyi | 101 +- numpy/matlib.pyi | 23 +- numpy/matrixlib/defmatrix.pyi | 90 +- numpy/polynomial/_polybase.pyi | 33 +- numpy/polynomial/_polytypes.pyi | 88 +- numpy/polynomial/chebyshev.pyi | 38 +- numpy/polynomial/hermite.pyi | 9 +- numpy/polynomial/hermite_e.pyi | 9 +- numpy/polynomial/polyutils.pyi | 30 +- numpy/random/_common.pyi | 7 +- numpy/random/_generator.pyi | 42 +- numpy/random/_pickle.pyi | 8 +- numpy/random/bit_generator.pyi | 3 +- numpy/testing/_private/utils.pyi | 83 +- numpy/typing/mypy_plugin.py | 4 +- numpy/typing/tests/data/fail/ma.pyi | 11 +- numpy/typing/tests/data/pass/ma.py | 5 +- .../tests/data/reveal/array_constructors.pyi | 21 +- .../typing/tests/data/reveal/bitwise_ops.pyi | 6 +- numpy/typing/tests/data/reveal/char.pyi | 6 +- numpy/typing/tests/data/reveal/chararray.pyi | 6 +- numpy/typing/tests/data/reveal/dtype.pyi | 4 +- numpy/typing/tests/data/reveal/flatiter.pyi | 8 +- numpy/typing/tests/data/reveal/ma.pyi | 20 +- numpy/typing/tests/data/reveal/matrix.pyi | 4 +- numpy/typing/tests/data/reveal/multiarray.pyi | 6 +- .../tests/data/reveal/nbit_base_example.pyi | 7 +- .../data/reveal/ndarray_assignability.pyi | 51 +- .../reveal/ndarray_shape_manipulation.pyi | 8 +- .../tests/data/reveal/polynomial_polybase.pyi | 31 +- .../data/reveal/polynomial_polyutils.pyi | 14 +- .../tests/data/reveal/polynomial_series.pyi | 12 +- numpy/typing/tests/data/reveal/rec.pyi | 4 +- numpy/typing/tests/data/reveal/scalars.pyi | 18 +- numpy/typing/tests/data/reveal/strings.pyi | 6 +- numpy/typing/tests/data/reveal/testing.pyi | 5 +- .../typing/tests/data/reveal/twodim_base.pyi | 14 +- 104 files changed, 3799 insertions(+), 4003 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index d837c5b960d6..8ca91fd5b007 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1,9 +1,6 @@ # ruff: noqa: I001 import builtins -import sys -import mmap import ctypes as ct -import array as _array import datetime as dt import inspect from abc import abstractmethod @@ -142,6 +139,7 @@ from numpy._typing._extended_precision import ( from numpy._array_api_info import __array_namespace_info__ from collections.abc import ( + Buffer, Callable, Iterable, Iterator, @@ -149,19 +147,6 @@ from collections.abc import ( Sequence, ) -if sys.version_info >= (3, 12): - from collections.abc import Buffer as _SupportsBuffer -else: - _SupportsBuffer: TypeAlias = ( - bytes - | bytearray - | memoryview - | _array.array[Any] - | mmap.mmap - | NDArray[Any] - | generic - ) - from typing import ( Any, ClassVar, @@ -177,7 +162,6 @@ from typing import ( SupportsFloat, SupportsInt, SupportsIndex, - TypeAlias, TypedDict, final, overload, @@ -189,7 +173,8 @@ from typing import ( # library include `typing_extensions` stubs: # https://github.com/python/typeshed/blob/main/stdlib/typing_extensions.pyi from _typeshed import Incomplete, StrOrBytesPath, SupportsFlush, SupportsLenAndGetItem, SupportsWrite -from typing_extensions import CapsuleType, TypeVar, deprecated, override +from 
typing_extensions import CapsuleType, TypeVar, deprecated +from typing import override from numpy import ( char, @@ -724,80 +709,19 @@ __all__ = [ # noqa: RUF022 "emath", "show_config", "__version__", "__array_namespace_info__", ] # fmt: skip -### Constrained types (for internal use only) -# Only use these for functions; never as generic type parameter. - -_AnyStr = TypeVar("_AnyStr", LiteralString, str, bytes) -_AnyShapeT = TypeVar( - "_AnyShapeT", - tuple[()], # 0-d - tuple[int], # 1-d - tuple[int, int], # 2-d - tuple[int, int, int], # 3-d - tuple[int, int, int, int], # 4-d - tuple[int, int, int, int, int], # 5-d - tuple[int, int, int, int, int, int], # 6-d - tuple[int, int, int, int, int, int, int], # 7-d - tuple[int, int, int, int, int, int, int, int], # 8-d - tuple[int, ...], # N-d -) -_AnyTD64Item = TypeVar("_AnyTD64Item", dt.timedelta, int, None, dt.timedelta | int | None) -_AnyDT64Arg = TypeVar("_AnyDT64Arg", dt.datetime, dt.date, None) -_AnyDT64Item = TypeVar("_AnyDT64Item", dt.datetime, dt.date, int, None, dt.date, int | None) -_AnyDateOrTime = TypeVar("_AnyDateOrTime", dt.date, dt.datetime, dt.timedelta) - -### Type parameters (for internal use only) +### Type parameters (with defaults); for internal use only -_T = TypeVar("_T") -_T_co = TypeVar("_T_co", covariant=True) -_T_contra = TypeVar("_T_contra", contravariant=True) -_RealT_co = TypeVar("_RealT_co", covariant=True) -_ImagT_co = TypeVar("_ImagT_co", covariant=True) - -_DTypeT = TypeVar("_DTypeT", bound=dtype) -_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) -_FlexDTypeT = TypeVar("_FlexDTypeT", bound=dtype[flexible]) - -_ArrayT = TypeVar("_ArrayT", bound=ndarray) _ArrayT_co = TypeVar("_ArrayT_co", bound=ndarray, default=ndarray, covariant=True) -_BoolArrayT = TypeVar("_BoolArrayT", bound=NDArray[np.bool]) -_IntegerArrayT = TypeVar("_IntegerArrayT", bound=NDArray[integer]) -_IntegralArrayT = TypeVar("_IntegralArrayT", bound=NDArray[np.bool | integer | object_]) -_FloatingArrayT = TypeVar("_FloatingArrayT", bound=NDArray[floating]) -_FloatingTimedeltaArrayT = TypeVar("_FloatingTimedeltaArrayT", bound=NDArray[floating | timedelta64]) -_ComplexFloatingArrayT = TypeVar("_ComplexFloatingArrayT", bound=NDArray[complexfloating]) -_InexactArrayT = TypeVar("_InexactArrayT", bound=NDArray[inexact]) -_InexactTimedeltaArrayT = TypeVar("_InexactTimedeltaArrayT", bound=NDArray[inexact | timedelta64]) -_NumberArrayT = TypeVar("_NumberArrayT", bound=NDArray[number]) -_NumberCharacterArrayT = TypeVar("_NumberCharacterArrayT", bound=ndarray[Any, dtype[number | character] | dtypes.StringDType]) -_TimedeltaArrayT = TypeVar("_TimedeltaArrayT", bound=NDArray[timedelta64]) -_TimeArrayT = TypeVar("_TimeArrayT", bound=NDArray[datetime64 | timedelta64]) -_ObjectArrayT = TypeVar("_ObjectArrayT", bound=NDArray[object_]) -_BytesArrayT = TypeVar("_BytesArrayT", bound=NDArray[bytes_]) -_StringArrayT = TypeVar("_StringArrayT", bound=ndarray[Any, dtype[str_] | dtypes.StringDType]) -_RealArrayT = TypeVar("_RealArrayT", bound=NDArray[floating | integer | timedelta64 | np.bool | object_]) -_NumericArrayT = TypeVar("_NumericArrayT", bound=NDArray[number | timedelta64 | object_]) - -_ShapeT = TypeVar("_ShapeT", bound=_Shape) -_Shape1T = TypeVar("_Shape1T", bound=tuple[int, *tuple[int, ...]]) _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) -_2DShapeT_co = TypeVar("_2DShapeT_co", bound=_2D, default=_2D, covariant=True) -_1NShapeT = TypeVar("_1NShapeT", bound=tuple[L[1], *tuple[L[1], ...]]) # (1,) 
| (1, 1) | (1, 1, 1) | ... - -_ScalarT = TypeVar("_ScalarT", bound=generic) +_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) _ScalarT_co = TypeVar("_ScalarT_co", bound=generic, default=Any, covariant=True) -_NumberT = TypeVar("_NumberT", bound=number) -_InexactT = TypeVar("_InexactT", bound=inexact) -_RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) -_IntegerT = TypeVar("_IntegerT", bound=integer) -_NonObjectScalarT = TypeVar("_NonObjectScalarT", bound=np.bool | number | flexible | datetime64 | timedelta64) -_NBit = TypeVar("_NBit", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] -_NBit1 = TypeVar("_NBit1", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] -_NBit2 = TypeVar("_NBit2", bound=NBitBase, default=_NBit1) # pyright: ignore[reportDeprecated] +# intentionally invariant +_NBitT = TypeVar("_NBitT", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] +_NBitT1 = TypeVar("_NBitT1", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] +_NBitT2 = TypeVar("_NBitT2", bound=NBitBase, default=_NBitT1) # pyright: ignore[reportDeprecated] _ItemT_co = TypeVar("_ItemT_co", default=Any, covariant=True) -_BoolItemT = TypeVar("_BoolItemT", bound=builtins.bool) _BoolItemT_co = TypeVar("_BoolItemT_co", bound=builtins.bool, default=builtins.bool, covariant=True) _NumberItemT_co = TypeVar("_NumberItemT_co", bound=complex, default=int | float | complex, covariant=True) _InexactItemT_co = TypeVar("_InexactItemT_co", bound=complex, default=float | complex, covariant=True) @@ -808,71 +732,74 @@ _FlexibleItemT_co = TypeVar( covariant=True, ) _CharacterItemT_co = TypeVar("_CharacterItemT_co", bound=_CharLike_co, default=_CharLike_co, covariant=True) -_TD64ItemT_co = TypeVar("_TD64ItemT_co", bound=dt.timedelta | int | None, default=Any, covariant=True) -_DT64ItemT_co = TypeVar("_DT64ItemT_co", bound=dt.date | int | None, default=Any, covariant=True) -_TD64UnitT = TypeVar("_TD64UnitT", bound=_TD64Unit, default=_TD64Unit) -_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[integer | np.bool]) +_TD64ItemT_co = TypeVar("_TD64ItemT_co", bound=_TD64Item, default=Any, covariant=True) +_DT64ItemT_co = TypeVar("_DT64ItemT_co", bound=_DT64Item, default=Any, covariant=True) ### Type Aliases (for internal use only) -_Falsy: TypeAlias = L[False, 0] | np.bool[L[False]] -_Truthy: TypeAlias = L[True, 1] | np.bool[L[True]] +type _Falsy = L[False, 0] | np.bool[L[False]] +type _Truthy = L[True, 1] | np.bool[L[True]] + +type _1D = tuple[int] +type _2D = tuple[int, int] +type _2Tuple[T] = tuple[T, T] -_1D: TypeAlias = tuple[int] -_2D: TypeAlias = tuple[int, int] -_2Tuple: TypeAlias = tuple[_T, _T] +type _ArrayUInt_co = NDArray[unsignedinteger | bool_] +type _ArrayInt_co = NDArray[integer | bool_] +type _ArrayFloat64_co = NDArray[floating[_64Bit] | float32 | float16 | integer | bool_] +type _ArrayFloat_co = NDArray[floating | integer | bool_] +type _ArrayComplex128_co = NDArray[number[_64Bit] | number[_32Bit] | float16 | integer | bool_] +type _ArrayComplex_co = NDArray[inexact | integer | bool_] +type _ArrayNumber_co = NDArray[number | bool_] +type _ArrayTD64_co = NDArray[timedelta64 | integer | bool_] -_ArrayUInt_co: TypeAlias = NDArray[unsignedinteger | np.bool] -_ArrayInt_co: TypeAlias = NDArray[integer | np.bool] -_ArrayFloat64_co: TypeAlias = NDArray[floating[_64Bit] | float32 | float16 | integer | np.bool] -_ArrayFloat_co: TypeAlias = NDArray[floating | integer | np.bool] -_ArrayComplex128_co: TypeAlias = 
NDArray[number[_64Bit] | number[_32Bit] | float16 | integer | np.bool] -_ArrayComplex_co: TypeAlias = NDArray[inexact | integer | np.bool] -_ArrayNumber_co: TypeAlias = NDArray[number | np.bool] -_ArrayTD64_co: TypeAlias = NDArray[timedelta64 | integer | np.bool] +type _ArrayString = ndarray[_AnyShape, dtype[str_] | dtypes.StringDType] +type _ArrayNumeric = NDArray[number | timedelta64 | object_] -_Float64_co: TypeAlias = float | floating[_64Bit] | float32 | float16 | integer | np.bool -_Complex64_co: TypeAlias = number[_32Bit] | number[_16Bit] | number[_8Bit] | builtins.bool | np.bool -_Complex128_co: TypeAlias = complex | number[_64Bit] | _Complex64_co +type _ScalarNotObject = bool_ | number | flexible | datetime64 | timedelta64 -_ToIndex: TypeAlias = SupportsIndex | slice | EllipsisType | _ArrayLikeInt_co | None -_ToIndices: TypeAlias = _ToIndex | tuple[_ToIndex, ...] +type _Float64_co = float | floating[_64Bit] | float32 | float16 | integer | np.bool +type _Complex64_co = number[_32Bit] | number[_16Bit] | number[_8Bit] | builtins.bool | np.bool +type _Complex128_co = complex | number[_64Bit] | _Complex64_co -_UnsignedIntegerCType: TypeAlias = type[ +type _ToIndex = SupportsIndex | slice | EllipsisType | _ArrayLikeInt_co | None +type _ToIndices = _ToIndex | tuple[_ToIndex, ...] + +type _UnsignedIntegerCType = type[ ct.c_uint8 | ct.c_uint16 | ct.c_uint32 | ct.c_uint64 | ct.c_ushort | ct.c_uint | ct.c_ulong | ct.c_ulonglong | ct.c_size_t | ct.c_void_p ] # fmt: skip -_SignedIntegerCType: TypeAlias = type[ +type _SignedIntegerCType = type[ ct.c_int8 | ct.c_int16 | ct.c_int32 | ct.c_int64 | ct.c_short | ct.c_int | ct.c_long | ct.c_longlong | ct.c_ssize_t ] # fmt: skip -_FloatingCType: TypeAlias = type[ct.c_float | ct.c_double | ct.c_longdouble] -_IntegerCType: TypeAlias = _UnsignedIntegerCType | _SignedIntegerCType +type _FloatingCType = type[ct.c_float | ct.c_double | ct.c_longdouble] +type _IntegerCType = _UnsignedIntegerCType | _SignedIntegerCType # some commonly used builtin types that are known to result in a # `dtype[object_]`, when their *type* is passed to the `dtype` constructor # NOTE: `builtins.object` should not be included here -_BuiltinObjectLike: TypeAlias = ( +type _BuiltinObjectLike = ( slice | Decimal | Fraction | UUID | dt.date | dt.time | dt.timedelta | dt.tzinfo | tuple[Any, ...] | list[Any] | set[Any] | frozenset[Any] | dict[Any, Any] ) # fmt: skip # Introduce an alias for `dtype` to avoid naming conflicts. 
-_dtype: TypeAlias = dtype[_ScalarT] +type _dtype[ScalarT: generic] = dtype[ScalarT] -_ByteOrderChar: TypeAlias = L["<", ">", "=", "|"] +type _ByteOrderChar = L["<", ">", "=", "|"] # can be anything, is case-insensitive, and only the first character matters -_ByteOrder: TypeAlias = L[ +type _ByteOrder = L[ "S", # swap the current order (default) "<", "L", "little", # little-endian ">", "B", "big", # big endian "=", "N", "native", # native order "|", "I", # ignore ] # fmt: skip -_DTypeKind: TypeAlias = L[ +type _DTypeKind = L[ "b", # boolean "i", # signed integer "u", # unsigned integer @@ -886,7 +813,7 @@ _DTypeKind: TypeAlias = L[ "V", # void "T", # unicode-string (variable-width) ] -_DTypeChar: TypeAlias = L[ +type _DTypeChar = L[ "?", # bool "b", # byte "B", # ubyte @@ -915,7 +842,7 @@ _DTypeChar: TypeAlias = L[ "c", # bytes_ (S1) "T", # StringDType ] -_DTypeNum: TypeAlias = L[ +type _DTypeNum = L[ 0, # bool 1, # byte 2, # ubyte @@ -944,35 +871,35 @@ _DTypeNum: TypeAlias = L[ 256, # user-defined 2056, # StringDType ] -_DTypeBuiltinKind: TypeAlias = L[0, 1, 2] +type _DTypeBuiltinKind = L[0, 1, 2] -_ArrayAPIVersion: TypeAlias = L["2021.12", "2022.12", "2023.12", "2024.12"] +type _ArrayAPIVersion = L["2021.12", "2022.12", "2023.12", "2024.12"] -_CastingKind: TypeAlias = L["no", "equiv", "safe", "same_kind", "same_value", "unsafe"] +type _CastingKind = L["no", "equiv", "safe", "same_kind", "same_value", "unsafe"] -_OrderKACF: TypeAlias = L["K", "A", "C", "F"] | None -_OrderACF: TypeAlias = L["A", "C", "F"] | None -_OrderCF: TypeAlias = L["C", "F"] | None # noqa: PYI047 +type _OrderKACF = L["K", "A", "C", "F"] | None +type _OrderACF = L["A", "C", "F"] | None +type _OrderCF = L["C", "F"] | None # noqa: PYI047 -_ModeKind: TypeAlias = L["raise", "wrap", "clip"] -_PartitionKind: TypeAlias = L["introselect"] +type _ModeKind = L["raise", "wrap", "clip"] +type _PartitionKind = L["introselect"] # in practice, only the first case-insensitive character is considered (so e.g. # "QuantumSort3000" will be interpreted as quicksort). 
-_SortKind: TypeAlias = L[
+type _SortKind = L[
     "Q", "quick", "quicksort",
     "M", "merge", "mergesort",
     "H", "heap", "heapsort",
     "S", "stable", "stablesort",
-]
-_SortSide: TypeAlias = L["left", "right"]
+]  # fmt: skip
+type _SortSide = L["left", "right"]
 
-_ConvertibleToInt: TypeAlias = SupportsInt | SupportsIndex | _CharLike_co
-_ConvertibleToFloat: TypeAlias = SupportsFloat | SupportsIndex | _CharLike_co
-_ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co
-_ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None
-_ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None
+type _ConvertibleToInt = SupportsInt | SupportsIndex | _CharLike_co
+type _ConvertibleToFloat = SupportsFloat | SupportsIndex | _CharLike_co
+type _ConvertibleToComplex = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co
+type _ConvertibleToTD64 = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None
+type _ConvertibleToDT64 = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None
 
-_NDIterFlagsKind: TypeAlias = L[
+type _NDIterFlagsKind = L[
     "buffered",
     "c_index",
     "copy_if_overlap",
@@ -987,7 +914,7 @@ _NDIterFlagsKind: TypeAlias = L[
     "reduce_ok",
     "zerosize_ok",
 ]
-_NDIterFlagsOp: TypeAlias = L[
+type _NDIterFlagsOp = L[
     "aligned",
     "allocate",
     "arraymask",
@@ -1005,27 +932,30 @@ _NDIterFlagsOp: TypeAlias = L[
     "writemasked",
 ]
 
-_MemMapModeKind: TypeAlias = L[
+type _MemMapModeKind = L[
     "readonly", "r",
     "copyonwrite", "c",
     "readwrite", "r+",
     "write", "w+",
-]
+]  # fmt: skip
+
+type _DT64Item = dt.date | int | None
+type _TD64Item = dt.timedelta | int | None
 
-_DT64Date: TypeAlias = _HasDateAttributes | L["TODAY", "today", b"TODAY", b"today"]
-_DT64Now: TypeAlias = L["NOW", "now", b"NOW", b"now"]
-_NaTValue: TypeAlias = L["NAT", "NaT", "nat", b"NAT", b"NaT", b"nat"]
-
-_MonthUnit: TypeAlias = L["Y", "M", b"Y", b"M"]
-_DayUnit: TypeAlias = L["W", "D", b"W", b"D"]
-_DateUnit: TypeAlias = L[_MonthUnit, _DayUnit]
-_NativeTimeUnit: TypeAlias = L["h", "m", "s", "ms", "us", "μs", b"h", b"m", b"s", b"ms", b"us"]
-_IntTimeUnit: TypeAlias = L["ns", "ps", "fs", "as", b"ns", b"ps", b"fs", b"as"]
-_TimeUnit: TypeAlias = L[_NativeTimeUnit, _IntTimeUnit]
-_NativeTD64Unit: TypeAlias = L[_DayUnit, _NativeTimeUnit]
-_IntTD64Unit: TypeAlias = L[_MonthUnit, _IntTimeUnit]
-_TD64Unit: TypeAlias = L[_DateUnit, _TimeUnit]
-_TimeUnitSpec: TypeAlias = _TD64UnitT | tuple[_TD64UnitT, SupportsIndex]
+type _DT64Date = _HasDateAttributes | L["TODAY", "today", b"TODAY", b"today"]
+type _DT64Now = L["NOW", "now", b"NOW", b"now"]
+type _NaTValue = L["NAT", "NaT", "nat", b"NAT", b"NaT", b"nat"]
+
+type _MonthUnit = L["Y", "M", b"Y", b"M"]
+type _DayUnit = L["W", "D", b"W", b"D"]
+type _DateUnit = L[_MonthUnit, _DayUnit]
+type _NativeTimeUnit = L["h", "m", "s", "ms", "us", "μs", b"h", b"m", b"s", b"ms", b"us"]
+type _IntTimeUnit = L["ns", "ps", "fs", "as", b"ns", b"ps", b"fs", b"as"]
+type _TimeUnit = L[_NativeTimeUnit, _IntTimeUnit]
+type _NativeTD64Unit = L[_DayUnit, _NativeTimeUnit]
+type _IntTD64Unit = L[_MonthUnit, _IntTimeUnit]
+type _TD64Unit = L[_DateUnit, _TimeUnit]
+type _TimeUnitSpec[UnitT: _TD64Unit] = UnitT | tuple[UnitT, SupportsIndex]
 
 ### TypedDict's (for internal use only)
 
@@ -1070,30 +1000,30 @@ class _SupportsFileMethods(SupportsFlush, Protocol):
 class _SupportsFileMethodsRW(SupportsWrite[bytes],
_SupportsFileMethods, Protocol): ... @type_check_only -class _SupportsDLPack(Protocol[_T_contra]): - def __dlpack__(self, /, *, stream: _T_contra | None = None) -> CapsuleType: ... +class _SupportsDLPack[StreamT](Protocol): + def __dlpack__(self, /, *, stream: StreamT | None = None) -> CapsuleType: ... @type_check_only -class _HasDType(Protocol[_T_co]): +class _HasDType[DTypeT](Protocol): # DTypeT bound was intentionally left out @property - def dtype(self, /) -> _T_co: ... + def dtype(self, /) -> DTypeT: ... @type_check_only -class _HasRealAndImag(Protocol[_RealT_co, _ImagT_co]): +class _HasRealAndImag[RealT, ImagT](Protocol): @property - def real(self, /) -> _RealT_co: ... + def real(self, /) -> RealT: ... @property - def imag(self, /) -> _ImagT_co: ... + def imag(self, /) -> ImagT: ... @type_check_only -class _HasTypeWithRealAndImag(Protocol[_RealT_co, _ImagT_co]): +class _HasTypeWithRealAndImag[RealT, ImagT](Protocol): @property - def type(self, /) -> type[_HasRealAndImag[_RealT_co, _ImagT_co]]: ... + def type(self, /) -> type[_HasRealAndImag[RealT, ImagT]]: ... @type_check_only -class _HasDTypeWithRealAndImag(Protocol[_RealT_co, _ImagT_co]): +class _HasDTypeWithRealAndImag[RealT, ImagT](Protocol): @property - def dtype(self, /) -> _HasTypeWithRealAndImag[_RealT_co, _ImagT_co]: ... + def dtype(self, /) -> _HasTypeWithRealAndImag[RealT, ImagT]: ... @type_check_only class _HasDateAttributes(Protocol): @@ -1167,7 +1097,7 @@ class _DTypeMeta(type): def _legacy(cls, /) -> bool: ... @final -class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): +class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 names: tuple[builtins.str, ...] | None def __hash__(self) -> int: ... @@ -1183,16 +1113,16 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): ) -> dtype[float64]: ... # Overload for `dtype` instances, scalar types, and instances that have a - # `dtype: dtype[_ScalarT]` attribute + # `dtype: dtype[ScalarT]` attribute @overload - def __new__( + def __new__[ScalarT: generic]( cls, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], align: builtins.bool = False, copy: builtins.bool = False, *, metadata: dict[builtins.str, Any] = ..., - ) -> dtype[_ScalarT]: ... + ) -> dtype[ScalarT]: ... # Builtin types # @@ -1588,9 +1518,9 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # NOTE: In the future 1-based multiplications will also yield `flexible` dtypes @overload - def __mul__(self: _DTypeT, value: L[1], /) -> _DTypeT: ... + def __mul__[DTypeT: dtype](self: DTypeT, value: L[1], /) -> DTypeT: ... @overload - def __mul__(self: _FlexDTypeT, value: SupportsIndex, /) -> _FlexDTypeT: ... + def __mul__[FlexibleDTypeT: dtype[flexible]](self: FlexibleDTypeT, value: SupportsIndex, /) -> FlexibleDTypeT: ... @overload def __mul__(self, value: SupportsIndex, /) -> dtype[void]: ... @@ -1598,7 +1528,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # literals as of mypy 0.902. Set the return-type to `dtype` for # now for non-flexible dtypes. @overload - def __rmul__(self: _FlexDTypeT, value: SupportsIndex, /) -> _FlexDTypeT: ... + def __rmul__[FlexibleDTypeT: dtype[flexible]](self: FlexibleDTypeT, value: SupportsIndex, /) -> FlexibleDTypeT: ... @overload def __rmul__(self, value: SupportsIndex, /) -> dtype: ... @@ -1664,38 +1594,38 @@ class flatiter(Generic[_ArrayT_co]): @property def base(self, /) -> _ArrayT_co: ... @property - def coords(self: flatiter[ndarray[_ShapeT]], /) -> _ShapeT: ... 
+ def coords[ShapeT: _Shape](self: flatiter[ndarray[ShapeT]], /) -> ShapeT: ... @property def index(self, /) -> int: ... # iteration def __len__(self, /) -> int: ... def __iter__(self, /) -> Self: ... - def __next__(self: flatiter[NDArray[_ScalarT]], /) -> _ScalarT: ... + def __next__[ScalarT: generic](self: flatiter[NDArray[ScalarT]], /) -> ScalarT: ... # indexing @overload # nd: _[()] def __getitem__(self, key: tuple[()], /) -> _ArrayT_co: ... @overload # 0d; _[] - def __getitem__(self: flatiter[NDArray[_ScalarT]], key: int | integer, /) -> _ScalarT: ... + def __getitem__[ScalarT: generic](self: flatiter[NDArray[ScalarT]], key: int | integer, /) -> ScalarT: ... @overload # 1d; _[[*]], _[:], _[...] - def __getitem__( - self: flatiter[ndarray[Any, _DTypeT]], + def __getitem__[DTypeT: dtype]( + self: flatiter[ndarray[Any, DTypeT]], key: list[int] | slice | EllipsisType | flatiter[NDArray[integer]], /, - ) -> ndarray[tuple[int], _DTypeT]: ... + ) -> ndarray[tuple[int], DTypeT]: ... @overload # 2d; _[[*[*]]] - def __getitem__( - self: flatiter[ndarray[Any, _DTypeT]], + def __getitem__[DTypeT: dtype]( + self: flatiter[ndarray[Any, DTypeT]], key: list[list[int]], /, - ) -> ndarray[tuple[int, int], _DTypeT]: ... + ) -> ndarray[tuple[int, int], DTypeT]: ... @overload # ?d - def __getitem__( - self: flatiter[ndarray[Any, _DTypeT]], + def __getitem__[DTypeT: dtype]( + self: flatiter[ndarray[Any, DTypeT]], key: NDArray[integer] | _NestedSequence[int], /, - ) -> ndarray[_AnyShape, _DTypeT]: ... + ) -> ndarray[_AnyShape, DTypeT]: ... # NOTE: `__setitem__` operates via `unsafe` casting rules, and can thus accept any # type accepted by the relevant underlying `np.generic` constructor, which isn't @@ -1704,16 +1634,16 @@ class flatiter(Generic[_ArrayT_co]): # NOTE: `dtype` and `copy` are no-ops at runtime, so we don't support them here to # avoid confusion - def __array__( - self: flatiter[ndarray[Any, _DTypeT]], + def __array__[DTypeT: dtype]( + self: flatiter[ndarray[Any, DTypeT]], dtype: None = None, /, *, copy: None = None, - ) -> ndarray[tuple[int], _DTypeT]: ... + ) -> ndarray[tuple[int], DTypeT]: ... # This returns a flat copy of the underlying array, not of the iterator itself - def copy(self: flatiter[ndarray[Any, _DTypeT]], /) -> ndarray[tuple[int], _DTypeT]: ... + def copy[DTypeT: dtype](self: flatiter[ndarray[Any, DTypeT]], /) -> ndarray[tuple[int], DTypeT]: ... @type_check_only class _ArrayOrScalarCommon: @@ -1802,31 +1732,39 @@ class _ArrayOrScalarCommon: @overload # axis=index, out=None (default) def argmax(self, /, axis: SupportsIndex, out: None = None, *, keepdims: builtins.bool = False) -> Any: ... @overload # axis=index, out=ndarray - def argmax(self, /, axis: SupportsIndex | None, out: _BoolOrIntArrayT, *, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + def argmax[OutT: _ArrayInt_co]( + self, /, axis: SupportsIndex | None, out: OutT, *, keepdims: builtins.bool = False + ) -> OutT: ... @overload - def argmax(self, /, axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + def argmax[OutT: _ArrayInt_co]( + self, /, axis: SupportsIndex | None = None, *, out: OutT, keepdims: builtins.bool = False + ) -> OutT: ... @overload # axis=None (default), out=None (default), keepdims=False (default) def argmin(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ... 
@overload # axis=index, out=None (default) def argmin(self, /, axis: SupportsIndex, out: None = None, *, keepdims: builtins.bool = False) -> Any: ... @overload # axis=index, out=ndarray - def argmin(self, /, axis: SupportsIndex | None, out: _BoolOrIntArrayT, *, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + def argmin[OutT: _ArrayInt_co]( + self, /, axis: SupportsIndex | None, out: OutT, *, keepdims: builtins.bool = False + ) -> OutT: ... @overload - def argmin(self, /, axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + def argmin[OutT: _ArrayInt_co]( + self, /, axis: SupportsIndex | None = None, *, out: OutT, keepdims: builtins.bool = False + ) -> OutT: ... # Keep in sync with `MaskedArray.round` @overload # out=None (default) def round(self, /, decimals: SupportsIndex = 0, out: None = None) -> Self: ... @overload # out=ndarray - def round(self, /, decimals: SupportsIndex, out: _ArrayT) -> _ArrayT: ... + def round[ArrayT: ndarray](self, /, decimals: SupportsIndex, out: ArrayT) -> ArrayT: ... @overload - def round(self, /, decimals: SupportsIndex = 0, *, out: _ArrayT) -> _ArrayT: ... + def round[ArrayT: ndarray](self, /, decimals: SupportsIndex = 0, *, out: ArrayT) -> ArrayT: ... @overload # out=None (default) def choose(self, /, choices: ArrayLike, out: None = None, mode: _ModeKind = "raise") -> NDArray[Any]: ... @overload # out=ndarray - def choose(self, /, choices: ArrayLike, out: _ArrayT, mode: _ModeKind = "raise") -> _ArrayT: ... + def choose[ArrayT: ndarray](self, /, choices: ArrayLike, out: ArrayT, mode: _ModeKind = "raise") -> ArrayT: ... # TODO: Annotate kwargs with an unpacked `TypedDict` @overload # out: None (default) @@ -1836,36 +1774,42 @@ class _ArrayOrScalarCommon: @overload def clip(self, /, min: None = None, *, max: ArrayLike, out: None = None, **kwargs: Any) -> NDArray[Any]: ... @overload # out: ndarray - def clip(self, /, min: ArrayLike, max: ArrayLike | None, out: _ArrayT, **kwargs: Any) -> _ArrayT: ... + def clip[ArrayT: ndarray](self, /, min: ArrayLike, max: ArrayLike | None, out: ArrayT, **kwargs: Any) -> ArrayT: ... @overload - def clip(self, /, min: ArrayLike, max: ArrayLike | None = None, *, out: _ArrayT, **kwargs: Any) -> _ArrayT: ... + def clip[ArrayT: ndarray](self, /, min: ArrayLike, max: ArrayLike | None = None, *, out: ArrayT, **kwargs: Any) -> ArrayT: ... @overload - def clip(self, /, min: None, max: ArrayLike, out: _ArrayT, **kwargs: Any) -> _ArrayT: ... + def clip[ArrayT: ndarray](self, /, min: None, max: ArrayLike, out: ArrayT, **kwargs: Any) -> ArrayT: ... @overload - def clip(self, /, min: None = None, *, max: ArrayLike, out: _ArrayT, **kwargs: Any) -> _ArrayT: ... + def clip[ArrayT: ndarray](self, /, min: None = None, *, max: ArrayLike, out: ArrayT, **kwargs: Any) -> ArrayT: ... @overload def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None) -> NDArray[Any]: ... @overload - def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None, out: _ArrayT) -> _ArrayT: ... + def compress[ArrayT: ndarray](self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None, out: ArrayT) -> ArrayT: ... @overload - def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, out: _ArrayT) -> _ArrayT: ... + def compress[ArrayT: ndarray]( + self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, out: ArrayT + ) -> ArrayT: ... 
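+    # Reviewer note, sketching the conversion pattern used throughout this
+    # class (assuming the overload semantics are meant to be unchanged): the
+    # old module-level `_ArrayT = TypeVar("_ArrayT", bound=ndarray)` becomes
+    # an inline PEP 695 parameter, e.g.
+    #     def cumsum[ArrayT: ndarray](self, ..., out: ArrayT) -> ArrayT: ...
+    # so the static type of `out` is threaded through to the return type
+    # without a shared module-level TypeVar.
+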
# Keep in sync with `MaskedArray.cumprod` @overload # out: None (default) def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ... @overload # out: ndarray - def cumprod(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + def cumprod[ArrayT: ndarray](self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ... @overload - def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + def cumprod[ArrayT: ndarray]( + self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: ArrayT + ) -> ArrayT: ... # Keep in sync with `MaskedArray.cumsum` @overload # out: None (default) def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ... @overload # out: ndarray - def cumsum(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + def cumsum[ArrayT: ndarray](self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ... @overload - def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + def cumsum[ArrayT: ndarray]( + self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: ArrayT + ) -> ArrayT: ... @overload def max( @@ -1879,27 +1823,27 @@ class _ArrayOrScalarCommon: where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload - def max( + def max[ArrayT: ndarray]( self, /, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, *, keepdims: builtins.bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def max( + def max[ArrayT: ndarray]( self, /, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: builtins.bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload def min( @@ -1913,27 +1857,27 @@ class _ArrayOrScalarCommon: where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload - def min( + def min[ArrayT: ndarray]( self, /, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, *, keepdims: builtins.bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def min( + def min[ArrayT: ndarray]( self, /, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: builtins.bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload def sum( @@ -1948,29 +1892,29 @@ class _ArrayOrScalarCommon: where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload - def sum( + def sum[ArrayT: ndarray]( self, /, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, *, keepdims: builtins.bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... 
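+    # Reviewer note (a property of the existing stubs, unchanged here):
+    # `_NoValueType` is the type of the `np._NoValue` sentinel, which lets
+    # these reductions distinguish an omitted argument from an explicitly
+    # passed value such as `None`.
+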
@overload - def sum( + def sum[ArrayT: ndarray]( self, /, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: builtins.bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload def prod( @@ -1985,29 +1929,29 @@ class _ArrayOrScalarCommon: where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload - def prod( + def prod[ArrayT: ndarray]( self, /, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, *, keepdims: builtins.bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def prod( + def prod[ArrayT: ndarray]( self, /, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: builtins.bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload def mean( @@ -2020,27 +1964,27 @@ class _ArrayOrScalarCommon: where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload - def mean( + def mean[ArrayT: ndarray]( self, /, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, *, keepdims: builtins.bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def mean( + def mean[ArrayT: ndarray]( self, /, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: builtins.bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload def std( @@ -2056,31 +2000,31 @@ class _ArrayOrScalarCommon: correction: float | _NoValueType = ..., ) -> Any: ... @overload - def std( + def std[ArrayT: ndarray]( self, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, ddof: float = 0, *, keepdims: builtins.bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., correction: float | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def std( + def std[ArrayT: ndarray]( self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: builtins.bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., correction: float | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload def var( @@ -2096,31 +2040,31 @@ class _ArrayOrScalarCommon: correction: float | _NoValueType = ..., ) -> Any: ... @overload - def var( + def var[ArrayT: ndarray]( self, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, ddof: float = 0, *, keepdims: builtins.bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., correction: float | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... 
@overload - def var( + def var[ArrayT: ndarray]( self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: builtins.bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., correction: float | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] @@ -2130,12 +2074,14 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def ndim(self) -> int: ... @property def size(self) -> int: ... + @property - def real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... + def real[ScalarT: generic](self: _HasDTypeWithRealAndImag[ScalarT, object], /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @real.setter def real(self, value: ArrayLike, /) -> None: ... + @property - def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... + def imag[ScalarT: generic](self: _HasDTypeWithRealAndImag[object, ScalarT], /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @imag.setter def imag(self, value: ArrayLike, /) -> None: ... @@ -2143,21 +2089,20 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): cls, shape: _ShapeLike, dtype: DTypeLike | None = ..., - buffer: _SupportsBuffer | None = ..., + buffer: Buffer | None = ..., offset: SupportsIndex = ..., strides: _ShapeLike | None = ..., order: _OrderKACF = ..., ) -> Self: ... - if sys.version_info >= (3, 12): - def __buffer__(self, flags: int, /) -> memoryview: ... + def __buffer__(self, flags: int, /) -> memoryview: ... def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... @overload def __array__(self, dtype: None = None, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __array__(self, dtype: _DTypeT, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, _DTypeT]: ... + def __array__[DTypeT: dtype](self, dtype: DTypeT, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, DTypeT]: ... def __array_ufunc__( self, @@ -2180,13 +2125,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # grant subclasses a bit more flexibility def __array_finalize__(self, obj: NDArray[Any] | None, /) -> None: ... - def __array_wrap__( + def __array_wrap__[ShapeT: _Shape, DTypeT: dtype]( self, - array: ndarray[_ShapeT, _DTypeT], + array: ndarray[ShapeT, DTypeT], context: tuple[ufunc, tuple[Any, ...], int] | None = ..., return_scalar: builtins.bool = ..., /, - ) -> ndarray[_ShapeT, _DTypeT]: ... + ) -> ndarray[ShapeT, DTypeT]: ... # Keep in sync with `MaskedArray.__getitem__` @overload @@ -2270,7 +2215,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def flat(self) -> flatiter[Self]: ... @overload # use the same output type as that of the underlying `generic` - def item(self: NDArray[generic[_T]], i0: SupportsIndex | tuple[SupportsIndex, ...] = ..., /, *args: SupportsIndex) -> _T: ... + def item[T](self: NDArray[generic[T]], i0: SupportsIndex | tuple[SupportsIndex, ...] = ..., /, *args: SupportsIndex) -> T: ... 
@overload # special casing for `StringDType`, which has no scalar type def item( self: ndarray[Any, dtypes.StringDType], @@ -2281,15 +2226,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # keep in sync with `ma.MaskedArray.tolist` @overload # this first overload prevents mypy from over-eagerly selecting `tuple[()]` in case of `_AnyShape` - def tolist(self: ndarray[tuple[Never], dtype[generic[_T]]], /) -> Any: ... + def tolist[T](self: ndarray[tuple[Never], dtype[generic[T]]], /) -> Any: ... @overload - def tolist(self: ndarray[tuple[()], dtype[generic[_T]]], /) -> _T: ... + def tolist[T](self: ndarray[tuple[()], dtype[generic[T]]], /) -> T: ... @overload - def tolist(self: ndarray[tuple[int], dtype[generic[_T]]], /) -> list[_T]: ... + def tolist[T](self: ndarray[tuple[int], dtype[generic[T]]], /) -> list[T]: ... @overload - def tolist(self: ndarray[tuple[int, int], dtype[generic[_T]]], /) -> list[list[_T]]: ... + def tolist[T](self: ndarray[tuple[int, int], dtype[generic[T]]], /) -> list[list[T]]: ... @overload - def tolist(self: ndarray[tuple[int, int, int], dtype[generic[_T]]], /) -> list[list[list[_T]]]: ... + def tolist[T](self: ndarray[tuple[int, int, int], dtype[generic[T]]], /) -> list[list[list[T]]]: ... @overload def tolist(self, /) -> Any: ... @@ -2331,23 +2276,23 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): where: _ArrayLikeBool_co = True, ) -> np.bool | NDArray[np.bool]: ... @overload - def all( + def all[ArrayT: ndarray]( self, axis: int | tuple[int, ...] | None, - out: _ArrayT, + out: ArrayT, keepdims: SupportsIndex = False, *, where: _ArrayLikeBool_co = True, - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def all( + def all[ArrayT: ndarray]( self, axis: int | tuple[int, ...] | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: SupportsIndex = False, where: _ArrayLikeBool_co = True, - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload def any( @@ -2368,23 +2313,23 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): where: _ArrayLikeBool_co = True, ) -> np.bool | NDArray[np.bool]: ... @overload - def any( + def any[ArrayT: ndarray]( self, axis: int | tuple[int, ...] | None, - out: _ArrayT, + out: ArrayT, keepdims: SupportsIndex = False, *, where: _ArrayLikeBool_co = True, - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def any( + def any[ArrayT: ndarray]( self, axis: int | tuple[int, ...] | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: SupportsIndex = False, where: _ArrayLikeBool_co = True, - ) -> _ArrayT: ... + ) -> ArrayT: ... # @overload @@ -2441,7 +2386,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def dot(self, b: ArrayLike, /, out: None = None) -> Any: ... @overload - def dot(self, b: ArrayLike, /, out: _ArrayT) -> _ArrayT: ... + def dot[ArrayT: ndarray](self, b: ArrayLike, /, out: ArrayT) -> ArrayT: ... # `nonzero()` raises for 0d arrays/generics def nonzero(self) -> tuple[ndarray[tuple[int], np.dtype[intp]], ...]: ... @@ -2485,7 +2430,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): out: None = None, ) -> Any: ... @overload - def trace( + def trace[ArrayT: ndarray]( self, # >= 2D array /, offset: SupportsIndex = 0, @@ -2493,28 +2438,28 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): axis2: SupportsIndex = 1, dtype: DTypeLike | None = None, *, - out: _ArrayT, - ) -> _ArrayT: ... + out: ArrayT, + ) -> ArrayT: ... 
@overload - def trace( + def trace[ArrayT: ndarray]( self, # >= 2D array /, offset: SupportsIndex, axis1: SupportsIndex, axis2: SupportsIndex, dtype: DTypeLike | None, - out: _ArrayT, - ) -> _ArrayT: ... + out: ArrayT, + ) -> ArrayT: ... @overload - def take( - self: NDArray[_ScalarT], + def take[ScalarT: generic]( + self: NDArray[ScalarT], indices: _IntLike_co, /, axis: SupportsIndex | None = ..., out: None = None, mode: _ModeKind = ..., - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload def take( self, @@ -2525,24 +2470,24 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): mode: _ModeKind = ..., ) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload - def take( + def take[ArrayT: ndarray]( self, indices: _ArrayLikeInt_co, /, axis: SupportsIndex | None = ..., *, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def take( + def take[ArrayT: ndarray]( self, indices: _ArrayLikeInt_co, /, axis: SupportsIndex | None, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... # keep in sync with `ma.MaskedArray.repeat` @overload @@ -2568,14 +2513,26 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): copy: builtins.bool | None = None, ) -> ndarray[tuple[()], _DTypeT_co]: ... @overload # (() | (int) | (int, int) | ....) # up to 8-d - def reshape( + def reshape[ + AnyShapeT: ( + tuple[()], # 0d + tuple[int], # 1d + tuple[int, int], # 2d + tuple[int, int, int], # 3d + tuple[int, int, int, int], # 4d + tuple[int, int, int, int, int], # 5d + tuple[int, int, int, int, int, int], # 6d + tuple[int, int, int, int, int, int, int], # 7d + tuple[int, int, int, int, int, int, int, int], # 8d + ) + ]( self, - shape: _AnyShapeT, + shape: AnyShapeT, /, *, order: _OrderACF = "C", copy: builtins.bool | None = None, - ) -> ndarray[_AnyShapeT, _DTypeT_co]: ... + ) -> ndarray[AnyShapeT, _DTypeT_co]: ... @overload # (index) def reshape( self, @@ -2638,14 +2595,14 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): ) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload - def astype( + def astype[ScalarT: generic]( self, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = ..., casting: _CastingKind = ..., subok: builtins.bool = ..., copy: builtins.bool | _CopyMode = ..., - ) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload def astype( self, @@ -2660,26 +2617,26 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload # () def view(self, /) -> Self: ... @overload # (dtype: T) - def view(self, /, dtype: _DTypeT | _HasDType[_DTypeT]) -> ndarray[_ShapeT_co, _DTypeT]: ... + def view[DTypeT: dtype](self, /, dtype: DTypeT | _HasDType[DTypeT]) -> ndarray[_ShapeT_co, DTypeT]: ... @overload # (dtype: dtype[T]) - def view(self, /, dtype: _DTypeLike[_ScalarT]) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... + def view[ScalarT: generic](self, /, dtype: _DTypeLike[ScalarT]) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload # (type: T) - def view(self, /, *, type: type[_ArrayT]) -> _ArrayT: ... + def view[ArrayT: ndarray](self, /, *, type: type[ArrayT]) -> ArrayT: ... @overload # (_: T) - def view(self, /, dtype: type[_ArrayT]) -> _ArrayT: ... + def view[ArrayT: ndarray](self, /, dtype: type[ArrayT]) -> ArrayT: ... @overload # (dtype: ?) def view(self, /, dtype: DTypeLike) -> ndarray[_ShapeT_co, dtype]: ... 
@overload # (dtype: ?, type: T) - def view(self, /, dtype: DTypeLike, type: type[_ArrayT]) -> _ArrayT: ... + def view[ArrayT: ndarray](self, /, dtype: DTypeLike, type: type[ArrayT]) -> ArrayT: ... def setfield(self, val: ArrayLike, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> None: ... @overload - def getfield(self, /, dtype: _DTypeLike[_ScalarT], offset: SupportsIndex = 0) -> NDArray[_ScalarT]: ... + def getfield[ScalarT: generic](self, /, dtype: _DTypeLike[ScalarT], offset: SupportsIndex = 0) -> NDArray[ScalarT]: ... @overload def getfield(self, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> NDArray[Any]: ... def __index__(self: NDArray[integer], /) -> int: ... - def __complex__(self: NDArray[number | np.bool | object_], /) -> complex: ... + def __complex__(self: NDArray[bool_ | number | object_], /) -> complex: ... def __len__(self) -> int: ... def __contains__(self, value: object, /) -> builtins.bool: ... @@ -2692,11 +2649,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): self: ndarray[tuple[Never, Never, Never, Never, Never, Never, Never, Never, Never], Any], / ) -> Iterator[Any]: ... @overload # == 1-d & dtype[T \ object_] - def __iter__(self: ndarray[tuple[int], dtype[_NonObjectScalarT]], /) -> Iterator[_NonObjectScalarT]: ... + def __iter__[ScalarT: _ScalarNotObject](self: ndarray[tuple[int], dtype[ScalarT]], /) -> Iterator[ScalarT]: ... @overload # == 1-d & StringDType def __iter__(self: ndarray[tuple[int], dtypes.StringDType], /) -> Iterator[str]: ... @overload # >= 2-d - def __iter__(self: ndarray[tuple[int, int, *tuple[int, ...]], _DTypeT], /) -> Iterator[ndarray[_AnyShape, _DTypeT]]: ... + def __iter__[DTypeT: dtype]( + self: ndarray[tuple[int, int, *tuple[int, ...]], DTypeT], / + ) -> Iterator[ndarray[_AnyShape, DTypeT]]: ... @overload # ?-d def __iter__(self, /) -> Iterator[Any]: ... @@ -2710,9 +2669,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __lt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... @overload - def __lt__( - self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / - ) -> NDArray[np.bool]: ... + def __lt__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[np.bool]: ... @overload def __lt__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... @overload @@ -2728,9 +2685,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __le__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... @overload - def __le__( - self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / - ) -> NDArray[np.bool]: ... + def __le__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[np.bool]: ... @overload def __le__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... @overload @@ -2746,9 +2701,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __gt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... @overload - def __gt__( - self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / - ) -> NDArray[np.bool]: ... + def __gt__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[np.bool]: ... @overload def __gt__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... 
@overload @@ -2764,9 +2717,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __ge__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... @overload - def __ge__( - self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / - ) -> NDArray[np.bool]: ... + def __ge__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[np.bool]: ... @overload def __ge__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... @overload @@ -2776,31 +2727,33 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # TODO: Uncomment once https://github.com/python/mypy/issues/14070 is fixed # @overload - # def __abs__(self: ndarray[_ShapeT, dtypes.Complex64DType], /) -> ndarray[_ShapeT, dtypes.Float32DType]: ... + # def __abs__(self: ndarray[ShapeT, dtypes.Complex64DType], /) -> ndarray[ShapeT, dtypes.Float32DType]: ... # @overload - # def __abs__(self: ndarray[_ShapeT, dtypes.Complex128DType], /) -> ndarray[_ShapeT, dtypes.Float64DType]: ... + # def __abs__(self: ndarray[ShapeT, dtypes.Complex128DType], /) -> ndarray[ShapeT, dtypes.Float64DType]: ... # @overload - # def __abs__(self: ndarray[_ShapeT, dtypes.CLongDoubleDType], /) -> ndarray[_ShapeT, dtypes.LongDoubleDType]: ... + # def __abs__(self: ndarray[ShapeT, dtypes.CLongDoubleDType], /) -> ndarray[ShapeT, dtypes.LongDoubleDType]: ... # @overload - # def __abs__(self: ndarray[_ShapeT, dtype[complex128]], /) -> ndarray[_ShapeT, dtype[float64]]: ... + # def __abs__(self: ndarray[ShapeT, dtype[complex128]], /) -> ndarray[ShapeT, dtype[float64]]: ... @overload - def __abs__(self: ndarray[_ShapeT, dtype[complexfloating[_NBit]]], /) -> ndarray[_ShapeT, dtype[floating[_NBit]]]: ... + def __abs__[ShapeT: _Shape, NBitT: NBitBase]( + self: ndarray[ShapeT, dtype[complexfloating[NBitT]]], / + ) -> ndarray[ShapeT, dtype[floating[NBitT]]]: ... @overload - def __abs__(self: _RealArrayT, /) -> _RealArrayT: ... + def __abs__[ArrayT: NDArray[bool_ | integer | floating | timedelta64 | object_]](self: ArrayT, /) -> ArrayT: ... - def __invert__(self: _IntegralArrayT, /) -> _IntegralArrayT: ... # noqa: PYI019 - def __neg__(self: _NumericArrayT, /) -> _NumericArrayT: ... # noqa: PYI019 - def __pos__(self: _NumericArrayT, /) -> _NumericArrayT: ... # noqa: PYI019 + def __invert__[ArrayT: NDArray[bool_ | integer | object_]](self: ArrayT, /) -> ArrayT: ... # noqa: PYI019 + def __neg__[ArrayT: _ArrayNumeric](self: ArrayT, /) -> ArrayT: ... # noqa: PYI019 + def __pos__[ArrayT: _ArrayNumeric](self: ArrayT, /) -> ArrayT: ... # noqa: PYI019 # Binary ops # TODO: Support the "1d @ 1d -> scalar" case @overload - def __matmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + def __matmul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __matmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... + def __matmul__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __matmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2825,11 +2778,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __matmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
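
The `__matmul__` hunks above show the migration pattern used throughout this
patch: module-level `TypeVar`s such as `_NumberT` become PEP 695 type
parameters scoped to each overload. A minimal sketch of the two spellings
(assuming Python 3.12+ for the new syntax; `promote_old`/`promote_new` are
illustrative names, not part of the stubs):

    from typing import TypeVar

    import numpy as np
    from numpy.typing import NDArray

    # Pre-patch spelling: one shared, module-level TypeVar.
    _NumberT = TypeVar("_NumberT", bound=np.number)

    def promote_old(a: NDArray[_NumberT]) -> NDArray[_NumberT]:
        return a

    # Post-patch spelling: the parameter lives on the function itself, so no
    # module-level TypeVar declaration is needed and the bound is visible at
    # the definition site.
    def promote_new[ScalarT: np.number](a: NDArray[ScalarT]) -> NDArray[ScalarT]:
        return a

    x = np.arange(3, dtype=np.int16)
    y = promote_new(x)  # a checker infers NDArray[int16], same as promote_old

Both spellings type-check identically; the change is syntactic.
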
@overload # signature equivalent to __matmul__ - def __rmatmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + def __rmatmul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload def __rmatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __rmatmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... + def __rmatmul__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __rmatmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2854,13 +2807,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rmatmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __mod__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... + def __mod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], other: int | np.bool, / + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __mod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... + def __mod__[ScalarT: floating | integer](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload def __mod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __mod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... + def __mod__[ScalarT: floating | integer](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __mod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2879,13 +2834,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __mod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload # signature equivalent to __mod__ - def __rmod__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... + def __rmod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], other: int | np.bool, / + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __rmod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... + def __rmod__[ScalarT: floating | integer](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload def __rmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rmod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... + def __rmod__[ScalarT: floating | integer](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __rmod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2904,13 +2861,19 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rmod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __divmod__(self: NDArray[_RealNumberT], rhs: int | np.bool, /) -> _2Tuple[ndarray[_ShapeT_co, dtype[_RealNumberT]]]: ... + def __divmod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], rhs: int | np.bool, / + ) -> _2Tuple[ndarray[_ShapeT_co, dtype[ScalarT]]]: ... 
@overload - def __divmod__(self: NDArray[_RealNumberT], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ... + def __divmod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], rhs: _ArrayLikeBool_co, / + ) -> _2Tuple[NDArray[ScalarT]]: ... @overload def __divmod__(self: NDArray[np.bool], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... @overload - def __divmod__(self: NDArray[np.bool], rhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... + def __divmod__[ScalarT: floating | integer]( + self: NDArray[np.bool], rhs: _ArrayLike[ScalarT], / + ) -> _2Tuple[NDArray[ScalarT]]: ... @overload def __divmod__(self: NDArray[float64], rhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... @overload @@ -2925,13 +2888,19 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __divmod__(self: NDArray[timedelta64], rhs: _ArrayLike[timedelta64], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... @overload # signature equivalent to __divmod__ - def __rdivmod__(self: NDArray[_RealNumberT], lhs: int | np.bool, /) -> _2Tuple[ndarray[_ShapeT_co, dtype[_RealNumberT]]]: ... + def __rdivmod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], lhs: int | np.bool, / + ) -> _2Tuple[ndarray[_ShapeT_co, dtype[ScalarT]]]: ... @overload - def __rdivmod__(self: NDArray[_RealNumberT], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ... + def __rdivmod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], lhs: _ArrayLikeBool_co, / + ) -> _2Tuple[NDArray[ScalarT]]: ... @overload def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... @overload - def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... + def __rdivmod__[ScalarT: floating | integer]( + self: NDArray[np.bool], lhs: _ArrayLike[ScalarT], / + ) -> _2Tuple[NDArray[ScalarT]]: ... @overload def __rdivmod__(self: NDArray[float64], lhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... @overload @@ -2947,13 +2916,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__add__` @overload - def __add__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __add__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __add__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + def __add__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload def __add__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __add__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... + def __add__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __add__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2995,13 +2964,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__radd__` @overload # signature equivalent to __add__ - def __radd__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __radd__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... 
@overload - def __radd__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + def __radd__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload def __radd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __radd__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... + def __radd__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __radd__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3043,13 +3012,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__sub__` @overload - def __sub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __sub__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __sub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + def __sub__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload def __sub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __sub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... + def __sub__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... # type: ignore[overload-overlap] @overload def __sub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3081,13 +3050,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__rsub__` @overload - def __rsub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __rsub__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __rsub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + def __rsub__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload def __rsub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __rsub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... + def __rsub__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... # type: ignore[overload-overlap] @overload def __rsub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3119,13 +3088,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__mul__` @overload - def __mul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __mul__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __mul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + def __mul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload def __mul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __mul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... 
+ def __mul__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __mul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3161,13 +3130,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__rmul__` @overload # signature equivalent to __mul__ - def __rmul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __rmul__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __rmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + def __rmul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload def __rmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __rmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... + def __rmul__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __rmul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3265,13 +3234,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__floordiv__` @overload - def __floordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... + def __floordiv__[ScalarT: integer | floating]( + self: NDArray[ScalarT], other: int | np.bool, / + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __floordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... + def __floordiv__[ScalarT: integer | floating](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload def __floordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __floordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... + def __floordiv__[ScalarT: integer | floating](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __floordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3295,13 +3266,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__rfloordiv__` @overload - def __rfloordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... + def __rfloordiv__[ScalarT: integer | floating]( + self: NDArray[ScalarT], other: int | np.bool, / + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __rfloordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... + def __rfloordiv__[ScalarT: integer | floating](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... + def __rfloordiv__[ScalarT: integer | floating](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __rfloordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... 
@overload @@ -3323,13 +3296,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__pow__` @overload - def __pow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __pow__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __pow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ... + def __pow__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[ScalarT]: ... @overload def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... @overload - def __pow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ... + def __pow__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], mod: None = None, /) -> NDArray[ScalarT]: ... @overload def __pow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ... @overload @@ -3357,13 +3330,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__rpow__` @overload - def __rpow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __rpow__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __rpow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ... + def __rpow__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[ScalarT]: ... @overload def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... @overload - def __rpow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ... + def __rpow__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], mod: None = None, /) -> NDArray[ScalarT]: ... @overload def __rpow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ... @overload @@ -3510,136 +3483,138 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # += @overload # type: ignore[misc] - def __iadd__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __iadd__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... @overload - def __iadd__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + def __iadd__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... @overload - def __iadd__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... + def __iadd__[ArrayT: NDArray[inexact]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __iadd__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... + def __iadd__[ArrayT: NDArray[number]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __iadd__(self: _TimeArrayT, other: _ArrayLikeTD64_co, /) -> _TimeArrayT: ... + def __iadd__[ArrayT: NDArray[datetime64 | timedelta64]](self: ArrayT, other: _ArrayLikeTD64_co, /) -> ArrayT: ... 
@overload - def __iadd__(self: _BytesArrayT, other: _ArrayLikeBytes_co, /) -> _BytesArrayT: ... + def __iadd__[ArrayT: NDArray[bytes_]](self: ArrayT, other: _ArrayLikeBytes_co, /) -> ArrayT: ... @overload - def __iadd__(self: _StringArrayT, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> _StringArrayT: ... + def __iadd__[ArrayT: _ArrayString](self: ArrayT, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> ArrayT: ... @overload - def __iadd__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __iadd__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # -= @overload # type: ignore[misc] - def __isub__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + def __isub__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... @overload - def __isub__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... + def __isub__[ArrayT: NDArray[inexact]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __isub__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... + def __isub__[ArrayT: NDArray[number]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __isub__(self: _TimeArrayT, other: _ArrayLikeTD64_co, /) -> _TimeArrayT: ... + def __isub__[ArrayT: NDArray[datetime64 | timedelta64]](self: ArrayT, other: _ArrayLikeTD64_co, /) -> ArrayT: ... @overload - def __isub__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __isub__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # *= @overload # type: ignore[misc] - def __imul__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __imul__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... + @overload + def __imul__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... @overload - def __imul__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + def __imul__[ArrayT: NDArray[inexact | timedelta64]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __imul__(self: _InexactTimedeltaArrayT, other: _ArrayLikeFloat_co, /) -> _InexactTimedeltaArrayT: ... + def __imul__[ArrayT: NDArray[number | character]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __imul__(self: _NumberCharacterArrayT, other: _ArrayLikeInt_co, /) -> _NumberCharacterArrayT: ... + def __imul__[ArrayT: _ArrayString](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __imul__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __imul__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # @= @overload # type: ignore[misc] - def __imatmul__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __imatmul__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... @overload - def __imatmul__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + def __imatmul__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... @overload - def __imatmul__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... + def __imatmul__[ArrayT: NDArray[inexact]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... 
@overload - def __imatmul__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... + def __imatmul__[ArrayT: NDArray[number]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __imatmul__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __imatmul__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # **= @overload # type: ignore[misc] - def __ipow__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + def __ipow__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... @overload - def __ipow__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... + def __ipow__[ArrayT: NDArray[inexact]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __ipow__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... + def __ipow__[ArrayT: NDArray[number]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __ipow__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __ipow__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # /= @overload # type: ignore[misc] - def __itruediv__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + def __itruediv__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... @overload - def __itruediv__(self: _InexactTimedeltaArrayT, other: _ArrayLikeFloat_co, /) -> _InexactTimedeltaArrayT: ... + def __itruediv__[ArrayT: NDArray[inexact | timedelta64]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __itruediv__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __itruediv__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # //= # keep in sync with `__imod__` @overload # type: ignore[misc] - def __ifloordiv__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + def __ifloordiv__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __ifloordiv__(self: _FloatingTimedeltaArrayT, other: _ArrayLikeFloat_co, /) -> _FloatingTimedeltaArrayT: ... + def __ifloordiv__[ArrayT: NDArray[floating | timedelta64]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __ifloordiv__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __ifloordiv__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # %= # keep in sync with `__ifloordiv__` @overload # type: ignore[misc] - def __imod__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + def __imod__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __imod__(self: _FloatingArrayT, other: _ArrayLikeFloat_co, /) -> _FloatingArrayT: ... + def __imod__[ArrayT: NDArray[floating]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __imod__(self: _TimedeltaArrayT, other: _ArrayLike[timedelta64], /) -> _TimedeltaArrayT: ... + def __imod__[ArrayT: NDArray[timedelta64]](self: ArrayT, other: _ArrayLike[timedelta64], /) -> ArrayT: ... @overload - def __imod__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __imod__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... 
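
Every in-place overload above binds `ArrayT` to the concrete type of `self`
and returns it, so augmented assignment never widens an ndarray subclass. A
small runnable sketch (the `LoggingArray` subclass is hypothetical, used only
to observe the preserved type):

    import numpy as np

    class LoggingArray(np.ndarray):
        pass  # trivial subclass; real code would add behaviour here

    a = np.ones(3).view(LoggingArray)
    a += 1.0  # matches __iadd__[ArrayT: NDArray[inexact]] -> ArrayT, so a
              # checker keeps `a` as LoggingArray; at runtime the same
              # object is mutated in place
    assert type(a) is LoggingArray
    assert float(a[0]) == 2.0
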
# <<= # keep in sync with `__irshift__` @overload # type: ignore[misc] - def __ilshift__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + def __ilshift__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __ilshift__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __ilshift__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # >>= # keep in sync with `__ilshift__` @overload # type: ignore[misc] - def __irshift__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + def __irshift__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __irshift__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __irshift__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # &= # keep in sync with `__ixor__` and `__ior__` @overload # type: ignore[misc] - def __iand__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __iand__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... @overload - def __iand__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + def __iand__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __iand__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __iand__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # ^= # keep in sync with `__iand__` and `__ior__` @overload # type: ignore[misc] - def __ixor__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __ixor__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... @overload - def __ixor__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + def __ixor__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __ixor__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __ixor__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # |= # keep in sync with `__iand__` and `__ixor__` @overload # type: ignore[misc] - def __ior__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __ior__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... @overload - def __ior__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + def __ior__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __ior__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __ior__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # def __dlpack__( @@ -3667,13 +3642,12 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @abstractmethod def __new__(cls, /, *args: Any, **kwargs: Any) -> Self: ... - if sys.version_info >= (3, 12): - def __buffer__(self, flags: int, /) -> memoryview: ... + def __buffer__(self, flags: int, /) -> memoryview: ... @overload def __array__(self, dtype: None = None, /) -> ndarray[tuple[()], dtype[Self]]: ... @overload - def __array__(self, dtype: _DTypeT, /) -> ndarray[tuple[()], _DTypeT]: ... + def __array__[DTypeT: dtype](self, dtype: DTypeT, /) -> ndarray[tuple[()], DTypeT]: ... 
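
Both `generic.__array__` overloads return a zero-dimensional array, which is
exactly what converting a NumPy scalar produces at runtime:

    import numpy as np

    x = np.float64(1.5)
    a = np.asarray(x)      # the 0-d array the overloads above describe
    assert a.shape == ()   # spelled ndarray[tuple[()], dtype[float64]] in the stubs
    assert a.dtype == np.float64
    assert a[()] == 1.5    # indexing with () recovers the scalar
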
# @overload @@ -3699,37 +3673,37 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): # @overload - def __array_wrap__( + def __array_wrap__[ShapeT: _Shape, DTypeT: dtype]( self, - array: ndarray[_ShapeT, _DTypeT], + array: ndarray[ShapeT, DTypeT], context: tuple[ufunc, tuple[object, ...], int] | None, return_scalar: L[False], /, - ) -> ndarray[_ShapeT, _DTypeT]: ... + ) -> ndarray[ShapeT, DTypeT]: ... @overload - def __array_wrap__( + def __array_wrap__[ScalarT: generic]( self, - array: ndarray[tuple[()], dtype[_ScalarT]], + array: ndarray[tuple[()], dtype[ScalarT]], context: tuple[ufunc, tuple[object, ...], int] | None = None, return_scalar: L[True] = True, /, - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload - def __array_wrap__( + def __array_wrap__[ShapeT: tuple[int, *tuple[int, ...]], DTypeT: dtype]( self, - array: ndarray[_Shape1T, _DTypeT], + array: ndarray[ShapeT, DTypeT], context: tuple[ufunc, tuple[object, ...], int] | None = None, return_scalar: L[True] = True, /, - ) -> ndarray[_Shape1T, _DTypeT]: ... + ) -> ndarray[ShapeT, DTypeT]: ... @overload - def __array_wrap__( + def __array_wrap__[ShapeT: _Shape, ScalarT: generic]( self, - array: ndarray[_ShapeT, dtype[_ScalarT]], + array: ndarray[ShapeT, dtype[ScalarT]], context: tuple[ufunc, tuple[object, ...], int] | None = None, return_scalar: L[True] = True, /, - ) -> _ScalarT | ndarray[_ShapeT, dtype[_ScalarT]]: ... + ) -> ScalarT | ndarray[ShapeT, dtype[ScalarT]]: ... @property def base(self) -> None: ... @@ -3779,15 +3753,15 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): # @overload - def astype( + def astype[ScalarT: generic]( self, /, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = "K", casting: _CastingKind = "unsafe", subok: builtins.bool = True, copy: builtins.bool | _CopyMode = True, - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload def astype( self, @@ -3804,12 +3778,12 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @overload def view(self, type: type[ndarray] = ...) -> Self: ... @overload - def view(self, /, dtype: _DTypeLike[_ScalarT], type: type[ndarray] = ...) -> _ScalarT: ... + def view[ScalarT: generic](self, /, dtype: _DTypeLike[ScalarT], type: type[ndarray] = ...) -> ScalarT: ... @overload def view(self, /, dtype: DTypeLike, type: type[ndarray] = ...) -> Incomplete: ... @overload - def getfield(self, /, dtype: _DTypeLike[_ScalarT], offset: SupportsIndex = 0) -> _ScalarT: ... + def getfield[ScalarT: generic](self, /, dtype: _DTypeLike[ScalarT], offset: SupportsIndex = 0) -> ScalarT: ... @overload def getfield(self, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> Incomplete: ... @@ -3832,30 +3806,30 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): mode: _ModeKind = "raise", ) -> NDArray[Self]: ... @overload - def take( + def take[ArrayT: ndarray]( self, indices: _ArrayLikeInt_co, /, axis: SupportsIndex | None = None, *, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = "raise", - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def take( + def take[ArrayT: ndarray]( self, indices: _ArrayLikeInt_co, /, axis: SupportsIndex | None, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = "raise", - ) -> _ArrayT: ... + ) -> ArrayT: ... def repeat(self, repeats: _ArrayLikeInt_co, /, axis: SupportsIndex | None = None) -> ndarray[tuple[int], dtype[Self]]: ... def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... 
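
`repeat`, `flatten`, and `ravel` on a scalar always yield 1-d arrays, hence
the `ndarray[tuple[int], dtype[Self]]` return types above:

    import numpy as np

    s = np.int32(7)
    v = s.ravel()
    assert v.shape == (1,) and v.dtype == np.int32   # 1-d, one element
    r = s.repeat(3)
    assert r.tolist() == [7, 7, 7]                   # still 1-d, dtype int32
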
- @overload # (() | []) + @overload # (()) def reshape( self, shape: tuple[()] | list[Never], @@ -3864,15 +3838,15 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): order: _OrderACF = "C", copy: builtins.bool | None = None, ) -> Self: ... - @overload # ((1, *(1, ...))@_ShapeT) - def reshape( + @overload # (ShapeT: (index, ...)) + def reshape[ShapeT: tuple[int, *tuple[int, ...]]]( self, - shape: _1NShapeT, + shape: ShapeT, /, *, order: _OrderACF = "C", copy: builtins.bool | None = None, - ) -> ndarray[_1NShapeT, dtype[Self]]: ... + ) -> ndarray[ShapeT, dtype[Self]]: ... @overload # (Sequence[index, ...]) # not recommended def reshape( self, @@ -3881,7 +3855,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): *, order: _OrderACF = "C", copy: builtins.bool | None = None, - ) -> Self | ndarray[tuple[L[1], ...], dtype[Self]]: ... + ) -> NDArray[Self] | Any: ... @overload # _(index) def reshape( self, @@ -3890,7 +3864,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): *, order: _OrderACF = "C", copy: builtins.bool | None = None, - ) -> ndarray[tuple[L[1]], dtype[Self]]: ... + ) -> ndarray[tuple[int], dtype[Self]]: ... @overload # _(index, index) def reshape( self, @@ -3900,7 +3874,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): *, order: _OrderACF = "C", copy: builtins.bool | None = None, - ) -> ndarray[tuple[L[1], L[1]], dtype[Self]]: ... + ) -> ndarray[tuple[int, int], dtype[Self]]: ... @overload # _(index, index, index) def reshape( self, @@ -3911,7 +3885,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): *, order: _OrderACF = "C", copy: builtins.bool | None = None, - ) -> ndarray[tuple[L[1], L[1], L[1]], dtype[Self]]: ... + ) -> ndarray[tuple[int, int, int], dtype[Self]]: ... @overload # _(index, index, index, index) def reshape( self, @@ -3923,7 +3897,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): *, order: _OrderACF = "C", copy: builtins.bool | None = None, - ) -> ndarray[tuple[L[1], L[1], L[1], L[1]], dtype[Self]]: ... + ) -> ndarray[tuple[int, int, int, int], dtype[Self]]: ... @overload # _(index, index, index, index, index, *index) # ndim >= 5 def reshape( self, @@ -3936,7 +3910,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): *sizes6_: SupportsIndex, order: _OrderACF = "C", copy: builtins.bool | None = None, - ) -> ndarray[tuple[L[1], L[1], L[1], L[1], L[1], *tuple[L[1], ...]], dtype[Self]]: ... + ) -> ndarray[tuple[int, int, int, int, int, *tuple[int, ...]], dtype[Self]]: ... def squeeze(self, axis: L[0] | tuple[()] | None = ...) -> Self: ... def transpose(self, axes: tuple[()] | None = ..., /) -> Self: ... @@ -3952,25 +3926,25 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True ) -> np.bool: ... @overload - def all( + def all[ScalarT: generic]( self, /, axis: L[0, -1] | tuple[()] | None, - out: ndarray[tuple[()], dtype[_ScalarT]], + out: ndarray[tuple[()], dtype[ScalarT]], keepdims: SupportsIndex = False, *, where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload - def all( + def all[ScalarT: generic]( self, /, axis: L[0, -1] | tuple[()] | None = None, *, - out: ndarray[tuple[()], dtype[_ScalarT]], + out: ndarray[tuple[()], dtype[ScalarT]], keepdims: SupportsIndex = False, where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, - ) -> _ScalarT: ... + ) -> ScalarT: ... 
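
The scalar `reshape` overloads above trade the old `L[1]` literal dimensions
for plain `int` tuples. At runtime a scalar can only be reshaped to size-1
shapes; a sketch of what the `ShapeT` overload now infers:

    import numpy as np

    s = np.float32(2.0)
    m = s.reshape((1, 1))     # runtime: a (1, 1) array holding the scalar
    assert m.shape == (1, 1)  # checker, per the ShapeT overload:
                              # ndarray[tuple[int, int], dtype[float32]]
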
@overload def any( @@ -3983,31 +3957,31 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True ) -> np.bool: ... @overload - def any( + def any[ScalarT: generic]( self, /, axis: L[0, -1] | tuple[()] | None, - out: ndarray[tuple[()], dtype[_ScalarT]], + out: ndarray[tuple[()], dtype[ScalarT]], keepdims: SupportsIndex = False, *, where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload - def any( + def any[ScalarT: generic]( self, /, axis: L[0, -1] | tuple[()] | None = None, *, - out: ndarray[tuple[()], dtype[_ScalarT]], + out: ndarray[tuple[()], dtype[ScalarT]], keepdims: SupportsIndex = False, where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, - ) -> _ScalarT: ... + ) -> ScalarT: ... # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` @property def dtype(self) -> _dtype[Self]: ... -class number(generic[_NumberItemT_co], Generic[_NBit, _NumberItemT_co]): +class number(generic[_NumberItemT_co], Generic[_NBitT, _NumberItemT_co]): @abstractmethod # `SupportsIndex | str | bytes` equivs `_ConvertibleToInt & _ConvertibleToFloat` def __new__(cls, value: SupportsIndex | str | bytes = 0, /) -> Self: ... def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... @@ -4095,7 +4069,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __invert__(self, /) -> np.bool: ... @overload - def __add__(self, other: _NumberT, /) -> _NumberT: ... + def __add__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload def __add__(self, other: builtins.bool | bool_, /) -> bool_: ... @overload @@ -4106,7 +4080,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __add__(self, other: complex, /) -> complex128: ... @overload - def __radd__(self, other: _NumberT, /) -> _NumberT: ... + def __radd__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload def __radd__(self, other: builtins.bool, /) -> bool_: ... @overload @@ -4117,7 +4091,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __radd__(self, other: complex, /) -> complex128: ... @overload - def __sub__(self, other: _NumberT, /) -> _NumberT: ... + def __sub__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload def __sub__(self, other: int, /) -> int_: ... @overload @@ -4126,7 +4100,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __sub__(self, other: complex, /) -> complex128: ... @overload - def __rsub__(self, other: _NumberT, /) -> _NumberT: ... + def __rsub__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload def __rsub__(self, other: int, /) -> int_: ... @overload @@ -4135,7 +4109,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __rsub__(self, other: complex, /) -> complex128: ... @overload - def __mul__(self, other: _NumberT, /) -> _NumberT: ... + def __mul__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload def __mul__(self, other: builtins.bool | bool_, /) -> bool_: ... @overload @@ -4146,7 +4120,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __mul__(self, other: complex, /) -> complex128: ... @overload - def __rmul__(self, other: _NumberT, /) -> _NumberT: ... + def __rmul__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload def __rmul__(self, other: builtins.bool, /) -> bool_: ... 
@overload @@ -4157,7 +4131,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __rmul__(self, other: complex, /) -> complex128: ... @overload - def __pow__(self, other: _NumberT, mod: None = None, /) -> _NumberT: ... + def __pow__[ScalarT: number](self, other: ScalarT, mod: None = None, /) -> ScalarT: ... @overload def __pow__(self, other: builtins.bool | bool_, mod: None = None, /) -> int8: ... @overload @@ -4168,7 +4142,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... @overload - def __rpow__(self, other: _NumberT, mod: None = None, /) -> _NumberT: ... + def __rpow__[ScalarT: number](self, other: ScalarT, mod: None = None, /) -> ScalarT: ... @overload def __rpow__(self, other: builtins.bool, mod: None = None, /) -> int8: ... @overload @@ -4179,21 +4153,21 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... @overload - def __truediv__(self, other: _InexactT, /) -> _InexactT: ... + def __truediv__[ScalarT: inexact](self, other: ScalarT, /) -> ScalarT: ... @overload def __truediv__(self, other: float | integer | bool_, /) -> float64: ... @overload def __truediv__(self, other: complex, /) -> complex128: ... @overload - def __rtruediv__(self, other: _InexactT, /) -> _InexactT: ... + def __rtruediv__[ScalarT: inexact](self, other: ScalarT, /) -> ScalarT: ... @overload def __rtruediv__(self, other: float | integer, /) -> float64: ... @overload def __rtruediv__(self, other: complex, /) -> complex128: ... @overload - def __floordiv__(self, other: _RealNumberT, /) -> _RealNumberT: ... + def __floordiv__[ScalarT: integer | floating](self, other: ScalarT, /) -> ScalarT: ... @overload def __floordiv__(self, other: builtins.bool | bool_, /) -> int8: ... @overload @@ -4202,7 +4176,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __floordiv__(self, other: float, /) -> float64: ... @overload - def __rfloordiv__(self, other: _RealNumberT, /) -> _RealNumberT: ... + def __rfloordiv__[ScalarT: integer | floating](self, other: ScalarT, /) -> ScalarT: ... @overload def __rfloordiv__(self, other: builtins.bool, /) -> int8: ... @overload @@ -4212,7 +4186,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): # keep in sync with __floordiv__ @overload - def __mod__(self, other: _RealNumberT, /) -> _RealNumberT: ... + def __mod__[ScalarT: integer | floating](self, other: ScalarT, /) -> ScalarT: ... @overload def __mod__(self, other: builtins.bool | bool_, /) -> int8: ... @overload @@ -4222,7 +4196,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): # keep in sync with __rfloordiv__ @overload - def __rmod__(self, other: _RealNumberT, /) -> _RealNumberT: ... + def __rmod__[ScalarT: integer | floating](self, other: ScalarT, /) -> ScalarT: ... @overload def __rmod__(self, other: builtins.bool, /) -> int8: ... @overload @@ -4232,7 +4206,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): # keep in sync with __mod__ @overload - def __divmod__(self, other: _RealNumberT, /) -> _2Tuple[_RealNumberT]: ... + def __divmod__[ScalarT: integer | floating](self, other: ScalarT, /) -> _2Tuple[ScalarT]: ... @overload def __divmod__(self, other: builtins.bool | bool_, /) -> _2Tuple[int8]: ... 
@overload @@ -4242,7 +4216,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): # keep in sync with __rmod__ @overload - def __rdivmod__(self, other: _RealNumberT, /) -> _2Tuple[_RealNumberT]: ... + def __rdivmod__[ScalarT: integer | floating](self, other: ScalarT, /) -> _2Tuple[ScalarT]: ... @overload def __rdivmod__(self, other: builtins.bool, /) -> _2Tuple[int8]: ... @overload @@ -4251,14 +4225,14 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... @overload - def __lshift__(self, other: _IntegerT, /) -> _IntegerT: ... + def __lshift__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload def __lshift__(self, other: builtins.bool | bool_, /) -> int8: ... @overload def __lshift__(self, other: int, /) -> int_: ... @overload - def __rlshift__(self, other: _IntegerT, /) -> _IntegerT: ... + def __rlshift__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload def __rlshift__(self, other: builtins.bool, /) -> int8: ... @overload @@ -4266,7 +4240,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): # keep in sync with __lshift__ @overload - def __rshift__(self, other: _IntegerT, /) -> _IntegerT: ... + def __rshift__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload def __rshift__(self, other: builtins.bool | bool_, /) -> int8: ... @overload @@ -4274,7 +4248,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): # keep in sync with __rlshift__ @overload - def __rrshift__(self, other: _IntegerT, /) -> _IntegerT: ... + def __rrshift__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload def __rrshift__(self, other: builtins.bool, /) -> int8: ... @overload @@ -4289,13 +4263,13 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __and__(self, other: builtins.bool | np.bool, /) -> np.bool: ... @overload - def __and__(self, other: _IntegerT, /) -> _IntegerT: ... + def __and__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload def __and__(self, other: int, /) -> np.bool | intp: ... __rand__ = __and__ @overload - def __xor__(self: np.bool[L[False]], other: _BoolItemT | np.bool[_BoolItemT], /) -> np.bool[_BoolItemT]: ... + def __xor__[ItemT: builtins.bool](self: np.bool[L[False]], other: ItemT | np.bool[ItemT], /) -> np.bool[ItemT]: ... @overload def __xor__(self: np.bool[L[True]], other: L[True] | np.bool[L[True]], /) -> np.bool[L[False]]: ... @overload @@ -4303,7 +4277,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __xor__(self, other: builtins.bool | np.bool, /) -> np.bool: ... @overload - def __xor__(self, other: _IntegerT, /) -> _IntegerT: ... + def __xor__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload def __xor__(self, other: int, /) -> np.bool | intp: ... __rxor__ = __xor__ @@ -4317,7 +4291,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __or__(self, other: builtins.bool | np.bool, /) -> np.bool: ... @overload - def __or__(self, other: _IntegerT, /) -> _IntegerT: ... + def __or__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload def __or__(self, other: int, /) -> np.bool | intp: ... __ror__ = __or__ @@ -4350,7 +4324,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __ge__(self, other: _SupportsLE, /) -> bool_: ... 
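
The `np.bool` binary-op overloads above follow one scheme: a NumPy scalar
operand is returned unchanged in type (`ScalarT` in, `ScalarT` out), a Python
`bool` keeps the result boolean-sized (`np.bool` or `int8`, depending on the
operator), and a bare Python `int` falls back to a platform-sized integer.
For example:

    import numpy as np

    t = np.True_
    r1 = t & np.int16(3)  # ScalarT overload: np.int16 in, np.int16 out
    r2 = t & True         # bool overload: result stays np.bool
    assert type(r1) is np.int16
    assert type(r2) is np.bool_
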
-# NOTE: This should _not_ be `Final` or a `TypeAlias` +# NOTE: This should _not_ be `Final[_]`, `_: TypeAlias`, or `type _` bool_ = bool # NOTE: The `object_` constructor returns the passed object, so instances with type @@ -4362,13 +4336,13 @@ class object_(_RealMixin, generic): @overload def __new__(cls, value: None = None, /) -> None: ... # type: ignore[misc] @overload - def __new__(cls, value: _AnyStr, /) -> _AnyStr: ... # type: ignore[misc] + def __new__[AnyStrT: (LiteralString, str, bytes)](cls, value: AnyStrT, /) -> AnyStrT: ... # type: ignore[misc] @overload - def __new__(cls, value: ndarray[_ShapeT, Any], /) -> ndarray[_ShapeT, dtype[Self]]: ... # type: ignore[misc] + def __new__[ShapeT: _Shape](cls, value: ndarray[ShapeT, Any], /) -> ndarray[ShapeT, dtype[Self]]: ... # type: ignore[misc] @overload def __new__(cls, value: SupportsLenAndGetItem[object], /) -> NDArray[Self]: ... # type: ignore[misc] @overload - def __new__(cls, value: _T, /) -> _T: ... # type: ignore[misc] + def __new__[T](cls, value: T, /) -> T: ... # type: ignore[misc] @overload # catch-all def __new__(cls, value: Any = ..., /) -> object | NDArray[Self]: ... # type: ignore[misc] @@ -4376,10 +4350,9 @@ class object_(_RealMixin, generic): def __abs__(self, /) -> object_: ... # this affects NDArray[object_].__abs__ def __call__(self, /, *args: object, **kwargs: object) -> Any: ... - if sys.version_info >= (3, 12): - def __release_buffer__(self, buffer: memoryview, /) -> None: ... + def __release_buffer__(self, buffer: memoryview, /) -> None: ... -class integer(_IntegralMixin, _RoundMixin, number[_NBit, int]): +class integer(_IntegralMixin, _RoundMixin, number[_NBitT, int]): @abstractmethod def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... @@ -4419,7 +4392,7 @@ class integer(_IntegralMixin, _RoundMixin, number[_NBit, int]): def __xor__(self, other: _IntLike_co, /) -> integer: ... def __rxor__(self, other: _IntLike_co, /) -> integer: ... -class signedinteger(integer[_NBit]): +class signedinteger(integer[_NBitT]): def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... # arithmetic ops @@ -4657,7 +4630,7 @@ int_ = intp long = signedinteger[_NBitLong] longlong = signedinteger[_NBitLongLong] -class unsignedinteger(integer[_NBit1]): +class unsignedinteger(integer[_NBitT1]): def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... # arithmetic ops @@ -4902,24 +4875,24 @@ class unsignedinteger(integer[_NBit1]): @overload def __ror__(self, other: signedinteger, /) -> signedinteger: ... 
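
The `object_.__new__` overloads earlier in this hunk encode the pass-through
behaviour the NOTE describes: whatever `object_` is constructed with comes
back unchanged, so no `object_` instance ever exists at runtime. Quick check:

    import numpy as np

    s = np.object_("hello")
    assert s == "hello" and type(s) is str   # the str itself, not a wrapper
    assert np.object_(None) is None
    a = np.object_(np.zeros(2))              # ndarray in -> object-dtype array out
    assert a.dtype == np.dtype(object)
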
-uint8: TypeAlias = unsignedinteger[_8Bit] -uint16: TypeAlias = unsignedinteger[_16Bit] -uint32: TypeAlias = unsignedinteger[_32Bit] -uint64: TypeAlias = unsignedinteger[_64Bit] +uint8 = unsignedinteger[_8Bit] +uint16 = unsignedinteger[_16Bit] +uint32 = unsignedinteger[_32Bit] +uint64 = unsignedinteger[_64Bit] -ubyte: TypeAlias = unsignedinteger[_NBitByte] -ushort: TypeAlias = unsignedinteger[_NBitShort] -uintc: TypeAlias = unsignedinteger[_NBitIntC] -uintp: TypeAlias = unsignedinteger[_NBitIntP] -uint: TypeAlias = uintp -ulong: TypeAlias = unsignedinteger[_NBitLong] -ulonglong: TypeAlias = unsignedinteger[_NBitLongLong] +ubyte = unsignedinteger[_NBitByte] +ushort = unsignedinteger[_NBitShort] +uintc = unsignedinteger[_NBitIntC] +uintp = unsignedinteger[_NBitIntP] +uint = uintp +ulong = unsignedinteger[_NBitLong] +ulonglong = unsignedinteger[_NBitLongLong] -class inexact(number[_NBit, _InexactItemT_co], Generic[_NBit, _InexactItemT_co]): +class inexact(number[_NBitT, _InexactItemT_co], Generic[_NBitT, _InexactItemT_co]): @abstractmethod def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... -class floating(_RealMixin, _RoundMixin, inexact[_NBit1, float]): +class floating(_RealMixin, _RoundMixin, inexact[_NBitT1, float]): def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... # arithmetic ops @@ -5072,8 +5045,8 @@ class floating(_RealMixin, _RoundMixin, inexact[_NBit1, float]): def is_integer(self, /) -> builtins.bool: ... def as_integer_ratio(self, /) -> tuple[int, int]: ... -float16: TypeAlias = floating[_16Bit] -float32: TypeAlias = floating[_32Bit] +float16 = floating[_16Bit] +float32 = floating[_32Bit] # either a C `double`, `float`, or `longdouble` class float64(floating[_64Bit], float): # type: ignore[misc] @@ -5103,7 +5076,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __add__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __add__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __add__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... @overload def __add__(self, other: complex, /) -> float64 | complex128: ... @@ -5112,7 +5085,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __radd__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload - def __radd__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __radd__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... @overload def __radd__(self, other: complex, /) -> float64 | complex128: ... @@ -5121,7 +5094,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __sub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __sub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __sub__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... @overload def __sub__(self, other: complex, /) -> float64 | complex128: ... @@ -5130,7 +5103,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rsub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... 
# type: ignore[misc] @overload - def __rsub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rsub__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... @overload def __rsub__(self, other: complex, /) -> float64 | complex128: ... @@ -5139,7 +5112,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __mul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __mul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __mul__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... @overload def __mul__(self, other: complex, /) -> float64 | complex128: ... @@ -5148,7 +5121,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rmul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload - def __rmul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rmul__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... @overload def __rmul__(self, other: complex, /) -> float64 | complex128: ... @@ -5157,7 +5130,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __truediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __truediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __truediv__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... @overload def __truediv__(self, other: complex, /) -> float64 | complex128: ... @@ -5166,7 +5139,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rtruediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload - def __rtruediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rtruediv__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... @overload def __rtruediv__(self, other: complex, /) -> float64 | complex128: ... @@ -5175,7 +5148,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __floordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __floordiv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __floordiv__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... @overload def __floordiv__(self, other: complex, /) -> float64 | complex128: ... @@ -5184,7 +5157,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rfloordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __rfloordiv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rfloordiv__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... @overload def __rfloordiv__(self, other: complex, /) -> float64 | complex128: ... 
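
All the `float64` arithmetic overloads above apply one promotion rule when
the other operand is a `complexfloating[_NBitT1, _NBitT2]`: each bit-width is
widened with `_64Bit`. That mirrors ordinary runtime promotion:

    import numpy as np

    x = np.float64(1.0)
    z = x + np.complex64(1j)  # stubs: complexfloating widened with _64Bit;
                              # runtime: float64 + complex64 promotes to complex128
    assert type(z) is np.complex128
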
@@ -5194,8 +5167,8 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __pow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... @overload def __pow__( - self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / - ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + self, other: complexfloating[_NBitT1, _NBitT2], mod: None = None, / + ) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... @overload def __pow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... @@ -5205,8 +5178,8 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __rpow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... # type: ignore[misc] @overload def __rpow__( - self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / - ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + self, other: complexfloating[_NBitT1, _NBitT2], mod: None = None, / + ) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... @overload def __rpow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... @@ -5216,16 +5189,16 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __divmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... def __rdivmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... # type: ignore[misc] -half: TypeAlias = float16 -single: TypeAlias = float32 -double: TypeAlias = float64 -longdouble: TypeAlias = floating[_NBitLongDouble] +half = float16 +single = float32 +double = float64 +longdouble = floating[_NBitLongDouble] # The main reason for `complexfloating` having two typevars is cosmetic. # It is used to clarify why `complex128`s precision is `_64Bit`, the latter # describing the two 64 bit floats representing its real and imaginary component -class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]): +class complexfloating(inexact[_NBitT1, complex], Generic[_NBitT1, _NBitT2]): @overload def __new__( cls, @@ -5237,91 +5210,101 @@ class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]): def __new__(cls, real: _ConvertibleToComplex | None = 0, /) -> Self: ... @property - def real(self) -> floating[_NBit1]: ... + def real(self) -> floating[_NBitT1]: ... @property - def imag(self) -> floating[_NBit2]: ... + def imag(self) -> floating[_NBitT2]: ... # NOTE: `__complex__` is technically defined in the concrete subtypes def __complex__(self, /) -> complex: ... - def __abs__(self, /) -> floating[_NBit1 | _NBit2]: ... # type: ignore[override] + def __abs__(self, /) -> floating[_NBitT1 | _NBitT2]: ... # type: ignore[override] @overload # type: ignore[override] - def __add__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __add__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __add__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __add__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __add__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __add__[NBitT: NBitBase](self, other: number[NBitT], /) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... @overload # type: ignore[override] - def __radd__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... 
+ def __radd__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __radd__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __radd__(self, other: complex, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __radd__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __radd__[NBitT: NBitBase]( + self, other: number[NBitT], / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... @overload # type: ignore[override] - def __sub__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __sub__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __sub__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __sub__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __sub__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __sub__[NBitT: NBitBase](self, other: number[NBitT], /) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... @overload # type: ignore[override] - def __rsub__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __rsub__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __rsub__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __rsub__(self, other: complex, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __rsub__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __rsub__[NBitT: NBitBase]( + self, other: number[NBitT], / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... @overload # type: ignore[override] - def __mul__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __mul__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __mul__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __mul__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __mul__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __mul__[NBitT: NBitBase](self, other: number[NBitT], /) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... @overload # type: ignore[override] - def __rmul__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __rmul__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __rmul__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __rmul__(self, other: complex, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __rmul__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __rmul__[NBitT: NBitBase]( + self, other: number[NBitT], / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... @overload # type: ignore[override] - def __truediv__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __truediv__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... 
@overload - def __truediv__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __truediv__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __truediv__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __truediv__[NBitT: NBitBase]( + self, other: number[NBitT], / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... @overload # type: ignore[override] - def __rtruediv__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __rtruediv__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __rtruediv__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __rtruediv__(self, other: complex, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __rtruediv__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __rtruediv__[NBitT: NBitBase]( + self, other: number[NBitT], / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... @overload # type: ignore[override] - def __pow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBit1, _NBit2]: ... + def __pow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload def __pow__( self, other: complex | float64 | complex128, mod: None = None, / - ) -> complexfloating[_NBit1, _NBit2] | complex128: ... + ) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __pow__( - self, other: number[_NBit], mod: None = None, / - ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __pow__[NBitT: NBitBase]( + self, other: number[NBitT], mod: None = None, / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... @overload # type: ignore[override] - def __rpow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBit1, _NBit2]: ... + def __rpow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __rpow__(self, other: complex, mod: None = None, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __rpow__(self, other: complex, mod: None = None, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __rpow__( - self, other: number[_NBit], mod: None = None, / - ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __rpow__[NBitT: NBitBase]( + self, other: number[NBitT], mod: None = None, / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... -complex64: TypeAlias = complexfloating[_32Bit] +complex64 = complexfloating[_32Bit] class complex128(complexfloating[_64Bit, _64Bit], complex): @property @@ -5342,38 +5325,36 @@ class complex128(complexfloating[_64Bit, _64Bit], complex): @overload # type: ignore[override] def __add__(self, other: _Complex128_co, /) -> complex128: ... @overload - def __add__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __add__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... def __radd__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] @overload # type: ignore[override] def __sub__(self, other: _Complex128_co, /) -> complex128: ... 
@overload - def __sub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __sub__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... def __rsub__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] @overload # type: ignore[override] def __mul__(self, other: _Complex128_co, /) -> complex128: ... @overload - def __mul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __mul__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... def __rmul__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] @overload # type: ignore[override] def __truediv__(self, other: _Complex128_co, /) -> complex128: ... @overload - def __truediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __truediv__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... def __rtruediv__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] @overload # type: ignore[override] def __pow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... @overload - def __pow__( - self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / - ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __pow__[NBitT: NBitBase](self, other: complexfloating[NBitT], mod: None = None, /) -> complexfloating[NBitT | _64Bit]: ... def __rpow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... # type: ignore[override] -csingle: TypeAlias = complex64 -cdouble: TypeAlias = complex128 -clongdouble: TypeAlias = complexfloating[_NBitLongDouble] +csingle = complex64 +cdouble = complex128 +clongdouble = complexfloating[_NBitLongDouble] class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co]): @property @@ -5386,7 +5367,7 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __new__(cls, /) -> timedelta64[L[0]]: ... @overload - def __new__(cls, value: _NaTValue | None, format: _TimeUnitSpec, /) -> timedelta64[None]: ... + def __new__(cls, value: _NaTValue | None, format: _TimeUnitSpec[_TD64Unit], /) -> timedelta64[None]: ... @overload def __new__(cls, value: L[0], format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> timedelta64[L[0]]: ... @overload @@ -5401,7 +5382,7 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] /, ) -> timedelta64[dt.timedelta]: ... @overload - def __new__(cls, value: _ConvertibleToTD64, format: _TimeUnitSpec = ..., /) -> timedelta64: ... + def __new__(cls, value: _ConvertibleToTD64, format: _TimeUnitSpec[_TD64Unit] = ..., /) -> timedelta64: ... # inherited at runtime from `signedinteger` def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... @@ -5429,9 +5410,11 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __add__(self: timedelta64[int], x: timedelta64, /) -> timedelta64[int | None]: ... @overload - def __add__(self: timedelta64[dt.timedelta], x: _AnyDateOrTime, /) -> _AnyDateOrTime: ... + def __add__[DateOrTimeT: dt.date | dt.timedelta](self: timedelta64[dt.timedelta], x: DateOrTimeT, /) -> DateOrTimeT: ... 
@overload - def __add__(self: timedelta64[_AnyTD64Item], x: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... + def __add__[AnyItemT: (dt.timedelta, int, None)]( + self: timedelta64[AnyItemT], x: timedelta64[AnyItemT] | _IntLike_co, / + ) -> timedelta64[AnyItemT]: ... __radd__ = __add__ # @@ -5450,7 +5433,9 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __sub__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> dt.timedelta: ... @overload - def __sub__(self: timedelta64[_AnyTD64Item], b: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... + def __sub__[AnyItemT: (dt.timedelta, int, None)]( + self: timedelta64[AnyItemT], b: timedelta64[AnyItemT] | _IntLike_co, / + ) -> timedelta64[AnyItemT]: ... # NOTE: subtraction is not commutative, so __rsub__ differs from __sub__. # This confuses mypy, so we ignore the [misc] errors it reports. @@ -5459,16 +5444,21 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __rsub__(self: timedelta64[None], a: _TD64Like_co, /) -> timedelta64[None]: ... @overload - def __rsub__(self: timedelta64[dt.timedelta], a: _AnyDateOrTime, /) -> _AnyDateOrTime: ... + def __rsub__[AnyDateT: (dt.datetime, dt.date)](self: timedelta64[dt.timedelta], a: AnyDateT, /) -> AnyDateT: ... @overload - def __rsub__(self: timedelta64[dt.timedelta], a: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item]: ... + def __rsub__[AnyItemT: (dt.timedelta, int, None)]( + self: timedelta64[dt.timedelta], a: timedelta64[AnyItemT], / + ) -> timedelta64[AnyItemT]: ... @overload - def __rsub__(self: timedelta64[_AnyTD64Item], a: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... # type: ignore[misc] + def __rsub__[AnyItemT: (dt.timedelta, int, None)]( # type: ignore[misc] + self: timedelta64[AnyItemT], a: timedelta64[AnyItemT] | _IntLike_co, / + ) -> timedelta64[AnyItemT]: ... @overload def __rsub__(self, a: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[overload-cannot-match] @overload def __rsub__(self, a: datetime64[None], /) -> datetime64[None]: ... + # @overload def __mul__(self: timedelta64[Never], x: _FloatLike_co, /) -> timedelta64: ... @overload @@ -5542,9 +5532,13 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __truediv__(self: timedelta64[Never], b: float | floating | integer, /) -> timedelta64: ... @overload - def __truediv__(self: timedelta64[_AnyTD64Item], b: int | integer, /) -> timedelta64[_AnyTD64Item]: ... + def __truediv__[AnyItemT: (dt.timedelta, int, None)]( + self: timedelta64[AnyItemT], b: int | integer, / + ) -> timedelta64[AnyItemT]: ... @overload - def __truediv__(self: timedelta64[_AnyTD64Item], b: float | floating, /) -> timedelta64[_AnyTD64Item | None]: ... + def __truediv__[AnyItemT: (dt.timedelta, int, None)]( + self: timedelta64[AnyItemT], b: float | floating, / + ) -> timedelta64[AnyItemT | None]: ... @overload def __truediv__(self, b: float | floating | integer, /) -> timedelta64: ... @@ -5560,17 +5554,19 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __floordiv__(self: timedelta64[Never], b: float | floating | integer, /) -> timedelta64: ... @overload - def __floordiv__(self: timedelta64[_AnyTD64Item], b: int | integer, /) -> timedelta64[_AnyTD64Item]: ... 
+ def __floordiv__[AnyItemT: (dt.timedelta, int, None)]( + self: timedelta64[AnyItemT], b: int | integer, / + ) -> timedelta64[AnyItemT]: ... @overload - def __floordiv__(self: timedelta64[_AnyTD64Item], b: float | floating, /) -> timedelta64[_AnyTD64Item | None]: ... + def __floordiv__[AnyItemT: (dt.timedelta, int, None)]( + self: timedelta64[AnyItemT], b: float | floating, / + ) -> timedelta64[AnyItemT | None]: ... @overload def __rfloordiv__(self, a: timedelta64, /) -> int64: ... @overload def __rfloordiv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> int: ... - # comparison ops - @overload def __lt__(self, other: _TD64Like_co, /) -> bool_: ... @overload @@ -5608,9 +5604,9 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __new__(cls, value: datetime64[_DT64ItemT_co], /) -> Self: ... @overload - def __new__(cls, value: _AnyDT64Arg, /) -> datetime64[_AnyDT64Arg]: ... + def __new__[AnyItemT: (dt.date, dt.datetime, None)](cls, value: AnyItemT, /) -> datetime64[AnyItemT]: ... @overload - def __new__(cls, value: _NaTValue | None = ..., format: _TimeUnitSpec = ..., /) -> datetime64[None]: ... + def __new__(cls, value: _NaTValue | None = ..., format: _TimeUnitSpec[_TD64Unit] = ..., /) -> datetime64[None]: ... @overload def __new__(cls, value: _DT64Now, format: _TimeUnitSpec[_NativeTimeUnit] = ..., /) -> datetime64[dt.datetime]: ... @overload @@ -5618,13 +5614,13 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __new__(cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_IntTimeUnit], /) -> datetime64[int]: ... @overload - def __new__( + def __new__( # type: ignore[overload-cannot-match] cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_NativeTimeUnit], / ) -> datetime64[dt.datetime]: ... @overload - def __new__(cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_DateUnit], /) -> datetime64[dt.date]: ... + def __new__(cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_DateUnit], /) -> datetime64[dt.date]: ... # type: ignore[overload-cannot-match] @overload - def __new__(cls, value: bytes | str | dt.date | None, format: _TimeUnitSpec = ..., /) -> Self: ... + def __new__(cls, value: bytes | str | dt.date | None, format: _TimeUnitSpec[_TD64Unit] = ..., /) -> Self: ... # def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... @@ -5633,9 +5629,9 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __add__(self: datetime64[Never], x: _TD64Like_co, /) -> datetime64: ... @overload - def __add__(self: datetime64[_AnyDT64Item], x: _IntLike_co, /) -> datetime64[_AnyDT64Item]: ... + def __add__(self, x: _IntLike_co, /) -> Self: ... @overload - def __add__(self: datetime64[None], x: _TD64Like_co, /) -> datetime64[None]: ... + def __add__(self: datetime64[None], x: timedelta64, /) -> datetime64[None]: ... @overload def __add__(self: datetime64[int | dt.datetime], x: timedelta64[Never], /) -> datetime64: ... @overload @@ -5658,7 +5654,7 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __sub__(self: datetime64[Never], x: datetime64, /) -> timedelta64: ... @overload - def __sub__(self: datetime64[_AnyDT64Item], x: _IntLike_co, /) -> datetime64[_AnyDT64Item]: ... + def __sub__(self, x: _IntLike_co, /) -> Self: ... @overload def __sub__(self: datetime64[dt.date], x: dt.date, /) -> dt.timedelta: ... 
@overload @@ -5694,7 +5690,7 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __rsub__(self: datetime64[Never], x: datetime64, /) -> timedelta64: ... @overload - def __rsub__(self: datetime64[_AnyDT64Item], x: _IntLike_co, /) -> datetime64[_AnyDT64Item]: ... + def __rsub__(self, x: _IntLike_co, /) -> Self: ... @overload def __rsub__(self: datetime64[dt.date], x: dt.date, /) -> dt.timedelta: ... @overload @@ -6125,7 +6121,7 @@ class nditer: def value(self) -> tuple[NDArray[Any], ...]: ... class memmap(ndarray[_ShapeT_co, _DTypeT_co]): - __array_priority__: ClassVar[float] + __array_priority__: ClassVar[float] = 100.0 # pyright: ignore[reportIncompatibleMethodOverride] filename: str | None offset: int mode: str @@ -6140,15 +6136,15 @@ class memmap(ndarray[_ShapeT_co, _DTypeT_co]): order: _OrderKACF = "C", ) -> memmap[Any, dtype[uint8]]: ... @overload - def __new__( + def __new__[ScalarT: generic]( subtype, filename: StrOrBytesPath | _SupportsFileMethodsRW, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], mode: _MemMapModeKind = "r+", offset: int = 0, shape: int | tuple[int, ...] | None = None, order: _OrderKACF = "C", - ) -> memmap[Any, dtype[_ScalarT]]: ... + ) -> memmap[Any, dtype[ScalarT]]: ... @overload def __new__( subtype, @@ -6160,9 +6156,9 @@ class memmap(ndarray[_ShapeT_co, _DTypeT_co]): order: _OrderKACF = "C", ) -> memmap[Any, dtype]: ... def __array_finalize__(self, obj: object) -> None: ... - def __array_wrap__( + def __array_wrap__( # type: ignore[override] self, - array: memmap[_ShapeT_co, _DTypeT_co], # type: ignore[override] + array: memmap[_ShapeT_co, _DTypeT_co], context: tuple[ufunc, tuple[Any, ...], int] | None = None, return_scalar: builtins.bool = False, ) -> Any: ... @@ -6203,9 +6199,9 @@ class poly1d: __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] @overload - def __array__(self, /, t: None = None, copy: builtins.bool | None = None) -> ndarray[tuple[int], dtype]: ... + def __array__(self, /, t: None = None, copy: builtins.bool | None = None) -> ndarray[tuple[int]]: ... @overload - def __array__(self, /, t: _DTypeT, copy: builtins.bool | None = None) -> ndarray[tuple[int], _DTypeT]: ... + def __array__[DTypeT: dtype](self, /, t: DTypeT, copy: builtins.bool | None = None) -> ndarray[tuple[int], DTypeT]: ... @overload def __call__(self, val: _ScalarLike_co) -> Any: ... 
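[Editor's note, not part of the patch] The diff above repeatedly replaces module-level `TypeVar` declarations (`_NBit1`, `_AnyTD64Item`, `_AnyDT64Arg`, ...) with PEP 695 type parameters declared inline on the method, often with a constraint tuple such as `(dt.timedelta, int, None)`. A self-contained sketch of that syntax, assuming Python 3.12 or newer; the class and names here are invented for illustration:

    import datetime as dt

    class Box[T]:
        def __init__(self, value: T) -> None:
            self.value = value

        # A constraint tuple (unlike a bound) forces the solved type to be
        # exactly one of the listed types, never a union or a subtype, which
        # is why the timedelta64 overloads above use it.
        def combine[ItemT: (dt.timedelta, int)](self, other: ItemT) -> ItemT:
            return other

    b = Box(0)
    print(b.combine(5))                     # ItemT solves to int
    print(b.combine(dt.timedelta(days=1)))  # ItemT solves to dt.timedelta
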
diff --git a/numpy/_array_api_info.pyi b/numpy/_array_api_info.pyi index 396125143e92..0bad9c65b137 100644 --- a/numpy/_array_api_info.pyi +++ b/numpy/_array_api_info.pyi @@ -1,18 +1,9 @@ -from typing import ( - Literal, - Never, - TypeAlias, - TypedDict, - TypeVar, - final, - overload, - type_check_only, -) +from typing import Literal, Never, TypedDict, final, overload, type_check_only import numpy as np -_Device: TypeAlias = Literal["cpu"] -_DeviceLike: TypeAlias = _Device | None +type _Device = Literal["cpu"] +type _DeviceLike = _Device | None _Capabilities = TypedDict( "_Capabilities", @@ -32,33 +23,22 @@ _DefaultDTypes = TypedDict( }, ) -_KindBool: TypeAlias = Literal["bool"] -_KindInt: TypeAlias = Literal["signed integer"] -_KindUInt: TypeAlias = Literal["unsigned integer"] -_KindInteger: TypeAlias = Literal["integral"] -_KindFloat: TypeAlias = Literal["real floating"] -_KindComplex: TypeAlias = Literal["complex floating"] -_KindNumber: TypeAlias = Literal["numeric"] -_Kind: TypeAlias = ( - _KindBool - | _KindInt - | _KindUInt - | _KindInteger - | _KindFloat - | _KindComplex - | _KindNumber -) - -_T1 = TypeVar("_T1") -_T2 = TypeVar("_T2") -_T3 = TypeVar("_T3") -_Permute1: TypeAlias = _T1 | tuple[_T1] -_Permute2: TypeAlias = tuple[_T1, _T2] | tuple[_T2, _T1] -_Permute3: TypeAlias = ( - tuple[_T1, _T2, _T3] | tuple[_T1, _T3, _T2] - | tuple[_T2, _T1, _T3] | tuple[_T2, _T3, _T1] - | tuple[_T3, _T1, _T2] | tuple[_T3, _T2, _T1] -) +type _KindBool = Literal["bool"] +type _KindInt = Literal["signed integer"] +type _KindUInt = Literal["unsigned integer"] +type _KindInteger = Literal["integral"] +type _KindFloat = Literal["real floating"] +type _KindComplex = Literal["complex floating"] +type _KindNumber = Literal["numeric"] +type _Kind = _KindBool | _KindInt | _KindUInt | _KindInteger | _KindFloat | _KindComplex | _KindNumber + +type _Permute1[T1] = T1 | tuple[T1] +type _Permute2[T1, T2] = tuple[T1, T2] | tuple[T2, T1] +type _Permute3[T1, T2, T3] = ( + tuple[T1, T2, T3] | tuple[T1, T3, T2] + | tuple[T2, T1, T3] | tuple[T2, T3, T1] + | tuple[T3, T1, T2] | tuple[T3, T2, T1] +) # fmt: skip @type_check_only class _DTypesBool(TypedDict): @@ -113,7 +93,7 @@ class _DTypesUnion(TypedDict, total=False): complex64: np.dtype[np.complex64] complex128: np.dtype[np.complex128] -_EmptyDict: TypeAlias = dict[Never, Never] +type _EmptyDict = dict[Never, Never] @final class __array_namespace_info__: @@ -121,11 +101,7 @@ class __array_namespace_info__: def capabilities(self) -> _Capabilities: ... def default_device(self) -> _Device: ... - def default_dtypes( - self, - *, - device: _DeviceLike = None, - ) -> _DefaultDTypes: ... + def default_dtypes(self, *, device: _DeviceLike = None) -> _DefaultDTypes: ... def devices(self) -> list[_Device]: ... @overload @@ -175,20 +151,14 @@ class __array_namespace_info__: self, *, device: _DeviceLike = None, - kind: ( - _Permute1[_KindInteger] - | _Permute2[_KindInt, _KindUInt] - ), + kind: _Permute1[_KindInteger] | _Permute2[_KindInt, _KindUInt], ) -> _DTypesInteger: ... @overload def dtypes( self, *, device: _DeviceLike = None, - kind: ( - _Permute1[_KindNumber] - | _Permute3[_KindInteger, _KindFloat, _KindComplex] - ), + kind: _Permute1[_KindNumber] | _Permute3[_KindInteger, _KindFloat, _KindComplex], ) -> _DTypesNumber: ... 
@overload def dtypes( diff --git a/numpy/_core/_asarray.pyi b/numpy/_core/_asarray.pyi index 6bef69d8e4ea..07adc83fbcff 100644 --- a/numpy/_core/_asarray.pyi +++ b/numpy/_core/_asarray.pyi @@ -1,30 +1,28 @@ from collections.abc import Iterable -from typing import Any, Literal, TypeAlias, TypeVar, overload +from typing import Any, Literal, overload from numpy._typing import DTypeLike, NDArray, _SupportsArrayFunc __all__ = ["require"] -_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) - -_Requirements: TypeAlias = Literal[ +type _Requirements = Literal[ "C", "C_CONTIGUOUS", "CONTIGUOUS", "F", "F_CONTIGUOUS", "FORTRAN", "A", "ALIGNED", "W", "WRITEABLE", "O", "OWNDATA" ] -_E: TypeAlias = Literal["E", "ENSUREARRAY"] -_RequirementsWithE: TypeAlias = _Requirements | _E +type _E = Literal["E", "ENSUREARRAY"] +type _RequirementsWithE = _Requirements | _E @overload -def require( - a: _ArrayT, +def require[ArrayT: NDArray[Any]]( + a: ArrayT, dtype: None = None, requirements: _Requirements | Iterable[_Requirements] | None = None, *, like: _SupportsArrayFunc | None = None -) -> _ArrayT: ... +) -> ArrayT: ... @overload def require( a: object, diff --git a/numpy/_core/_dtype.pyi b/numpy/_core/_dtype.pyi index 28adecf4ad2f..4d34ae9efb99 100644 --- a/numpy/_core/_dtype.pyi +++ b/numpy/_core/_dtype.pyi @@ -1,13 +1,11 @@ -from typing import Final, Literal as L, TypeAlias, TypedDict, overload, type_check_only -from typing_extensions import ReadOnly, TypeVar +from typing import Final, Literal as L, TypedDict, overload, type_check_only +from typing_extensions import ReadOnly import numpy as np ### -_T = TypeVar("_T") - -_Name: TypeAlias = L["uint", "int", "complex", "float", "bool", "void", "object", "datetime", "timedelta", "bytes", "str"] +type _Name = L["uint", "int", "complex", "float", "bool", "void", "object", "datetime", "timedelta", "bytes", "str"] @type_check_only class _KindToStemType(TypedDict): @@ -50,7 +48,7 @@ def _name_get(dtype: np.dtype) -> str: ... # @overload -def _unpack_field(dtype: np.dtype, offset: int, title: _T) -> tuple[np.dtype, int, _T]: ... +def _unpack_field[T](dtype: np.dtype, offset: int, title: T) -> tuple[np.dtype, int, T]: ... @overload def _unpack_field(dtype: np.dtype, offset: int, title: None = None) -> tuple[np.dtype, int, None]: ... def _aligned_offset(offset: int, alignment: int) -> int: ... diff --git a/numpy/_core/_exceptions.pyi b/numpy/_core/_exceptions.pyi index b340fde3e463..00c1cdbaa575 100644 --- a/numpy/_core/_exceptions.pyi +++ b/numpy/_core/_exceptions.pyi @@ -1,17 +1,11 @@ from collections.abc import Iterable -from typing import Any, Final, TypeVar, overload +from typing import Any, Final, overload import numpy as np from numpy import _CastingKind ### -_T = TypeVar("_T") -_TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, *tuple[Any, ...]]) -_ExceptionT = TypeVar("_ExceptionT", bound=Exception) - -### - class UFuncTypeError(TypeError): ufunc: Final[np.ufunc] def __init__(self, /, ufunc: np.ufunc) -> None: ... @@ -48,7 +42,7 @@ class _ArrayMemoryError(MemoryError): def _size_to_string(num_bytes: int) -> str: ... @overload -def _unpack_tuple(tup: tuple[_T]) -> _T: ... +def _unpack_tuple[T](tup: tuple[T]) -> T: ... @overload -def _unpack_tuple(tup: _TupleT) -> _TupleT: ... -def _display_as_base(cls: type[_ExceptionT]) -> type[_ExceptionT]: ... +def _unpack_tuple[TupleT: tuple[()] | tuple[Any, Any, *tuple[Any, ...]]](tup: TupleT) -> TupleT: ... +def _display_as_base[ExceptionT: Exception](cls: type[ExceptionT]) -> type[ExceptionT]: ... 
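[Editor's note, not part of the patch] These stub modules also swap `_X: TypeAlias = ...` declarations for the PEP 695 `type` statement, which evaluates its right-hand side lazily and can carry its own type parameters, as `_Permute2` and `_Permute3` do above. A short runnable sketch, assuming Python 3.12 or newer:

    from typing import Literal

    type Device = Literal["cpu"]                           # plain alias
    type Permute2[T1, T2] = tuple[T1, T2] | tuple[T2, T1]  # generic alias

    # The statement produces a TypeAliasType whose definition stays
    # introspectable without any explicit TypeVar bookkeeping.
    print(Permute2.__type_params__)  # (T1, T2)
    print(Permute2.__value__)        # tuple[T1, T2] | tuple[T2, T1]
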
diff --git a/numpy/_core/_internal.pyi b/numpy/_core/_internal.pyi index 6e37022ffd56..179e077629b6 100644 --- a/numpy/_core/_internal.pyi +++ b/numpy/_core/_internal.pyi @@ -8,9 +8,6 @@ import numpy as np import numpy.typing as npt from numpy.ctypeslib import c_intp -_CastT = TypeVar("_CastT", bound=ct._CanCastTo) -_T_co = TypeVar("_T_co", covariant=True) -_CT = TypeVar("_CT", bound=ct._CData) _PT_co = TypeVar("_PT_co", bound=int | None, default=None, covariant=True) ### @@ -43,18 +40,18 @@ class _ctypes(Generic[_PT_co]): def _as_parameter_(self) -> ct.c_void_p: ... # - def data_as(self, /, obj: type[_CastT]) -> _CastT: ... - def shape_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ... - def strides_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ... + def data_as[CastT: ct._CanCastTo](self, /, obj: type[CastT]) -> CastT: ... + def shape_as[CT: ct._CData](self, /, obj: type[CT]) -> ct.Array[CT]: ... + def strides_as[CT: ct._CData](self, /, obj: type[CT]) -> ct.Array[CT]: ... -class dummy_ctype(Generic[_T_co]): - _cls: type[_T_co] +class dummy_ctype[T_co]: + _cls: type[T_co] - def __init__(self, /, cls: type[_T_co]) -> None: ... + def __init__(self, /, cls: type[T_co]) -> None: ... def __eq__(self, other: Self, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] def __ne__(self, other: Self, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] def __mul__(self, other: object, /) -> Self: ... - def __call__(self, /, *other: object) -> _T_co: ... + def __call__(self, /, *other: object) -> T_co: ... def array_ufunc_errmsg_formatter(dummy: object, ufunc: np.ufunc, method: str, *inputs: object, **kwargs: object) -> str: ... def array_function_errmsg_formatter(public_api: Callable[..., object], types: Iterable[str]) -> str: ... diff --git a/numpy/_core/_methods.pyi b/numpy/_core/_methods.pyi index 3c80683f003b..651c78d3530b 100644 --- a/numpy/_core/_methods.pyi +++ b/numpy/_core/_methods.pyi @@ -1,5 +1,5 @@ from collections.abc import Callable -from typing import Any, Concatenate, TypeAlias +from typing import Any, Concatenate import numpy as np @@ -7,7 +7,7 @@ from . 
import _exceptions as _exceptions ### -_Reduce2: TypeAlias = Callable[Concatenate[object, ...], Any] +type _Reduce2 = Callable[Concatenate[object, ...], Any] ### diff --git a/numpy/_core/_type_aliases.pyi b/numpy/_core/_type_aliases.pyi index e28541cc8987..c7efe989caa5 100644 --- a/numpy/_core/_type_aliases.pyi +++ b/numpy/_core/_type_aliases.pyi @@ -1,5 +1,5 @@ from collections.abc import Collection -from typing import Final, Literal as L, TypeAlias, TypedDict, type_check_only +from typing import Final, Literal as L, TypedDict, type_check_only import numpy as np @@ -35,7 +35,7 @@ class _CNamesDict(TypedDict): c_names_dict: Final[_CNamesDict] -_AbstractTypeName: TypeAlias = L[ +type _AbstractTypeName = L[ "generic", "flexible", "character", diff --git a/numpy/_core/_ufunc_config.pyi b/numpy/_core/_ufunc_config.pyi index f1f0d88fe165..039aa1d51223 100644 --- a/numpy/_core/_ufunc_config.pyi +++ b/numpy/_core/_ufunc_config.pyi @@ -1,7 +1,7 @@ from _typeshed import SupportsWrite from collections.abc import Callable from types import TracebackType -from typing import Any, Final, Literal, TypeAlias, TypedDict, TypeVar, type_check_only +from typing import Any, Final, Literal, TypedDict, type_check_only __all__ = [ "seterr", @@ -13,10 +13,8 @@ __all__ = [ "errstate", ] -_ErrKind: TypeAlias = Literal["ignore", "warn", "raise", "call", "print", "log"] -_ErrCall: TypeAlias = Callable[[str, int], Any] | SupportsWrite[str] - -_CallableT = TypeVar("_CallableT", bound=Callable[..., object]) +type _ErrKind = Literal["ignore", "warn", "raise", "call", "print", "log"] +type _ErrCall = Callable[[str, int], Any] | SupportsWrite[str] @type_check_only class _ErrDict(TypedDict): @@ -45,7 +43,7 @@ class errstate: under: _ErrKind | None = None, invalid: _ErrKind | None = None, ) -> None: ... - def __call__(self, /, func: _CallableT) -> _CallableT: ... + def __call__[FuncT: Callable[..., object]](self, /, func: FuncT) -> FuncT: ... def __enter__(self) -> None: ... 
def __exit__( self, diff --git a/numpy/_core/arrayprint.pyi b/numpy/_core/arrayprint.pyi index 307f844634ca..167cc3f3a097 100644 --- a/numpy/_core/arrayprint.pyi +++ b/numpy/_core/arrayprint.pyi @@ -3,15 +3,7 @@ from collections.abc import Callable # Using a private class is by no means ideal, but it is simply a consequence # of a `contextlib.context` returning an instance of aforementioned class from contextlib import _GeneratorContextManager -from typing import ( - Any, - Final, - Literal, - SupportsIndex, - TypeAlias, - TypedDict, - type_check_only, -) +from typing import Any, Final, Literal, SupportsIndex, TypedDict, type_check_only import numpy as np from numpy._typing import NDArray, _CharLike_co, _FloatLike_co @@ -29,12 +21,12 @@ __all__ = [ ### -_FloatMode: TypeAlias = Literal["fixed", "unique", "maxprec", "maxprec_equal"] -_LegacyNoStyle: TypeAlias = Literal["1.21", "1.25", "2.1", False] -_Legacy: TypeAlias = Literal["1.13", _LegacyNoStyle] -_Sign: TypeAlias = Literal["-", "+", " "] -_Trim: TypeAlias = Literal["k", ".", "0", "-"] -_ReprFunc: TypeAlias = Callable[[NDArray[Any]], str] +type _FloatMode = Literal["fixed", "unique", "maxprec", "maxprec_equal"] +type _LegacyNoStyle = Literal["1.21", "1.25", "2.1", False] +type _Legacy = Literal["1.13", _LegacyNoStyle] +type _Sign = Literal["-", "+", " "] +type _Trim = Literal["k", ".", "0", "-"] +type _ReprFunc = Callable[[NDArray[Any]], str] @type_check_only class _FormatDict(TypedDict, total=False): diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 406d11ea0eb7..bc587ed846ba 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -1,13 +1,6 @@ -from typing import ( - Any, - Literal as L, - Self, - SupportsIndex, - SupportsInt, - TypeAlias, - overload, -) -from typing_extensions import Buffer, TypeVar +from collections.abc import Buffer +from typing import Any, Literal as L, Self, SupportsIndex, SupportsInt, overload +from typing_extensions import TypeVar import numpy as np from numpy import _OrderKACF, bytes_, dtype, int_, ndarray, object_, str_ @@ -83,14 +76,13 @@ __all__ = [ ] _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) -_CharacterT = TypeVar("_CharacterT", bound=np.character) _CharDTypeT_co = TypeVar("_CharDTypeT_co", bound=dtype[np.character], default=dtype, covariant=True) -_CharArray: TypeAlias = chararray[_AnyShape, dtype[_CharacterT]] +type _CharArray[ScalarT: np.character] = chararray[_AnyShape, dtype[ScalarT]] -_StringDTypeArray: TypeAlias = np.ndarray[_AnyShape, np.dtypes.StringDType] -_StringDTypeOrUnicodeArray: TypeAlias = _StringDTypeArray | NDArray[np.str_] -_StringDTypeSupportsArray: TypeAlias = _SupportsArray[np.dtypes.StringDType] +type _StringDTypeArray = np.ndarray[_AnyShape, np.dtypes.StringDType] +type _StringDTypeOrUnicodeArray = _StringDTypeArray | NDArray[np.str_] +type _StringDTypeSupportsArray = _SupportsArray[np.dtypes.StringDType] class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): @overload diff --git a/numpy/_core/einsumfunc.pyi b/numpy/_core/einsumfunc.pyi index 6d34883e6625..3e42ef6dc238 100644 --- a/numpy/_core/einsumfunc.pyi +++ b/numpy/_core/einsumfunc.pyi @@ -1,8 +1,8 @@ from collections.abc import Sequence -from typing import Any, Literal, TypeAlias, TypeVar, overload +from typing import Any, Literal, overload import numpy as np -from numpy import _OrderKACF, number +from numpy import _OrderKACF from numpy._typing import ( NDArray, _ArrayLikeBool_co, @@ -22,14 +22,9 @@ from numpy._typing import ( __all__ 
= ["einsum", "einsum_path"] -_ArrayT = TypeVar( - "_ArrayT", - bound=NDArray[np.bool | number], -) - -_OptimizeKind: TypeAlias = bool | Literal["greedy", "optimal"] | Sequence[Any] | None -_CastingSafe: TypeAlias = Literal["no", "equiv", "safe", "same_kind"] -_CastingUnsafe: TypeAlias = Literal["unsafe"] +type _OptimizeKind = bool | Literal["greedy", "optimal"] | Sequence[Any] | None +type _CastingSafe = Literal["no", "equiv", "safe", "same_kind"] +type _CastingUnsafe = Literal["unsafe"] # TODO: Properly handle the `casting`-based combinatorics # TODO: We need to evaluate the content `__subscripts` in order @@ -104,27 +99,27 @@ def einsum( optimize: _OptimizeKind = False, ) -> Any: ... @overload -def einsum( +def einsum[OutT: NDArray[np.bool | np.number]]( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeComplex_co, - out: _ArrayT, + out: OutT, dtype: _DTypeLikeComplex_co | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., optimize: _OptimizeKind = False, -) -> _ArrayT: ... +) -> OutT: ... @overload -def einsum( +def einsum[OutT: NDArray[np.bool | np.number]]( subscripts: str | _ArrayLikeInt_co, /, *operands: Any, - out: _ArrayT, + out: OutT, casting: _CastingUnsafe, dtype: _DTypeLikeComplex_co | None = ..., order: _OrderKACF = ..., optimize: _OptimizeKind = False, -) -> _ArrayT: ... +) -> OutT: ... @overload def einsum( @@ -149,27 +144,27 @@ def einsum( optimize: _OptimizeKind = False, ) -> Any: ... @overload -def einsum( +def einsum[OutT: NDArray[np.bool | np.number]]( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeObject_co, - out: _ArrayT, + out: OutT, dtype: _DTypeLikeObject | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., optimize: _OptimizeKind = False, -) -> _ArrayT: ... +) -> OutT: ... @overload -def einsum( +def einsum[OutT: NDArray[np.bool | np.number]]( subscripts: str | _ArrayLikeInt_co, /, *operands: Any, - out: _ArrayT, + out: OutT, casting: _CastingUnsafe, dtype: _DTypeLikeObject | None = ..., order: _OrderKACF = ..., optimize: _OptimizeKind = False, -) -> _ArrayT: ... +) -> OutT: ... # NOTE: `einsum_call` is a hidden kwarg unavailable for public use. # It is therefore excluded from the signatures below. 
diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 2a9762240e3d..b929328a9443 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -7,9 +7,7 @@ from typing import ( Never, Protocol, SupportsIndex, - TypeAlias, TypedDict, - TypeVar, Unpack, overload, type_check_only, @@ -17,7 +15,6 @@ from typing import ( import numpy as np from numpy import ( - _AnyShapeT, _CastingKind, _ModeKind, _OrderACF, @@ -28,12 +25,10 @@ from numpy import ( complexfloating, float16, floating, - generic, int64, int_, intp, object_, - timedelta64, uint64, ) from numpy._globals import _NoValueType @@ -57,6 +52,7 @@ from numpy._typing import ( _NestedSequence, _NumberLike_co, _ScalarLike_co, + _Shape, _ShapeLike, ) @@ -107,18 +103,11 @@ __all__ = [ "var", ] -_ScalarT = TypeVar("_ScalarT", bound=generic) -_NumberOrObjectT = TypeVar("_NumberOrObjectT", bound=np.number | np.object_) -_ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) -_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) -_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], covariant=True) -_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[np.integer | np.bool]) - @type_check_only -class _SupportsShape(Protocol[_ShapeT_co]): +class _SupportsShape[ShapeT_co: _Shape](Protocol): # NOTE: it matters that `self` is positional only @property - def shape(self, /) -> _ShapeT_co: ... + def shape(self, /) -> ShapeT_co: ... @type_check_only class _UFuncKwargs(TypedDict, total=False): @@ -129,20 +118,23 @@ class _UFuncKwargs(TypedDict, total=False): casting: _CastingKind # a "sequence" that isn't a string, bytes, bytearray, or memoryview -_T = TypeVar("_T") -_PyArray: TypeAlias = list[_T] | tuple[_T, ...] +type _PyArray[_T] = list[_T] | tuple[_T, ...] # `int` also covers `bool` -_PyScalar: TypeAlias = complex | bytes | str +type _PyScalar = complex | bytes | str + +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] + +### # TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload -def take( - a: _ArrayLike[_ScalarT], +def take[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], indices: _IntLike_co, axis: None = None, out: None = None, mode: _ModeKind = "raise", -) -> _ScalarT: ... +) -> ScalarT: ... @overload def take( a: ArrayLike, @@ -152,13 +144,13 @@ def take( mode: _ModeKind = "raise", ) -> Any: ... @overload -def take( - a: _ArrayLike[_ScalarT], +def take[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None, mode: _ModeKind = "raise", -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def take( a: ArrayLike, @@ -168,50 +160,50 @@ def take( mode: _ModeKind = "raise", ) -> NDArray[Any]: ... @overload -def take( +def take[ArrayT: np.ndarray]( a: ArrayLike, indices: _ArrayLikeInt_co, axis: SupportsIndex | None, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = "raise", -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def take( +def take[ArrayT: np.ndarray]( a: ArrayLike, indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = "raise", -) -> _ArrayT: ... +) -> ArrayT: ... -@overload -def reshape( # shape: index - a: _ArrayLike[_ScalarT], +@overload # shape: index +def reshape[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], /, shape: SupportsIndex, order: _OrderACF = "C", *, copy: bool | None = None, -) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ... -@overload -def reshape( # shape: (int, ...) 
@ _AnyShapeT - a: _ArrayLike[_ScalarT], +) -> np.ndarray[tuple[int], np.dtype[ScalarT]]: ... +@overload # shape: ~ShapeT +def reshape[ScalarT: np.generic, ShapeT: _Shape]( + a: _ArrayLike[ScalarT], /, - shape: _AnyShapeT, + shape: ShapeT, order: _OrderACF = "C", *, copy: bool | None = None, -) -> np.ndarray[_AnyShapeT, np.dtype[_ScalarT]]: ... +) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... @overload # shape: Sequence[index] -def reshape( - a: _ArrayLike[_ScalarT], +def reshape[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], /, shape: Sequence[SupportsIndex], order: _OrderACF = "C", *, copy: bool | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload # shape: index def reshape( a: ArrayLike, @@ -221,15 +213,15 @@ def reshape( *, copy: bool | None = None, ) -> np.ndarray[tuple[int], np.dtype]: ... -@overload -def reshape( # shape: (int, ...) @ _AnyShapeT +@overload # shape: ~ShapeT +def reshape[ShapeT: _Shape]( a: ArrayLike, /, - shape: _AnyShapeT, + shape: ShapeT, order: _OrderACF = "C", *, copy: bool | None = None, -) -> np.ndarray[_AnyShapeT, np.dtype]: ... +) -> np.ndarray[ShapeT, np.dtype]: ... @overload # shape: Sequence[index] def reshape( a: ArrayLike, @@ -248,12 +240,12 @@ def choose( mode: _ModeKind = "raise", ) -> Any: ... @overload -def choose( +def choose[ScalarT: np.generic]( a: _ArrayLikeInt_co, - choices: _ArrayLike[_ScalarT], + choices: _ArrayLike[ScalarT], out: None = None, mode: _ModeKind = "raise", -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def choose( a: _ArrayLikeInt_co, @@ -262,26 +254,26 @@ def choose( mode: _ModeKind = "raise", ) -> NDArray[Any]: ... @overload -def choose( +def choose[ArrayT: np.ndarray]( a: _ArrayLikeInt_co, choices: ArrayLike, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = "raise", -) -> _ArrayT: ... +) -> ArrayT: ... # keep in sync with `ma.core.repeat` @overload -def repeat( - a: _ArrayLike[_ScalarT], +def repeat[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], repeats: _ArrayLikeInt_co, axis: None = None, -) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ... +) -> np.ndarray[tuple[int], np.dtype[ScalarT]]: ... @overload -def repeat( - a: _ArrayLike[_ScalarT], +def repeat[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], repeats: _ArrayLikeInt_co, axis: SupportsIndex, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def repeat( a: ArrayLike, @@ -305,17 +297,17 @@ def put( # keep in sync with `ndarray.swapaxes` and `ma.core.swapaxes` @overload -def swapaxes(a: _ArrayT, axis1: SupportsIndex, axis2: SupportsIndex) -> _ArrayT: ... +def swapaxes[ArrayT: np.ndarray](a: ArrayT, axis1: SupportsIndex, axis2: SupportsIndex) -> ArrayT: ... @overload -def swapaxes(a: _ArrayLike[_ScalarT], axis1: SupportsIndex, axis2: SupportsIndex) -> NDArray[_ScalarT]: ... +def swapaxes[ScalarT: np.generic](a: _ArrayLike[ScalarT], axis1: SupportsIndex, axis2: SupportsIndex) -> NDArray[ScalarT]: ... @overload def swapaxes(a: ArrayLike, axis1: SupportsIndex, axis2: SupportsIndex) -> NDArray[Any]: ... @overload -def transpose( - a: _ArrayLike[_ScalarT], +def transpose[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], axes: _ShapeLike | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def transpose( a: ArrayLike, @@ -323,19 +315,19 @@ def transpose( ) -> NDArray[Any]: ... @overload -def matrix_transpose(x: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... +def matrix_transpose[ScalarT: np.generic](x: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... 
@overload def matrix_transpose(x: ArrayLike, /) -> NDArray[Any]: ... # @overload -def partition( - a: _ArrayLike[_ScalarT], +def partition[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], kth: _ArrayLikeInt, axis: SupportsIndex | None = -1, kind: _PartitionKind = "introselect", order: None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def partition( a: _ArrayLike[np.void], @@ -364,14 +356,14 @@ def argpartition( # @overload -def sort( - a: _ArrayLike[_ScalarT], +def sort[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], axis: SupportsIndex | None = -1, kind: _SortKind | None = None, order: str | Sequence[str] | None = None, *, stable: bool | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def sort( a: ArrayLike, @@ -408,21 +400,21 @@ def argmax( keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload -def argmax( +def argmax[BoolOrIntArrayT: NDArray[np.integer | np.bool]]( a: ArrayLike, axis: SupportsIndex | None, - out: _BoolOrIntArrayT, + out: BoolOrIntArrayT, *, keepdims: bool | _NoValueType = ..., -) -> _BoolOrIntArrayT: ... +) -> BoolOrIntArrayT: ... @overload -def argmax( +def argmax[BoolOrIntArrayT: NDArray[np.integer | np.bool]]( a: ArrayLike, axis: SupportsIndex | None = None, *, - out: _BoolOrIntArrayT, + out: BoolOrIntArrayT, keepdims: bool | _NoValueType = ..., -) -> _BoolOrIntArrayT: ... +) -> BoolOrIntArrayT: ... @overload def argmin( @@ -441,21 +433,21 @@ def argmin( keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload -def argmin( +def argmin[BoolOrIntArrayT: NDArray[np.integer | np.bool]]( a: ArrayLike, axis: SupportsIndex | None, - out: _BoolOrIntArrayT, + out: BoolOrIntArrayT, *, keepdims: bool | _NoValueType = ..., -) -> _BoolOrIntArrayT: ... +) -> BoolOrIntArrayT: ... @overload -def argmin( +def argmin[BoolOrIntArrayT: NDArray[np.integer | np.bool]]( a: ArrayLike, axis: SupportsIndex | None = None, *, - out: _BoolOrIntArrayT, + out: BoolOrIntArrayT, keepdims: bool | _NoValueType = ..., -) -> _BoolOrIntArrayT: ... +) -> BoolOrIntArrayT: ... # TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload @@ -475,43 +467,36 @@ def searchsorted( # TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload -def resize(a: _ArrayLike[_ScalarT], new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ... +def resize[ScalarT: np.generic](a: _ArrayLike[ScalarT], new_shape: SupportsIndex | tuple[SupportsIndex]) -> _Array1D[ScalarT]: ... @overload -def resize(a: _ArrayLike[_ScalarT], new_shape: _AnyShapeT) -> np.ndarray[_AnyShapeT, np.dtype[_ScalarT]]: ... +def resize[ScalarT: np.generic, ShapeT: _Shape]( + a: _ArrayLike[ScalarT], new_shape: ShapeT +) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... @overload -def resize(a: _ArrayLike[_ScalarT], new_shape: _ShapeLike) -> NDArray[_ScalarT]: ... +def resize[ScalarT: np.generic](a: _ArrayLike[ScalarT], new_shape: _ShapeLike) -> NDArray[ScalarT]: ... @overload def resize(a: ArrayLike, new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype]: ... @overload -def resize(a: ArrayLike, new_shape: _AnyShapeT) -> np.ndarray[_AnyShapeT, np.dtype]: ... +def resize[ShapeT: _Shape](a: ArrayLike, new_shape: ShapeT) -> np.ndarray[ShapeT, np.dtype]: ... @overload def resize(a: ArrayLike, new_shape: _ShapeLike) -> NDArray[Any]: ... 
# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload -def squeeze( - a: _ScalarT, - axis: _ShapeLike | None = None, -) -> _ScalarT: ... +def squeeze[ScalarT: np.generic](a: ScalarT, axis: _ShapeLike | None = None) -> ScalarT: ... @overload -def squeeze( - a: _ArrayLike[_ScalarT], - axis: _ShapeLike | None = None, -) -> NDArray[_ScalarT]: ... +def squeeze[ScalarT: np.generic](a: _ArrayLike[ScalarT], axis: _ShapeLike | None = None) -> NDArray[ScalarT]: ... @overload -def squeeze( - a: ArrayLike, - axis: _ShapeLike | None = None, -) -> NDArray[Any]: ... +def squeeze(a: ArrayLike, axis: _ShapeLike | None = None) -> NDArray[Any]: ... # keep in sync with `ma.core.diagonal` @overload -def diagonal( - a: _ArrayLike[_ScalarT], +def diagonal[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], offset: SupportsIndex = 0, axis1: SupportsIndex = 0, axis2: SupportsIndex = 1, # >= 2D array -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def diagonal( a: ArrayLike, @@ -531,29 +516,27 @@ def trace( out: None = None, ) -> Any: ... @overload -def trace( +def trace[ArrayT: np.ndarray]( a: ArrayLike, # >= 2D array offset: SupportsIndex, axis1: SupportsIndex, axis2: SupportsIndex, dtype: DTypeLike | None, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... @overload -def trace( +def trace[ArrayT: np.ndarray]( a: ArrayLike, # >= 2D array offset: SupportsIndex = 0, axis1: SupportsIndex = 0, axis2: SupportsIndex = 1, dtype: DTypeLike | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... - -_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] + out: ArrayT, +) -> ArrayT: ... @overload -def ravel(a: _ArrayLike[_ScalarT], order: _OrderKACF = "C") -> _Array1D[_ScalarT]: ... +def ravel[ScalarT: np.generic](a: _ArrayLike[ScalarT], order: _OrderKACF = "C") -> _Array1D[ScalarT]: ... @overload def ravel(a: bytes | _NestedSequence[bytes], order: _OrderKACF = "C") -> _Array1D[np.bytes_]: ... @overload @@ -575,7 +558,7 @@ def nonzero(a: _ArrayLike[Any]) -> tuple[np.ndarray[tuple[int], np.dtype[intp]], @overload def shape(a: _SupportsShape[Never]) -> _AnyShape: ... @overload -def shape(a: _SupportsShape[_ShapeT]) -> _ShapeT: ... +def shape[ShapeT: _Shape](a: _SupportsShape[ShapeT]) -> ShapeT: ... @overload def shape(a: _PyScalar) -> tuple[()]: ... # `collections.abc.Sequence` can't be used hesre, since `bytes` and `str` are @@ -591,12 +574,12 @@ def shape(a: memoryview | bytearray) -> tuple[int]: ... def shape(a: ArrayLike) -> _AnyShape: ... @overload -def compress( +def compress[ScalarT: np.generic]( condition: _ArrayLikeBool_co, # 1D bool array - a: _ArrayLike[_ScalarT], + a: _ArrayLike[ScalarT], axis: SupportsIndex | None = None, out: None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def compress( condition: _ArrayLikeBool_co, # 1D bool array @@ -605,25 +588,25 @@ def compress( out: None = None, ) -> NDArray[Any]: ... @overload -def compress( +def compress[ArrayT: np.ndarray]( condition: _ArrayLikeBool_co, # 1D bool array a: ArrayLike, axis: SupportsIndex | None, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... @overload -def compress( +def compress[ArrayT: np.ndarray]( condition: _ArrayLikeBool_co, # 1D bool array a: ArrayLike, axis: SupportsIndex | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... 
# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload -def clip( - a: _ScalarT, +def clip[ScalarT: np.generic]( + a: ScalarT, a_min: ArrayLike | _NoValueType | None = ..., a_max: ArrayLike | _NoValueType | None = ..., out: None = None, @@ -632,7 +615,7 @@ def clip( max: ArrayLike | _NoValueType | None = ..., dtype: None = None, **kwargs: Unpack[_UFuncKwargs], -) -> _ScalarT: ... +) -> ScalarT: ... @overload def clip( a: _ScalarLike_co, @@ -646,8 +629,8 @@ def clip( **kwargs: Unpack[_UFuncKwargs], ) -> Any: ... @overload -def clip( - a: _ArrayLike[_ScalarT], +def clip[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], a_min: ArrayLike | _NoValueType | None = ..., a_max: ArrayLike | _NoValueType | None = ..., out: None = None, @@ -656,7 +639,7 @@ def clip( max: ArrayLike | _NoValueType | None = ..., dtype: None = None, **kwargs: Unpack[_UFuncKwargs], -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def clip( a: ArrayLike, @@ -670,29 +653,29 @@ def clip( **kwargs: Unpack[_UFuncKwargs], ) -> NDArray[Any]: ... @overload -def clip( +def clip[ArrayT: np.ndarray]( a: ArrayLike, a_min: ArrayLike | None, a_max: ArrayLike | None, - out: _ArrayT, + out: ArrayT, *, min: ArrayLike | _NoValueType | None = ..., max: ArrayLike | _NoValueType | None = ..., dtype: DTypeLike | None = None, **kwargs: Unpack[_UFuncKwargs], -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def clip( +def clip[ArrayT: np.ndarray]( a: ArrayLike, a_min: ArrayLike | _NoValueType | None = ..., a_max: ArrayLike | _NoValueType | None = ..., *, - out: _ArrayT, + out: ArrayT, min: ArrayLike | _NoValueType | None = ..., max: ArrayLike | _NoValueType | None = ..., dtype: DTypeLike | None = None, **kwargs: Unpack[_UFuncKwargs], -) -> _ArrayT: ... +) -> ArrayT: ... @overload def clip( a: ArrayLike, @@ -707,67 +690,67 @@ def clip( ) -> Any: ... @overload -def sum( - a: _ArrayLike[_ScalarT], +def sum[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], axis: None = None, dtype: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload -def sum( - a: _ArrayLike[_ScalarT], +def sum[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], axis: None = None, dtype: None = None, out: None = None, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT | NDArray[_ScalarT]: ... +) -> ScalarT | NDArray[ScalarT]: ... @overload -def sum( +def sum[ScalarT: np.generic]( a: ArrayLike, axis: None, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, keepdims: Literal[False] | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload -def sum( +def sum[ScalarT: np.generic]( a: ArrayLike, axis: None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, keepdims: Literal[False] | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... 
@overload -def sum( +def sum[ScalarT: np.generic]( a: ArrayLike, axis: _ShapeLike | None, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT | NDArray[_ScalarT]: ... +) -> ScalarT | NDArray[ScalarT]: ... @overload -def sum( +def sum[ScalarT: np.generic]( a: ArrayLike, axis: _ShapeLike | None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT | NDArray[_ScalarT]: ... +) -> ScalarT | NDArray[ScalarT]: ... @overload def sum( a: ArrayLike, @@ -779,26 +762,26 @@ def sum( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload -def sum( +def sum[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def sum( +def sum[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # keep in sync with `any` @overload @@ -820,23 +803,23 @@ def all( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Incomplete: ... @overload -def all( +def all[ArrayT: np.ndarray]( a: ArrayLike | None, axis: int | tuple[int, ...] | None, - out: _ArrayT, + out: ArrayT, keepdims: _BoolLike_co | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def all( +def all[ArrayT: np.ndarray]( a: ArrayLike | None, axis: int | tuple[int, ...] | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: _BoolLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # keep in sync with `all` @overload @@ -858,32 +841,32 @@ def any( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Incomplete: ... @overload -def any( +def any[ArrayT: np.ndarray]( a: ArrayLike | None, axis: int | tuple[int, ...] | None, - out: _ArrayT, + out: ArrayT, keepdims: _BoolLike_co | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def any( +def any[ArrayT: np.ndarray]( a: ArrayLike | None, axis: int | tuple[int, ...] | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: _BoolLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # @overload -def cumsum( - a: _ArrayLike[_ScalarT], +def cumsum[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], axis: SupportsIndex | None = None, dtype: None = None, out: None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def cumsum( a: ArrayLike, @@ -892,20 +875,20 @@ def cumsum( out: None = None, ) -> NDArray[Any]: ... @overload -def cumsum( +def cumsum[ScalarT: np.generic]( a: ArrayLike, axis: SupportsIndex | None, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... 
@overload -def cumsum( +def cumsum[ScalarT: np.generic]( a: ArrayLike, axis: SupportsIndex | None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def cumsum( a: ArrayLike, @@ -914,31 +897,31 @@ def cumsum( out: None = None, ) -> NDArray[Any]: ... @overload -def cumsum( +def cumsum[ArrayT: np.ndarray]( a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike | None, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... @overload -def cumsum( +def cumsum[ArrayT: np.ndarray]( a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... @overload -def cumulative_sum( - x: _ArrayLike[_ScalarT], +def cumulative_sum[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], /, *, axis: SupportsIndex | None = None, dtype: None = None, out: None = None, include_initial: bool = False, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def cumulative_sum( x: ArrayLike, @@ -950,15 +933,15 @@ def cumulative_sum( include_initial: bool = False, ) -> NDArray[Any]: ... @overload -def cumulative_sum( +def cumulative_sum[ScalarT: np.generic]( x: ArrayLike, /, *, axis: SupportsIndex | None = None, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, include_initial: bool = False, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def cumulative_sum( x: ArrayLike, @@ -970,23 +953,23 @@ def cumulative_sum( include_initial: bool = False, ) -> NDArray[Any]: ... @overload -def cumulative_sum( +def cumulative_sum[ArrayT: np.ndarray]( x: ArrayLike, /, *, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, - out: _ArrayT, + out: ArrayT, include_initial: bool = False, -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def ptp( - a: _ArrayLike[_ScalarT], +def ptp[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], axis: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload def ptp( a: ArrayLike, @@ -995,30 +978,30 @@ def ptp( keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload -def ptp( +def ptp[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def ptp( +def ptp[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def amax( - a: _ArrayLike[_ScalarT], +def amax[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], axis: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload def amax( a: ArrayLike, @@ -1029,34 +1012,34 @@ def amax( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload -def amax( +def amax[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... 
@overload -def amax( +def amax[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def amin( - a: _ArrayLike[_ScalarT], +def amin[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], axis: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload def amin( a: ArrayLike, @@ -1067,24 +1050,24 @@ def amin( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload -def amin( +def amin[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def amin( +def amin[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # TODO: `np.prod()``: For object arrays `initial` does not necessarily # have to be a numerical scalar. @@ -1154,26 +1137,26 @@ def prod( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload -def prod( +def prod[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, keepdims: Literal[False] | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload -def prod( +def prod[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, keepdims: Literal[False] | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload def prod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, @@ -1185,26 +1168,26 @@ def prod( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload -def prod( +def prod[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def prod( +def prod[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload @@ -1250,20 +1233,20 @@ def cumprod( out: None = None, ) -> NDArray[object_]: ... 
@overload -def cumprod( +def cumprod[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: SupportsIndex | None, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def cumprod( +def cumprod[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: SupportsIndex | None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, @@ -1272,20 +1255,20 @@ def cumprod( out: None = None, ) -> NDArray[Any]: ... @overload -def cumprod( +def cumprod[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: SupportsIndex | None, dtype: DTypeLike | None, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... @overload -def cumprod( +def cumprod[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... # TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload @@ -1349,15 +1332,15 @@ def cumulative_prod( include_initial: bool = False, ) -> NDArray[object_]: ... @overload -def cumulative_prod( +def cumulative_prod[ScalarT: np.generic]( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, axis: SupportsIndex | None = None, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, include_initial: bool = False, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def cumulative_prod( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, @@ -1369,15 +1352,15 @@ def cumulative_prod( include_initial: bool = False, ) -> NDArray[Any]: ... @overload -def cumulative_prod( +def cumulative_prod[ArrayT: np.ndarray]( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, - out: _ArrayT, + out: ArrayT, include_initial: bool = False, -) -> _ArrayT: ... +) -> ArrayT: ... def ndim(a: ArrayLike) -> int: ... @@ -1391,11 +1374,11 @@ def around( out: None = None, ) -> float16: ... @overload -def around( - a: _NumberOrObjectT, +def around[NumberOrObjectT: np.number | np.object_]( + a: NumberOrObjectT, decimals: SupportsIndex = 0, out: None = None, -) -> _NumberOrObjectT: ... +) -> NumberOrObjectT: ... @overload def around( a: _ComplexLike_co | object_, @@ -1409,11 +1392,11 @@ def around( out: None = None, ) -> NDArray[float16]: ... @overload -def around( - a: _ArrayLike[_NumberOrObjectT], +def around[NumberOrObjectT: np.number | np.object_]( + a: _ArrayLike[NumberOrObjectT], decimals: SupportsIndex = 0, out: None = None, -) -> NDArray[_NumberOrObjectT]: ... +) -> NDArray[NumberOrObjectT]: ... @overload def around( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, @@ -1421,18 +1404,18 @@ def around( out: None = None, ) -> NDArray[Any]: ... @overload -def around( +def around[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, decimals: SupportsIndex, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... @overload -def around( +def around[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, decimals: SupportsIndex = 0, *, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... 
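
As with the reductions earlier in this file, threading ScalarT and ArrayT through `around` and the cumulative products means dtype information survives the call. A small sketch, again illustrative rather than part of the patch; it assumes a NumPy recent enough to provide `cumulative_prod` (2.1+), and the comments state the static types these overloads are designed to yield.

    # Illustrative sketch only -- not part of the patch.
    import numpy as np

    a = np.array([1.25, 2.5, 3.75])     # NDArray[float64]
    r = np.around(a, 1)                 # NumberOrObjectT overload -> NDArray[float64]
    p = np.cumprod(a, dtype=np.int64)   # dtype overload -> NDArray[int64]
    buf = np.empty(3)
    q = np.cumulative_prod(a, out=buf)  # out overload: `q` has the type of `buf`
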
# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload @@ -1464,77 +1447,77 @@ def mean( keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> timedelta64: ... +) -> np.timedelta64: ... @overload -def mean( +def mean[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def mean( +def mean[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def mean( +def mean[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload -def mean( +def mean[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, keepdims: Literal[False] | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload -def mean( +def mean[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None, keepdims: Literal[True, 1], *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def mean( +def mean[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, *, keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT | NDArray[_ScalarT]: ... +) -> ScalarT | NDArray[ScalarT]: ... @overload -def mean( +def mean[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT | NDArray[_ScalarT]: ... +) -> ScalarT | NDArray[ScalarT]: ... @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, @@ -1573,10 +1556,10 @@ def std( correction: float | _NoValueType = ..., ) -> Any: ... @overload -def std( +def std[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, ddof: float = 0, keepdims: Literal[False] | _NoValueType = ..., @@ -1584,20 +1567,20 @@ def std( where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... 
@overload -def std( +def std[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, ddof: float = 0, keepdims: Literal[False] | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, @@ -1612,31 +1595,31 @@ def std( correction: float | _NoValueType = ..., ) -> Any: ... @overload -def std( +def std[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def std( +def std[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload def var( @@ -1665,10 +1648,10 @@ def var( correction: float | _NoValueType = ..., ) -> Any: ... @overload -def var( +def var[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, ddof: float = 0, keepdims: Literal[False] | _NoValueType = ..., @@ -1676,20 +1659,20 @@ def var( where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload -def var( +def var[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, ddof: float = 0, keepdims: Literal[False] | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, @@ -1704,31 +1687,31 @@ def var( correction: float | _NoValueType = ..., ) -> Any: ... @overload -def var( +def var[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def var( +def var[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ArrayT: ... 
+) -> ArrayT: ... max = amax min = amin diff --git a/numpy/_core/function_base.pyi b/numpy/_core/function_base.pyi index 19e1238c4e15..060a44d416ea 100644 --- a/numpy/_core/function_base.pyi +++ b/numpy/_core/function_base.pyi @@ -1,5 +1,5 @@ from _typeshed import Incomplete -from typing import Literal as L, SupportsIndex, TypeAlias, TypeVar, overload +from typing import Literal as L, SupportsIndex, overload import numpy as np from numpy._typing import ( @@ -13,9 +13,9 @@ from numpy._typing._array_like import _DualArrayLike __all__ = ["geomspace", "linspace", "logspace"] -_ScalarT = TypeVar("_ScalarT", bound=np.generic) +type _ToArrayFloat64 = _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], float] -_ToArrayFloat64: TypeAlias = _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], float] +### @overload def linspace( @@ -54,29 +54,29 @@ def linspace( device: L["cpu"] | None = None, ) -> NDArray[np.complexfloating]: ... @overload -def linspace( +def linspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex, endpoint: bool, retstep: L[False], - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, *, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def linspace( +def linspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = 50, endpoint: bool = True, retstep: L[False] = False, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def linspace( start: _ArrayLikeComplex_co, @@ -126,17 +126,17 @@ def linspace( device: L["cpu"] | None = None, ) -> tuple[NDArray[np.complexfloating], np.complexfloating]: ... @overload -def linspace( +def linspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = 50, endpoint: bool = True, *, retstep: L[True], - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, device: L["cpu"] | None = None, -) -> tuple[NDArray[_ScalarT], _ScalarT]: ... +) -> tuple[NDArray[ScalarT], ScalarT]: ... @overload def linspace( start: _ArrayLikeComplex_co, @@ -181,26 +181,26 @@ def logspace( axis: SupportsIndex = 0, ) -> NDArray[np.complexfloating]: ... @overload -def logspace( +def logspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex, endpoint: bool, base: _ArrayLikeComplex_co, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def logspace( +def logspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = 50, endpoint: bool = True, base: _ArrayLikeComplex_co = 10.0, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def logspace( start: _ArrayLikeComplex_co, @@ -240,24 +240,24 @@ def geomspace( axis: SupportsIndex = 0, ) -> NDArray[np.complexfloating]: ... @overload -def geomspace( +def geomspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex, endpoint: bool, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... 
@overload -def geomspace( +def geomspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = 50, endpoint: bool = True, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def geomspace( start: _ArrayLikeComplex_co, diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 812e0b562b9d..e516c3cdab72 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -1,7 +1,7 @@ # TODO: Sort out any and all missing functions in this namespace import datetime as dt from _typeshed import Incomplete, StrOrBytesPath, SupportsLenAndGetItem -from collections.abc import Callable, Iterable, Sequence +from collections.abc import Buffer, Callable, Iterable, Sequence from typing import ( Any, ClassVar, @@ -9,8 +9,6 @@ from typing import ( Literal as L, Protocol, SupportsIndex, - TypeAlias, - TypeVar, final, overload, type_check_only, @@ -19,7 +17,6 @@ from typing_extensions import CapsuleType import numpy as np from numpy import ( # type: ignore[attr-defined] # Python >=3.12 - _AnyShapeT, _CastingKind, _CopyMode, _ModeKind, @@ -27,7 +24,6 @@ from numpy import ( # type: ignore[attr-defined] # Python >=3.12 _NDIterFlagsOp, _OrderCF, _OrderKACF, - _SupportsBuffer, _SupportsFileMethods, broadcast, busdaycalendar, @@ -41,7 +37,6 @@ from numpy import ( # type: ignore[attr-defined] # Python >=3.12 float64, floating, from_dlpack, - generic, int_, interp, intp, @@ -183,22 +178,11 @@ __all__ = [ "zeros", ] -_ScalarT = TypeVar("_ScalarT", bound=generic) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) -_ArrayT = TypeVar("_ArrayT", bound=ndarray) -_ArrayT_co = TypeVar("_ArrayT_co", bound=ndarray, covariant=True) -_ShapeT = TypeVar("_ShapeT", bound=_Shape) -# TODO: fix the names of these typevars -_ReturnType = TypeVar("_ReturnType") -_IDType = TypeVar("_IDType") -_Nin = TypeVar("_Nin", bound=int) -_Nout = TypeVar("_Nout", bound=int) - -_Array: TypeAlias = ndarray[_ShapeT, dtype[_ScalarT]] -_Array1D: TypeAlias = ndarray[tuple[int], dtype[_ScalarT]] +type _Array[ShapeT: _Shape, ScalarT: np.generic] = ndarray[ShapeT, dtype[ScalarT]] +type _Array1D[ScalarT: np.generic] = ndarray[tuple[int], dtype[ScalarT]] # Valid time units -_UnitKind: TypeAlias = L[ +type _UnitKind = L[ "Y", "M", "D", @@ -212,7 +196,7 @@ _UnitKind: TypeAlias = L[ "fs", "as", ] -_RollKind: TypeAlias = L[ # `raise` is deliberately excluded +type _RollKind = L[ # `raise` is deliberately excluded "nat", "forward", "following", @@ -222,9 +206,16 @@ _RollKind: TypeAlias = L[ # `raise` is deliberately excluded "modifiedpreceding", ] +type _ArangeScalar = np.integer | np.floating | np.datetime64 | np.timedelta64 + +# The datetime functions perform unsafe casts to `datetime64[D]`, +# so a lot of different argument types are allowed here +type _ToDates = dt.date | _NestedSequence[dt.date] +type _ToDeltas = dt.timedelta | _NestedSequence[dt.timedelta] + @type_check_only -class _SupportsArray(Protocol[_ArrayT_co]): - def __array__(self, /) -> _ArrayT_co: ... +class _SupportsArray[ArrayT_co: np.ndarray](Protocol): + def __array__(self, /) -> ArrayT_co: ... @type_check_only class _ConstructorEmpty(Protocol): @@ -241,27 +232,27 @@ class _ConstructorEmpty(Protocol): like: _SupportsArrayFunc | None = None, ) -> _Array1D[float64]: ... 
@overload - def __call__( + def __call__[DTypeT: np.dtype]( self, /, shape: SupportsIndex, - dtype: _DTypeT | _SupportsDType[_DTypeT], + dtype: DTypeT | _SupportsDType[DTypeT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, - ) -> ndarray[tuple[int], _DTypeT]: ... + ) -> ndarray[tuple[int], DTypeT]: ... @overload - def __call__( + def __call__[ScalarT: np.generic]( self, /, shape: SupportsIndex, - dtype: type[_ScalarT], + dtype: type[ScalarT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, - ) -> _Array1D[_ScalarT]: ... + ) -> _Array1D[ScalarT]: ... @overload def __call__( self, @@ -276,49 +267,49 @@ class _ConstructorEmpty(Protocol): # known shape @overload - def __call__( + def __call__[ShapeT: _Shape]( self, /, - shape: _AnyShapeT, + shape: ShapeT, dtype: None = None, order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, - ) -> _Array[_AnyShapeT, float64]: ... + ) -> _Array[ShapeT, float64]: ... @overload - def __call__( + def __call__[ShapeT: _Shape, DTypeT: np.dtype]( self, /, - shape: _AnyShapeT, - dtype: _DTypeT | _SupportsDType[_DTypeT], + shape: ShapeT, + dtype: DTypeT | _SupportsDType[DTypeT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, - ) -> ndarray[_AnyShapeT, _DTypeT]: ... + ) -> ndarray[ShapeT, DTypeT]: ... @overload - def __call__( + def __call__[ShapeT: _Shape, ScalarT: np.generic]( self, /, - shape: _AnyShapeT, - dtype: type[_ScalarT], + shape: ShapeT, + dtype: type[ScalarT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, - ) -> _Array[_AnyShapeT, _ScalarT]: ... + ) -> _Array[ShapeT, ScalarT]: ... @overload - def __call__( + def __call__[ShapeT: _Shape]( self, /, - shape: _AnyShapeT, + shape: ShapeT, dtype: DTypeLike | None = None, order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, - ) -> _Array[_AnyShapeT, Incomplete]: ... + ) -> _Array[ShapeT, Incomplete]: ... # unknown shape @overload @@ -332,25 +323,25 @@ class _ConstructorEmpty(Protocol): like: _SupportsArrayFunc | None = None, ) -> NDArray[float64]: ... @overload - def __call__( + def __call__[DTypeT: np.dtype]( self, /, shape: _ShapeLike, - dtype: _DTypeT | _SupportsDType[_DTypeT], + dtype: DTypeT | _SupportsDType[DTypeT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, - ) -> ndarray[_AnyShape, _DTypeT]: ... + ) -> ndarray[_AnyShape, DTypeT]: ... @overload - def __call__( + def __call__[ScalarT: np.generic]( self, /, shape: _ShapeLike, - dtype: type[_ScalarT], + dtype: type[ScalarT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, - ) -> NDArray[_ScalarT]: ... + ) -> NDArray[ScalarT]: ... @overload def __call__( self, @@ -391,7 +382,7 @@ set_datetimeparse_function: Final[Callable[..., object]] = ... def get_handler_name(a: NDArray[Any] = ..., /) -> str | None: ... def get_handler_version(a: NDArray[Any] = ..., /) -> int | None: ... def format_longfloat(x: np.longdouble, precision: int) -> str: ... -def scalar(dtype: _DTypeT, object: bytes | object = ...) -> ndarray[tuple[()], _DTypeT]: ... +def scalar[DTypeT: np.dtype](dtype: DTypeT, object: bytes | object = ...) -> ndarray[tuple[()], DTypeT]: ... def set_typeDict(dict_: dict[str, np.dtype], /) -> None: ... typeinfo: Final[dict[str, np.dtype[np.generic]]] = ... 
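
The `_ConstructorEmpty` protocol above deserves a short illustration: its `__call__` overloads dispatch on the static type of `shape`, so a literal tuple keeps its exact shape type while a plain `_ShapeLike` falls back to an unknown shape. Note also the switch from the old constrained `_AnyShapeT` TypeVar to an inline `ShapeT: _Shape` bound. A sketch, assuming the `zeros`/`empty` declarations in the next hunk; the comments describe expected inference, not checker output.

    # Illustrative sketch only -- not part of the patch.
    import numpy as np

    z = np.zeros((2, 3))             # literal tuple: ndarray[tuple[int, int], dtype[float64]]
    e = np.empty(4, dtype=np.int32)  # SupportsIndex shape: ndarray[tuple[int], dtype[int32]]
    dims: list[int] = [2, 2]
    u = np.zeros(dims, dtype="i4")   # _ShapeLike fallback: the shape type is no longer tracked
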
@@ -410,8 +401,8 @@ zeros: Final[_ConstructorEmpty] = ... empty: Final[_ConstructorEmpty] = ... @overload -def empty_like( - prototype: _ArrayT, +def empty_like[ArrayT: np.ndarray]( + prototype: ArrayT, /, dtype: None = None, order: _OrderKACF = "K", @@ -419,10 +410,10 @@ def empty_like( shape: _ShapeLike | None = None, *, device: L["cpu"] | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def empty_like( - prototype: _ArrayLike[_ScalarT], +def empty_like[ScalarT: np.generic]( + prototype: _ArrayLike[ScalarT], /, dtype: None = None, order: _OrderKACF = "K", @@ -430,18 +421,18 @@ def empty_like( shape: _ShapeLike | None = None, *, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def empty_like( +def empty_like[ScalarT: np.generic]( prototype: Incomplete, /, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = "K", subok: bool = True, shape: _ShapeLike | None = None, *, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def empty_like( prototype: Incomplete, @@ -455,8 +446,8 @@ def empty_like( ) -> NDArray[Incomplete]: ... @overload -def array( - object: _ArrayT, +def array[ArrayT: np.ndarray]( + object: ArrayT, dtype: None = None, *, copy: bool | _CopyMode | None = True, @@ -465,10 +456,10 @@ def array( ndmin: int = 0, ndmax: int = 0, like: _SupportsArrayFunc | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def array( - object: _SupportsArray[_ArrayT], +def array[ArrayT: np.ndarray]( + object: _SupportsArray[ArrayT], dtype: None = None, *, copy: bool | _CopyMode | None = True, @@ -477,10 +468,10 @@ def array( ndmin: L[0] = 0, ndmax: int = 0, like: _SupportsArrayFunc | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def array( - object: _ArrayLike[_ScalarT], +def array[ScalarT: np.generic]( + object: _ArrayLike[ScalarT], dtype: None = None, *, copy: bool | _CopyMode | None = True, @@ -489,11 +480,11 @@ def array( ndmin: int = 0, ndmax: int = 0, like: _SupportsArrayFunc | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def array( +def array[ScalarT: np.generic]( object: Any, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], *, copy: bool | _CopyMode | None = True, order: _OrderKACF = "K", @@ -501,7 +492,7 @@ def array( ndmin: int = 0, ndmax: int = 0, like: _SupportsArrayFunc | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def array( object: Any, @@ -542,25 +533,25 @@ def normalize_axis_index(axis: int, ndim: int, msg_prefix: str | None = None) -> # NOTE: Allow any sequence of array-like objects @overload -def concatenate( - arrays: _ArrayLike[_ScalarT], +def concatenate[ScalarT: np.generic]( + arrays: _ArrayLike[ScalarT], /, axis: SupportsIndex | None = 0, out: None = None, *, dtype: None = None, casting: _CastingKind | None = "same_kind", -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def concatenate( +def concatenate[ScalarT: np.generic]( arrays: SupportsLenAndGetItem[ArrayLike], /, axis: SupportsIndex | None = 0, out: None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], casting: _CastingKind | None = "same_kind", -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def concatenate( arrays: SupportsLenAndGetItem[ArrayLike], @@ -572,25 +563,25 @@ def concatenate( casting: _CastingKind | None = "same_kind", ) -> NDArray[Incomplete]: ... 
@overload -def concatenate( +def concatenate[OutT: np.ndarray]( arrays: SupportsLenAndGetItem[ArrayLike], /, axis: SupportsIndex | None = 0, *, - out: _ArrayT, + out: OutT, dtype: DTypeLike | None = None, casting: _CastingKind | None = "same_kind", -) -> _ArrayT: ... +) -> OutT: ... @overload -def concatenate( +def concatenate[OutT: np.ndarray]( arrays: SupportsLenAndGetItem[ArrayLike], /, axis: SupportsIndex | None, - out: _ArrayT, + out: OutT, *, dtype: DTypeLike | None = None, casting: _CastingKind | None = "same_kind", -) -> _ArrayT: ... +) -> OutT: ... def inner(a: ArrayLike, b: ArrayLike, /) -> Incomplete: ... @@ -609,7 +600,7 @@ def result_type(*arrays_and_dtypes: ArrayLike | DTypeLike | None) -> dtype: ... @overload def dot(a: ArrayLike, b: ArrayLike, out: None = None) -> Incomplete: ... @overload -def dot(a: ArrayLike, b: ArrayLike, out: _ArrayT) -> _ArrayT: ... +def dot[OutT: np.ndarray](a: ArrayLike, b: ArrayLike, out: OutT) -> OutT: ... @overload def vdot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /) -> np.bool: ... @@ -633,7 +624,7 @@ def bincount(x: ArrayLike, /, weights: ArrayLike | None = None, minlength: Suppo def copyto(dst: ndarray, src: ArrayLike, casting: _CastingKind = "same_kind", where: object = True) -> None: ... def putmask(a: ndarray, /, mask: _ArrayLikeBool_co, values: ArrayLike) -> None: ... -_BitOrder: TypeAlias = L["big", "little"] +type _BitOrder = L["big", "little"] @overload def packbits(a: _ArrayLikeInt_co, /, axis: None = None, bitorder: _BitOrder = "big") -> ndarray[tuple[int], dtype[uint8]]: ... @@ -657,32 +648,32 @@ def unpackbits( bitorder: _BitOrder = "big", ) -> NDArray[uint8]: ... -_MaxWork: TypeAlias = L[-1, 0] +type _MaxWork = L[-1, 0] # any two python objects will be accepted, not just `ndarray`s def shares_memory(a: object, b: object, /, max_work: _MaxWork = -1) -> bool: ... def may_share_memory(a: object, b: object, /, max_work: _MaxWork = 0) -> bool: ... @overload -def asarray( - a: _ArrayLike[_ScalarT], +def asarray[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], dtype: None = None, order: _OrderKACF = ..., *, device: L["cpu"] | None = ..., copy: bool | None = ..., like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def asarray( +def asarray[ScalarT: np.generic]( a: Any, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = ..., *, device: L["cpu"] | None = ..., copy: bool | None = ..., like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def asarray( a: Any, @@ -695,35 +686,35 @@ def asarray( ) -> NDArray[Any]: ... @overload -def asanyarray( - a: _ArrayT, # Preserve subclass-information +def asanyarray[ArrayT: np.ndarray]( + a: ArrayT, # Preserve subclass-information dtype: None = None, order: _OrderKACF = ..., *, device: L["cpu"] | None = ..., copy: bool | None = ..., like: _SupportsArrayFunc | None = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def asanyarray( - a: _ArrayLike[_ScalarT], +def asanyarray[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], dtype: None = None, order: _OrderKACF = ..., *, device: L["cpu"] | None = ..., copy: bool | None = ..., like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... 
@overload -def asanyarray( +def asanyarray[ScalarT: np.generic]( a: Any, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = ..., *, device: L["cpu"] | None = ..., copy: bool | None = ..., like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def asanyarray( a: Any, @@ -736,19 +727,19 @@ def asanyarray( ) -> NDArray[Any]: ... @overload -def ascontiguousarray( - a: _ArrayLike[_ScalarT], +def ascontiguousarray[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], dtype: None = None, *, like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def ascontiguousarray( +def ascontiguousarray[ScalarT: np.generic]( a: Any, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], *, like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def ascontiguousarray( a: Any, @@ -758,19 +749,19 @@ def ascontiguousarray( ) -> NDArray[Any]: ... @overload -def asfortranarray( - a: _ArrayLike[_ScalarT], +def asfortranarray[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], dtype: None = None, *, like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def asfortranarray( +def asfortranarray[ScalarT: np.generic]( a: Any, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], *, like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def asfortranarray( a: Any, @@ -792,14 +783,14 @@ def fromstring( like: _SupportsArrayFunc | None = ..., ) -> NDArray[float64]: ... @overload -def fromstring( +def fromstring[ScalarT: np.generic]( string: str | bytes, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], count: SupportsIndex = ..., *, sep: str, like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def fromstring( string: str | bytes, @@ -811,69 +802,69 @@ def fromstring( ) -> NDArray[Any]: ... @overload -def frompyfunc( - func: Callable[[Any], _ReturnType], /, +def frompyfunc[ReturnT]( + func: Callable[[Any], ReturnT], /, nin: L[1], nout: L[1], *, identity: None = None, -) -> _PyFunc_Nin1_Nout1[_ReturnType, None]: ... +) -> _PyFunc_Nin1_Nout1[ReturnT, None]: ... @overload -def frompyfunc( - func: Callable[[Any], _ReturnType], /, +def frompyfunc[ReturnT, IdentityT]( + func: Callable[[Any], ReturnT], /, nin: L[1], nout: L[1], *, - identity: _IDType, -) -> _PyFunc_Nin1_Nout1[_ReturnType, _IDType]: ... + identity: IdentityT, +) -> _PyFunc_Nin1_Nout1[ReturnT, IdentityT]: ... @overload -def frompyfunc( - func: Callable[[Any, Any], _ReturnType], /, +def frompyfunc[ReturnT]( + func: Callable[[Any, Any], ReturnT], /, nin: L[2], nout: L[1], *, identity: None = None, -) -> _PyFunc_Nin2_Nout1[_ReturnType, None]: ... +) -> _PyFunc_Nin2_Nout1[ReturnT, None]: ... @overload -def frompyfunc( - func: Callable[[Any, Any], _ReturnType], /, +def frompyfunc[ReturnT, IdentityT]( + func: Callable[[Any, Any], ReturnT], /, nin: L[2], nout: L[1], *, - identity: _IDType, -) -> _PyFunc_Nin2_Nout1[_ReturnType, _IDType]: ... + identity: IdentityT, +) -> _PyFunc_Nin2_Nout1[ReturnT, IdentityT]: ... @overload -def frompyfunc( - func: Callable[..., _ReturnType], /, - nin: _Nin, +def frompyfunc[ReturnT, NInT: int]( + func: Callable[..., ReturnT], /, + nin: NInT, nout: L[1], *, identity: None = None, -) -> _PyFunc_Nin3P_Nout1[_ReturnType, None, _Nin]: ... +) -> _PyFunc_Nin3P_Nout1[ReturnT, None, NInT]: ... 
@overload -def frompyfunc( - func: Callable[..., _ReturnType], /, - nin: _Nin, +def frompyfunc[ReturnT, NInT: int, IdentityT]( + func: Callable[..., ReturnT], /, + nin: NInT, nout: L[1], *, - identity: _IDType, -) -> _PyFunc_Nin3P_Nout1[_ReturnType, _IDType, _Nin]: ... + identity: IdentityT, +) -> _PyFunc_Nin3P_Nout1[ReturnT, IdentityT, NInT]: ... @overload -def frompyfunc( - func: Callable[..., _2PTuple[_ReturnType]], /, - nin: _Nin, - nout: _Nout, +def frompyfunc[ReturnT, NInT: int, NOutT: int]( + func: Callable[..., _2PTuple[ReturnT]], /, + nin: NInT, + nout: NOutT, *, identity: None = None, -) -> _PyFunc_Nin1P_Nout2P[_ReturnType, None, _Nin, _Nout]: ... +) -> _PyFunc_Nin1P_Nout2P[ReturnT, None, NInT, NOutT]: ... @overload -def frompyfunc( - func: Callable[..., _2PTuple[_ReturnType]], /, - nin: _Nin, - nout: _Nout, +def frompyfunc[ReturnT, NInT: int, NOutT: int, IdentityT]( + func: Callable[..., _2PTuple[ReturnT]], /, + nin: NInT, + nout: NOutT, *, - identity: _IDType, -) -> _PyFunc_Nin1P_Nout2P[_ReturnType, _IDType, _Nin, _Nout]: ... + identity: IdentityT, +) -> _PyFunc_Nin1P_Nout2P[ReturnT, IdentityT, NInT, NOutT]: ... @overload def frompyfunc( func: Callable[..., Any], /, @@ -894,15 +885,15 @@ def fromfile( like: _SupportsArrayFunc | None = ..., ) -> NDArray[float64]: ... @overload -def fromfile( +def fromfile[ScalarT: np.generic]( file: StrOrBytesPath | _SupportsFileMethods, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], count: SupportsIndex = ..., sep: str = ..., offset: SupportsIndex = ..., *, like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def fromfile( file: StrOrBytesPath | _SupportsFileMethods, @@ -915,13 +906,13 @@ def fromfile( ) -> NDArray[Any]: ... @overload -def fromiter( +def fromiter[ScalarT: np.generic]( iter: Iterable[Any], - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], count: SupportsIndex = ..., *, like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def fromiter( iter: Iterable[Any], @@ -933,7 +924,7 @@ def fromiter( @overload def frombuffer( - buffer: _SupportsBuffer, + buffer: Buffer, dtype: None = None, count: SupportsIndex = ..., offset: SupportsIndex = ..., @@ -941,17 +932,17 @@ def frombuffer( like: _SupportsArrayFunc | None = ..., ) -> NDArray[float64]: ... @overload -def frombuffer( - buffer: _SupportsBuffer, - dtype: _DTypeLike[_ScalarT], +def frombuffer[ScalarT: np.generic]( + buffer: Buffer, + dtype: _DTypeLike[ScalarT], count: SupportsIndex = ..., offset: SupportsIndex = ..., *, like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def frombuffer( - buffer: _SupportsBuffer, + buffer: Buffer, dtype: DTypeLike | None = ..., count: SupportsIndex = ..., offset: SupportsIndex = ..., @@ -959,22 +950,19 @@ def frombuffer( like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... 
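
Two of the changes above are visible at run time as well as in the stubs: `frompyfunc` now infers its result type from the wrapped callable via `ReturnT`, and `frombuffer` accepts anything satisfying `collections.abc.Buffer`. A minimal sketch with invented names; the reveal_type comment is the expected inference under these stubs.

    # Illustrative sketch only -- not part of the patch.
    import numpy as np

    double = np.frompyfunc(lambda v: v * 2, 1, 1)  # nin=1, nout=1, identity None
    obj = double(np.arange(3))                     # object-dtype array([0, 2, 4]) at run time

    raw = np.frombuffer(bytes(range(8)), dtype=np.uint8)  # bytes satisfies Buffer
    # reveal_type(raw)  ->  ndarray[..., dtype[uint8]]
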
-_ArangeScalar: TypeAlias = np.integer | np.floating | np.datetime64 | np.timedelta64 -_ArangeScalarT = TypeVar("_ArangeScalarT", bound=_ArangeScalar) - # keep in sync with ma.core.arange # NOTE: The `float64 | Any` return types needed to avoid incompatible overlapping overloads @overload # dtype= -def arange( +def arange[ScalarT: _ArangeScalar]( start_or_stop: _ArangeScalar | float, /, stop: _ArangeScalar | float | None = None, step: _ArangeScalar | float | None = 1, *, - dtype: _DTypeLike[_ArangeScalarT], + dtype: _DTypeLike[ScalarT], device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> _Array1D[_ArangeScalarT]: ... +) -> _Array1D[ScalarT]: ... @overload # (int-like, int-like?, int-like?) def arange( start_or_stop: _IntLike_co, @@ -1056,12 +1044,6 @@ def arange( # def datetime_data(dtype: str | _DTypeLike[datetime64 | timedelta64], /) -> tuple[str, int]: ... -# The datetime functions perform unsafe casts to `datetime64[D]`, -# so a lot of different argument types are allowed here - -_ToDates: TypeAlias = dt.date | _NestedSequence[dt.date] -_ToDeltas: TypeAlias = dt.timedelta | _NestedSequence[dt.timedelta] - @overload def busday_count( begindates: _ScalarLike_co | dt.date, @@ -1081,24 +1063,24 @@ def busday_count( out: None = None, ) -> NDArray[int_]: ... @overload -def busday_count( +def busday_count[OutT: np.ndarray]( begindates: ArrayLike | _ToDates, enddates: ArrayLike | _ToDates, weekmask: ArrayLike = "1111100", holidays: ArrayLike | _ToDates = (), busdaycal: busdaycalendar | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... + out: OutT, +) -> OutT: ... @overload -def busday_count( +def busday_count[OutT: np.ndarray]( begindates: ArrayLike | _ToDates, enddates: ArrayLike | _ToDates, weekmask: ArrayLike, holidays: ArrayLike | _ToDates, busdaycal: busdaycalendar | None, - out: _ArrayT, -) -> _ArrayT: ... + out: OutT, +) -> OutT: ... # `roll="raise"` is (more or less?) equivalent to `casting="safe"` @overload @@ -1122,7 +1104,7 @@ def busday_offset( out: None = None, ) -> NDArray[datetime64]: ... @overload -def busday_offset( +def busday_offset[OutT: np.ndarray]( dates: _ArrayLike[datetime64] | _ToDates, offsets: _ArrayLikeTD64_co | _ToDeltas, roll: L["raise"] = "raise", @@ -1130,18 +1112,18 @@ def busday_offset( holidays: ArrayLike | _ToDates | None = None, busdaycal: busdaycalendar | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... + out: OutT, +) -> OutT: ... @overload -def busday_offset( +def busday_offset[OutT: np.ndarray]( dates: _ArrayLike[datetime64] | _ToDates, offsets: _ArrayLikeTD64_co | _ToDeltas, roll: L["raise"], weekmask: ArrayLike, holidays: ArrayLike | _ToDates | None, busdaycal: busdaycalendar | None, - out: _ArrayT, -) -> _ArrayT: ... + out: OutT, +) -> OutT: ... @overload def busday_offset( dates: _ScalarLike_co | dt.date, @@ -1163,7 +1145,7 @@ def busday_offset( out: None = None, ) -> NDArray[datetime64]: ... @overload -def busday_offset( +def busday_offset[OutT: np.ndarray]( dates: ArrayLike | _ToDates, offsets: ArrayLike | _ToDeltas, roll: _RollKind, @@ -1171,18 +1153,18 @@ def busday_offset( holidays: ArrayLike | _ToDates | None = None, busdaycal: busdaycalendar | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... + out: OutT, +) -> OutT: ... @overload -def busday_offset( +def busday_offset[OutT: np.ndarray]( dates: ArrayLike | _ToDates, offsets: ArrayLike | _ToDeltas, roll: _RollKind, weekmask: ArrayLike, holidays: ArrayLike | _ToDates | None, busdaycal: busdaycalendar | None, - out: _ArrayT, -) -> _ArrayT: ... 
+ out: OutT, +) -> OutT: ... @overload def is_busday( @@ -1201,24 +1183,24 @@ def is_busday( out: None = None, ) -> NDArray[np.bool]: ... @overload -def is_busday( +def is_busday[OutT: np.ndarray]( dates: ArrayLike | _ToDates, weekmask: ArrayLike = "1111100", holidays: ArrayLike | _ToDates | None = None, busdaycal: busdaycalendar | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... + out: OutT, +) -> OutT: ... @overload -def is_busday( +def is_busday[OutT: np.ndarray]( dates: ArrayLike | _ToDates, weekmask: ArrayLike, holidays: ArrayLike | _ToDates | None, busdaycal: busdaycalendar | None, - out: _ArrayT, -) -> _ArrayT: ... + out: OutT, +) -> OutT: ... -_TimezoneContext: TypeAlias = L["naive", "UTC", "local"] | dt.tzinfo +type _TimezoneContext = L["naive", "UTC", "local"] | dt.tzinfo @overload def datetime_as_string( @@ -1252,7 +1234,7 @@ def compare_chararrays( def add_docstring(obj: Callable[..., Any], docstring: str, /) -> None: ... -_GetItemKeys: TypeAlias = L[ +type _GetItemKeys = L[ "C", "CONTIGUOUS", "C_CONTIGUOUS", "F", "FORTRAN", "F_CONTIGUOUS", "W", "WRITEABLE", @@ -1265,7 +1247,7 @@ _GetItemKeys: TypeAlias = L[ "FNC", "FORC", ] -_SetItemKeys: TypeAlias = L[ +type _SetItemKeys = L[ "A", "ALIGNED", "W", "WRITEABLE", "X", "WRITEBACKIFCOPY", diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index 4ad2881cf17e..f7399ef44856 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -7,7 +7,6 @@ from typing import ( Literal as L, SupportsAbs, SupportsIndex, - TypeAlias, TypeGuard, TypeVar, overload, @@ -631,23 +630,6 @@ __all__ = [ "zeros_like", ] -_T = TypeVar("_T") -_ScalarT = TypeVar("_ScalarT", bound=generic) -_NumberObjectT = TypeVar("_NumberObjectT", bound=number | object_) -_NumericScalarT = TypeVar("_NumericScalarT", bound=number | timedelta64 | object_) -_DTypeT = TypeVar("_DTypeT", bound=dtype) -_ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) -_ShapeT = TypeVar("_ShapeT", bound=_Shape) - -_AnyShapeT = TypeVar( - "_AnyShapeT", - tuple[()], - tuple[int], - tuple[int, int], - tuple[int, int, int], - tuple[int, int, int, int], - tuple[int, ...], -) _AnyNumericScalarT = TypeVar( "_AnyNumericScalarT", np.int8, np.int16, np.int32, np.int64, @@ -658,63 +640,63 @@ _AnyNumericScalarT = TypeVar( np.object_, ) -_CorrelateMode: TypeAlias = L["valid", "same", "full"] +type _CorrelateMode = L["valid", "same", "full"] -_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] -_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] -_Array3D: TypeAlias = np.ndarray[tuple[int, int, int], np.dtype[_ScalarT]] -_Array4D: TypeAlias = np.ndarray[tuple[int, int, int, int], np.dtype[_ScalarT]] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +type _Array3D[ScalarT: np.generic] = np.ndarray[tuple[int, int, int], np.dtype[ScalarT]] +type _Array4D[ScalarT: np.generic] = np.ndarray[tuple[int, int, int, int], np.dtype[ScalarT]] -_Int_co: TypeAlias = np.integer | np.bool -_Float_co: TypeAlias = np.floating | _Int_co -_Number_co: TypeAlias = np.number | np.bool -_TD64_co: TypeAlias = np.timedelta64 | _Int_co +type _Int_co = np.integer | np.bool +type _Float_co = np.floating | _Int_co +type _Number_co = np.number | np.bool +type _TD64_co = np.timedelta64 | _Int_co -_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_ScalarT]] | Sequence[_ScalarT] -_ArrayLike1DBool_co: TypeAlias = _SupportsArray[np.dtype[np.bool]] | Sequence[py_bool | 
np.bool] -_ArrayLike1DInt_co: TypeAlias = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co] -_ArrayLike1DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co] -_ArrayLike1DNumber_co: TypeAlias = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co] -_ArrayLike1DTD64_co: TypeAlias = _ArrayLike1D[_TD64_co] -_ArrayLike1DObject_co: TypeAlias = _ArrayLike1D[np.object_] +type _ArrayLike1D[ScalarT: np.generic] = _SupportsArray[np.dtype[ScalarT]] | Sequence[ScalarT] +type _ArrayLike1DBool_co = _SupportsArray[np.dtype[np.bool]] | Sequence[py_bool | np.bool] +type _ArrayLike1DInt_co = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co] +type _ArrayLike1DFloat_co = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co] +type _ArrayLike1DNumber_co = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co] +type _ArrayLike1DTD64_co = _ArrayLike1D[_TD64_co] +type _ArrayLike1DObject_co = _ArrayLike1D[np.object_] -_DTypeLikeInt: TypeAlias = type[int] | _IntCodes -_DTypeLikeFloat64: TypeAlias = type[float] | _Float64Codes | _DoubleCodes -_DTypeLikeComplex128: TypeAlias = type[complex] | _Complex128Codes | _CDoubleCodes +type _DTypeLikeInt = type[int] | _IntCodes +type _DTypeLikeFloat64 = type[float] | _Float64Codes | _DoubleCodes +type _DTypeLikeComplex128 = type[complex] | _Complex128Codes | _CDoubleCodes ### # keep in sync with `ones_like` @overload -def zeros_like( - a: _ArrayT, +def zeros_like[ArrayT: np.ndarray]( + a: ArrayT, dtype: None = None, order: _OrderKACF = "K", subok: L[True] = True, shape: None = None, *, device: L["cpu"] | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def zeros_like( - a: _ArrayLike[_ScalarT], +def zeros_like[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], dtype: None = None, order: _OrderKACF = "K", subok: py_bool = True, shape: _ShapeLike | None = None, *, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def zeros_like( +def zeros_like[ScalarT: np.generic]( a: object, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = "K", subok: py_bool = True, shape: _ShapeLike | None = None, *, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def zeros_like( a: object, @@ -730,35 +712,35 @@ ones: Final[_ConstructorEmpty] # keep in sync with `zeros_like` @overload -def ones_like( - a: _ArrayT, +def ones_like[ArrayT: np.ndarray]( + a: ArrayT, dtype: None = None, order: _OrderKACF = "K", subok: L[True] = True, shape: None = None, *, device: L["cpu"] | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def ones_like( - a: _ArrayLike[_ScalarT], +def ones_like[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], dtype: None = None, order: _OrderKACF = "K", subok: py_bool = True, shape: _ShapeLike | None = None, *, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def ones_like( +def ones_like[ScalarT: np.generic]( a: object, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = "K", subok: py_bool = True, shape: _ShapeLike | None = None, *, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... 
@overload def ones_like( a: object, @@ -773,35 +755,35 @@ def ones_like( # TODO: Add overloads for bool, int, float, complex, str, bytes, and memoryview # 1-D shape @overload -def full( +def full[ScalarT: np.generic]( shape: SupportsIndex, - fill_value: _ScalarT, + fill_value: ScalarT, dtype: None = None, order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> _Array[tuple[int], _ScalarT]: ... +) -> _Array[tuple[int], ScalarT]: ... @overload -def full( +def full[DTypeT: np.dtype]( shape: SupportsIndex, fill_value: Any, - dtype: _DTypeT | _SupportsDType[_DTypeT], + dtype: DTypeT | _SupportsDType[DTypeT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> np.ndarray[tuple[int], _DTypeT]: ... +) -> np.ndarray[tuple[int], DTypeT]: ... @overload -def full( +def full[ScalarT: np.generic]( shape: SupportsIndex, fill_value: Any, - dtype: type[_ScalarT], + dtype: type[ScalarT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> _Array[tuple[int], _ScalarT]: ... +) -> _Array[tuple[int], ScalarT]: ... @overload def full( shape: SupportsIndex, @@ -814,76 +796,76 @@ def full( ) -> _Array[tuple[int], Any]: ... # known shape @overload -def full( - shape: _AnyShapeT, - fill_value: _ScalarT, +def full[ShapeT: _Shape, ScalarT: np.generic]( + shape: ShapeT, + fill_value: ScalarT, dtype: None = None, order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> _Array[_AnyShapeT, _ScalarT]: ... +) -> _Array[ShapeT, ScalarT]: ... @overload -def full( - shape: _AnyShapeT, +def full[ShapeT: _Shape, DTypeT: np.dtype]( + shape: ShapeT, fill_value: Any, - dtype: _DTypeT | _SupportsDType[_DTypeT], + dtype: DTypeT | _SupportsDType[DTypeT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> np.ndarray[_AnyShapeT, _DTypeT]: ... +) -> np.ndarray[ShapeT, DTypeT]: ... @overload -def full( - shape: _AnyShapeT, +def full[ShapeT: _Shape, ScalarT: np.generic]( + shape: ShapeT, fill_value: Any, - dtype: type[_ScalarT], + dtype: type[ScalarT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> _Array[_AnyShapeT, _ScalarT]: ... +) -> _Array[ShapeT, ScalarT]: ... @overload -def full( - shape: _AnyShapeT, +def full[ShapeT: _Shape]( + shape: ShapeT, fill_value: Any, dtype: DTypeLike | None = None, order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> _Array[_AnyShapeT, Any]: ... +) -> _Array[ShapeT, Any]: ... # unknown shape @overload -def full( +def full[ScalarT: np.generic]( shape: _ShapeLike, - fill_value: _ScalarT, + fill_value: ScalarT, dtype: None = None, order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def full( +def full[DTypeT: np.dtype]( shape: _ShapeLike, fill_value: Any, - dtype: _DTypeT | _SupportsDType[_DTypeT], + dtype: DTypeT | _SupportsDType[DTypeT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> np.ndarray[Any, _DTypeT]: ... +) -> np.ndarray[Any, DTypeT]: ... 
@overload -def full( +def full[ScalarT: np.generic]( shape: _ShapeLike, fill_value: Any, - dtype: type[_ScalarT], + dtype: type[ScalarT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def full( shape: _ShapeLike, @@ -896,8 +878,8 @@ def full( ) -> NDArray[Any]: ... @overload -def full_like( - a: _ArrayT, +def full_like[ArrayT: np.ndarray]( + a: ArrayT, fill_value: object, dtype: None = None, order: _OrderKACF = "K", @@ -905,10 +887,10 @@ def full_like( shape: None = None, *, device: L["cpu"] | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def full_like( - a: _ArrayLike[_ScalarT], +def full_like[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], fill_value: object, dtype: None = None, order: _OrderKACF = "K", @@ -916,18 +898,18 @@ def full_like( shape: _ShapeLike | None = None, *, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def full_like( +def full_like[ScalarT: np.generic]( a: object, fill_value: object, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = "K", subok: py_bool = True, shape: _ShapeLike | None = None, *, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def full_like( a: object, @@ -1015,7 +997,7 @@ def outer(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, out: None = None) -> @overload def outer(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, out: None = None) -> _Array2D[np.timedelta64 | Any]: ... @overload -def outer(a: _ArrayLikeNumber_co | _ArrayLikeTD64_co, b: _ArrayLikeNumber_co | _ArrayLikeTD64_co, out: _ArrayT) -> _ArrayT: ... +def outer[ArrayT: np.ndarray](a: _ArrayLikeNumber_co | _ArrayLikeTD64_co, b: _ArrayLikeNumber_co | _ArrayLikeTD64_co, out: ArrayT) -> ArrayT: ... # keep in sync with numpy.linalg._linalg.tensordot (ignoring `/, *`) @overload @@ -1077,15 +1059,15 @@ def cross( # @overload -def roll(a: _ArrayT, shift: _ShapeLike, axis: _ShapeLike | None = None) -> _ArrayT: ... +def roll[ArrayT: np.ndarray](a: ArrayT, shift: _ShapeLike, axis: _ShapeLike | None = None) -> ArrayT: ... @overload -def roll(a: _ArrayLike[_ScalarT], shift: _ShapeLike, axis: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... +def roll[ScalarT: np.generic](a: _ArrayLike[ScalarT], shift: _ShapeLike, axis: _ShapeLike | None = None) -> NDArray[ScalarT]: ... @overload def roll(a: ArrayLike, shift: _ShapeLike, axis: _ShapeLike | None = None) -> NDArray[Any]: ... # -def rollaxis(a: _ArrayT, axis: int, start: int = 0) -> _ArrayT: ... -def moveaxis(a: _ArrayT, source: _ShapeLike, destination: _ShapeLike) -> _ArrayT: ... +def rollaxis[ArrayT: np.ndarray](a: ArrayT, axis: int, start: int = 0) -> ArrayT: ... +def moveaxis[ArrayT: np.ndarray](a: ArrayT, source: _ShapeLike, destination: _ShapeLike) -> ArrayT: ... def normalize_axis_tuple( axis: int | Iterable[int], ndim: int, @@ -1099,7 +1081,7 @@ def indices(dimensions: tuple[()], dtype: type[int] = int, sparse: L[False] = Fa @overload # 0d, dtype=, sparse=True def indices(dimensions: tuple[()], dtype: DTypeLike | None = int, *, sparse: L[True]) -> tuple[()]: ... @overload # 0d, dtype=, sparse=False (default) -def indices(dimensions: tuple[()], dtype: _DTypeLike[_ScalarT], sparse: L[False] = False) -> _Array1D[_ScalarT]: ... +def indices[ScalarT: np.generic](dimensions: tuple[()], dtype: _DTypeLike[ScalarT], sparse: L[False] = False) -> _Array1D[ScalarT]: ... 
@overload # 0d, dtype=, sparse=False (default) def indices(dimensions: tuple[()], dtype: DTypeLike, sparse: L[False] = False) -> _Array1D[Any]: ... @overload # 1d, dtype=int (default), sparse=False (default) @@ -1107,9 +1089,9 @@ def indices(dimensions: tuple[int], dtype: type[int] = int, sparse: L[False] = F @overload # 1d, dtype=int (default), sparse=True def indices(dimensions: tuple[int], dtype: type[int] = int, *, sparse: L[True]) -> tuple[_Array1D[np.intp]]: ... @overload # 1d, dtype=, sparse=False (default) -def indices(dimensions: tuple[int], dtype: _DTypeLike[_ScalarT], sparse: L[False] = False) -> _Array2D[_ScalarT]: ... +def indices[ScalarT: np.generic](dimensions: tuple[int], dtype: _DTypeLike[ScalarT], sparse: L[False] = False) -> _Array2D[ScalarT]: ... @overload # 1d, dtype=, sparse=True -def indices(dimensions: tuple[int], dtype: _DTypeLike[_ScalarT], sparse: L[True]) -> tuple[_Array1D[_ScalarT]]: ... +def indices[ScalarT: np.generic](dimensions: tuple[int], dtype: _DTypeLike[ScalarT], sparse: L[True]) -> tuple[_Array1D[ScalarT]]: ... @overload # 1d, dtype=, sparse=False (default) def indices(dimensions: tuple[int], dtype: DTypeLike, sparse: L[False] = False) -> _Array2D[Any]: ... @overload # 1d, dtype=, sparse=True @@ -1121,11 +1103,11 @@ def indices( dimensions: tuple[int, int], dtype: type[int] = int, *, sparse: L[True] ) -> tuple[_Array2D[np.intp], _Array2D[np.intp]]: ... @overload # 2d, dtype=, sparse=False (default) -def indices(dimensions: tuple[int, int], dtype: _DTypeLike[_ScalarT], sparse: L[False] = False) -> _Array3D[_ScalarT]: ... +def indices[ScalarT: np.generic](dimensions: tuple[int, int], dtype: _DTypeLike[ScalarT], sparse: L[False] = False) -> _Array3D[ScalarT]: ... @overload # 2d, dtype=, sparse=True -def indices( - dimensions: tuple[int, int], dtype: _DTypeLike[_ScalarT], sparse: L[True] -) -> tuple[_Array2D[_ScalarT], _Array2D[_ScalarT]]: ... +def indices[ScalarT: np.generic]( + dimensions: tuple[int, int], dtype: _DTypeLike[ScalarT], sparse: L[True] +) -> tuple[_Array2D[ScalarT], _Array2D[ScalarT]]: ... @overload # 2d, dtype=, sparse=False (default) def indices(dimensions: tuple[int, int], dtype: DTypeLike, sparse: L[False] = False) -> _Array3D[Any]: ... @overload # 2d, dtype=, sparse=True @@ -1135,23 +1117,23 @@ def indices(dimensions: Sequence[int], dtype: type[int] = int, sparse: L[False] @overload # ?d, dtype=int (default), sparse=True def indices(dimensions: Sequence[int], dtype: type[int] = int, *, sparse: L[True]) -> tuple[NDArray[np.intp], ...]: ... @overload # ?d, dtype=, sparse=False (default) -def indices(dimensions: Sequence[int], dtype: _DTypeLike[_ScalarT], sparse: L[False] = False) -> NDArray[_ScalarT]: ... +def indices[ScalarT: np.generic](dimensions: Sequence[int], dtype: _DTypeLike[ScalarT], sparse: L[False] = False) -> NDArray[ScalarT]: ... @overload # ?d, dtype=, sparse=True -def indices(dimensions: Sequence[int], dtype: _DTypeLike[_ScalarT], sparse: L[True]) -> tuple[NDArray[_ScalarT], ...]: ... +def indices[ScalarT: np.generic](dimensions: Sequence[int], dtype: _DTypeLike[ScalarT], sparse: L[True]) -> tuple[NDArray[ScalarT], ...]: ... @overload # ?d, dtype=, sparse=False (default) def indices(dimensions: Sequence[int], dtype: DTypeLike, sparse: L[False] = False) -> ndarray: ... @overload # ?d, dtype=, sparse=True def indices(dimensions: Sequence[int], dtype: DTypeLike, sparse: L[True]) -> tuple[ndarray, ...]: ... 
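The `indices` overloads above encode a simple runtime rule: with `sparse=False` the result is one stacked array of shape `(len(dimensions), *dimensions)`, while `sparse=True` yields a tuple with one open-grid array per dimension. A minimal sketch of the behaviour being annotated (illustrative only, not part of the patch):

    import numpy as np

    grid = np.indices((2, 3))                     # dense: one stacked array
    assert grid.shape == (2, 2, 3)                # (ndim, *dimensions)

    rows, cols = np.indices((2, 3), sparse=True)  # sparse: one open grid per axis
    assert rows.shape == (2, 1) and cols.shape == (1, 3)

This is why the `tuple[int, int]` overloads return an `_Array3D` in the dense case but a pair of `_Array2D`s in the sparse case.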
# -def fromfunction( - function: Callable[..., _T], +def fromfunction[ReturnT]( + function: Callable[..., ReturnT], shape: Sequence[int], *, dtype: DTypeLike | None = float, like: _SupportsArrayFunc | None = None, **kwargs: object, -) -> _T: ... +) -> ReturnT: ... # def isscalar(element: object) -> TypeGuard[generic | complex | str | bytes | memoryview]: ... @@ -1164,7 +1146,7 @@ def base_repr(number: SupportsAbs[float], base: float = 2, padding: SupportsInde @overload # dtype: None (default) def identity(n: int, dtype: None = None, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.float64]: ... @overload # dtype: known scalar type -def identity(n: int, dtype: _DTypeLike[_ScalarT], *, like: _SupportsArrayFunc | None = None) -> _Array2D[_ScalarT]: ... +def identity[ScalarT: np.generic](n: int, dtype: _DTypeLike[ScalarT], *, like: _SupportsArrayFunc | None = None) -> _Array2D[ScalarT]: ... @overload # dtype: like bool def identity(n: int, dtype: _DTypeLikeBool, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.bool]: ... @overload # dtype: like int_ @@ -1195,21 +1177,21 @@ def isclose( equal_nan: py_bool = False, ) -> np.bool: ... @overload # known shape, same shape or scalar -def isclose( - a: np.ndarray[_ShapeT], - b: np.ndarray[_ShapeT] | _NumberLike_co, +def isclose[ShapeT: _Shape]( + a: np.ndarray[ShapeT], + b: np.ndarray[ShapeT] | _NumberLike_co, rtol: ArrayLike = 1e-5, atol: ArrayLike = 1e-8, equal_nan: py_bool = False, -) -> np.ndarray[_ShapeT, np.dtype[np.bool]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.bool]]: ... @overload # same shape or scalar, known shape -def isclose( - a: np.ndarray[_ShapeT] | _NumberLike_co, - b: np.ndarray[_ShapeT], +def isclose[ShapeT: _Shape]( + a: np.ndarray[ShapeT] | _NumberLike_co, + b: np.ndarray[ShapeT], rtol: ArrayLike = 1e-5, atol: ArrayLike = 1e-8, equal_nan: py_bool = False, -) -> np.ndarray[_ShapeT, np.dtype[np.bool]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.bool]]: ... @overload # 1d sequence, <=1d array-like def isclose( a: Sequence[_NumberLike_co], @@ -1257,20 +1239,20 @@ def array_equiv(a1: ArrayLike, a2: ArrayLike) -> py_bool: ... # @overload -def astype( - x: ndarray[_ShapeT], - dtype: _DTypeLike[_ScalarT], +def astype[ShapeT: _Shape, ScalarT: np.generic]( + x: ndarray[ShapeT], + dtype: _DTypeLike[ScalarT], /, *, copy: py_bool = True, device: L["cpu"] | None = None, -) -> ndarray[_ShapeT, dtype[_ScalarT]]: ... +) -> ndarray[ShapeT, dtype[ScalarT]]: ... @overload -def astype( - x: ndarray[_ShapeT], +def astype[ShapeT: _Shape]( + x: ndarray[ShapeT], dtype: DTypeLike | None, /, *, copy: py_bool = True, device: L["cpu"] | None = None, -) -> ndarray[_ShapeT]: ... +) -> ndarray[ShapeT]: ... diff --git a/numpy/_core/overrides.pyi b/numpy/_core/overrides.pyi index 6ef52566d782..627165e98d3d 100644 --- a/numpy/_core/overrides.pyi +++ b/numpy/_core/overrides.pyi @@ -1,13 +1,10 @@ from collections.abc import Callable, Iterable -from typing import Any, Final, NamedTuple, ParamSpec, TypeAlias, TypeVar +from typing import Any, Final, NamedTuple from numpy._utils import set_module as set_module -_T = TypeVar("_T") -_Tss = ParamSpec("_Tss") -_FuncLikeT = TypeVar("_FuncLikeT", bound=type | Callable[..., object]) - -_Dispatcher: TypeAlias = Callable[_Tss, Iterable[object]] +type _FuncLike = type | Callable[..., object] +type _Dispatcher[**_Tss] = Callable[_Tss, Iterable[object]] ### @@ -21,27 +18,27 @@ class ArgSpec(NamedTuple): defaults: tuple[Any, ...] 
def get_array_function_like_doc(public_api: Callable[..., object], docstring_template: str = "") -> str: ... -def finalize_array_function_like(public_api: _FuncLikeT) -> _FuncLikeT: ... +def finalize_array_function_like[FuncLikeT: _FuncLike](public_api: FuncLikeT) -> FuncLikeT: ... # -def verify_matching_signatures(implementation: Callable[_Tss, object], dispatcher: _Dispatcher[_Tss]) -> None: ... +def verify_matching_signatures[**Tss](implementation: Callable[Tss, object], dispatcher: _Dispatcher[Tss]) -> None: ... # NOTE: This actually returns a `_ArrayFunctionDispatcher` callable wrapper object, with # the original wrapped callable stored in the `._implementation` attribute. It checks # for any `__array_function__` of the values of specific arguments that the dispatcher # specifies. Since the dispatcher only returns an iterable of passed array-like args, # this overridable behaviour is impossible to annotate. -def array_function_dispatch( - dispatcher: _Dispatcher[_Tss] | None = None, +def array_function_dispatch[**Tss, FuncLikeT: _FuncLike]( + dispatcher: _Dispatcher[Tss] | None = None, module: str | None = None, verify: bool = True, docs_from_dispatcher: bool = False, -) -> Callable[[_FuncLikeT], _FuncLikeT]: ... +) -> Callable[[FuncLikeT], FuncLikeT]: ... # -def array_function_from_dispatcher( - implementation: Callable[_Tss, _T], +def array_function_from_dispatcher[**Tss, T]( + implementation: Callable[Tss, T], module: str | None = None, verify: bool = True, docs_from_dispatcher: bool = True, -) -> Callable[[_Dispatcher[_Tss]], Callable[_Tss, _T]]: ... +) -> Callable[[_Dispatcher[Tss]], Callable[Tss, T]]: ... diff --git a/numpy/_core/records.pyi b/numpy/_core/records.pyi index 2b133630cf35..326a0fe6e476 100644 --- a/numpy/_core/records.pyi +++ b/numpy/_core/records.pyi @@ -1,18 +1,17 @@ # ruff: noqa: ANN401 # pyright: reportSelfClsParameterName=false from _typeshed import Incomplete, StrOrBytesPath -from collections.abc import Iterable, Sequence +from collections.abc import Buffer, Iterable, Sequence from typing import ( Any, ClassVar, Literal, Protocol, SupportsIndex, - TypeAlias, overload, type_check_only, ) -from typing_extensions import Buffer, TypeVar +from typing_extensions import TypeVar import numpy as np from numpy import _ByteOrder, _OrderKACF @@ -39,12 +38,11 @@ __all__ = [ "record", ] -_T = TypeVar("_T") -_ScalarT = TypeVar("_ScalarT", bound=np.generic) +# Explicit covariant type variables are needed because mypy isn't very good at variance inference right now. _DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) -_RecArray: TypeAlias = recarray[_AnyShape, np.dtype[_ScalarT]] +type _RecArray[_ScalarT: np.generic] = recarray[_AnyShape, np.dtype[_ScalarT]] @type_check_only class _SupportsReadInto(Protocol): @@ -236,8 +234,8 @@ def fromfile( # exported in `numpy.rec` @overload -def array( - obj: _ScalarT | NDArray[_ScalarT], +def array[ScalarT: np.generic]( + obj: ScalarT | NDArray[ScalarT], dtype: None = None, shape: _ShapeLike | None = None, offset: int = 0, @@ -248,7 +246,7 @@ def array( aligned: bool = False, byteorder: None = None, copy: bool = True, -) -> _RecArray[_ScalarT]: ... +) -> _RecArray[ScalarT]: ... @overload def array( obj: ArrayLike, @@ -338,4 +336,4 @@ def array( ) -> _RecArray[record]: ... # exported in `numpy.rec` -def find_duplicate(list: Iterable[_T]) -> list[_T]: ... +def find_duplicate[T](list: Iterable[T]) -> list[T]: ... 
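The `array_function_dispatch` note above is easier to follow next to the pattern NumPy uses internally: the dispatcher mirrors the implementation's signature and returns exactly the arguments whose `__array_function__` should be consulted. A rough sketch of that contract; `my_concat` and its dispatcher are invented names, not from the patch:

    import numpy as np
    from numpy._core.overrides import array_function_dispatch

    def _my_concat_dispatcher(arrays, axis=None, out=None):
        # Only the returned values are inspected for `__array_function__`.
        return (*arrays, out) if out is not None else tuple(arrays)

    @array_function_dispatch(_my_concat_dispatcher)
    def my_concat(arrays, axis=0, out=None):
        return np.concatenate(arrays, axis=axis, out=out)

After decoration, `my_concat` is the `_ArrayFunctionDispatcher` wrapper and the plain function survives as `my_concat._implementation`, which is the part the stub annotations cannot express.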
diff --git a/numpy/_core/shape_base.pyi b/numpy/_core/shape_base.pyi index b87cae8a5f0f..b41602ae8d47 100644 --- a/numpy/_core/shape_base.pyi +++ b/numpy/_core/shape_base.pyi @@ -1,7 +1,8 @@ from collections.abc import Sequence -from typing import Any, SupportsIndex, TypeVar, overload +from typing import Any, SupportsIndex, overload -from numpy import _CastingKind, generic +import numpy as np +from numpy import _CastingKind from numpy._typing import ArrayLike, DTypeLike, NDArray, _ArrayLike, _DTypeLike __all__ = [ @@ -15,21 +16,17 @@ __all__ = [ "vstack", ] -_T = TypeVar("_T") -_ScalarT = TypeVar("_ScalarT", bound=generic) -_ScalarT1 = TypeVar("_ScalarT1", bound=generic) -_ScalarT2 = TypeVar("_ScalarT2", bound=generic) -_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) - -### - # keep in sync with `numpy.ma.extras.atleast_1d` @overload -def atleast_1d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... +def atleast_1d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload -def atleast_1d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... +def atleast_1d[ScalarT1: np.generic, ScalarT2: np.generic]( + a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], / +) -> tuple[NDArray[ScalarT1], NDArray[ScalarT2]]: ... @overload -def atleast_1d(a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]) -> tuple[NDArray[_ScalarT], ...]: ... +def atleast_1d[ScalarT: np.generic]( + a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT] +) -> tuple[NDArray[ScalarT], ...]: ... @overload def atleast_1d(a0: ArrayLike, /) -> NDArray[Any]: ... @overload @@ -39,11 +36,15 @@ def atleast_1d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray # keep in sync with `numpy.ma.extras.atleast_2d` @overload -def atleast_2d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... +def atleast_2d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload -def atleast_2d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... +def atleast_2d[ScalarT1: np.generic, ScalarT2: np.generic]( + a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], / +) -> tuple[NDArray[ScalarT1], NDArray[ScalarT2]]: ... @overload -def atleast_2d(a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]) -> tuple[NDArray[_ScalarT], ...]: ... +def atleast_2d[ScalarT: np.generic]( + a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT] +) -> tuple[NDArray[ScalarT], ...]: ... @overload def atleast_2d(a0: ArrayLike, /) -> NDArray[Any]: ... @overload @@ -53,11 +54,15 @@ def atleast_2d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray # keep in sync with `numpy.ma.extras.atleast_3d` @overload -def atleast_3d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... +def atleast_3d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload -def atleast_3d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... +def atleast_3d[ScalarT1: np.generic, ScalarT2: np.generic]( + a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], / +) -> tuple[NDArray[ScalarT1], NDArray[ScalarT2]]: ... @overload -def atleast_3d(a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]) -> tuple[NDArray[_ScalarT], ...]: ... 
+def atleast_3d[ScalarT: np.generic]( + a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT] +) -> tuple[NDArray[ScalarT], ...]: ... @overload def atleast_3d(a0: ArrayLike, /) -> NDArray[Any]: ... @overload @@ -66,23 +71,23 @@ def atleast_3d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[A def atleast_3d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... # used by numpy.lib._shape_base_impl -def _arrays_for_stack_dispatcher(arrays: Sequence[_T]) -> tuple[_T, ...]: ... +def _arrays_for_stack_dispatcher[T](arrays: Sequence[T]) -> tuple[T, ...]: ... # keep in sync with `numpy.ma.extras.vstack` @overload -def vstack( - tup: Sequence[_ArrayLike[_ScalarT]], +def vstack[ScalarT: np.generic]( + tup: Sequence[_ArrayLike[ScalarT]], *, dtype: None = None, casting: _CastingKind = "same_kind" -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def vstack( +def vstack[ScalarT: np.generic]( tup: Sequence[ArrayLike], *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], casting: _CastingKind = "same_kind" -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def vstack( tup: Sequence[ArrayLike], @@ -93,19 +98,19 @@ def vstack( # keep in sync with `numpy.ma.extras.hstack` @overload -def hstack( - tup: Sequence[_ArrayLike[_ScalarT]], +def hstack[ScalarT: np.generic]( + tup: Sequence[_ArrayLike[ScalarT]], *, dtype: None = None, casting: _CastingKind = "same_kind" -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def hstack( +def hstack[ScalarT: np.generic]( tup: Sequence[ArrayLike], *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], casting: _CastingKind = "same_kind" -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def hstack( tup: Sequence[ArrayLike], @@ -116,23 +121,23 @@ def hstack( # keep in sync with `numpy.ma.extras.stack` @overload -def stack( - arrays: Sequence[_ArrayLike[_ScalarT]], +def stack[ScalarT: np.generic]( + arrays: Sequence[_ArrayLike[ScalarT]], axis: SupportsIndex = 0, out: None = None, *, dtype: None = None, casting: _CastingKind = "same_kind" -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def stack( +def stack[ScalarT: np.generic]( arrays: Sequence[ArrayLike], axis: SupportsIndex = 0, out: None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], casting: _CastingKind = "same_kind" -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def stack( arrays: Sequence[ArrayLike], @@ -143,31 +148,31 @@ def stack( casting: _CastingKind = "same_kind" ) -> NDArray[Any]: ... @overload -def stack( +def stack[OutT: np.ndarray]( arrays: Sequence[ArrayLike], axis: SupportsIndex, - out: _ArrayT, + out: OutT, *, dtype: DTypeLike | None = None, casting: _CastingKind = "same_kind", -) -> _ArrayT: ... +) -> OutT: ... @overload -def stack( +def stack[OutT: np.ndarray]( arrays: Sequence[ArrayLike], axis: SupportsIndex = 0, *, - out: _ArrayT, + out: OutT, dtype: DTypeLike | None = None, casting: _CastingKind = "same_kind", -) -> _ArrayT: ... +) -> OutT: ... @overload -def unstack( - array: _ArrayLike[_ScalarT], +def unstack[ScalarT: np.generic]( + array: _ArrayLike[ScalarT], /, *, axis: int = 0, -) -> tuple[NDArray[_ScalarT], ...]: ... +) -> tuple[NDArray[ScalarT], ...]: ... @overload def unstack( array: ArrayLike, @@ -177,6 +182,6 @@ def unstack( ) -> tuple[NDArray[Any], ...]: ... @overload -def block(arrays: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... 
+def block[ScalarT: np.generic](arrays: _ArrayLike[ScalarT]) -> NDArray[ScalarT]: ... @overload def block(arrays: ArrayLike) -> NDArray[Any]: ... diff --git a/numpy/_core/strings.pyi b/numpy/_core/strings.pyi index 270760412670..475da159f783 100644 --- a/numpy/_core/strings.pyi +++ b/numpy/_core/strings.pyi @@ -1,4 +1,4 @@ -from typing import TypeAlias, overload +from typing import overload import numpy as np from numpy._globals import _NoValueType @@ -62,9 +62,9 @@ __all__ = [ "slice", ] -_StringDTypeArray: TypeAlias = np.ndarray[_AnyShape, np.dtypes.StringDType] -_StringDTypeSupportsArray: TypeAlias = _SupportsArray[np.dtypes.StringDType] -_StringDTypeOrUnicodeArray: TypeAlias = np.ndarray[_AnyShape, np.dtype[np.str_]] | _StringDTypeArray +type _StringDTypeArray = np.ndarray[_AnyShape, np.dtypes.StringDType] +type _StringDTypeSupportsArray = _SupportsArray[np.dtypes.StringDType] +type _StringDTypeOrUnicodeArray = NDArray[np.str_] | _StringDTypeArray @overload def equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... diff --git a/numpy/_core/umath.pyi b/numpy/_core/umath.pyi index faf6fb545d5f..498a0af2c008 100644 --- a/numpy/_core/umath.pyi +++ b/numpy/_core/umath.pyi @@ -1,7 +1,7 @@ import contextvars from _typeshed import SupportsWrite from collections.abc import Callable -from typing import Any, Final, Literal, TypeAlias, TypedDict, Unpack, type_check_only +from typing import Any, Final, Literal, TypedDict, Unpack, type_check_only from typing_extensions import CapsuleType from numpy import ( @@ -206,8 +206,8 @@ __all__ = [ ### -_ErrKind: TypeAlias = Literal["ignore", "warn", "raise", "call", "print", "log"] -_ErrCall: TypeAlias = Callable[[str, int], Any] | SupportsWrite[str] +type _ErrKind = Literal["ignore", "warn", "raise", "call", "print", "log"] +type _ErrCall = Callable[[str, int], Any] | SupportsWrite[str] @type_check_only class _ExtOjbDict(TypedDict, total=False): diff --git a/numpy/_typing/__init__.py b/numpy/_typing/__init__.py index 22eb6e3f30bb..82479a3fcb08 100644 --- a/numpy/_typing/__init__.py +++ b/numpy/_typing/__init__.py @@ -1,8 +1,7 @@ """Private counterpart of ``numpy.typing``.""" -import sys - from ._array_like import ( + ArrayLike, NDArray as NDArray, _ArrayLike as _ArrayLike, _ArrayLikeAnyString_co as _ArrayLikeAnyString_co, @@ -85,6 +84,7 @@ # from ._dtype_like import ( + DTypeLike, _DTypeLike as _DTypeLike, _DTypeLikeBool as _DTypeLikeBool, _DTypeLikeBytes as _DTypeLikeBytes, @@ -157,17 +157,3 @@ _UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1, _UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2, ) - -# wrapping the public aliases in `TypeAliasType` helps with introspection readability -if sys.version_info >= (3, 12): - from typing import TypeAliasType - - from ._array_like import ArrayLike as _ArrayLikeAlias - from ._dtype_like import DTypeLike as _DTypeLikeAlias - - ArrayLike = TypeAliasType("ArrayLike", _ArrayLikeAlias) - DTypeLike = TypeAliasType("DTypeLike", _DTypeLikeAlias) - -else: - from ._array_like import ArrayLike as ArrayLike - from ._dtype_like import DTypeLike as DTypeLike diff --git a/numpy/_typing/_array_like.py b/numpy/_typing/_array_like.py index 6b071f4a0319..b8cb2c7872c1 100644 --- a/numpy/_typing/_array_like.py +++ b/numpy/_typing/_array_like.py @@ -1,27 +1,18 @@ -import sys -from collections.abc import Callable, Collection, Sequence -from typing import TYPE_CHECKING, Any, Protocol, TypeAlias, TypeVar, runtime_checkable +from collections.abc import Buffer, Callable, Collection, Sequence +from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable import 
numpy as np -from numpy import dtype - -from ._nbit_base import _32Bit, _64Bit -from ._nested_sequence import _NestedSequence -from ._shape import _AnyShape if TYPE_CHECKING: - StringDType = np.dtypes.StringDType + from numpy.dtypes import StringDType else: - # at runtime outside of type checking importing this from numpy.dtypes - # would lead to a circular import from numpy._core.multiarray import StringDType -_T = TypeVar("_T") -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_DTypeT = TypeVar("_DTypeT", bound=dtype[Any]) -_DTypeT_co = TypeVar("_DTypeT_co", covariant=True, bound=dtype[Any]) +from ._nbit_base import _32Bit, _64Bit +from ._nested_sequence import _NestedSequence +from ._shape import _AnyShape -NDArray: TypeAlias = np.ndarray[_AnyShape, dtype[_ScalarT]] +type NDArray[ScalarT: np.generic] = np.ndarray[_AnyShape, np.dtype[ScalarT]] # The `_SupportsArray` protocol only cares about the default dtype # (i.e. `dtype=None` or no `dtype` parameter at all) of the to-be returned @@ -29,8 +20,8 @@ # Concrete implementations of the protocol are responsible for adding # any and all remaining overloads @runtime_checkable -class _SupportsArray(Protocol[_DTypeT_co]): - def __array__(self) -> np.ndarray[Any, _DTypeT_co]: ... +class _SupportsArray[DTypeT: np.dtype](Protocol): + def __array__(self) -> np.ndarray[Any, DTypeT]: ... @runtime_checkable @@ -46,61 +37,54 @@ def __array_function__( # TODO: Wait until mypy supports recursive objects in combination with typevars -_FiniteNestedSequence: TypeAlias = ( - _T - | Sequence[_T] - | Sequence[Sequence[_T]] - | Sequence[Sequence[Sequence[_T]]] - | Sequence[Sequence[Sequence[Sequence[_T]]]] +type _FiniteNestedSequence[T] = ( + T + | Sequence[T] + | Sequence[Sequence[T]] + | Sequence[Sequence[Sequence[T]]] + | Sequence[Sequence[Sequence[Sequence[T]]]] ) # A subset of `npt.ArrayLike` that can be parametrized w.r.t. `np.generic` -_ArrayLike: TypeAlias = ( - _SupportsArray[dtype[_ScalarT]] - | _NestedSequence[_SupportsArray[dtype[_ScalarT]]] +type _ArrayLike[ScalarT: np.generic] = ( + _SupportsArray[np.dtype[ScalarT]] + | _NestedSequence[_SupportsArray[np.dtype[ScalarT]]] ) # A union representing array-like objects; consists of two typevars: # One representing types that can be parametrized w.r.t. `np.dtype` # and another one for the rest -_DualArrayLike: TypeAlias = ( - _SupportsArray[_DTypeT] - | _NestedSequence[_SupportsArray[_DTypeT]] - | _T - | _NestedSequence[_T] +type _DualArrayLike[DTypeT: np.dtype, BuiltinT] = ( + _SupportsArray[DTypeT] + | _NestedSequence[_SupportsArray[DTypeT]] + | BuiltinT + | _NestedSequence[BuiltinT] ) -if sys.version_info >= (3, 12): - from collections.abc import Buffer as _Buffer -else: - @runtime_checkable - class _Buffer(Protocol): - def __buffer__(self, flags: int, /) -> memoryview: ... 
-
-ArrayLike: TypeAlias = _Buffer | _DualArrayLike[dtype[Any], complex | bytes | str]
+type ArrayLike = Buffer | _DualArrayLike[np.dtype, complex | bytes | str]
 
 # `ArrayLike<X>_co`: array-like objects that can be coerced into `X`
 # given the casting rules `same_kind`
-_ArrayLikeBool_co: TypeAlias = _DualArrayLike[dtype[np.bool], bool]
-_ArrayLikeUInt_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.unsignedinteger], bool]
-_ArrayLikeInt_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.integer], int]
-_ArrayLikeFloat_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.integer | np.floating], float]
-_ArrayLikeComplex_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.number], complex]
-_ArrayLikeNumber_co: TypeAlias = _ArrayLikeComplex_co
-_ArrayLikeTD64_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.integer | np.timedelta64], int]
-_ArrayLikeDT64_co: TypeAlias = _ArrayLike[np.datetime64]
-_ArrayLikeObject_co: TypeAlias = _ArrayLike[np.object_]
-
-_ArrayLikeVoid_co: TypeAlias = _ArrayLike[np.void]
-_ArrayLikeBytes_co: TypeAlias = _DualArrayLike[dtype[np.bytes_], bytes]
-_ArrayLikeStr_co: TypeAlias = _DualArrayLike[dtype[np.str_], str]
-_ArrayLikeString_co: TypeAlias = _DualArrayLike[StringDType, str]
-_ArrayLikeAnyString_co: TypeAlias = _DualArrayLike[dtype[np.character] | StringDType, bytes | str]
-
-__Float64_co: TypeAlias = np.floating[_64Bit] | np.float32 | np.float16 | np.integer | np.bool
-__Complex128_co: TypeAlias = np.number[_64Bit] | np.number[_32Bit] | np.float16 | np.integer | np.bool
-_ArrayLikeFloat64_co: TypeAlias = _DualArrayLike[dtype[__Float64_co], float]
-_ArrayLikeComplex128_co: TypeAlias = _DualArrayLike[dtype[__Complex128_co], complex]
+type _ArrayLikeBool_co = _DualArrayLike[np.dtype[np.bool], bool]
+type _ArrayLikeUInt_co = _DualArrayLike[np.dtype[np.bool | np.unsignedinteger], bool]
+type _ArrayLikeInt_co = _DualArrayLike[np.dtype[np.bool | np.integer], int]
+type _ArrayLikeFloat_co = _DualArrayLike[np.dtype[np.bool | np.integer | np.floating], float]
+type _ArrayLikeComplex_co = _DualArrayLike[np.dtype[np.bool | np.number], complex]
+type _ArrayLikeNumber_co = _ArrayLikeComplex_co
+type _ArrayLikeTD64_co = _DualArrayLike[np.dtype[np.bool | np.integer | np.timedelta64], int]
+type _ArrayLikeDT64_co = _ArrayLike[np.datetime64]
+type _ArrayLikeObject_co = _ArrayLike[np.object_]
+
+type _ArrayLikeVoid_co = _ArrayLike[np.void]
+type _ArrayLikeBytes_co = _DualArrayLike[np.dtype[np.bytes_], bytes]
+type _ArrayLikeStr_co = _DualArrayLike[np.dtype[np.str_], str]
+type _ArrayLikeString_co = _DualArrayLike[StringDType, str]
+type _ArrayLikeAnyString_co = _DualArrayLike[np.dtype[np.character] | StringDType, bytes | str]
+
+type __Float64_co = np.floating[_64Bit] | np.float32 | np.float16 | np.integer | np.bool
+type __Complex128_co = np.number[_64Bit] | np.number[_32Bit] | np.float16 | np.integer | np.bool
+type _ArrayLikeFloat64_co = _DualArrayLike[np.dtype[__Float64_co], float]
+type _ArrayLikeComplex128_co = _DualArrayLike[np.dtype[__Complex128_co], complex]
 
 # NOTE: This includes `builtins.bool`, but not `numpy.bool`.
-_ArrayLikeInt: TypeAlias = _DualArrayLike[dtype[np.integer], int]
+type _ArrayLikeInt = _DualArrayLike[np.dtype[np.integer], int]
diff --git a/numpy/_typing/_char_codes.py b/numpy/_typing/_char_codes.py
index 7b6fad228d56..6d1e06dc894c 100644
--- a/numpy/_typing/_char_codes.py
+++ b/numpy/_typing/_char_codes.py
@@ -1,59 +1,59 @@
 from typing import Literal
 
-_BoolCodes = Literal[
+type _BoolCodes = Literal[
     "bool", "bool_", "?", "|?", "=?", "<?", ">?", "b1", "|b1", "=b1", "<b1", ">b1",
 ]  # fmt: skip
 
-_UInt8Codes = Literal["uint8", "u1", "|u1", "=u1", "<u1", ">u1"]
-_UInt16Codes = Literal["uint16", "u2", "|u2", "=u2", "<u2", ">u2"]
-_UInt32Codes = Literal["uint32", "u4", "|u4", "=u4", "<u4", ">u4"]
-_UInt64Codes = Literal["uint64", "u8", "|u8", "=u8", "<u8", ">u8"]
-
-_Int8Codes = Literal["int8", "i1", "|i1", "=i1", "<i1", ">i1"]
-_Int16Codes = Literal["int16", "i2", "|i2", "=i2", "<i2", ">i2"]
-_Int32Codes = Literal["int32", "i4", "|i4", "=i4", "<i4", ">i4"]
-_Int64Codes = Literal["int64", "i8", "|i8", "=i8", "<i8", ">i8"]
-
-_Float16Codes = Literal["float16", "f2", "|f2", "=f2", "<f2", ">f2"]
-_Float32Codes = Literal["float32", "f4", "|f4", "=f4", "<f4", ">f4"]
-_Float64Codes = Literal["float64", "f8", "|f8", "=f8", "<f8", ">f8"]
-
-_Complex64Codes = Literal["complex64", "c8", "|c8", "=c8", "<c8", ">c8"]
-_Complex128Codes = Literal["complex128", "c16", "|c16", "=c16", "<c16", ">c16"]
-
-_ByteCodes = Literal["byte", "b", "|b", "=b", "<b", ">b"]
-_ShortCodes = Literal["short", "h", "|h", "=h", "<h", ">h"]
-_IntCCodes = Literal["intc", "i", "|i", "=i", "<i", ">i"]
-_IntPCodes = Literal["intp", "int", "int_", "n", "|n", "=n", "<n", ">n"]
-_LongCodes = Literal["long", "l", "|l", "=l", "<l", ">l"]
-_IntCodes = _IntPCodes
-_LongLongCodes = Literal["longlong", "q", "|q", "=q", "<q", ">q"]
-
-_UByteCodes = Literal["ubyte", "B", "|B", "=B", "<B", ">B"]
-_UShortCodes = Literal["ushort", "H", "|H", "=H", "<H", ">H"]
-_UIntCCodes = Literal["uintc", "I", "|I", "=I", "<I", ">I"]
-_UIntPCodes = Literal["uintp", "uint", "N", "|N", "=N", "<N", ">N"]
-_ULongCodes = Literal["ulong", "L", "|L", "=L", "<L", ">L"]
-_UIntCodes = _UIntPCodes
-_ULongLongCodes = Literal["ulonglong", "Q", "|Q", "=Q", "<Q", ">Q"]
-
-_HalfCodes = Literal["half", "e", "|e", "=e", "<e", ">e"]
-_SingleCodes = Literal["single", "f", "|f", "=f", "<f", ">f"]
-_DoubleCodes = Literal["double", "float", "d", "|d", "=d", "<d", ">d"]
-_LongDoubleCodes = Literal["longdouble", "g", "|g", "=g", "<g", ">g"]
-
-_CSingleCodes = Literal["csingle", "F", "|F", "=F", "<F", ">F"]
-_CDoubleCodes = Literal["cdouble", "complex", "D", "|D", "=D", "<D", ">D"]
-_CLongDoubleCodes = Literal["clongdouble", "G", "|G", "=G", "<G", ">G"]
-
-_StrCodes = Literal["str", "str_", "unicode", "U", "|U", "=U", "<U", ">U"]
-_BytesCodes = Literal["bytes", "bytes_", "S", "|S", "=S", "<S", ">S"]
-_VoidCodes = Literal["void", "V", "|V", "=V", "<V", ">V"]
-_ObjectCodes = Literal["object", "object_", "O", "|O", "=O", "<O", ">O"]
-
-_DT64Codes = Literal[
+type _UInt8Codes = Literal["uint8", "u1", "|u1", "=u1", "<u1", ">u1"]
+type _UInt16Codes = Literal["uint16", "u2", "|u2", "=u2", "<u2", ">u2"]
+type _UInt32Codes = Literal["uint32", "u4", "|u4", "=u4", "<u4", ">u4"]
+type _UInt64Codes = Literal["uint64", "u8", "|u8", "=u8", "<u8", ">u8"]
+
+type _Int8Codes = Literal["int8", "i1", "|i1", "=i1", "<i1", ">i1"]
+type _Int16Codes = Literal["int16", "i2", "|i2", "=i2", "<i2", ">i2"]
+type _Int32Codes = Literal["int32", "i4", "|i4", "=i4", "<i4", ">i4"]
+type _Int64Codes = Literal["int64", "i8", "|i8", "=i8", "<i8", ">i8"]
+
+type _Float16Codes = Literal["float16", "f2", "|f2", "=f2", "<f2", ">f2"]
+type _Float32Codes = Literal["float32", "f4", "|f4", "=f4", "<f4", ">f4"]
+type _Float64Codes = Literal["float64", "f8", "|f8", "=f8", "<f8", ">f8"]
+
+type _Complex64Codes = Literal["complex64", "c8", "|c8", "=c8", "<c8", ">c8"]
+type _Complex128Codes = Literal["complex128", "c16", "|c16", "=c16", "<c16", ">c16"]
+
+type _ByteCodes = Literal["byte", "b", "|b", "=b", "<b", ">b"]
+type _ShortCodes = Literal["short", "h", "|h", "=h", "<h", ">h"]
+type _IntCCodes = Literal["intc", "i", "|i", "=i", "<i", ">i"]
+type _IntPCodes = Literal["intp", "int", "int_", "n", "|n", "=n", "<n", ">n"]
+type _LongCodes = Literal["long", "l", "|l", "=l", "<l", ">l"]
+type _IntCodes = _IntPCodes
+type _LongLongCodes = Literal["longlong", "q", "|q", "=q", "<q", ">q"]
+
+type _UByteCodes = Literal["ubyte", "B", "|B", "=B", "<B", ">B"]
+type _UShortCodes = Literal["ushort", "H", "|H", "=H", "<H", ">H"]
+type _UIntCCodes = Literal["uintc", "I", "|I", "=I", "<I", ">I"]
+type _UIntPCodes = Literal["uintp", "uint", "N", "|N", "=N", "<N", ">N"]
+type _ULongCodes = Literal["ulong", "L", "|L", "=L", "<L", ">L"]
+type _UIntCodes = _UIntPCodes
+type _ULongLongCodes = Literal["ulonglong", "Q", "|Q", "=Q", "<Q", ">Q"]
+
+type _HalfCodes = Literal["half", "e", "|e", "=e", "<e", ">e"]
+type _SingleCodes = Literal["single", "f", "|f", "=f", "<f", ">f"]
+type _DoubleCodes = Literal["double", "float", "d", "|d", "=d", "<d", ">d"]
+type _LongDoubleCodes = Literal["longdouble", "g", "|g", "=g", "<g", ">g"]
+
+type _CSingleCodes = Literal["csingle", "F", "|F", "=F", "<F", ">F"]
+type _CDoubleCodes = Literal["cdouble", "complex", "D", "|D", "=D", "<D", ">D"]
+type _CLongDoubleCodes = Literal["clongdouble", "G", "|G", "=G", "<G", ">G"]
+
+type _StrCodes = Literal["str", "str_", "unicode", "U", "|U", "=U", "<U", ">U"]
+type _BytesCodes = Literal["bytes", "bytes_", "S", "|S", "=S", "<S", ">S"]
+type _VoidCodes = Literal["void", "V", "|V", "=V", "<V", ">V"]
+type _ObjectCodes = Literal["object", "object_", "O", "|O", "=O", "<O", ">O"]
+
+type _DT64Codes = Literal[
     "datetime64", "|datetime64", "=datetime64",
     "<datetime64", ">datetime64",
     "datetime64[Y]", "|datetime64[Y]", "=datetime64[Y]",
@@ -98,7 +98,7 @@
     "M8[fs]", "|M8[fs]", "=M8[fs]", "<M8[fs]", ">M8[fs]",
     "M8[as]", "|M8[as]", "=M8[as]", "<M8[as]", ">M8[as]",
 ]
-_TD64Codes = Literal[
+type _TD64Codes = Literal[
     "timedelta64", "|timedelta64", "=timedelta64",
     "<timedelta64", ">timedelta64",
     "timedelta64[Y]", "|timedelta64[Y]", "=timedelta64[Y]",
@@ -146,7 +146,7 @@
 # NOTE: `StringDType' has no scalar type, and therefore has no name that can
 # be passed to the `dtype` constructor
-_StringCodes = Literal["T", "|T", "=T", "<T", ">T"]
+type _StringCodes = Literal["T", "|T", "=T", "<T", ">T"]
 
 # NOTE: Nested literals get flattened and de-duplicated at runtime, which isn't
 # the case for a `Union` of `Literal`s.
@@ -154,7 +154,7 @@
 # Another advantage of nesting, is that they always have a "flat"
 # `Literal.__args__`, which is a tuple of *literally* all its literal values.
-_UnsignedIntegerCodes = Literal[ +type _UnsignedIntegerCodes = Literal[ _UInt8Codes, _UInt16Codes, _UInt32Codes, @@ -166,7 +166,7 @@ _ULongCodes, _ULongLongCodes, ] -_SignedIntegerCodes = Literal[ +type _SignedIntegerCodes = Literal[ _Int8Codes, _Int16Codes, _Int32Codes, @@ -178,7 +178,7 @@ _LongCodes, _LongLongCodes, ] -_FloatingCodes = Literal[ +type _FloatingCodes = Literal[ _Float16Codes, _Float32Codes, _Float64Codes, @@ -187,21 +187,21 @@ _DoubleCodes, _LongDoubleCodes ] -_ComplexFloatingCodes = Literal[ +type _ComplexFloatingCodes = Literal[ _Complex64Codes, _Complex128Codes, _CSingleCodes, _CDoubleCodes, _CLongDoubleCodes, ] -_IntegerCodes = Literal[_UnsignedIntegerCodes, _SignedIntegerCodes] -_InexactCodes = Literal[_FloatingCodes, _ComplexFloatingCodes] -_NumberCodes = Literal[_IntegerCodes, _InexactCodes] +type _IntegerCodes = Literal[_UnsignedIntegerCodes, _SignedIntegerCodes] +type _InexactCodes = Literal[_FloatingCodes, _ComplexFloatingCodes] +type _NumberCodes = Literal[_IntegerCodes, _InexactCodes] -_CharacterCodes = Literal[_StrCodes, _BytesCodes] -_FlexibleCodes = Literal[_VoidCodes, _CharacterCodes] +type _CharacterCodes = Literal[_StrCodes, _BytesCodes] +type _FlexibleCodes = Literal[_VoidCodes, _CharacterCodes] -_GenericCodes = Literal[ +type _GenericCodes = Literal[ _BoolCodes, _NumberCodes, _FlexibleCodes, diff --git a/numpy/_typing/_dtype_like.py b/numpy/_typing/_dtype_like.py index 34c4bd44f519..09ed1a0084de 100644 --- a/numpy/_typing/_dtype_like.py +++ b/numpy/_typing/_dtype_like.py @@ -1,5 +1,5 @@ -from collections.abc import Sequence # noqa: F811 -from typing import Any, Protocol, TypeAlias, TypedDict, TypeVar +from collections.abc import Sequence +from typing import Any, NotRequired, Protocol, TypedDict, runtime_checkable import numpy as np @@ -18,48 +18,43 @@ _VoidCodes, ) -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) -_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, covariant=True) +type _DTypeLikeNested = Any # TODO: wait for support for recursive types -_DTypeLikeNested: TypeAlias = Any # TODO: wait for support for recursive types - -# Mandatory keys -class _DTypeDictBase(TypedDict): +class _DTypeDict(TypedDict): names: Sequence[str] formats: Sequence[_DTypeLikeNested] - - -# Mandatory + optional keys -class _DTypeDict(_DTypeDictBase, total=False): # Only `str` elements are usable as indexing aliases, # but `titles` can in principle accept any object - offsets: Sequence[int] - titles: Sequence[Any] - itemsize: int - aligned: bool + offsets: NotRequired[Sequence[int]] + titles: NotRequired[Sequence[Any]] + itemsize: NotRequired[int] + aligned: NotRequired[bool] -class _HasDType(Protocol[_DTypeT_co]): +# A protocol for anything with the dtype attribute +@runtime_checkable +class _HasDType[DTypeT: np.dtype](Protocol): @property - def dtype(self) -> _DTypeT_co: ... + def dtype(self) -> DTypeT: ... -class _HasNumPyDType(Protocol[_DTypeT_co]): +class _HasNumPyDType[DTypeT: np.dtype](Protocol): @property - def __numpy_dtype__(self, /) -> _DTypeT_co: ... + def __numpy_dtype__(self, /) -> DTypeT: ... -_SupportsDType: TypeAlias = _HasDType[_DTypeT] | _HasNumPyDType[_DTypeT] +type _SupportsDType[DTypeT: np.dtype] = _HasDType[DTypeT] | _HasNumPyDType[DTypeT] # A subset of `npt.DTypeLike` that can be parametrized w.r.t. 
`np.generic` -_DTypeLike: TypeAlias = type[_ScalarT] | np.dtype[_ScalarT] | _SupportsDType[np.dtype[_ScalarT]] +type _DTypeLike[ScalarT: np.generic] = ( + type[ScalarT] | np.dtype[ScalarT] | _SupportsDType[np.dtype[ScalarT]] +) # Would create a dtype[np.void] -_VoidDTypeLike: TypeAlias = ( +type _VoidDTypeLike = ( # If a tuple, then it can be either: # - (flexible_dtype, itemsize) # - (fixed_dtype, shape) @@ -80,31 +75,29 @@ def __numpy_dtype__(self, /) -> _DTypeT_co: ... # Aliases for commonly used dtype-like objects. # Note that the precision of `np.number` subclasses is ignored herein. -_DTypeLikeBool: TypeAlias = type[bool] | _DTypeLike[np.bool] | _BoolCodes -_DTypeLikeInt: TypeAlias = ( - type[int] | _DTypeLike[np.signedinteger] | _SignedIntegerCodes -) -_DTypeLikeUInt: TypeAlias = _DTypeLike[np.unsignedinteger] | _UnsignedIntegerCodes -_DTypeLikeFloat: TypeAlias = type[float] | _DTypeLike[np.floating] | _FloatingCodes -_DTypeLikeComplex: TypeAlias = ( +type _DTypeLikeBool = type[bool] | _DTypeLike[np.bool] | _BoolCodes +type _DTypeLikeInt = type[int] | _DTypeLike[np.signedinteger] | _SignedIntegerCodes +type _DTypeLikeUInt = _DTypeLike[np.unsignedinteger] | _UnsignedIntegerCodes +type _DTypeLikeFloat = type[float] | _DTypeLike[np.floating] | _FloatingCodes +type _DTypeLikeComplex = ( type[complex] | _DTypeLike[np.complexfloating] | _ComplexFloatingCodes ) -_DTypeLikeComplex_co: TypeAlias = ( +type _DTypeLikeComplex_co = ( type[complex] | _DTypeLike[np.bool | np.number] | _BoolCodes | _NumberCodes ) -_DTypeLikeDT64: TypeAlias = _DTypeLike[np.timedelta64] | _TD64Codes -_DTypeLikeTD64: TypeAlias = _DTypeLike[np.datetime64] | _DT64Codes -_DTypeLikeBytes: TypeAlias = type[bytes] | _DTypeLike[np.bytes_] | _BytesCodes -_DTypeLikeStr: TypeAlias = type[str] | _DTypeLike[np.str_] | _StrCodes -_DTypeLikeVoid: TypeAlias = ( +type _DTypeLikeDT64 = _DTypeLike[np.timedelta64] | _TD64Codes +type _DTypeLikeTD64 = _DTypeLike[np.datetime64] | _DT64Codes +type _DTypeLikeBytes = type[bytes] | _DTypeLike[np.bytes_] | _BytesCodes +type _DTypeLikeStr = type[str] | _DTypeLike[np.str_] | _StrCodes +type _DTypeLikeVoid = ( type[memoryview] | _DTypeLike[np.void] | _VoidDTypeLike | _VoidCodes ) -_DTypeLikeObject: TypeAlias = type[object] | _DTypeLike[np.object_] | _ObjectCodes +type _DTypeLikeObject = type[object] | _DTypeLike[np.object_] | _ObjectCodes # Anything that can be coerced into numpy.dtype. # Reference: https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html -DTypeLike: TypeAlias = _DTypeLike[Any] | _VoidDTypeLike | str +type DTypeLike = type | str | np.dtype | _SupportsDType[np.dtype] | _VoidDTypeLike # NOTE: while it is possible to provide the dtype as a dict of # dtype-like objects (e.g. 
`{'field1': ..., 'field2': ..., ...}`), diff --git a/numpy/_typing/_nbit.py b/numpy/_typing/_nbit.py index 60bce3245c7a..1ad5f017eeb9 100644 --- a/numpy/_typing/_nbit.py +++ b/numpy/_typing/_nbit.py @@ -1,19 +1,17 @@ """A module with the precisions of platform-specific `~numpy.number`s.""" -from typing import TypeAlias - from ._nbit_base import _8Bit, _16Bit, _32Bit, _64Bit, _96Bit, _128Bit # To-be replaced with a `npt.NBitBase` subclass by numpy's mypy plugin -_NBitByte: TypeAlias = _8Bit -_NBitShort: TypeAlias = _16Bit -_NBitIntC: TypeAlias = _32Bit -_NBitIntP: TypeAlias = _32Bit | _64Bit -_NBitInt: TypeAlias = _NBitIntP -_NBitLong: TypeAlias = _32Bit | _64Bit -_NBitLongLong: TypeAlias = _64Bit +type _NBitByte = _8Bit +type _NBitShort = _16Bit +type _NBitIntC = _32Bit +type _NBitIntP = _32Bit | _64Bit +type _NBitInt = _NBitIntP +type _NBitLong = _32Bit | _64Bit +type _NBitLongLong = _64Bit -_NBitHalf: TypeAlias = _16Bit -_NBitSingle: TypeAlias = _32Bit -_NBitDouble: TypeAlias = _64Bit -_NBitLongDouble: TypeAlias = _64Bit | _96Bit | _128Bit +type _NBitHalf = _16Bit +type _NBitSingle = _32Bit +type _NBitDouble = _64Bit +type _NBitLongDouble = _64Bit | _96Bit | _128Bit diff --git a/numpy/_typing/_nbit_base.py b/numpy/_typing/_nbit_base.py index 28d3e63c1769..28a60ecbe00f 100644 --- a/numpy/_typing/_nbit_base.py +++ b/numpy/_typing/_nbit_base.py @@ -30,14 +30,13 @@ class NBitBase: .. code-block:: python - >>> from typing import TypeVar, TYPE_CHECKING + >>> from typing import TYPE_CHECKING >>> import numpy as np >>> import numpy.typing as npt - >>> S = TypeVar("S", bound=npt.NBitBase) - >>> T = TypeVar("T", bound=npt.NBitBase) - - >>> def add(a: np.floating[S], b: np.integer[T]) -> np.floating[S | T]: + >>> def add[S: npt.NBitBase, T: npt.NBitBase]( + ... a: np.floating[S], b: np.integer[T] + ... ) -> np.floating[S | T]: ... return a + b >>> a = np.float16() diff --git a/numpy/_typing/_nbit_base.pyi b/numpy/_typing/_nbit_base.pyi index 81810a2caa3b..bd317c896094 100644 --- a/numpy/_typing/_nbit_base.pyi +++ b/numpy/_typing/_nbit_base.pyi @@ -8,7 +8,7 @@ from typing_extensions import deprecated # Deprecated in NumPy 2.3, 2025-05-01 @deprecated( "`NBitBase` is deprecated and will be removed from numpy.typing in the " - "future. Use `@typing.overload` or a `TypeVar` with a scalar-type as upper " + "future. Use `@typing.overload` or a type parameter with a scalar-type as upper " "bound, instead. (deprecated in NumPy 2.3)", ) @final diff --git a/numpy/_typing/_nested_sequence.py b/numpy/_typing/_nested_sequence.py index e3362a9f21fe..13711be397e9 100644 --- a/numpy/_typing/_nested_sequence.py +++ b/numpy/_typing/_nested_sequence.py @@ -1,17 +1,15 @@ """A module containing the `_NestedSequence` protocol.""" -from typing import TYPE_CHECKING, Any, Protocol, TypeVar, runtime_checkable +from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable if TYPE_CHECKING: from collections.abc import Iterator __all__ = ["_NestedSequence"] -_T_co = TypeVar("_T_co", covariant=True) - @runtime_checkable -class _NestedSequence(Protocol[_T_co]): +class _NestedSequence[T](Protocol): """A protocol for representing nested sequences. 
Warning
 
@@ -54,7 +52,7 @@ def __len__(self, /) -> int:
         """Implement ``len(self)``."""
         raise NotImplementedError
 
-    def __getitem__(self, index: int, /) -> "_T_co | _NestedSequence[_T_co]":
+    def __getitem__(self, index: int, /) -> "T | _NestedSequence[T]":
         """Implement ``self[x]``."""
         raise NotImplementedError
 
@@ -62,11 +60,11 @@ def __contains__(self, x: object, /) -> bool:
         """Implement ``x in self``."""
         raise NotImplementedError
 
-    def __iter__(self, /) -> "Iterator[_T_co | _NestedSequence[_T_co]]":
+    def __iter__(self, /) -> "Iterator[T | _NestedSequence[T]]":
         """Implement ``iter(self)``."""
         raise NotImplementedError
 
-    def __reversed__(self, /) -> "Iterator[_T_co | _NestedSequence[_T_co]]":
+    def __reversed__(self, /) -> "Iterator[T | _NestedSequence[T]]":
         """Implement ``reversed(self)``."""
         raise NotImplementedError
diff --git a/numpy/_typing/_scalars.py b/numpy/_typing/_scalars.py
index b0de66d89aa1..2d36c4961c42 100644
--- a/numpy/_typing/_scalars.py
+++ b/numpy/_typing/_scalars.py
@@ -1,20 +1,20 @@
-from typing import Any, TypeAlias
+from typing import Any
 
 import numpy as np
 
 # NOTE: `_StrLike_co` and `_BytesLike_co` are pointless, as `np.str_` and
 # `np.bytes_` are already subclasses of their builtin counterpart
 
-_CharLike_co: TypeAlias = str | bytes
+type _CharLike_co = str | bytes
 
 # The `<X>Like_co` type-aliases below represent all scalars that can be
 # coerced into `<X>` (with the casting rule `same_kind`)
-_BoolLike_co: TypeAlias = bool | np.bool
-_UIntLike_co: TypeAlias = bool | np.unsignedinteger | np.bool
-_IntLike_co: TypeAlias = int | np.integer | np.bool
-_FloatLike_co: TypeAlias = float | np.floating | np.integer | np.bool
-_ComplexLike_co: TypeAlias = complex | np.number | np.bool
-_NumberLike_co: TypeAlias = _ComplexLike_co
-_TD64Like_co: TypeAlias = int | np.timedelta64 | np.integer | np.bool
+type _BoolLike_co = bool | np.bool
+type _UIntLike_co = bool | np.unsignedinteger | np.bool
+type _IntLike_co = int | np.integer | np.bool
+type _FloatLike_co = float | np.floating | np.integer | np.bool
+type _ComplexLike_co = complex | np.number | np.bool
+type _NumberLike_co = _ComplexLike_co
+type _TD64Like_co = int | np.timedelta64 | np.integer | np.bool
 
 # `_VoidLike_co` is technically not a scalar, but it's close enough
-_VoidLike_co: TypeAlias = tuple[Any, ...] | np.void
-_ScalarLike_co: TypeAlias = complex | str | bytes | np.generic
+type _VoidLike_co = tuple[Any, ...] | np.void
+type _ScalarLike_co = complex | str | bytes | np.generic
diff --git a/numpy/_typing/_shape.py b/numpy/_typing/_shape.py
index e297aef2f554..132943b283c8 100644
--- a/numpy/_typing/_shape.py
+++ b/numpy/_typing/_shape.py
@@ -1,8 +1,8 @@
 from collections.abc import Sequence
-from typing import Any, SupportsIndex, TypeAlias
+from typing import Any, SupportsIndex
 
-_Shape: TypeAlias = tuple[int, ...]
-_AnyShape: TypeAlias = tuple[Any, ...]
+type _Shape = tuple[int, ...]
+type _AnyShape = tuple[Any, ...]
# Anything that can be coerced to a shape tuple -_ShapeLike: TypeAlias = SupportsIndex | Sequence[SupportsIndex] +type _ShapeLike = SupportsIndex | Sequence[SupportsIndex] diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi index 9d3fa0e5335c..b9dc5fd5b975 100644 --- a/numpy/_typing/_ufunc.pyi +++ b/numpy/_typing/_ufunc.pyi @@ -10,16 +10,13 @@ from _typeshed import Incomplete from types import EllipsisType from typing import ( Any, - Generic, Literal, LiteralString, Never, NoReturn, Protocol, SupportsIndex, - TypeAlias, TypedDict, - TypeVar, Unpack, overload, type_check_only, @@ -27,31 +24,19 @@ from typing import ( import numpy as np from numpy import _CastingKind, _OrderKACF, ufunc -from numpy.typing import NDArray -from ._array_like import ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co +from ._array_like import ArrayLike, NDArray, _ArrayLikeBool_co, _ArrayLikeInt_co from ._dtype_like import DTypeLike from ._scalars import _ScalarLike_co from ._shape import _ShapeLike -_T = TypeVar("_T") -_2Tuple: TypeAlias = tuple[_T, _T] -_3Tuple: TypeAlias = tuple[_T, _T, _T] -_4Tuple: TypeAlias = tuple[_T, _T, _T, _T] +type _2Tuple[T] = tuple[T, T] +type _3Tuple[T] = tuple[T, T, T] +type _4Tuple[T] = tuple[T, T, T, T] -_2PTuple: TypeAlias = tuple[_T, _T, *tuple[_T, ...]] -_3PTuple: TypeAlias = tuple[_T, _T, _T, *tuple[_T, ...]] -_4PTuple: TypeAlias = tuple[_T, _T, _T, _T, *tuple[_T, ...]] - -_NTypes = TypeVar("_NTypes", bound=int, covariant=True) -_IDType = TypeVar("_IDType", covariant=True) -_NameType = TypeVar("_NameType", bound=LiteralString, covariant=True) -_Signature = TypeVar("_Signature", bound=LiteralString, covariant=True) - -_NIn = TypeVar("_NIn", bound=int, covariant=True) -_NOut = TypeVar("_NOut", bound=int, covariant=True) -_ReturnType_co = TypeVar("_ReturnType_co", covariant=True) -_ArrayT = TypeVar("_ArrayT", bound=np.ndarray) +type _2PTuple[T] = tuple[T, T, *tuple[T, ...]] +type _3PTuple[T] = tuple[T, T, T, *tuple[T, ...]] +type _4PTuple[T] = tuple[T, T, T, T, *tuple[T, ...]] @type_check_only class _SupportsArrayUFunc(Protocol): @@ -89,15 +74,15 @@ class _ReduceKwargs(TypedDict, total=False): # pyright: reportIncompatibleMethodOverride=false @type_check_only -class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] +class _UFunc_Nin1_Nout1[NameT: LiteralString, NTypesT: int, IdentT](ufunc): # type: ignore[misc] @property - def __name__(self) -> _NameType: ... + def __name__(self) -> NameT: ... @property - def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] + def __qualname__(self) -> NameT: ... # pyright: ignore[reportIncompatibleVariableOverride] @property - def ntypes(self) -> _NTypes: ... + def ntypes(self) -> NTypesT: ... @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[1]: ... @property @@ -158,15 +143,15 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def at(self, a: np.ndarray | _SupportsArrayUFunc, indices: _ArrayLikeInt_co, /) -> None: ... # type: ignore[override] @type_check_only -class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] +class _UFunc_Nin2_Nout1[NameT: LiteralString, NTypesT: int, IdentT](ufunc): # type: ignore[misc] @property - def __name__(self) -> _NameType: ... + def __name__(self) -> NameT: ... @property - def __qualname__(self) -> _NameType: ... 
# pyright: ignore[reportIncompatibleVariableOverride] + def __qualname__(self) -> NameT: ... # pyright: ignore[reportIncompatibleVariableOverride] @property - def ntypes(self) -> _NTypes: ... + def ntypes(self) -> NTypesT: ... @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[2]: ... @property @@ -353,15 +338,15 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i ) -> None: ... @type_check_only -class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] +class _UFunc_Nin1_Nout2[NameT: LiteralString, NTypesT: int, IdentT](ufunc): # type: ignore[misc] @property - def __name__(self) -> _NameType: ... + def __name__(self) -> NameT: ... @property - def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] + def __qualname__(self) -> NameT: ... # pyright: ignore[reportIncompatibleVariableOverride] @property - def ntypes(self) -> _NTypes: ... + def ntypes(self) -> NTypesT: ... @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[1]: ... @property @@ -427,15 +412,15 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def at(self, a: Never, indices: Never, /) -> NoReturn: ... # type: ignore[override] @type_check_only -class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] +class _UFunc_Nin2_Nout2[NameT: LiteralString, NTypesT: int, IdentT](ufunc): # type: ignore[misc] @property - def __name__(self) -> _NameType: ... + def __name__(self) -> NameT: ... @property - def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] + def __qualname__(self) -> NameT: ... # pyright: ignore[reportIncompatibleVariableOverride] @property - def ntypes(self) -> _NTypes: ... + def ntypes(self) -> NTypesT: ... @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[2]: ... @property @@ -487,15 +472,15 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def at(self, a: Never, indices: Never, b: Never, /) -> NoReturn: ... # type: ignore[override] @type_check_only -class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature]): # type: ignore[misc] +class _GUFunc_Nin2_Nout1[NameT: LiteralString, NTypesT: int, IdentT, SignatureT: LiteralString](ufunc): # type: ignore[misc] @property - def __name__(self) -> _NameType: ... + def __name__(self) -> NameT: ... @property - def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] + def __qualname__(self) -> NameT: ... # pyright: ignore[reportIncompatibleVariableOverride] @property - def ntypes(self) -> _NTypes: ... + def ntypes(self) -> NTypesT: ... @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[2]: ... @property @@ -503,7 +488,7 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] @property def nargs(self) -> Literal[3]: ... @property - def signature(self) -> _Signature: ... + def signature(self) -> SignatureT: ... 
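For a concrete gufunc these parameters resolve to literal values at runtime, which is what makes the `LiteralString` bounds workable; for orientation (values as reported by current NumPy):

    >>> import numpy as np
    >>> np.matmul.signature
    '(n?,k),(k,m?)->(n?,m?)'
    >>> np.matmul.nin, np.matmul.nout
    (2, 1)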
# Scalar for 1D array-likes; ndarray otherwise @overload @@ -580,9 +565,9 @@ class _PyFunc_Kwargs_Nargs4P(TypedDict, total=False): signature: str | _4PTuple[DTypeLike] @type_check_only -class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: ignore[misc] +class _PyFunc_Nin1_Nout1[ReturnT, IdentT](ufunc): # type: ignore[misc] @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[1]: ... @property @@ -601,7 +586,7 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno /, out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], - ) -> _ReturnType_co: ... + ) -> ReturnT: ... @overload def __call__( self, @@ -609,15 +594,15 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno /, out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], - ) -> _ReturnType_co | NDArray[np.object_]: ... + ) -> ReturnT | NDArray[np.object_]: ... @overload - def __call__( + def __call__[OutT: np.ndarray]( self, x1: ArrayLike, /, - out: _ArrayT | tuple[_ArrayT], + out: OutT | tuple[OutT], **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], - ) -> _ArrayT: ... + ) -> OutT: ... @overload def __call__( self, @@ -635,9 +620,9 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno def at(self, a: np.ndarray | _SupportsArrayUFunc, indices: _ArrayLikeInt_co, /) -> None: ... # type: ignore[override] @type_check_only -class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: ignore[misc] +class _PyFunc_Nin2_Nout1[ReturnT, IdentT](ufunc): # type: ignore[misc] @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[2]: ... @property @@ -657,7 +642,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno /, out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> _ReturnType_co: ... + ) -> ReturnT: ... @overload def __call__( self, @@ -666,16 +651,16 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno /, out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> _ReturnType_co | NDArray[np.object_]: ... + ) -> ReturnT | NDArray[np.object_]: ... @overload - def __call__( + def __call__[OutT: np.ndarray]( self, x1: ArrayLike, x2: ArrayLike, /, - out: _ArrayT | tuple[_ArrayT], + out: OutT | tuple[OutT], **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> _ArrayT: ... + ) -> OutT: ... @overload def __call__( self, @@ -705,28 +690,28 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno out: EllipsisType | None = None, ) -> NDArray[np.object_]: ... @overload - def accumulate( + def accumulate[OutT: np.ndarray]( self, array: ArrayLike, /, axis: SupportsIndex = 0, dtype: DTypeLike | None = None, *, - out: _ArrayT, - ) -> _ArrayT: ... + out: OutT, + ) -> OutT: ... @overload # type: ignore[override] - def reduce( # out=array + def reduce[OutT: np.ndarray]( # out=array self, array: ArrayLike, /, axis: _ShapeLike | None = 0, dtype: DTypeLike | None = None, *, - out: _ArrayT | tuple[_ArrayT], + out: OutT | tuple[OutT], keepdims: bool = False, **kwargs: Unpack[_ReduceKwargs], - ) -> _ArrayT: ... + ) -> OutT: ... @overload # out=... 
def reduce( self, @@ -761,10 +746,10 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno out: EllipsisType | None = None, keepdims: bool = False, **kwargs: Unpack[_ReduceKwargs], - ) -> _ReturnType_co | NDArray[np.object_]: ... + ) -> ReturnT | NDArray[np.object_]: ... @overload # type: ignore[override] - def reduceat( + def reduceat[OutT: np.ndarray]( self, array: ArrayLike, /, @@ -772,8 +757,8 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno axis: SupportsIndex = 0, dtype: DTypeLike | None = None, *, - out: _ArrayT | tuple[_ArrayT], - ) -> _ArrayT: ... + out: OutT | tuple[OutT], + ) -> OutT: ... @overload def reduceat( self, @@ -804,7 +789,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno *, out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> _ReturnType_co: ... + ) -> ReturnT: ... @overload def outer( self, @@ -814,17 +799,17 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno *, out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> _ReturnType_co | NDArray[np.object_]: ... + ) -> ReturnT | NDArray[np.object_]: ... @overload - def outer( + def outer[OutT: np.ndarray]( self, A: ArrayLike, B: ArrayLike, /, *, - out: _ArrayT, + out: OutT, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> _ArrayT: ... + ) -> OutT: ... @overload def outer( self, @@ -855,11 +840,11 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno ) -> None: ... @type_check_only -class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # type: ignore[misc] +class _PyFunc_Nin3P_Nout1[ReturnT, IdentT, NInT: int](ufunc): # type: ignore[misc] @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property - def nin(self) -> _NIn: ... + def nin(self) -> NInT: ... @property def nout(self) -> Literal[1]: ... @property @@ -877,7 +862,7 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ *xs: _ScalarLike_co, out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], - ) -> _ReturnType_co: ... + ) -> ReturnT: ... @overload def __call__( self, @@ -888,18 +873,18 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ *xs: ArrayLike, out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], - ) -> _ReturnType_co | NDArray[np.object_]: ... + ) -> ReturnT | NDArray[np.object_]: ... @overload - def __call__( + def __call__[OutT: np.ndarray]( self, x1: ArrayLike, x2: ArrayLike, x3: ArrayLike, /, *xs: ArrayLike, - out: _ArrayT | tuple[_ArrayT], + out: OutT | tuple[OutT], **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], - ) -> _ArrayT: ... + ) -> OutT: ... @overload def __call__( self, @@ -919,13 +904,13 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ def at(self, a: Never, indices: Never, /, *args: Never) -> NoReturn: ... # type: ignore[override] @type_check_only -class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]): # type: ignore[misc] +class _PyFunc_Nin1P_Nout2P[ReturnT, IdentT, NInT: int, NOutT: int](ufunc): # type: ignore[misc] @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property - def nin(self) -> _NIn: ... + def nin(self) -> NInT: ... @property - def nout(self) -> _NOut: ... + def nout(self) -> NOutT: ... @property def ntypes(self) -> Literal[1]: ... 
@property @@ -939,7 +924,7 @@ class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]) *xs: _ScalarLike_co, out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], - ) -> _2PTuple[_ReturnType_co]: ... + ) -> _2PTuple[ReturnT]: ... @overload def __call__( self, @@ -948,16 +933,16 @@ class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]) *xs: ArrayLike, out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], - ) -> _2PTuple[_ReturnType_co | NDArray[np.object_]]: ... + ) -> _2PTuple[ReturnT | NDArray[np.object_]]: ... @overload - def __call__( + def __call__[OutT: np.ndarray]( self, x1: ArrayLike, /, *xs: ArrayLike, - out: _2PTuple[_ArrayT], + out: _2PTuple[OutT], **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], - ) -> _2PTuple[_ArrayT]: ... + ) -> _2PTuple[OutT]: ... @overload def __call__( self, diff --git a/numpy/_utils/__init__.pyi b/numpy/_utils/__init__.pyi index b630777ced99..7a78cabe60f3 100644 --- a/numpy/_utils/__init__.pyi +++ b/numpy/_utils/__init__.pyi @@ -1,14 +1,11 @@ from _typeshed import IdentityFunction from collections.abc import Callable, Iterable -from typing import Protocol, TypeVar, overload, type_check_only +from typing import Protocol, overload, type_check_only from ._convertions import asbytes as asbytes, asunicode as asunicode ### -_T = TypeVar("_T") -_HasModuleT = TypeVar("_HasModuleT", bound=_HasModule) - @type_check_only class _HasModule(Protocol): __module__: str @@ -18,11 +15,11 @@ class _HasModule(Protocol): @overload def set_module(module: None) -> IdentityFunction: ... @overload -def set_module(module: str) -> Callable[[_HasModuleT], _HasModuleT]: ... +def set_module[ModuleT: _HasModule](module: str) -> Callable[[ModuleT], ModuleT]: ... # -def _rename_parameter( +def _rename_parameter[T]( old_names: Iterable[str], new_names: Iterable[str], dep_version: str | None = None, -) -> Callable[[Callable[..., _T]], Callable[..., _T]]: ... +) -> Callable[[Callable[..., T]], Callable[..., T]]: ... diff --git a/numpy/_utils/_inspect.pyi b/numpy/_utils/_inspect.pyi index 40546d2f4497..dd738025b728 100644 --- a/numpy/_utils/_inspect.pyi +++ b/numpy/_utils/_inspect.pyi @@ -1,21 +1,18 @@ import types from _typeshed import SupportsLenAndGetItem from collections.abc import Callable, Mapping -from typing import Any, Final, TypeAlias, TypeVar, overload +from typing import Any, Final, overload from typing_extensions import TypeIs __all__ = ["formatargspec", "getargspec"] ### -_T = TypeVar("_T") -_RT = TypeVar("_RT") +type _StrSeq = SupportsLenAndGetItem[str] +type _NestedSeq[T] = list[T | _NestedSeq[T]] | tuple[T | _NestedSeq[T], ...] -_StrSeq: TypeAlias = SupportsLenAndGetItem[str] -_NestedSeq: TypeAlias = list[_T | _NestedSeq[_T]] | tuple[_T | _NestedSeq[_T], ...] - -_JoinFunc: TypeAlias = Callable[[list[_T]], _T] -_FormatFunc: TypeAlias = Callable[[_T], str] +type _JoinFunc[T] = Callable[[list[T]], T] +type _FormatFunc[T] = Callable[[T], str] ### @@ -43,7 +40,7 @@ def joinseq(seq: _StrSeq) -> str: ... @overload def strseq(object: _NestedSeq[str], convert: Callable[[Any], Any], join: _JoinFunc[str] = ...) -> str: ... @overload -def strseq(object: _NestedSeq[_T], convert: Callable[[_T], _RT], join: _JoinFunc[_RT]) -> _RT: ... +def strseq[VT, RT](object: _NestedSeq[VT], convert: Callable[[VT], RT], join: _JoinFunc[RT]) -> RT: ... 
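
The `_inspect.pyi` hunk above relies on `type` statements being evaluated lazily, which is why `_NestedSeq` can refer to itself without string quoting. A small runnable sketch of a recursive alias driving a generic function (hypothetical names, Python 3.12+):

    # The alias body is not evaluated at definition time, so the
    # self-reference needs no forward-reference string.
    type NestedSeq[T] = list[T | NestedSeq[T]] | tuple[T | NestedSeq[T], ...]

    def flatten[T](seq: NestedSeq[T]) -> list[T]:
        out: list[T] = []
        for item in seq:
            if isinstance(item, (list, tuple)):
                out.extend(flatten(item))  # recurse into nested containers
            else:
                out.append(item)
        return out

    print(flatten([1, (2, [3, 4]), 5]))  # -> [1, 2, 3, 4, 5]
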
# def formatargspec( diff --git a/numpy/_utils/_pep440.pyi b/numpy/_utils/_pep440.pyi index 11ae02e57a59..593960274814 100644 --- a/numpy/_utils/_pep440.pyi +++ b/numpy/_utils/_pep440.pyi @@ -7,17 +7,15 @@ from typing import ( Generic, Literal as L, NamedTuple, - TypeVar, final, type_check_only, ) -from typing_extensions import TypeIs +from typing_extensions import TypeIs, TypeVar __all__ = ["VERSION_PATTERN", "InvalidVersion", "LegacyVersion", "Version", "parse"] ### -_CmpKeyT = TypeVar("_CmpKeyT", bound=tuple[object, ...]) _CmpKeyT_co = TypeVar("_CmpKeyT_co", bound=tuple[object, ...], default=tuple[Any, ...], covariant=True) ### @@ -71,7 +69,12 @@ class _BaseVersion(Generic[_CmpKeyT_co]): def __le__(self, other: _BaseVersion, /) -> bool: ... def __ge__(self, other: _BaseVersion, /) -> bool: ... def __gt__(self, other: _BaseVersion, /) -> bool: ... - def _compare(self, /, other: _BaseVersion[_CmpKeyT], method: Callable[[_CmpKeyT_co, _CmpKeyT], bool]) -> bool: ... + def _compare[CmpKeyT: tuple[object, ...]]( + self, + /, + other: _BaseVersion[CmpKeyT], + method: Callable[[_CmpKeyT_co, CmpKeyT], bool], + ) -> bool: ... class LegacyVersion(_BaseVersion[tuple[L[-1], tuple[str, ...]]]): _version: Final[str] diff --git a/numpy/ctypeslib/_ctypeslib.pyi b/numpy/ctypeslib/_ctypeslib.pyi index 8881141f8ed5..1d4780aeb422 100644 --- a/numpy/ctypeslib/_ctypeslib.pyi +++ b/numpy/ctypeslib/_ctypeslib.pyi @@ -4,19 +4,16 @@ import ctypes from _typeshed import StrOrBytesPath from collections.abc import Iterable, Sequence from ctypes import c_int64 as _c_intp -from typing import Any, ClassVar, Generic, Literal as L, TypeAlias, TypeVar, overload +from typing import Any, ClassVar, Literal as L, overload import numpy as np from numpy import ( byte, double, - dtype, - generic, intc, long, longdouble, longlong, - ndarray, short, single, ubyte, @@ -54,12 +51,7 @@ from numpy._typing import ( __all__ = ["load_library", "ndpointer", "c_intp", "as_ctypes", "as_array", "as_ctypes_type"] -# TODO: Add a proper `_Shape` bound once we've got variadic typevars -_DTypeT = TypeVar("_DTypeT", bound=dtype) -_DTypeOptionalT = TypeVar("_DTypeOptionalT", bound=dtype | None) -_ScalarT = TypeVar("_ScalarT", bound=generic) - -_FlagsKind: TypeAlias = L[ +type _FlagsKind = L[ "C_CONTIGUOUS", "CONTIGUOUS", "C", "F_CONTIGUOUS", "FORTRAN", "F", "ALIGNED", "A", @@ -68,27 +60,28 @@ _FlagsKind: TypeAlias = L[ "WRITEBACKIFCOPY", "X", ] -# TODO: Add a shape typevar once we have variadic typevars (PEP 646) -class _ndptr(ctypes.c_void_p, Generic[_DTypeOptionalT]): +# TODO: Add a shape type parameter +class _ndptr[OptionalDTypeT: np.dtype | None](ctypes.c_void_p): # In practice these 4 classvars are defined in the dynamic class # returned by `ndpointer` - _dtype_: ClassVar[_DTypeOptionalT] - _shape_: ClassVar[_AnyShape | None] - _ndim_: ClassVar[int | None] - _flags_: ClassVar[list[_FlagsKind] | None] + _dtype_: OptionalDTypeT = ... + _shape_: ClassVar[_AnyShape | None] = ... + _ndim_: ClassVar[int | None] = ... + _flags_: ClassVar[list[_FlagsKind] | None] = ... @overload # type: ignore[override] @classmethod - def from_param(cls: type[_ndptr[None]], obj: NDArray[Any]) -> _ctypes[Any]: ... + def from_param(cls: type[_ndptr[None]], obj: np.ndarray) -> _ctypes[Any]: ... @overload @classmethod - def from_param(cls: type[_ndptr[_DTypeT]], obj: ndarray[Any, _DTypeT]) -> _ctypes[Any]: ... + def from_param[DTypeT: np.dtype](cls: type[_ndptr[DTypeT]], obj: np.ndarray[Any, DTypeT]) -> _ctypes[Any]: ... 
# pyright: ignore[reportIncompatibleMethodOverride] + +class _concrete_ndptr[DTypeT: np.dtype](_ndptr[DTypeT]): + _dtype_: DTypeT = ... + _shape_: ClassVar[_AnyShape] = ... # pyright: ignore[reportIncompatibleVariableOverride] -class _concrete_ndptr(_ndptr[_DTypeT]): - _dtype_: ClassVar[_DTypeT] - _shape_: ClassVar[_AnyShape] @property - def contents(self) -> ndarray[_AnyShape, _DTypeT]: ... + def contents(self) -> np.ndarray[_AnyShape, DTypeT]: ... def load_library(libname: StrOrBytesPath, loader_path: StrOrBytesPath) -> ctypes.CDLL: ... @@ -102,13 +95,13 @@ def ndpointer( flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, ) -> type[_ndptr[None]]: ... @overload -def ndpointer( - dtype: _DTypeLike[_ScalarT], +def ndpointer[ScalarT: np.generic]( + dtype: _DTypeLike[ScalarT], ndim: int | None = None, *, shape: _ShapeLike, flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, -) -> type[_concrete_ndptr[dtype[_ScalarT]]]: ... +) -> type[_concrete_ndptr[np.dtype[ScalarT]]]: ... @overload def ndpointer( dtype: DTypeLike | None, @@ -116,21 +109,21 @@ def ndpointer( *, shape: _ShapeLike, flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, -) -> type[_concrete_ndptr[dtype]]: ... +) -> type[_concrete_ndptr[np.dtype]]: ... @overload -def ndpointer( - dtype: _DTypeLike[_ScalarT], +def ndpointer[ScalarT: np.generic]( + dtype: _DTypeLike[ScalarT], ndim: int | None = None, shape: None = None, flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, -) -> type[_ndptr[dtype[_ScalarT]]]: ... +) -> type[_ndptr[np.dtype[ScalarT]]]: ... @overload def ndpointer( dtype: DTypeLike | None, ndim: int | None = None, shape: None = None, flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, -) -> type[_ndptr[dtype]]: ... +) -> type[_ndptr[np.dtype]]: ... @overload def as_ctypes_type(dtype: _BoolCodes | _DTypeLike[np.bool] | type[ctypes.c_bool]) -> type[ctypes.c_bool]: ... @@ -170,7 +163,7 @@ def as_ctypes_type(dtype: str) -> type[Any]: ... @overload def as_array(obj: ctypes._PointerLike, shape: Sequence[int]) -> NDArray[Any]: ... @overload -def as_array(obj: _ArrayLike[_ScalarT], shape: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... +def as_array[ScalarT: np.generic](obj: _ArrayLike[ScalarT], shape: _ShapeLike | None = None) -> NDArray[ScalarT]: ... @overload def as_array(obj: object, shape: _ShapeLike | None = None) -> NDArray[Any]: ... diff --git a/numpy/dtypes.pyi b/numpy/dtypes.pyi index 3e34113edd4f..920e23c85f1f 100644 --- a/numpy/dtypes.pyi +++ b/numpy/dtypes.pyi @@ -7,7 +7,6 @@ from typing import ( Never, NoReturn, Self, - TypeAlias, final, overload, type_check_only, @@ -52,17 +51,20 @@ __all__ = [ # noqa: RUF022 "StringDType", ] -# Helper base classes (typing-only) +# Type parameters + +_ItemSizeT_co = TypeVar("_ItemSizeT_co", bound=int, default=int, covariant=True) +_NaObjectT_co = TypeVar("_NaObjectT_co", default=Never, covariant=True) -_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) +# Helper base classes (typing-only) @type_check_only -class _SimpleDType(np.dtype[_ScalarT_co], Generic[_ScalarT_co]): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] +class _SimpleDType[ScalarT: np.generic](np.dtype[ScalarT]): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] names: None # pyright: ignore[reportIncompatibleVariableOverride] def __new__(cls, /) -> Self: ... def __getitem__(self, key: Any, /) -> NoReturn: ... 
     @property
-    def base(self) -> np.dtype[_ScalarT_co]: ...
+    def base(self) -> np.dtype[ScalarT]: ...
     @property
     def fields(self) -> None: ...
     @property
@@ -77,7 +79,7 @@ class _SimpleDType(np.dtype[_ScalarT_co], Generic[_ScalarT_co]):  # type: ignore
     def subdtype(self) -> None: ...
 
 @type_check_only
-class _LiteralDType(_SimpleDType[_ScalarT_co], Generic[_ScalarT_co]):  # type: ignore[misc]
+class _LiteralDType[ScalarT_co: np.generic](_SimpleDType[ScalarT_co]):  # type: ignore[misc]
     @property
     def flags(self) -> L[0]: ...
     @property
@@ -85,21 +87,17 @@ class _LiteralDType(_SimpleDType[_ScalarT_co], Generic[_ScalarT_co]):  # type: i
 
 # Helper mixins (typing-only):
 
-_KindT_co = TypeVar("_KindT_co", bound=LiteralString, covariant=True)
-_CharT_co = TypeVar("_CharT_co", bound=LiteralString, covariant=True)
-_NumT_co = TypeVar("_NumT_co", bound=int, covariant=True)
-
 @type_check_only
-class _TypeCodes(Generic[_KindT_co, _CharT_co, _NumT_co]):
+class _TypeCodes[KindT: LiteralString, CharT: LiteralString, NumT: int]:
     @final
     @property
-    def kind(self) -> _KindT_co: ...
+    def kind(self) -> KindT: ...
     @final
     @property
-    def char(self) -> _CharT_co: ...
+    def char(self) -> CharT: ...
    @final
     @property
-    def num(self) -> _NumT_co: ...
+    def num(self) -> NumT: ...
 
 @type_check_only
 class _NoOrder:
@@ -113,17 +111,14 @@ class _NativeOrder:
     @property
     def byteorder(self) -> L["="]: ...
 
-_DataSize_co = TypeVar("_DataSize_co", bound=int, covariant=True)
-_ItemSize_co = TypeVar("_ItemSize_co", bound=int, covariant=True, default=int)
-
 @type_check_only
-class _NBit(Generic[_DataSize_co, _ItemSize_co]):
+class _NBit[AlignmentT: int, ItemSizeT: int]:
     @final
     @property
-    def alignment(self) -> _DataSize_co: ...
+    def alignment(self) -> AlignmentT: ...
     @final
     @property
-    def itemsize(self) -> _ItemSize_co: ...
+    def itemsize(self) -> ItemSizeT: ...
 
 @type_check_only
 class _8Bit(_NoOrder, _NBit[L[1], L[1]]): ...
@@ -238,7 +233,7 @@ class UInt64DType(  # type: ignore[misc]
     def str(self) -> L["u8"]: ...
 
 # Standard C-named version/alias:
-# NOTE: Don't make these `Final`: it will break stubtest
+# NOTE: Don't make these `Final[_]` or a `type _` alias: it will break stubtest
 ByteDType = Int8DType
 UByteDType = UInt8DType
 ShortDType = Int16DType
@@ -426,11 +421,11 @@ class ObjectDType(  # type: ignore[misc]
 class BytesDType(  # type: ignore[misc]
     _TypeCodes[L["S"], L["S"], L[18]],
     _NoOrder,
-    _NBit[L[1], _ItemSize_co],
+    _NBit[L[1], _ItemSizeT_co],
     _SimpleDType[np.bytes_],
-    Generic[_ItemSize_co],
+    Generic[_ItemSizeT_co],
 ):
-    def __new__(cls, size: _ItemSize_co, /) -> BytesDType[_ItemSize_co]: ...
+    def __new__[ItemSizeT: int](cls, size: ItemSizeT, /) -> BytesDType[ItemSizeT]: ...
     @property
     def hasobject(self) -> L[False]: ...
     @property
@@ -442,11 +437,11 @@ class BytesDType(  # type: ignore[misc]
 class StrDType(  # type: ignore[misc]
     _TypeCodes[L["U"], L["U"], L[19]],
     _NativeOrder,
-    _NBit[L[4], _ItemSize_co],
+    _NBit[L[4], _ItemSizeT_co],
     _SimpleDType[np.str_],
-    Generic[_ItemSize_co],
+    Generic[_ItemSizeT_co],
 ):
-    def __new__(cls, size: _ItemSize_co, /) -> StrDType[_ItemSize_co]: ...
+    def __new__[ItemSizeT: int](cls, size: ItemSizeT, /) -> StrDType[ItemSizeT]: ...
     @property
     def hasobject(self) -> L[False]: ...
@property @@ -458,12 +453,12 @@ class StrDType( # type: ignore[misc] class VoidDType( # type: ignore[misc] _TypeCodes[L["V"], L["V"], L[20]], _NoOrder, - _NBit[L[1], _ItemSize_co], + _NBit[L[1], _ItemSizeT_co], np.dtype[np.void], # pyright: ignore[reportGeneralTypeIssues] - Generic[_ItemSize_co], + Generic[_ItemSizeT_co], ): # NOTE: `VoidDType(...)` raises a `TypeError` at the moment - def __new__(cls, length: _ItemSize_co, /) -> NoReturn: ... + def __new__(cls, length: _ItemSizeT_co, /) -> NoReturn: ... @property def base(self) -> Self: ... @property @@ -483,9 +478,9 @@ class VoidDType( # type: ignore[misc] # Other: -_DateUnit: TypeAlias = L["Y", "M", "W", "D"] -_TimeUnit: TypeAlias = L["h", "m", "s", "ms", "us", "ns", "ps", "fs", "as"] -_DateTimeUnit: TypeAlias = _DateUnit | _TimeUnit +type _DateUnit = L["Y", "M", "W", "D"] +type _TimeUnit = L["h", "m", "s", "ms", "us", "ns", "ps", "fs", "as"] +type _DateTimeUnit = _DateUnit | _TimeUnit @final class DateTime64DType( # type: ignore[misc] @@ -577,8 +572,6 @@ class TimeDelta64DType( # type: ignore[misc] "m8[as]", ]: ... -_NaObjectT_co = TypeVar("_NaObjectT_co", default=Never, covariant=True) - @final class StringDType( # type: ignore[misc] _TypeCodes[L["T"], L["T"], L[2056]], diff --git a/numpy/f2py/_backends/_meson.pyi b/numpy/f2py/_backends/_meson.pyi index 5c85c61586fc..55ff9f7ae78d 100644 --- a/numpy/f2py/_backends/_meson.pyi +++ b/numpy/f2py/_backends/_meson.pyi @@ -1,7 +1,6 @@ from collections.abc import Callable from pathlib import Path -from typing import Final, Literal as L -from typing_extensions import override +from typing import Final, Literal as L, override from ._backend import Backend diff --git a/numpy/f2py/auxfuncs.pyi b/numpy/f2py/auxfuncs.pyi index 32e381cf9a1c..fbf0ad764aae 100644 --- a/numpy/f2py/auxfuncs.pyi +++ b/numpy/f2py/auxfuncs.pyi @@ -1,7 +1,7 @@ from _typeshed import FileDescriptorOrPath from collections.abc import Callable, Mapping from pprint import pprint as show -from typing import Any, Final, Literal as L, Never, TypeAlias, TypeVar, overload +from typing import Any, Final, Literal as L, Never, overload from .cfuncs import errmess @@ -106,15 +106,12 @@ __all__ = [ ### -_VT = TypeVar("_VT") -_RT = TypeVar("_RT") +type _Var = Mapping[str, list[str]] +type _ROut = Mapping[str, str] +type _F2CMap = Mapping[str, Mapping[str, str]] -_Var: TypeAlias = Mapping[str, list[str]] -_ROut: TypeAlias = Mapping[str, str] -_F2CMap: TypeAlias = Mapping[str, Mapping[str, str]] - -_Bool: TypeAlias = bool | L[0, 1] -_Intent: TypeAlias = L[ +type _Bool = bool | L[0, 1] +type _Intent = L[ "INTENT_IN", "INTENT_OUT", "INTENT_INOUT", @@ -140,9 +137,9 @@ class throw_error: def __call__(self, /, var: _Var) -> Never: ... # raises F2PYError # -def l_and(*f: tuple[str, Callable[[_VT], _RT]]) -> Callable[[_VT], _RT]: ... -def l_or(*f: tuple[str, Callable[[_VT], _RT]]) -> Callable[[_VT], _RT]: ... -def l_not(f: tuple[str, Callable[[_VT], _RT]]) -> Callable[[_VT], _RT]: ... +def l_and[VT, RT](*f: tuple[str, Callable[[VT], RT]]) -> Callable[[VT], RT]: ... +def l_or[VT, RT](*f: tuple[str, Callable[[VT], RT]]) -> Callable[[VT], RT]: ... +def l_not[VT, RT](f: tuple[str, Callable[[VT], RT]]) -> Callable[[VT], RT]: ... # def outmess(t: str) -> None: ... 
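
`l_and`, `l_or`, and `l_not` above are higher-order combinators whose argument and result types must stay linked, which the inline `[VT, RT]` parameters express without module-level TypeVars. A sketch of the same idea with a hypothetical `compose` helper (not part of this patch; Python 3.12+):

    from collections.abc import Callable

    # A, B and C exist only to tie f's output to g's input and to
    # propagate the end-to-end signature to the returned callable.
    def compose[A, B, C](f: Callable[[A], B], g: Callable[[B], C]) -> Callable[[A], C]:
        def composed(x: A) -> C:
            return g(f(x))
        return composed

    is_even_length = compose(len, lambda n: n % 2 == 0)
    print(is_even_length("abcd"))  # -> True
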
diff --git a/numpy/f2py/cfuncs.pyi b/numpy/f2py/cfuncs.pyi index 5887177752c3..2187368797a4 100644 --- a/numpy/f2py/cfuncs.pyi +++ b/numpy/f2py/cfuncs.pyi @@ -1,11 +1,11 @@ -from typing import Final, TypeAlias +from typing import Final from .__version__ import version ### -_NeedListDict: TypeAlias = dict[str, list[str]] -_NeedDict: TypeAlias = dict[str, str] +type _NeedListDict = dict[str, list[str]] +type _NeedDict = dict[str, str] ### diff --git a/numpy/f2py/crackfortran.pyi b/numpy/f2py/crackfortran.pyi index 742d358916a2..09213e156636 100644 --- a/numpy/f2py/crackfortran.pyi +++ b/numpy/f2py/crackfortran.pyi @@ -1,28 +1,16 @@ import re from _typeshed import StrOrBytesPath, StrPath from collections.abc import Callable, Iterable, Mapping -from typing import ( - IO, - Any, - Concatenate, - Final, - Literal as L, - Never, - ParamSpec, - TypeAlias, - overload, -) +from typing import IO, Any, Concatenate, Final, Literal as L, Never, overload from .__version__ import version from .auxfuncs import isintent_dict as isintent_dict ### -_Tss = ParamSpec("_Tss") - -_VisitResult: TypeAlias = list[Any] | dict[str, Any] | None -_VisitItem: TypeAlias = tuple[str | None, _VisitResult] -_VisitFunc: TypeAlias = Callable[Concatenate[_VisitItem, list[_VisitItem], _VisitResult, _Tss], _VisitItem | None] +type _VisitResult = list[Any] | dict[str, Any] | None +type _VisitItem = tuple[str | None, _VisitResult] +type _VisitFunc[**Tss] = Callable[Concatenate[_VisitItem, list[_VisitItem], _VisitResult, Tss], _VisitItem | None] ### @@ -243,13 +231,13 @@ def crackfortran(files: StrOrBytesPath | Iterable[StrOrBytesPath]) -> list[dict[ def crack2fortran(block: Mapping[str, Any]) -> str: ... # -def traverse( +def traverse[**Tss]( obj: tuple[str | None, _VisitResult], - visit: _VisitFunc[_Tss], + visit: _VisitFunc[Tss], parents: list[tuple[str | None, _VisitResult]] = [], result: list[Any] | dict[str, Any] | None = None, - *args: _Tss.args, - **kwargs: _Tss.kwargs, + *args: Tss.args, + **kwargs: Tss.kwargs, ) -> _VisitItem | _VisitResult: ... # diff --git a/numpy/f2py/f2py2e.pyi b/numpy/f2py/f2py2e.pyi index 686898041b9d..4dd6a9f73ec3 100644 --- a/numpy/f2py/f2py2e.pyi +++ b/numpy/f2py/f2py2e.pyi @@ -2,17 +2,13 @@ import argparse import pprint from collections.abc import Hashable, Iterable, Mapping, MutableMapping, Sequence from types import ModuleType -from typing import Any, Final, NotRequired, TypedDict, type_check_only -from typing_extensions import TypeVar, override +from typing import Any, Final, NotRequired, TypedDict, override, type_check_only from .__version__ import version from .auxfuncs import _Bool, outmess as outmess ### -_KT = TypeVar("_KT", bound=Hashable) -_VT = TypeVar("_VT") - @type_check_only class _F2PyDict(TypedDict): csrc: list[str] @@ -54,7 +50,7 @@ def main() -> None: ... def scaninputline(inputline: Iterable[str]) -> tuple[list[str], dict[str, _Bool]]: ... def callcrackfortran(files: list[str], options: dict[str, bool]) -> list[dict[str, Any]]: ... def buildmodules(lst: Iterable[Mapping[str, object]]) -> dict[str, dict[str, Any]]: ... -def dict_append(d_out: MutableMapping[_KT, _VT], d_in: Mapping[_KT, _VT]) -> None: ... +def dict_append[KT: Hashable, VT](d_out: MutableMapping[KT, VT], d_in: Mapping[KT, VT]) -> None: ... 
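
`traverse` and `_VisitFunc` above use the PEP 695 spelling of a ParamSpec, `**Tss`, so the extra `*args`/`**kwargs` forwarded to the visitor keep the visitor's own parameter types. A sketch of the same mechanism with a hypothetical decorator (Python 3.12+):

    import time
    from collections.abc import Callable

    # `**P` declares the ParamSpec inline, replacing `P = ParamSpec("P")`;
    # the wrapper forwards its arguments without erasing their types.
    def timed[**P, R](func: Callable[P, R]) -> Callable[P, R]:
        def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
            start = time.perf_counter()
            try:
                return func(*args, **kwargs)
            finally:
                print(f"{func.__name__}: {time.perf_counter() - start:.6f}s")
        return wrapper

    @timed
    def add(a: int, b: int) -> int:
        return a + b

    add(1, 2)  # typed exactly like the undecorated function
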
def filter_files( prefix: str, suffix: str, diff --git a/numpy/f2py/rules.pyi b/numpy/f2py/rules.pyi index 30439f6b8351..c45d42289363 100644 --- a/numpy/f2py/rules.pyi +++ b/numpy/f2py/rules.pyi @@ -1,17 +1,12 @@ from collections.abc import Callable, Iterable, Mapping -from typing import Any, Final, Literal as L, TypeAlias -from typing_extensions import TypeVar +from typing import Any, Final, Literal as L from .__version__ import version from .auxfuncs import _Bool, _Var -### - -_VT = TypeVar("_VT", default=str) - -_Predicate: TypeAlias = Callable[[_Var], _Bool] -_RuleDict: TypeAlias = dict[str, _VT] -_DefDict: TypeAlias = dict[_Predicate, _VT] +type _Predicate = Callable[[_Var], _Bool] +type _RuleDict[VT] = dict[str, VT] +type _DefDict[VT] = dict[_Predicate, VT] ### @@ -24,9 +19,9 @@ sepdict: Final[dict[str, str]] = ... generationtime: Final[int] = ... typedef_need_dict: Final[_DefDict[str]] = ... -module_rules: Final[_RuleDict[str | list[str] | _RuleDict]] = ... -routine_rules: Final[_RuleDict[str | list[str] | _DefDict | _RuleDict]] = ... -defmod_rules: Final[list[_RuleDict[str | _DefDict]]] = ... +module_rules: Final[_RuleDict[str | list[str] | _RuleDict[str]]] = ... +routine_rules: Final[_RuleDict[str | list[str] | _DefDict[str] | _RuleDict[str]]] = ... +defmod_rules: Final[list[_RuleDict[str | _DefDict[str]]]] = ... rout_rules: Final[list[_RuleDict[str | Any]]] = ... aux_rules: Final[list[_RuleDict[str | Any]]] = ... arg_rules: Final[list[_RuleDict[str | Any]]] = ... @@ -34,8 +29,8 @@ check_rules: Final[list[_RuleDict[str | Any]]] = ... stnd: Final[dict[L[1, 2, 3, 4, 5, 6, 7, 8, 9, 0], L["st", "nd", "rd", "th"]]] = ... -def buildmodule(m: Mapping[str, str | Any], um: Iterable[Mapping[str, str | Any]]) -> _RuleDict: ... -def buildapi(rout: Mapping[str, str]) -> tuple[_RuleDict, str]: ... +def buildmodule(m: Mapping[str, str | Any], um: Iterable[Mapping[str, str | Any]]) -> _RuleDict[str]: ... +def buildapi(rout: Mapping[str, str]) -> tuple[_RuleDict[str], str]: ... # namespace pollution k: str diff --git a/numpy/f2py/symbolic.pyi b/numpy/f2py/symbolic.pyi index 06be2bb16044..542dde12791f 100644 --- a/numpy/f2py/symbolic.pyi +++ b/numpy/f2py/symbolic.pyi @@ -1,28 +1,25 @@ from collections.abc import Callable, Mapping from enum import Enum -from typing import Any, Generic, Literal as L, ParamSpec, Self, TypeAlias, overload +from typing import Any, Generic, Literal as L, Self, overload from typing_extensions import TypeVar __all__ = ["Expr"] ### -_Tss = ParamSpec("_Tss") -_ExprT = TypeVar("_ExprT", bound=Expr) -_ExprT1 = TypeVar("_ExprT1", bound=Expr) -_ExprT2 = TypeVar("_ExprT2", bound=Expr) +# Explicit covariance is required here due to the inexpressible read-only attributes. _OpT_co = TypeVar("_OpT_co", bound=Op, default=Op, covariant=True) _LanguageT_co = TypeVar("_LanguageT_co", bound=Language, default=Language, covariant=True) _DataT_co = TypeVar("_DataT_co", default=Any, covariant=True) _LeftT_co = TypeVar("_LeftT_co", default=Any, covariant=True) _RightT_co = TypeVar("_RightT_co", default=Any, covariant=True) -_RelCOrPy: TypeAlias = L["==", "!=", "<", "<=", ">", ">="] -_RelFortran: TypeAlias = L[".eq.", ".ne.", ".lt.", ".le.", ".gt.", ".ge."] +type _RelCOrPy = L["==", "!=", "<", "<=", ">", ">="] +type _RelFortran = L[".eq.", ".ne.", ".lt.", ".le.", ".gt.", ".ge."] -_ToExpr: TypeAlias = Expr | complex | str -_ToExprN: TypeAlias = _ToExpr | tuple[_ToExprN, ...] -_NestedString: TypeAlias = str | tuple[_NestedString, ...] 
| list[_NestedString] +type _ToExpr = Expr | complex | str +type _ToExprN = _ToExpr | tuple[_ToExprN, ...] +type _NestedString = str | tuple[_NestedString, ...] | list[_NestedString] ### @@ -97,8 +94,8 @@ class Precedence(Enum): NONE = 100 class Expr(Generic[_OpT_co, _DataT_co]): - op: _OpT_co - data: _DataT_co + op: _OpT_co # read-only + data: _DataT_co # read-only @staticmethod def parse(s: str, language: Language = ...) -> Expr: ... @@ -149,7 +146,7 @@ class Expr(Generic[_OpT_co, _DataT_co]): # @overload - def __getitem__(self, index: _ExprT | tuple[_ExprT], /) -> Expr[L[Op.INDEXING], tuple[Self, _ExprT]]: ... + def __getitem__[ExprT: Expr](self, index: ExprT | tuple[ExprT], /) -> Expr[L[Op.INDEXING], tuple[Self, ExprT]]: ... @overload def __getitem__(self, index: _ToExpr | tuple[_ToExpr], /) -> Expr[L[Op.INDEXING], tuple[Self, Expr]]: ... @@ -158,9 +155,9 @@ class Expr(Generic[_OpT_co, _DataT_co]): # @overload - def traverse(self, /, visit: Callable[_Tss, None], *args: _Tss.args, **kwargs: _Tss.kwargs) -> Expr: ... + def traverse[**Tss](self, /, visit: Callable[Tss, None], *args: Tss.args, **kwargs: Tss.kwargs) -> Expr: ... @overload - def traverse(self, /, visit: Callable[_Tss, _ExprT], *args: _Tss.args, **kwargs: _Tss.kwargs) -> _ExprT: ... + def traverse[**Tss, ExprT: Expr](self, /, visit: Callable[Tss, ExprT], *args: Tss.args, **kwargs: Tss.kwargs) -> ExprT: ... # def contains(self, /, other: Expr) -> bool: ... @@ -176,23 +173,23 @@ class Expr(Generic[_OpT_co, _DataT_co]): def tostring(self, /, parent_precedence: Precedence = ..., language: Language = ...) -> str: ... class _Pair(Generic[_LeftT_co, _RightT_co]): - left: _LeftT_co - right: _RightT_co + left: _LeftT_co # read-only + right: _RightT_co # read-only def __init__(self, /, left: _LeftT_co, right: _RightT_co) -> None: ... # @overload - def substitute(self: _Pair[_ExprT1, _ExprT2], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Expr, Expr]: ... + def substitute[ExprT: Expr](self: _Pair[ExprT, ExprT], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Expr, Expr]: ... @overload - def substitute(self: _Pair[_ExprT1, object], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Expr, Any]: ... + def substitute[ExprT: Expr](self: _Pair[ExprT, object], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Expr, Any]: ... @overload - def substitute(self: _Pair[object, _ExprT2], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Any, Expr]: ... + def substitute[ExprT: Expr](self: _Pair[object, ExprT], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Any, Expr]: ... @overload def substitute(self, /, symbols_map: Mapping[Expr, Expr]) -> _Pair: ... class _FromStringWorker(Generic[_LanguageT_co]): - language: _LanguageT_co + language: _LanguageT_co # read-only original: str | None quotes_map: dict[str, str] diff --git a/numpy/fft/_helper.pyi b/numpy/fft/_helper.pyi index 1ea451ec2eb1..8f1a3c7bab89 100644 --- a/numpy/fft/_helper.pyi +++ b/numpy/fft/_helper.pyi @@ -1,4 +1,4 @@ -from typing import Any, Final, Literal as L, TypeVar, overload +from typing import Any, Final, Literal as L, overload from numpy import complexfloating, floating, generic, integer from numpy._typing import ( @@ -12,8 +12,6 @@ from numpy._typing import ( __all__ = ["fftfreq", "fftshift", "ifftshift", "rfftfreq"] -_ScalarT = TypeVar("_ScalarT", bound=generic) - ### integer_types: Final[tuple[type[int], type[integer]]] = ... @@ -21,13 +19,13 @@ integer_types: Final[tuple[type[int], type[integer]]] = ... 
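
`_NormKind` above becomes a `type` statement; unlike an implicit `TypeAlias` assignment, the result is a `TypeAliasType` object whose value is computed lazily and can be inspected at runtime. A sketch with a hypothetical `NormKind` (the scaling logic is illustrative only, not a claim about pocketfft internals; Python 3.12+):

    from typing import Literal

    type NormKind = Literal["backward", "ortho", "forward"] | None

    def scale_factor(n: int, norm: NormKind = None) -> float:
        # map the norm mode to a factor for an n-point transform
        if norm in (None, "backward"):
            return 1.0
        if norm == "ortho":
            return n ** -0.5
        return 1.0 / n

    print(NormKind.__value__)  # the alias body is evaluated on first access
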
### @overload -def fftshift(x: _ArrayLike[_ScalarT], axes: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... +def fftshift[ScalarT: generic](x: _ArrayLike[ScalarT], axes: _ShapeLike | None = None) -> NDArray[ScalarT]: ... @overload def fftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... # @overload -def ifftshift(x: _ArrayLike[_ScalarT], axes: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... +def ifftshift[ScalarT: generic](x: _ArrayLike[ScalarT], axes: _ShapeLike | None = None) -> NDArray[ScalarT]: ... @overload def ifftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... diff --git a/numpy/fft/_pocketfft.pyi b/numpy/fft/_pocketfft.pyi index 3234c64ed169..d34404edb149 100644 --- a/numpy/fft/_pocketfft.pyi +++ b/numpy/fft/_pocketfft.pyi @@ -1,5 +1,5 @@ from collections.abc import Sequence -from typing import Literal as L, TypeAlias +from typing import Literal as L from numpy import complex128, float64 from numpy._typing import ArrayLike, NDArray, _ArrayLikeNumber_co @@ -21,7 +21,7 @@ __all__ = [ "ifftn", ] -_NormKind: TypeAlias = L["backward", "ortho", "forward"] | None +type _NormKind = L["backward", "ortho", "forward"] | None def fft( a: ArrayLike, diff --git a/numpy/lib/_arraypad_impl.pyi b/numpy/lib/_arraypad_impl.pyi index e7aacea43254..da7c89859d86 100644 --- a/numpy/lib/_arraypad_impl.pyi +++ b/numpy/lib/_arraypad_impl.pyi @@ -1,20 +1,10 @@ -from typing import ( - Any, - Literal as L, - Protocol, - TypeAlias, - TypeVar, - overload, - type_check_only, -) +from typing import Any, Literal as L, Protocol, overload, type_check_only -from numpy import generic +import numpy as np from numpy._typing import ArrayLike, NDArray, _ArrayLike, _ArrayLikeInt __all__ = ["pad"] -_ScalarT = TypeVar("_ScalarT", bound=generic) - @type_check_only class _ModeFunc(Protocol): def __call__( @@ -26,7 +16,7 @@ class _ModeFunc(Protocol): /, ) -> None: ... -_ModeKind: TypeAlias = L[ +type _ModeKind = L[ "constant", "edge", "linear_ramp", @@ -40,19 +30,22 @@ _ModeKind: TypeAlias = L[ "empty", ] -# TODO: In practice each keyword argument is exclusive to one or more -# specific modes. Consider adding more overloads to express this in the future. - -_PadWidth: TypeAlias = ( +type _PadWidth = ( _ArrayLikeInt | dict[int, int] | dict[int, tuple[int, int]] | dict[int, int | tuple[int, int]] ) + +### + +# TODO: In practice each keyword argument is exclusive to one or more +# specific modes. Consider adding more overloads to express this in the future. + # Expand `**kwargs` into explicit keyword-only arguments @overload -def pad( - array: _ArrayLike[_ScalarT], +def pad[ScalarT: np.generic]( + array: _ArrayLike[ScalarT], pad_width: _PadWidth, mode: _ModeKind = "constant", *, @@ -60,7 +53,7 @@ def pad( constant_values: ArrayLike = 0, end_values: ArrayLike = 0, reflect_type: L["odd", "even"] = "even", -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def pad( array: ArrayLike, @@ -73,12 +66,12 @@ def pad( reflect_type: L["odd", "even"] = "even", ) -> NDArray[Any]: ... @overload -def pad( - array: _ArrayLike[_ScalarT], +def pad[ScalarT: np.generic]( + array: _ArrayLike[ScalarT], pad_width: _PadWidth, mode: _ModeFunc, **kwargs: Any, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... 
@overload def pad( array: ArrayLike, diff --git a/numpy/lib/_arraysetops_impl.pyi b/numpy/lib/_arraysetops_impl.pyi index da687da03dde..b0b918bd3e23 100644 --- a/numpy/lib/_arraysetops_impl.pyi +++ b/numpy/lib/_arraysetops_impl.pyi @@ -1,13 +1,4 @@ -from typing import ( - Any, - Generic, - Literal as L, - NamedTuple, - SupportsIndex, - TypeAlias, - overload, -) -from typing_extensions import TypeVar +from typing import Any, Literal as L, NamedTuple, SupportsIndex, TypeVar, overload import numpy as np from numpy._typing import ( @@ -32,16 +23,13 @@ __all__ = [ "unique_values", ] -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_NumericT = TypeVar("_NumericT", bound=np.number | np.timedelta64 | np.object_) - # Explicitly set all allowed values to prevent accidental castings to # abstract dtypes (their common super-type). # Only relevant if two or more arguments are parametrized, (e.g. `setdiff1d`) # which could result in, for example, `int64` and `float64`producing a # `number[_64Bit]` array -_EitherSCT = TypeVar( - "_EitherSCT", +_AnyScalarT = TypeVar( + "_AnyScalarT", np.bool, np.int8, np.int16, np.int32, np.int64, np.intp, np.uint8, np.uint16, np.uint32, np.uint64, np.uintp, @@ -52,23 +40,23 @@ _EitherSCT = TypeVar( np.integer, np.floating, np.complexfloating, np.character, ) # fmt: skip -_AnyArray: TypeAlias = NDArray[Any] -_IntArray: TypeAlias = NDArray[np.intp] +type _NumericScalar = np.number | np.timedelta64 | np.object_ +type _IntArray = NDArray[np.intp] ### -class UniqueAllResult(NamedTuple, Generic[_ScalarT]): - values: NDArray[_ScalarT] +class UniqueAllResult[ScalarT: np.generic](NamedTuple): + values: NDArray[ScalarT] indices: _IntArray inverse_indices: _IntArray counts: _IntArray -class UniqueCountsResult(NamedTuple, Generic[_ScalarT]): - values: NDArray[_ScalarT] +class UniqueCountsResult[ScalarT: np.generic](NamedTuple): + values: NDArray[ScalarT] counts: _IntArray -class UniqueInverseResult(NamedTuple, Generic[_ScalarT]): - values: NDArray[_ScalarT] +class UniqueInverseResult[ScalarT: np.generic](NamedTuple): + values: NDArray[ScalarT] inverse_indices: _IntArray # @@ -79,11 +67,11 @@ def ediff1d( to_begin: ArrayLike | None = None, ) -> NDArray[np.int8]: ... @overload -def ediff1d( - ary: _ArrayLike[_NumericT], +def ediff1d[NumericT: _NumericScalar]( + ary: _ArrayLike[NumericT], to_end: ArrayLike | None = None, to_begin: ArrayLike | None = None, -) -> NDArray[_NumericT]: ... +) -> NDArray[NumericT]: ... @overload def ediff1d( ary: _ArrayLike[np.datetime64[Any]], @@ -95,12 +83,12 @@ def ediff1d( ary: _ArrayLikeNumber_co, to_end: ArrayLike | None = None, to_begin: ArrayLike | None = None, -) -> _AnyArray: ... +) -> np.ndarray: ... # @overload # known scalar-type, FFF -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[False] = False, return_inverse: L[False] = False, return_counts: L[False] = False, @@ -108,7 +96,7 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload # unknown scalar-type, FFF def unique( ar: ArrayLike, @@ -119,10 +107,10 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> _AnyArray: ... +) -> np.ndarray: ... 
@overload # known scalar-type, TFF -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[True], return_inverse: L[False] = False, return_counts: L[False] = False, @@ -130,7 +118,7 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray]: ... @overload # unknown scalar-type, TFF def unique( ar: ArrayLike, @@ -141,10 +129,10 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray]: ... @overload # known scalar-type, FTF (positional) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[False], return_inverse: L[True], return_counts: L[False] = False, @@ -152,10 +140,10 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray]: ... @overload # known scalar-type, FTF (keyword) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[False] = False, *, return_inverse: L[True], @@ -163,7 +151,7 @@ def unique( axis: SupportsIndex | None = None, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray]: ... @overload # unknown scalar-type, FTF (positional) def unique( ar: ArrayLike, @@ -174,7 +162,7 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray]: ... @overload # unknown scalar-type, FTF (keyword) def unique( ar: ArrayLike, @@ -185,10 +173,10 @@ def unique( axis: SupportsIndex | None = None, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray]: ... @overload # known scalar-type, FFT (positional) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[False], return_inverse: L[False], return_counts: L[True], @@ -196,10 +184,10 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray]: ... @overload # known scalar-type, FFT (keyword) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[False] = False, return_inverse: L[False] = False, *, @@ -207,7 +195,7 @@ def unique( axis: SupportsIndex | None = None, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray]: ... @overload # unknown scalar-type, FFT (positional) def unique( ar: ArrayLike, @@ -218,7 +206,7 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray]: ... @overload # unknown scalar-type, FFT (keyword) def unique( ar: ArrayLike, @@ -229,10 +217,10 @@ def unique( axis: SupportsIndex | None = None, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray]: ... 
@overload # known scalar-type, TTF -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[True], return_inverse: L[True], return_counts: L[False] = False, @@ -240,7 +228,7 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray]: ... @overload # unknown scalar-type, TTF def unique( ar: ArrayLike, @@ -251,10 +239,10 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... @overload # known scalar-type, TFT (positional) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[True], return_inverse: L[False], return_counts: L[True], @@ -262,10 +250,10 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray]: ... @overload # known scalar-type, TFT (keyword) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[True], return_inverse: L[False] = False, *, @@ -273,7 +261,7 @@ def unique( axis: SupportsIndex | None = None, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray]: ... @overload # unknown scalar-type, TFT (positional) def unique( ar: ArrayLike, @@ -284,7 +272,7 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... @overload # unknown scalar-type, TFT (keyword) def unique( ar: ArrayLike, @@ -295,10 +283,10 @@ def unique( axis: SupportsIndex | None = None, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... @overload # known scalar-type, FTT (positional) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[False], return_inverse: L[True], return_counts: L[True], @@ -306,10 +294,10 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray]: ... @overload # known scalar-type, FTT (keyword) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[False] = False, *, return_inverse: L[True], @@ -317,7 +305,7 @@ def unique( axis: SupportsIndex | None = None, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray]: ... @overload # unknown scalar-type, FTT (positional) def unique( ar: ArrayLike, @@ -328,7 +316,7 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... @overload # unknown scalar-type, FTT (keyword) def unique( ar: ArrayLike, @@ -339,10 +327,10 @@ def unique( axis: SupportsIndex | None = None, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... 
@overload # known scalar-type, TTT -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[True], return_inverse: L[True], return_counts: L[True], @@ -350,7 +338,7 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray, _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray, _IntArray]: ... @overload # unknown scalar-type, TTT def unique( ar: ArrayLike, @@ -361,69 +349,69 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray, _IntArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray, _IntArray, _IntArray]: ... # @overload -def unique_all(x: _ArrayLike[_ScalarT]) -> UniqueAllResult[_ScalarT]: ... +def unique_all[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> UniqueAllResult[ScalarT]: ... @overload def unique_all(x: ArrayLike) -> UniqueAllResult[Any]: ... # @overload -def unique_counts(x: _ArrayLike[_ScalarT]) -> UniqueCountsResult[_ScalarT]: ... +def unique_counts[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> UniqueCountsResult[ScalarT]: ... @overload def unique_counts(x: ArrayLike) -> UniqueCountsResult[Any]: ... # @overload -def unique_inverse(x: _ArrayLike[_ScalarT]) -> UniqueInverseResult[_ScalarT]: ... +def unique_inverse[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> UniqueInverseResult[ScalarT]: ... @overload def unique_inverse(x: ArrayLike) -> UniqueInverseResult[Any]: ... # @overload -def unique_values(x: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +def unique_values[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> NDArray[ScalarT]: ... @overload -def unique_values(x: ArrayLike) -> _AnyArray: ... +def unique_values(x: ArrayLike) -> np.ndarray: ... # @overload # known scalar-type, return_indices=False (default) def intersect1d( - ar1: _ArrayLike[_EitherSCT], - ar2: _ArrayLike[_EitherSCT], + ar1: _ArrayLike[_AnyScalarT], + ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False, return_indices: L[False] = False, -) -> NDArray[_EitherSCT]: ... +) -> NDArray[_AnyScalarT]: ... @overload # known scalar-type, return_indices=True (positional) def intersect1d( - ar1: _ArrayLike[_EitherSCT], - ar2: _ArrayLike[_EitherSCT], + ar1: _ArrayLike[_AnyScalarT], + ar2: _ArrayLike[_AnyScalarT], assume_unique: bool, return_indices: L[True], -) -> tuple[NDArray[_EitherSCT], _IntArray, _IntArray]: ... +) -> tuple[NDArray[_AnyScalarT], _IntArray, _IntArray]: ... @overload # known scalar-type, return_indices=True (keyword) def intersect1d( - ar1: _ArrayLike[_EitherSCT], - ar2: _ArrayLike[_EitherSCT], + ar1: _ArrayLike[_AnyScalarT], + ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False, *, return_indices: L[True], -) -> tuple[NDArray[_EitherSCT], _IntArray, _IntArray]: ... +) -> tuple[NDArray[_AnyScalarT], _IntArray, _IntArray]: ... @overload # unknown scalar-type, return_indices=False (default) def intersect1d( ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False, return_indices: L[False] = False, -) -> _AnyArray: ... +) -> np.ndarray: ... @overload # unknown scalar-type, return_indices=True (positional) def intersect1d( ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool, return_indices: L[True], -) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... 
@overload # unknown scalar-type, return_indices=True (keyword) def intersect1d( ar1: ArrayLike, @@ -431,25 +419,25 @@ def intersect1d( assume_unique: bool = False, *, return_indices: L[True], -) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... # @overload -def setxor1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT], assume_unique: bool = False) -> NDArray[_EitherSCT]: ... +def setxor1d(ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False) -> NDArray[_AnyScalarT]: ... @overload -def setxor1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _AnyArray: ... +def setxor1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> np.ndarray: ... # @overload -def union1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT]) -> NDArray[_EitherSCT]: ... +def union1d(ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT]) -> NDArray[_AnyScalarT]: ... @overload -def union1d(ar1: ArrayLike, ar2: ArrayLike) -> _AnyArray: ... +def union1d(ar1: ArrayLike, ar2: ArrayLike) -> np.ndarray: ... # @overload -def setdiff1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT], assume_unique: bool = False) -> NDArray[_EitherSCT]: ... +def setdiff1d(ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False) -> NDArray[_AnyScalarT]: ... @overload -def setdiff1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _AnyArray: ... +def setdiff1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> np.ndarray: ... # def isin( diff --git a/numpy/lib/_arrayterator_impl.pyi b/numpy/lib/_arrayterator_impl.pyi index c70872f6e753..a1a4428885fd 100644 --- a/numpy/lib/_arrayterator_impl.pyi +++ b/numpy/lib/_arrayterator_impl.pyi @@ -2,7 +2,7 @@ from collections.abc import Generator from types import EllipsisType -from typing import Any, Final, TypeAlias, overload +from typing import Any, Final, overload from typing_extensions import TypeVar import numpy as np @@ -10,12 +10,11 @@ from numpy._typing import _AnyShape, _Shape __all__ = ["Arrayterator"] +# Type parameter default syntax (PEP 696) requires Python 3.13+ _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) _DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_AnyIndex: TypeAlias = EllipsisType | int | slice | tuple[EllipsisType | int | slice, ...] +type _AnyIndex = EllipsisType | int | slice | tuple[EllipsisType | int | slice, ...] # NOTE: In reality `Arrayterator` does not actually inherit from `ndarray`, # but its ``__getattr__` method does wrap around the former and thus has @@ -31,7 +30,7 @@ class Arrayterator(np.ndarray[_ShapeT_co, _DTypeT_co]): @property # type: ignore[misc] def shape(self) -> _ShapeT_co: ... @property - def flat(self: Arrayterator[Any, np.dtype[_ScalarT]]) -> Generator[_ScalarT]: ... # type: ignore[override] + def flat[ScalarT: np.generic](self: Arrayterator[Any, np.dtype[ScalarT]]) -> Generator[ScalarT]: ... # type: ignore[override] # def __init__(self, /, var: np.ndarray[_ShapeT_co, _DTypeT_co], buf_size: int | None = None) -> None: ... @@ -42,4 +41,4 @@ class Arrayterator(np.ndarray[_ShapeT_co, _DTypeT_co]): @overload def __array__(self, /, dtype: None = None, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ... 
@overload - def __array__(self, /, dtype: _DTypeT, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT]: ... + def __array__[DTypeT: np.dtype](self, /, dtype: DTypeT, copy: bool | None = None) -> np.ndarray[_ShapeT_co, DTypeT]: ... diff --git a/numpy/lib/_datasource.pyi b/numpy/lib/_datasource.pyi index dba0434a5fab..33af9cf1b197 100644 --- a/numpy/lib/_datasource.pyi +++ b/numpy/lib/_datasource.pyi @@ -1,8 +1,8 @@ from _typeshed import OpenBinaryMode, OpenTextMode from pathlib import Path -from typing import IO, Any, TypeAlias +from typing import IO, Any -_Mode: TypeAlias = OpenBinaryMode | OpenTextMode +type _Mode = OpenBinaryMode | OpenTextMode ### diff --git a/numpy/lib/_format_impl.pyi b/numpy/lib/_format_impl.pyi index b45df02796d7..f8b9a7ab88a9 100644 --- a/numpy/lib/_format_impl.pyi +++ b/numpy/lib/_format_impl.pyi @@ -1,6 +1,6 @@ import os from _typeshed import SupportsRead, SupportsWrite -from typing import Any, BinaryIO, Final, TypeAlias, TypeGuard +from typing import Any, BinaryIO, Final, TypeGuard import numpy as np import numpy.typing as npt @@ -8,7 +8,7 @@ from numpy.lib._utils_impl import drop_metadata as drop_metadata __all__: list[str] = [] -_DTypeDescr: TypeAlias = list[tuple[str, str]] | list[tuple[str, str, tuple[int, ...]]] +type _DTypeDescr = list[tuple[str, str]] | list[tuple[str, str, tuple[int, ...]]] ### diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index d68918560b69..769c321b9988 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -5,15 +5,13 @@ from typing import ( Concatenate, Literal as L, Never, - ParamSpec, Protocol, SupportsIndex, SupportsInt, - TypeAlias, overload, type_check_only, ) -from typing_extensions import TypeIs, TypeVar +from typing_extensions import TypeIs import numpy as np from numpy import _OrderKACF @@ -36,6 +34,7 @@ from numpy._typing import ( _NestedSequence as _SeqND, _NumberLike_co, _ScalarLike_co, + _Shape, _ShapeLike, _SupportsArray, ) @@ -80,39 +79,15 @@ __all__ = [ "quantile", ] -_T = TypeVar("_T") -_T_co = TypeVar("_T_co", covariant=True) -# The `{}ss` suffix refers to the PEP 695 (Python 3.12) `ParamSpec` syntax, `**P`. 
-_Tss = ParamSpec("_Tss") +type _ArrayLike1D[ScalarT: np.generic] = _SupportsArray[np.dtype[ScalarT]] | Sequence[ScalarT] -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_ScalarT1 = TypeVar("_ScalarT1", bound=np.generic) -_ScalarT2 = TypeVar("_ScalarT2", bound=np.generic) -_FloatingT = TypeVar("_FloatingT", bound=np.floating) -_InexactT = TypeVar("_InexactT", bound=np.inexact) -_InexactTimeT = TypeVar("_InexactTimeT", bound=np.inexact | np.timedelta64) -_InexactDateTimeT = TypeVar("_InexactDateTimeT", bound=np.inexact | np.timedelta64 | np.datetime64) -_ScalarNumericT = TypeVar("_ScalarNumericT", bound=np.inexact | np.timedelta64 | np.object_) -_AnyDoubleT = TypeVar("_AnyDoubleT", bound=np.float64 | np.longdouble | np.complex128 | np.clongdouble) - -_ArrayT = TypeVar("_ArrayT", bound=np.ndarray) -_ArrayFloatingT = TypeVar("_ArrayFloatingT", bound=NDArray[np.floating]) -_ArrayFloatObjT = TypeVar("_ArrayFloatObjT", bound=NDArray[np.floating | np.object_]) -_ArrayComplexT = TypeVar("_ArrayComplexT", bound=NDArray[np.complexfloating]) -_ArrayInexactT = TypeVar("_ArrayInexactT", bound=NDArray[np.inexact]) -_ArrayNumericT = TypeVar("_ArrayNumericT", bound=NDArray[np.inexact | np.timedelta64 | np.object_]) - -_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_ScalarT]] | Sequence[_ScalarT] - -_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) - -_integer_co: TypeAlias = np.integer | np.bool -_float64_co: TypeAlias = np.float64 | _integer_co -_floating_co: TypeAlias = np.floating | _integer_co +type _integer_co = np.integer | np.bool +type _float64_co = np.float64 | _integer_co +type _floating_co = np.floating | _integer_co # non-trivial scalar-types that will become `complex128` in `sort_complex()`, # i.e. all numeric scalar types except for `[u]int{8,16} | longdouble` -_SortsToComplex128: TypeAlias = ( +type _SortsToComplex128 = ( np.bool | np.int32 | np.uint32 @@ -124,33 +99,37 @@ _SortsToComplex128: TypeAlias = ( | np.timedelta64 | np.object_ ) +type _ScalarNumeric = np.inexact | np.timedelta64 | np.object_ +type _InexactDouble = np.float64 | np.longdouble | np.complex128 | np.clongdouble -_Array: TypeAlias = np.ndarray[_ShapeT, np.dtype[_ScalarT]] -_Array0D: TypeAlias = np.ndarray[tuple[()], np.dtype[_ScalarT]] -_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] -_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] -_Array3D: TypeAlias = np.ndarray[tuple[int, int, int], np.dtype[_ScalarT]] -_ArrayMax2D: TypeAlias = np.ndarray[tuple[int] | tuple[int, int], np.dtype[_ScalarT]] +type _Array[ShapeT: _Shape, ScalarT: np.generic] = np.ndarray[ShapeT, np.dtype[ScalarT]] +type _Array0D[ScalarT: np.generic] = np.ndarray[tuple[()], np.dtype[ScalarT]] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +type _Array3D[ScalarT: np.generic] = np.ndarray[tuple[int, int, int], np.dtype[ScalarT]] +type _ArrayMax2D[ScalarT: np.generic] = np.ndarray[tuple[int] | tuple[int, int], np.dtype[ScalarT]] # workaround for mypy and pyright not following the typing spec for overloads -_ArrayNoD: TypeAlias = np.ndarray[tuple[Never, Never, Never, Never], np.dtype[_ScalarT]] +type _ArrayNoD[ScalarT: np.generic] = np.ndarray[tuple[Never, Never, Never, Never], np.dtype[ScalarT]] -_Seq1D: TypeAlias = Sequence[_T] -_Seq2D: TypeAlias = Sequence[Sequence[_T]] -_Seq3D: TypeAlias = Sequence[Sequence[Sequence[_T]]] -_ListSeqND: TypeAlias = list[_T] | _SeqND[list[_T]] +type _Seq1D[T] = 
Sequence[T] +type _Seq2D[T] = Sequence[Sequence[T]] +type _Seq3D[T] = Sequence[Sequence[Sequence[T]]] +type _ListSeqND[T] = list[T] | _SeqND[list[T]] -_Tuple2: TypeAlias = tuple[_T, _T] -_Tuple3: TypeAlias = tuple[_T, _T, _T] -_Tuple4: TypeAlias = tuple[_T, _T, _T, _T] +type _Tuple2[T] = tuple[T, T] +type _Tuple3[T] = tuple[T, T, T] +type _Tuple4[T] = tuple[T, T, T, T] -_Mesh1: TypeAlias = tuple[_Array1D[_ScalarT]] -_Mesh2: TypeAlias = tuple[_Array2D[_ScalarT], _Array2D[_ScalarT1]] -_Mesh3: TypeAlias = tuple[_Array3D[_ScalarT], _Array3D[_ScalarT1], _Array3D[_ScalarT2]] +type _Mesh1[ScalarT: np.generic] = tuple[_Array1D[ScalarT]] +type _Mesh2[ScalarT: np.generic, ScalarT1: np.generic] = tuple[_Array2D[ScalarT], _Array2D[ScalarT1]] +type _Mesh3[ScalarT: np.generic, ScalarT1: np.generic, ScalarT2: np.generic] = tuple[ + _Array3D[ScalarT], _Array3D[ScalarT1], _Array3D[ScalarT2] +] -_IndexLike: TypeAlias = slice | _ArrayLikeInt_co +type _IndexLike = slice | _ArrayLikeInt_co -_Indexing: TypeAlias = L["ij", "xy"] -_InterpolationMethod = L[ +type _Indexing = L["ij", "xy"] +type _InterpolationMethod = L[ "inverted_cdf", "averaged_inverted_cdf", "closest_observation", @@ -168,24 +147,24 @@ _InterpolationMethod = L[ # The resulting value will be used as `y[cond] = func(vals, *args, **kw)`, so in can # return any (usually 1d) array-like or scalar-like compatible with the input. -_PiecewiseFunction: TypeAlias = Callable[Concatenate[NDArray[_ScalarT], _Tss], ArrayLike] -_PiecewiseFunctions: TypeAlias = _SizedIterable[_PiecewiseFunction[_ScalarT, _Tss] | _ScalarLike_co] +type _PiecewiseFunction[ScalarT: np.generic, **Tss] = Callable[Concatenate[NDArray[ScalarT], Tss], ArrayLike] +type _PiecewiseFunctions[ScalarT: np.generic, **Tss] = _SizedIterable[_PiecewiseFunction[ScalarT, Tss] | _ScalarLike_co] @type_check_only -class _TrimZerosSequence(Protocol[_T_co]): +class _TrimZerosSequence[T](Protocol): def __len__(self, /) -> int: ... @overload def __getitem__(self, key: int, /) -> object: ... @overload - def __getitem__(self, key: slice, /) -> _T_co: ... + def __getitem__(self, key: slice, /) -> T: ... @type_check_only -class _SupportsRMulFloat(Protocol[_T_co]): - def __rmul__(self, other: float, /) -> _T_co: ... +class _SupportsRMulFloat[T](Protocol): + def __rmul__(self, other: float, /) -> T: ... @type_check_only -class _SizedIterable(Protocol[_T_co]): - def __iter__(self) -> Iterable[_T_co]: ... +class _SizedIterable[T](Protocol): + def __iter__(self) -> Iterable[T]: ... def __len__(self) -> int: ... ### @@ -212,18 +191,18 @@ class vectorize: def __call__(self, /, *args: Incomplete, **kwargs: Incomplete) -> Incomplete: ... @overload -def rot90(m: _ArrayT, k: int = 1, axes: tuple[int, int] = (0, 1)) -> _ArrayT: ... +def rot90[ArrayT: np.ndarray](m: ArrayT, k: int = 1, axes: tuple[int, int] = (0, 1)) -> ArrayT: ... @overload -def rot90(m: _ArrayLike[_ScalarT], k: int = 1, axes: tuple[int, int] = (0, 1)) -> NDArray[_ScalarT]: ... +def rot90[ScalarT: np.generic](m: _ArrayLike[ScalarT], k: int = 1, axes: tuple[int, int] = (0, 1)) -> NDArray[ScalarT]: ... @overload def rot90(m: ArrayLike, k: int = 1, axes: tuple[int, int] = (0, 1)) -> NDArray[Incomplete]: ... # NOTE: Technically `flip` also accept scalars, but that has no effect and complicates # the overloads significantly, so we ignore that case here. @overload -def flip(m: _ArrayT, axis: int | tuple[int, ...] | None = None) -> _ArrayT: ... +def flip[ArrayT: np.ndarray](m: ArrayT, axis: int | tuple[int, ...] | None = None) -> ArrayT: ... 
@overload -def flip(m: _ArrayLike[_ScalarT], axis: int | tuple[int, ...] | None = None) -> NDArray[_ScalarT]: ... +def flip[ScalarT: np.generic](m: _ArrayLike[ScalarT], axis: int | tuple[int, ...] | None = None) -> NDArray[ScalarT]: ... @overload def flip(m: ArrayLike, axis: int | tuple[int, ...] | None = None) -> NDArray[Incomplete]: ... @@ -235,77 +214,77 @@ def iterable(y: object) -> TypeIs[Iterable[Any]]: ... # NOTE: This assumes that if `keepdims=True` the input is at least 1d, and will # therefore always return an array. @overload # inexact array, keepdims=True -def average( - a: _ArrayInexactT, +def average[ArrayT: NDArray[np.inexact]]( + a: ArrayT, axis: int | tuple[int, ...] | None = None, weights: _ArrayLikeNumber_co | None = None, returned: L[False] = False, *, keepdims: L[True], -) -> _ArrayInexactT: ... +) -> ArrayT: ... @overload # inexact array, returned=True keepdims=True -def average( - a: _ArrayInexactT, +def average[ArrayT: NDArray[np.inexact]]( + a: ArrayT, axis: int | tuple[int, ...] | None = None, weights: _ArrayLikeNumber_co | None = None, *, returned: L[True], keepdims: L[True], -) -> _Tuple2[_ArrayInexactT]: ... +) -> _Tuple2[ArrayT]: ... @overload # inexact array-like, axis=None -def average( - a: _ArrayLike[_InexactT], +def average[ScalarT: np.inexact]( + a: _ArrayLike[ScalarT], axis: None = None, weights: _ArrayLikeNumber_co | None = None, returned: L[False] = False, *, keepdims: L[False] | _NoValueType = ..., -) -> _InexactT: ... +) -> ScalarT: ... @overload # inexact array-like, axis= -def average( - a: _ArrayLike[_InexactT], +def average[ScalarT: np.inexact]( + a: _ArrayLike[ScalarT], axis: int | tuple[int, ...], weights: _ArrayLikeNumber_co | None = None, returned: L[False] = False, *, keepdims: L[False] | _NoValueType = ..., -) -> NDArray[_InexactT]: ... +) -> NDArray[ScalarT]: ... @overload # inexact array-like, keepdims=True -def average( - a: _ArrayLike[_InexactT], +def average[ScalarT: np.inexact]( + a: _ArrayLike[ScalarT], axis: int | tuple[int, ...] | None = None, weights: _ArrayLikeNumber_co | None = None, returned: L[False] = False, *, keepdims: L[True], -) -> NDArray[_InexactT]: ... +) -> NDArray[ScalarT]: ... @overload # inexact array-like, axis=None, returned=True -def average( - a: _ArrayLike[_InexactT], +def average[ScalarT: np.inexact]( + a: _ArrayLike[ScalarT], axis: None = None, weights: _ArrayLikeNumber_co | None = None, *, returned: L[True], keepdims: L[False] | _NoValueType = ..., -) -> _Tuple2[_InexactT]: ... +) -> _Tuple2[ScalarT]: ... @overload # inexact array-like, axis=, returned=True -def average( - a: _ArrayLike[_InexactT], +def average[ScalarT: np.inexact]( + a: _ArrayLike[ScalarT], axis: int | tuple[int, ...], weights: _ArrayLikeNumber_co | None = None, *, returned: L[True], keepdims: L[False] | _NoValueType = ..., -) -> _Tuple2[NDArray[_InexactT]]: ... +) -> _Tuple2[NDArray[ScalarT]]: ... @overload # inexact array-like, returned=True, keepdims=True -def average( - a: _ArrayLike[_InexactT], +def average[ScalarT: np.inexact]( + a: _ArrayLike[ScalarT], axis: int | tuple[int, ...] | None = None, weights: _ArrayLikeNumber_co | None = None, *, returned: L[True], keepdims: L[True], -) -> _Tuple2[NDArray[_InexactT]]: ... +) -> _Tuple2[NDArray[ScalarT]]: ... @overload # bool or integer array-like, axis=None def average( a: _SeqND[float] | _ArrayLikeInt_co, @@ -471,15 +450,19 @@ def average( # @overload -def asarray_chkfinite(a: _ArrayT, dtype: None = None, order: _OrderKACF = None) -> _ArrayT: ... 
+def asarray_chkfinite[ArrayT: np.ndarray](a: ArrayT, dtype: None = None, order: _OrderKACF = None) -> ArrayT: ... @overload -def asarray_chkfinite( - a: np.ndarray[_ShapeT], dtype: _DTypeLike[_ScalarT], order: _OrderKACF = None -) -> _Array[_ShapeT, _ScalarT]: ... +def asarray_chkfinite[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT], dtype: _DTypeLike[ScalarT], order: _OrderKACF = None +) -> _Array[ShapeT, ScalarT]: ... @overload -def asarray_chkfinite(a: _ArrayLike[_ScalarT], dtype: None = None, order: _OrderKACF = None) -> NDArray[_ScalarT]: ... +def asarray_chkfinite[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], dtype: None = None, order: _OrderKACF = None +) -> NDArray[ScalarT]: ... @overload -def asarray_chkfinite(a: object, dtype: _DTypeLike[_ScalarT], order: _OrderKACF = None) -> NDArray[_ScalarT]: ... +def asarray_chkfinite[ScalarT: np.generic]( + a: object, dtype: _DTypeLike[ScalarT], order: _OrderKACF = None +) -> NDArray[ScalarT]: ... @overload def asarray_chkfinite(a: object, dtype: DTypeLike | None = None, order: _OrderKACF = None) -> NDArray[Incomplete]: ... @@ -488,33 +471,33 @@ def asarray_chkfinite(a: object, dtype: DTypeLike | None = None, order: _OrderKA # practice anything that `np.array(condlist, dtype=bool)` accepts will work, i.e. any # array-like. @overload -def piecewise( - x: _Array[_ShapeT, _ScalarT], +def piecewise[ShapeT: _Shape, ScalarT: np.generic, **Tss]( + x: _Array[ShapeT, ScalarT], condlist: ArrayLike, - funclist: _PiecewiseFunctions[Any, _Tss], - *args: _Tss.args, - **kw: _Tss.kwargs, -) -> _Array[_ShapeT, _ScalarT]: ... + funclist: _PiecewiseFunctions[Any, Tss], + *args: Tss.args, + **kw: Tss.kwargs, +) -> _Array[ShapeT, ScalarT]: ... @overload -def piecewise( - x: _ArrayLike[_ScalarT], +def piecewise[ScalarT: np.generic, **Tss]( + x: _ArrayLike[ScalarT], condlist: ArrayLike, - funclist: _PiecewiseFunctions[Any, _Tss], - *args: _Tss.args, - **kw: _Tss.kwargs, -) -> NDArray[_ScalarT]: ... + funclist: _PiecewiseFunctions[Any, Tss], + *args: Tss.args, + **kw: Tss.kwargs, +) -> NDArray[ScalarT]: ... @overload -def piecewise( +def piecewise[ScalarT: np.generic, **Tss]( x: ArrayLike, condlist: ArrayLike, - funclist: _PiecewiseFunctions[_ScalarT, _Tss], - *args: _Tss.args, - **kw: _Tss.kwargs, -) -> NDArray[_ScalarT]: ... + funclist: _PiecewiseFunctions[ScalarT, Tss], + *args: Tss.args, + **kw: Tss.kwargs, +) -> NDArray[ScalarT]: ... # NOTE: condition is usually boolean, but anything with zero/non-zero semantics works @overload -def extract(condition: ArrayLike, arr: _ArrayLike[_ScalarT]) -> _Array1D[_ScalarT]: ... +def extract[ScalarT: np.generic](condition: ArrayLike, arr: _ArrayLike[ScalarT]) -> _Array1D[ScalarT]: ... @overload def extract(condition: ArrayLike, arr: _SeqND[bool]) -> _Array1D[np.bool]: ... @overload @@ -533,17 +516,17 @@ def extract(condition: ArrayLike, arr: ArrayLike) -> _Array1D[Incomplete]: ... # NOTE: unlike `extract`, passing non-boolean conditions for `condlist` will raise an # error at runtime @overload -def select( +def select[ArrayT: np.ndarray]( condlist: _SizedIterable[_ArrayLikeBool_co], - choicelist: Sequence[_ArrayT], + choicelist: Sequence[ArrayT], default: _ScalarLike_co = 0, -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def select( +def select[ScalarT: np.generic]( condlist: _SizedIterable[_ArrayLikeBool_co], - choicelist: Sequence[_ArrayLike[_ScalarT]] | NDArray[_ScalarT], + choicelist: Sequence[_ArrayLike[ScalarT]] | NDArray[ScalarT], default: _ScalarLike_co = 0, -) -> NDArray[_ScalarT]: ... 
+) -> NDArray[ScalarT]: ... @overload def select( condlist: _SizedIterable[_ArrayLikeBool_co], @@ -553,44 +536,44 @@ def select( # keep roughly in sync with `ma.core.copy` @overload -def copy(a: _ArrayT, order: _OrderKACF, subok: L[True]) -> _ArrayT: ... +def copy[ArrayT: np.ndarray](a: ArrayT, order: _OrderKACF, subok: L[True]) -> ArrayT: ... @overload -def copy(a: _ArrayT, order: _OrderKACF = "K", *, subok: L[True]) -> _ArrayT: ... +def copy[ArrayT: np.ndarray](a: ArrayT, order: _OrderKACF = "K", *, subok: L[True]) -> ArrayT: ... @overload -def copy(a: _ArrayLike[_ScalarT], order: _OrderKACF = "K", subok: L[False] = False) -> NDArray[_ScalarT]: ... +def copy[ScalarT: np.generic](a: _ArrayLike[ScalarT], order: _OrderKACF = "K", subok: L[False] = False) -> NDArray[ScalarT]: ... @overload def copy(a: ArrayLike, order: _OrderKACF = "K", subok: L[False] = False) -> NDArray[Incomplete]: ... # @overload # ?d, known inexact scalar-type -def gradient( - f: _ArrayNoD[_InexactTimeT], +def gradient[ScalarT: np.inexact | np.timedelta64]( + f: _ArrayNoD[ScalarT], *varargs: _ArrayLikeNumber_co, axis: _ShapeLike | None = None, edge_order: L[1, 2] = 1, # `| Any` instead of ` | tuple` is returned to avoid several mypy_primer errors -) -> _Array1D[_InexactTimeT] | Any: ... +) -> _Array1D[ScalarT] | Any: ... @overload # 1d, known inexact scalar-type -def gradient( - f: _Array1D[_InexactTimeT], +def gradient[ScalarT: np.inexact | np.timedelta64]( + f: _Array1D[ScalarT], *varargs: _ArrayLikeNumber_co, axis: _ShapeLike | None = None, edge_order: L[1, 2] = 1, -) -> _Array1D[_InexactTimeT]: ... +) -> _Array1D[ScalarT]: ... @overload # 2d, known inexact scalar-type -def gradient( - f: _Array2D[_InexactTimeT], +def gradient[ScalarT: np.inexact | np.timedelta64]( + f: _Array2D[ScalarT], *varargs: _ArrayLikeNumber_co, axis: _ShapeLike | None = None, edge_order: L[1, 2] = 1, -) -> _Mesh2[_InexactTimeT, _InexactTimeT]: ... +) -> _Mesh2[ScalarT, ScalarT]: ... @overload # 3d, known inexact scalar-type -def gradient( - f: _Array3D[_InexactTimeT], +def gradient[ScalarT: np.inexact | np.timedelta64]( + f: _Array3D[ScalarT], *varargs: _ArrayLikeNumber_co, axis: _ShapeLike | None = None, edge_order: L[1, 2] = 1, -) -> _Mesh3[_InexactTimeT, _InexactTimeT, _InexactTimeT]: ... +) -> _Mesh3[ScalarT, ScalarT, ScalarT]: ... @overload # ?d, datetime64 scalar-type def gradient( f: _ArrayNoD[np.datetime64], @@ -671,37 +654,37 @@ def gradient( # @overload # n == 0; return input unchanged -def diff( - a: _T, +def diff[T]( + a: T, n: L[0], axis: SupportsIndex = -1, prepend: ArrayLike | _NoValueType = ..., # = _NoValue append: ArrayLike | _NoValueType = ..., # = _NoValue -) -> _T: ... +) -> T: ... @overload # known array-type -def diff( - a: _ArrayNumericT, +def diff[ArrayT: NDArray[_ScalarNumeric]]( + a: ArrayT, n: int = 1, axis: SupportsIndex = -1, prepend: ArrayLike | _NoValueType = ..., append: ArrayLike | _NoValueType = ..., -) -> _ArrayNumericT: ... +) -> ArrayT: ... @overload # known shape, datetime64 -def diff( - a: _Array[_ShapeT, np.datetime64], +def diff[ShapeT: _Shape]( + a: _Array[ShapeT, np.datetime64], n: int = 1, axis: SupportsIndex = -1, prepend: ArrayLike | _NoValueType = ..., append: ArrayLike | _NoValueType = ..., -) -> _Array[_ShapeT, np.timedelta64]: ... +) -> _Array[ShapeT, np.timedelta64]: ... 
@overload # unknown shape, known scalar-type -def diff( - a: _ArrayLike[_ScalarNumericT], +def diff[ScalarT: _ScalarNumeric]( + a: _ArrayLike[ScalarT], n: int = 1, axis: SupportsIndex = -1, prepend: ArrayLike | _NoValueType = ..., append: ArrayLike | _NoValueType = ..., -) -> NDArray[_ScalarNumericT]: ... +) -> NDArray[ScalarT]: ... @overload # unknown shape, datetime64 def diff( a: _ArrayLike[np.datetime64], @@ -787,23 +770,23 @@ def interp( period: _FloatLike_co | None = None, ) -> np.complex128: ... @overload # float array -def interp( - x: _Array[_ShapeT, _floating_co], +def interp[ShapeT: _Shape]( + x: _Array[ShapeT, _floating_co], xp: _ArrayLikeFloat_co, fp: _ArrayLikeFloat_co, left: _FloatLike_co | None = None, right: _FloatLike_co | None = None, period: _FloatLike_co | None = None, -) -> _Array[_ShapeT, np.float64]: ... +) -> _Array[ShapeT, np.float64]: ... @overload # complex array -def interp( - x: _Array[_ShapeT, _floating_co], +def interp[ShapeT: _Shape]( + x: _Array[ShapeT, _floating_co], xp: _ArrayLikeFloat_co, fp: _ArrayLike1D[np.complexfloating] | list[complex], left: _NumberLike_co | None = None, right: _NumberLike_co | None = None, period: _FloatLike_co | None = None, -) -> _Array[_ShapeT, np.complex128]: ... +) -> _Array[ShapeT, np.complex128]: ... @overload # float sequence def interp( x: _Seq1D[_FloatLike_co], @@ -870,7 +853,7 @@ def interp( # @overload # 0d T: floating -> 0d T -def angle(z: _FloatingT, deg: bool = False) -> _FloatingT: ... +def angle[FloatingT: np.floating](z: FloatingT, deg: bool = False) -> FloatingT: ... @overload # 0d complex | float | ~integer -> 0d float64 def angle(z: complex | _integer_co, deg: bool = False) -> np.float64: ... @overload # 0d complex64 -> 0d float32 @@ -878,13 +861,13 @@ def angle(z: np.complex64, deg: bool = False) -> np.float32: ... @overload # 0d clongdouble -> 0d longdouble def angle(z: np.clongdouble, deg: bool = False) -> np.longdouble: ... @overload # T: nd floating -> T -def angle(z: _ArrayFloatingT, deg: bool = False) -> _ArrayFloatingT: ... +def angle[ArrayFloatingT: NDArray[np.floating]](z: ArrayFloatingT, deg: bool = False) -> ArrayFloatingT: ... @overload # nd T: complex128 | ~integer -> nd float64 -def angle(z: _Array[_ShapeT, np.complex128 | _integer_co], deg: bool = False) -> _Array[_ShapeT, np.float64]: ... +def angle[ShapeT: _Shape](z: _Array[ShapeT, np.complex128 | _integer_co], deg: bool = False) -> _Array[ShapeT, np.float64]: ... @overload # nd T: complex64 -> nd float32 -def angle(z: _Array[_ShapeT, np.complex64], deg: bool = False) -> _Array[_ShapeT, np.float32]: ... +def angle[ShapeT: _Shape](z: _Array[ShapeT, np.complex64], deg: bool = False) -> _Array[ShapeT, np.float32]: ... @overload # nd T: clongdouble -> nd longdouble -def angle(z: _Array[_ShapeT, np.clongdouble], deg: bool = False) -> _Array[_ShapeT, np.longdouble]: ... +def angle[ShapeT: _Shape](z: _Array[ShapeT, np.clongdouble], deg: bool = False) -> _Array[ShapeT, np.longdouble]: ... @overload # 1d complex -> 1d float64 def angle(z: _Seq1D[complex], deg: bool = False) -> _Array1D[np.float64]: ... @overload # 2d complex -> 2d float64 @@ -896,21 +879,21 @@ def angle(z: _ArrayLikeComplex_co, deg: bool = False) -> NDArray[np.floating] | # @overload # known array-type -def unwrap( - p: _ArrayFloatObjT, +def unwrap[ArrayT: NDArray[np.floating | np.object_]]( + p: ArrayT, discont: float | None = None, axis: int = -1, *, period: float = ..., # = τ -) -> _ArrayFloatObjT: ... +) -> ArrayT: ... 
@overload # known shape, float64 -def unwrap( - p: _Array[_ShapeT, _float64_co], +def unwrap[ShapeT: _Shape]( + p: _Array[ShapeT, _float64_co], discont: float | None = None, axis: int = -1, *, period: float = ..., # = τ -) -> _Array[_ShapeT, np.float64]: ... +) -> _Array[ShapeT, np.float64]: ... @overload # 1d float64-like def unwrap( p: _Seq1D[float | _float64_co], @@ -954,28 +937,28 @@ def unwrap( # @overload -def sort_complex(a: _ArrayComplexT) -> _ArrayComplexT: ... +def sort_complex[ArrayT: NDArray[np.complexfloating]](a: ArrayT) -> ArrayT: ... @overload # complex64, shape known -def sort_complex(a: _Array[_ShapeT, np.int8 | np.uint8 | np.int16 | np.uint16]) -> _Array[_ShapeT, np.complex64]: ... +def sort_complex[ShapeT: _Shape](a: _Array[ShapeT, np.int8 | np.uint8 | np.int16 | np.uint16]) -> _Array[ShapeT, np.complex64]: ... @overload # complex64, shape unknown def sort_complex(a: _ArrayLike[np.int8 | np.uint8 | np.int16 | np.uint16]) -> NDArray[np.complex64]: ... @overload # complex128, shape known -def sort_complex(a: _Array[_ShapeT, _SortsToComplex128]) -> _Array[_ShapeT, np.complex128]: ... +def sort_complex[ShapeT: _Shape](a: _Array[ShapeT, _SortsToComplex128]) -> _Array[ShapeT, np.complex128]: ... @overload # complex128, shape unknown def sort_complex(a: _ArrayLike[_SortsToComplex128]) -> NDArray[np.complex128]: ... @overload # clongdouble, shape known -def sort_complex(a: _Array[_ShapeT, np.longdouble]) -> _Array[_ShapeT, np.clongdouble]: ... +def sort_complex[ShapeT: _Shape](a: _Array[ShapeT, np.longdouble]) -> _Array[ShapeT, np.clongdouble]: ... @overload # clongdouble, shape unknown def sort_complex(a: _ArrayLike[np.longdouble]) -> NDArray[np.clongdouble]: ... # -def trim_zeros(filt: _TrimZerosSequence[_T], trim: L["f", "b", "fb", "bf"] = "fb", axis: _ShapeLike | None = None) -> _T: ... +def trim_zeros[T](filt: _TrimZerosSequence[T], trim: L["f", "b", "fb", "bf"] = "fb", axis: _ShapeLike | None = None) -> T: ... # NOTE: keep in sync with `corrcoef` @overload # ?d, known inexact scalar-type >=64 precision, y=. -def cov( - m: _ArrayLike[_AnyDoubleT], - y: _ArrayLike[_AnyDoubleT], +def cov[ScalarT: _InexactDouble]( + m: _ArrayLike[ScalarT], + y: _ArrayLike[ScalarT], rowvar: bool = True, bias: bool = False, ddof: SupportsIndex | SupportsInt | None = None, @@ -983,10 +966,10 @@ def cov( aweights: _ArrayLikeFloat_co | None = None, *, dtype: None = None, -) -> _Array2D[_AnyDoubleT]: ... +) -> _Array2D[ScalarT]: ... @overload # ?d, known inexact scalar-type >=64 precision, y=None -> 0d or 2d -def cov( - m: _ArrayNoD[_AnyDoubleT], +def cov[ScalarT: _InexactDouble]( + m: _ArrayNoD[ScalarT], y: None = None, rowvar: bool = True, bias: bool = False, @@ -994,11 +977,11 @@ def cov( fweights: _ArrayLikeInt_co | None = None, aweights: _ArrayLikeFloat_co | None = None, *, - dtype: _DTypeLike[_AnyDoubleT] | None = None, -) -> NDArray[_AnyDoubleT]: ... + dtype: _DTypeLike[ScalarT] | None = None, +) -> NDArray[ScalarT]: ... @overload # 1d, known inexact scalar-type >=64 precision, y=None -def cov( - m: _Array1D[_AnyDoubleT], +def cov[ScalarT: _InexactDouble]( + m: _Array1D[ScalarT], y: None = None, rowvar: bool = True, bias: bool = False, @@ -1006,11 +989,11 @@ def cov( fweights: _ArrayLikeInt_co | None = None, aweights: _ArrayLikeFloat_co | None = None, *, - dtype: _DTypeLike[_AnyDoubleT] | None = None, -) -> _Array0D[_AnyDoubleT]: ... + dtype: _DTypeLike[ScalarT] | None = None, +) -> _Array0D[ScalarT]: ... 
@overload # nd, known inexact scalar-type >=64 precision, y=None -> 0d or 2d -def cov( - m: _ArrayLike[_AnyDoubleT], +def cov[ScalarT: _InexactDouble]( + m: _ArrayLike[ScalarT], y: None = None, rowvar: bool = True, bias: bool = False, @@ -1018,8 +1001,8 @@ def cov( fweights: _ArrayLikeInt_co | None = None, aweights: _ArrayLikeFloat_co | None = None, *, - dtype: _DTypeLike[_AnyDoubleT] | None = None, -) -> NDArray[_AnyDoubleT]: ... + dtype: _DTypeLike[ScalarT] | None = None, +) -> NDArray[ScalarT]: ... @overload # nd, casts to float64, y= def cov( m: NDArray[np.float32 | np.float16 | _integer_co] | _Seq1D[float] | _Seq2D[float], @@ -1105,7 +1088,7 @@ def cov( dtype: _DTypeLike[np.complex128] | None = None, ) -> NDArray[np.complex128]: ... @overload # 1d complex-like, y=None, dtype= -def cov( +def cov[ScalarT: np.generic]( m: _Seq1D[_ComplexLike_co], y: None = None, rowvar: bool = True, @@ -1114,10 +1097,10 @@ def cov( fweights: _ArrayLikeInt_co | None = None, aweights: _ArrayLikeFloat_co | None = None, *, - dtype: _DTypeLike[_ScalarT], -) -> _Array0D[_ScalarT]: ... + dtype: _DTypeLike[ScalarT], +) -> _Array0D[ScalarT]: ... @overload # nd complex-like, y=, dtype= -def cov( +def cov[ScalarT: np.generic]( m: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, rowvar: bool = True, @@ -1126,10 +1109,10 @@ def cov( fweights: _ArrayLikeInt_co | None = None, aweights: _ArrayLikeFloat_co | None = None, *, - dtype: _DTypeLike[_ScalarT], -) -> _Array2D[_ScalarT]: ... + dtype: _DTypeLike[ScalarT], +) -> _Array2D[ScalarT]: ... @overload # nd complex-like, y=None, dtype= -> 0d or 2d -def cov( +def cov[ScalarT: np.generic]( m: _ArrayLikeComplex_co, y: None = None, rowvar: bool = True, @@ -1138,8 +1121,8 @@ def cov( fweights: _ArrayLikeInt_co | None = None, aweights: _ArrayLikeFloat_co | None = None, *, - dtype: _DTypeLike[_ScalarT], -) -> NDArray[_ScalarT]: ... + dtype: _DTypeLike[ScalarT], +) -> NDArray[ScalarT]: ... @overload # nd complex-like, y=, dtype=? def cov( m: _ArrayLikeComplex_co, @@ -1182,37 +1165,37 @@ def cov( # This differs from `cov`, which returns 0-D arrays instead of scalars in such cases. # NOTE: keep in sync with `cov` @overload # ?d, known inexact scalar-type >=64 precision, y=. -def corrcoef( - x: _ArrayLike[_AnyDoubleT], - y: _ArrayLike[_AnyDoubleT], +def corrcoef[ScalarT: _InexactDouble]( + x: _ArrayLike[ScalarT], + y: _ArrayLike[ScalarT], rowvar: bool = True, *, - dtype: _DTypeLike[_AnyDoubleT] | None = None, -) -> _Array2D[_AnyDoubleT]: ... + dtype: _DTypeLike[ScalarT] | None = None, +) -> _Array2D[ScalarT]: ... @overload # ?d, known inexact scalar-type >=64 precision, y=None -def corrcoef( - x: _ArrayNoD[_AnyDoubleT], +def corrcoef[ScalarT: _InexactDouble]( + x: _ArrayNoD[ScalarT], y: None = None, rowvar: bool = True, *, - dtype: _DTypeLike[_AnyDoubleT] | None = None, -) -> _Array2D[_AnyDoubleT] | _AnyDoubleT: ... + dtype: _DTypeLike[ScalarT] | None = None, +) -> _Array2D[ScalarT] | ScalarT: ... @overload # 1d, known inexact scalar-type >=64 precision, y=None -def corrcoef( - x: _Array1D[_AnyDoubleT], +def corrcoef[ScalarT: _InexactDouble]( + x: _Array1D[ScalarT], y: None = None, rowvar: bool = True, *, - dtype: _DTypeLike[_AnyDoubleT] | None = None, -) -> _AnyDoubleT: ... + dtype: _DTypeLike[ScalarT] | None = None, +) -> ScalarT: ... 
@overload # nd, known inexact scalar-type >=64 precision, y=None -def corrcoef( - x: _ArrayLike[_AnyDoubleT], +def corrcoef[ScalarT: _InexactDouble]( + x: _ArrayLike[ScalarT], y: None = None, rowvar: bool = True, *, - dtype: _DTypeLike[_AnyDoubleT] | None = None, -) -> _Array2D[_AnyDoubleT] | _AnyDoubleT: ... + dtype: _DTypeLike[ScalarT] | None = None, +) -> _Array2D[ScalarT] | ScalarT: ... @overload # nd, casts to float64, y= def corrcoef( x: NDArray[np.float32 | np.float16 | _integer_co] | _Seq1D[float] | _Seq2D[float], @@ -1270,29 +1253,29 @@ def corrcoef( dtype: _DTypeLike[np.complex128] | None = None, ) -> _Array2D[np.complex128] | np.complex128: ... @overload # 1d complex-like, y=None, dtype= -def corrcoef( +def corrcoef[ScalarT: np.generic]( x: _Seq1D[_ComplexLike_co], y: None = None, rowvar: bool = True, *, - dtype: _DTypeLike[_ScalarT], -) -> _ScalarT: ... + dtype: _DTypeLike[ScalarT], +) -> ScalarT: ... @overload # nd complex-like, y=, dtype= -def corrcoef( +def corrcoef[ScalarT: np.generic]( x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, rowvar: bool = True, *, - dtype: _DTypeLike[_ScalarT], -) -> _Array2D[_ScalarT]: ... + dtype: _DTypeLike[ScalarT], +) -> _Array2D[ScalarT]: ... @overload # nd complex-like, y=None, dtype= -def corrcoef( +def corrcoef[ScalarT: np.generic]( x: _ArrayLikeComplex_co, y: None = None, rowvar: bool = True, *, - dtype: _DTypeLike[_ScalarT], -) -> _Array2D[_ScalarT] | _ScalarT: ... + dtype: _DTypeLike[ScalarT], +) -> _Array2D[ScalarT] | ScalarT: ... @overload # nd complex-like, y=, dtype=? def corrcoef( x: _ArrayLikeComplex_co, @@ -1327,7 +1310,7 @@ def kaiser(M: _FloatLike_co, beta: _FloatLike_co) -> _Array1D[np.float64]: ... # @overload -def i0(x: _Array[_ShapeT, np.floating | np.integer]) -> _Array[_ShapeT, np.float64]: ... +def i0[ShapeT: _Shape](x: _Array[ShapeT, np.floating | np.integer]) -> _Array[ShapeT, np.float64]: ... @overload def i0(x: _FloatLike_co) -> _Array0D[np.float64]: ... @overload @@ -1341,15 +1324,15 @@ def i0(x: _ArrayLikeFloat_co) -> NDArray[np.float64]: ... # @overload -def sinc(x: _InexactT) -> _InexactT: ... +def sinc[ScalarT: np.inexact](x: ScalarT) -> ScalarT: ... @overload def sinc(x: float | _float64_co) -> np.float64: ... @overload def sinc(x: complex) -> np.complex128 | Any: ... @overload -def sinc(x: _ArrayInexactT) -> _ArrayInexactT: ... +def sinc[ArrayT: NDArray[np.inexact]](x: ArrayT) -> ArrayT: ... @overload -def sinc(x: _Array[_ShapeT, _integer_co]) -> _Array[_ShapeT, np.float64]: ... +def sinc[ShapeT: _Shape](x: _Array[ShapeT, _integer_co]) -> _Array[ShapeT, np.float64]: ... @overload def sinc(x: _Seq1D[float]) -> _Array1D[np.float64]: ... @overload @@ -1370,13 +1353,13 @@ def sinc(x: _ArrayLikeComplex_co) -> np.ndarray | Any: ... # NOTE: We assume that `axis` is only provided for >=1-D arrays because for <1-D arrays # it has no effect, and would complicate the overloads significantly. @overload # known scalar-type, keepdims=False (default) -def median( - a: _ArrayLike[_InexactTimeT], +def median[ScalarT: np.inexact | np.timedelta64]( + a: _ArrayLike[ScalarT], axis: None = None, out: None = None, overwrite_input: bool = False, keepdims: L[False] = False, -) -> _InexactTimeT: ... +) -> ScalarT: ... @overload # float array-like, keepdims=False (default) def median( a: _ArrayLikeInt_co | _SeqND[float] | float, @@ -1402,31 +1385,31 @@ def median( keepdims: L[False] = False, ) -> np.complex128 | Any: ... 
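# Editor's note (illustrative sketch, not part of the patch): how the
# `median` overloads above resolve statically, assuming `typing.assert_type`:

from typing import assert_type
import numpy as np
import numpy.typing as npt

a: npt.NDArray[np.float32] = np.ones((4, 4), dtype=np.float32)
assert_type(np.median(a), np.float32)          # known inexact scalar-type, keepdims=False
assert_type(np.median([1, 2, 3]), np.float64)  # integer array-like promotes to float64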
@overload # known array-type, keepdims=True -def median( - a: _ArrayNumericT, +def median[ArrayT: NDArray[_ScalarNumeric]]( + a: ArrayT, axis: _ShapeLike | None = None, out: None = None, overwrite_input: bool = False, *, keepdims: L[True], -) -> _ArrayNumericT: ... +) -> ArrayT: ... @overload # known scalar-type, keepdims=True -def median( - a: _ArrayLike[_ScalarNumericT], +def median[ScalarT: _ScalarNumeric]( + a: _ArrayLike[ScalarT], axis: _ShapeLike | None = None, out: None = None, overwrite_input: bool = False, *, keepdims: L[True], -) -> NDArray[_ScalarNumericT]: ... +) -> NDArray[ScalarT]: ... @overload # known scalar-type, axis= -def median( - a: _ArrayLike[_ScalarNumericT], +def median[ScalarT: _ScalarNumeric]( + a: _ArrayLike[ScalarT], axis: _ShapeLike, out: None = None, overwrite_input: bool = False, keepdims: bool = False, -) -> NDArray[_ScalarNumericT]: ... +) -> NDArray[ScalarT]: ... @overload # float array-like, keepdims=True def median( a: _SeqND[float], @@ -1462,22 +1445,22 @@ def median( keepdims: bool = False, ) -> NDArray[np.complex128]: ... @overload # out= (keyword) -def median( +def median[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, overwrite_input: bool = False, keepdims: bool = False, -) -> _ArrayT: ... +) -> ArrayT: ... @overload # out= (positional) -def median( +def median[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, overwrite_input: bool = False, keepdims: bool = False, -) -> _ArrayT: ... +) -> ArrayT: ... @overload # fallback def median( a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], @@ -1489,8 +1472,8 @@ def median( # NOTE: keep in sync with `quantile` @overload # inexact, scalar, axis=None -def percentile( - a: _ArrayLike[_InexactDateTimeT], +def percentile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], q: _FloatLike_co, axis: None = None, out: None = None, @@ -1499,10 +1482,10 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _InexactDateTimeT: ... +) -> ScalarT: ... @overload # inexact, scalar, axis= -def percentile( - a: _ArrayLike[_InexactDateTimeT], +def percentile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], q: _FloatLike_co, axis: _ShapeLike, out: None = None, @@ -1511,10 +1494,10 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[_InexactDateTimeT]: ... +) -> NDArray[ScalarT]: ... @overload # inexact, scalar, keepdims=True -def percentile( - a: _ArrayLike[_InexactDateTimeT], +def percentile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], q: _FloatLike_co, axis: _ShapeLike | None = None, out: None = None, @@ -1523,11 +1506,11 @@ def percentile( *, keepdims: L[True], weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[_InexactDateTimeT]: ... +) -> NDArray[ScalarT]: ... 
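# Editor's note (illustrative sketch, not part of the patch): for a scalar
# `q` the overloads above return a scalar of the input's own scalar type,
# while `keepdims=True` switches to an array result:

from typing import assert_type
import numpy as np
import numpy.typing as npt

a: npt.NDArray[np.float32] = np.ones(10, dtype=np.float32)
assert_type(np.percentile(a, 50.0), np.float32)
assert_type(np.percentile(a, 50.0, keepdims=True), npt.NDArray[np.float32])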
@overload # inexact, array, axis=None -def percentile( - a: _ArrayLike[_InexactDateTimeT], - q: _Array[_ShapeT, _floating_co], +def percentile[ScalarT: np.inexact | np.timedelta64 | np.datetime64, ShapeT: _Shape]( + a: _ArrayLike[ScalarT], + q: _Array[ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, @@ -1535,10 +1518,10 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _Array[_ShapeT, _InexactDateTimeT]: ... +) -> _Array[ShapeT, ScalarT]: ... @overload # inexact, array-like -def percentile( - a: _ArrayLike[_InexactDateTimeT], +def percentile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], q: NDArray[_floating_co] | _SeqND[_FloatLike_co], axis: _ShapeLike | None = None, out: None = None, @@ -1547,7 +1530,7 @@ def percentile( keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[_InexactDateTimeT]: ... +) -> NDArray[ScalarT]: ... @overload # float, scalar, axis=None def percentile( a: _SeqND[float] | _ArrayLikeInt_co, @@ -1585,9 +1568,9 @@ def percentile( weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[np.float64]: ... @overload # float, array, axis=None -def percentile( +def percentile[ShapeT: _Shape]( a: _SeqND[float] | _ArrayLikeInt_co, - q: _Array[_ShapeT, _floating_co], + q: _Array[ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, @@ -1595,7 +1578,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _Array[_ShapeT, np.float64]: ... +) -> _Array[ShapeT, np.float64]: ... @overload # float, array-like def percentile( a: _SeqND[float] | _ArrayLikeInt_co, @@ -1645,9 +1628,9 @@ def percentile( weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[np.complex128]: ... @overload # complex, array, axis=None -def percentile( +def percentile[ShapeT: _Shape]( a: _ListSeqND[complex], - q: _Array[_ShapeT, _floating_co], + q: _Array[ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, @@ -1655,7 +1638,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _Array[_ShapeT, np.complex128]: ... +) -> _Array[ShapeT, np.complex128]: ... @overload # complex, array-like def percentile( a: _ListSeqND[complex], @@ -1705,9 +1688,9 @@ def percentile( weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[np.object_]: ... @overload # object_, array, axis=None -def percentile( +def percentile[ShapeT: _Shape]( a: _ArrayLikeObject_co, - q: _Array[_ShapeT, _floating_co], + q: _Array[ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, @@ -1715,7 +1698,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _Array[_ShapeT, np.object_]: ... +) -> _Array[ShapeT, np.object_]: ... @overload # object_, array-like def percentile( a: _ArrayLikeObject_co, @@ -1729,29 +1712,29 @@ def percentile( weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[np.object_]: ... @overload # out= (keyword) -def percentile( +def percentile[ArrayT: np.ndarray]( a: ArrayLike, q: _ArrayLikeFloat_co, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, overwrite_input: bool = False, method: _InterpolationMethod = "linear", keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... 
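# Editor's note (illustrative sketch, not part of the patch): with `out=`
# given, `ArrayT` binds to the type of `out`, which is returned as-is:

from typing import assert_type
import numpy as np
import numpy.typing as npt

out: npt.NDArray[np.float64] = np.empty(2)
assert_type(np.percentile([1.0, 2.0, 3.0], [25.0, 75.0], out=out), npt.NDArray[np.float64])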
@overload # out= (positional) -def percentile( +def percentile[ArrayT: np.ndarray]( a: ArrayLike, q: _ArrayLikeFloat_co, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, overwrite_input: bool = False, method: _InterpolationMethod = "linear", keepdims: bool = False, weights: _ArrayLikeFloat_co | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... @overload # fallback def percentile( a: _ArrayLikeNumber_co | _ArrayLikeObject_co, @@ -1767,8 +1750,8 @@ def percentile( # NOTE: keep in sync with `percentile` @overload # inexact, scalar, axis=None -def quantile( - a: _ArrayLike[_InexactDateTimeT], +def quantile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], q: _FloatLike_co, axis: None = None, out: None = None, @@ -1777,10 +1760,10 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _InexactDateTimeT: ... +) -> ScalarT: ... @overload # inexact, scalar, axis= -def quantile( - a: _ArrayLike[_InexactDateTimeT], +def quantile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], q: _FloatLike_co, axis: _ShapeLike, out: None = None, @@ -1789,10 +1772,10 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[_InexactDateTimeT]: ... +) -> NDArray[ScalarT]: ... @overload # inexact, scalar, keepdims=True -def quantile( - a: _ArrayLike[_InexactDateTimeT], +def quantile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], q: _FloatLike_co, axis: _ShapeLike | None = None, out: None = None, @@ -1801,11 +1784,11 @@ def quantile( *, keepdims: L[True], weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[_InexactDateTimeT]: ... +) -> NDArray[ScalarT]: ... @overload # inexact, array, axis=None -def quantile( - a: _ArrayLike[_InexactDateTimeT], - q: _Array[_ShapeT, _floating_co], +def quantile[ScalarT: np.inexact | np.timedelta64 | np.datetime64, ShapeT: _Shape]( + a: _ArrayLike[ScalarT], + q: _Array[ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, @@ -1813,10 +1796,10 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _Array[_ShapeT, _InexactDateTimeT]: ... +) -> _Array[ShapeT, ScalarT]: ... @overload # inexact, array-like -def quantile( - a: _ArrayLike[_InexactDateTimeT], +def quantile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], q: NDArray[_floating_co] | _SeqND[_FloatLike_co], axis: _ShapeLike | None = None, out: None = None, @@ -1825,7 +1808,7 @@ def quantile( keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[_InexactDateTimeT]: ... +) -> NDArray[ScalarT]: ... @overload # float, scalar, axis=None def quantile( a: _SeqND[float] | _ArrayLikeInt_co, @@ -1863,9 +1846,9 @@ def quantile( weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[np.float64]: ... @overload # float, array, axis=None -def quantile( +def quantile[ShapeT: _Shape]( a: _SeqND[float] | _ArrayLikeInt_co, - q: _Array[_ShapeT, _floating_co], + q: _Array[ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, @@ -1873,7 +1856,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _Array[_ShapeT, np.float64]: ... +) -> _Array[ShapeT, np.float64]: ... 
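# Editor's note (illustrative sketch, not part of the patch): an ndarray of
# quantiles propagates its own shape type into the result via `ShapeT`:

from typing import assert_type
import numpy as np
import numpy.typing as npt

q: npt.NDArray[np.float64] = np.array([0.25, 0.75])
assert_type(np.quantile([1.0, 2.0, 3.0], q), npt.NDArray[np.float64])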
@overload # float, array-like def quantile( a: _SeqND[float] | _ArrayLikeInt_co, @@ -1923,9 +1906,9 @@ def quantile( weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[np.complex128]: ... @overload # complex, array, axis=None -def quantile( +def quantile[ShapeT: _Shape]( a: _ListSeqND[complex], - q: _Array[_ShapeT, _floating_co], + q: _Array[ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, @@ -1933,7 +1916,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _Array[_ShapeT, np.complex128]: ... +) -> _Array[ShapeT, np.complex128]: ... @overload # complex, array-like def quantile( a: _ListSeqND[complex], @@ -1983,9 +1966,9 @@ def quantile( weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[np.object_]: ... @overload # object_, array, axis=None -def quantile( +def quantile[ShapeT: _Shape]( a: _ArrayLikeObject_co, - q: _Array[_ShapeT, _floating_co], + q: _Array[ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, @@ -1993,7 +1976,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _Array[_ShapeT, np.object_]: ... +) -> _Array[ShapeT, np.object_]: ... @overload # object_, array-like def quantile( a: _ArrayLikeObject_co, @@ -2007,29 +1990,29 @@ def quantile( weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[np.object_]: ... @overload # out= (keyword) -def quantile( +def quantile[ArrayT: np.ndarray]( a: ArrayLike, q: _ArrayLikeFloat_co, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, overwrite_input: bool = False, method: _InterpolationMethod = "linear", keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... @overload # out= (positional) -def quantile( +def quantile[ArrayT: np.ndarray]( a: ArrayLike, q: _ArrayLikeFloat_co, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, overwrite_input: bool = False, method: _InterpolationMethod = "linear", keepdims: bool = False, weights: _ArrayLikeFloat_co | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... @overload # fallback def quantile( a: _ArrayLikeNumber_co | _ArrayLikeObject_co, @@ -2045,12 +2028,12 @@ def quantile( # @overload # ?d, known inexact/timedelta64 scalar-type -def trapezoid( - y: _ArrayNoD[_InexactTimeT], - x: _ArrayLike[_InexactTimeT] | _ArrayLikeFloat_co | None = None, +def trapezoid[ScalarT: np.inexact | np.timedelta64]( + y: _ArrayNoD[ScalarT], + x: _ArrayLike[ScalarT] | _ArrayLikeFloat_co | None = None, dx: float = 1.0, axis: SupportsIndex = -1, -) -> NDArray[_InexactTimeT] | _InexactTimeT: ... +) -> NDArray[ScalarT] | ScalarT: ... @overload # ?d, casts to float64 def trapezoid( y: _ArrayNoD[_integer_co], @@ -2059,12 +2042,12 @@ def trapezoid( axis: SupportsIndex = -1, ) -> NDArray[np.float64] | np.float64: ... @overload # strict 1d, known inexact/timedelta64 scalar-type -def trapezoid( - y: _Array1D[_InexactTimeT], - x: _Array1D[_InexactTimeT] | _Seq1D[float] | None = None, +def trapezoid[ScalarT: np.inexact | np.timedelta64]( + y: _Array1D[ScalarT], + x: _Array1D[ScalarT] | _Seq1D[float] | None = None, dx: float = 1.0, axis: SupportsIndex = -1, -) -> _InexactTimeT: ... +) -> ScalarT: ... @overload # strict 1d, casts to float64 def trapezoid( y: _Array1D[_float64_co] | _Seq1D[float], @@ -2087,12 +2070,12 @@ def trapezoid( axis: SupportsIndex = -1, ) -> np.complex128: ... 
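# Editor's note (illustrative sketch, not part of the patch): integrating a
# strictly 1-D array of a known inexact dtype yields a scalar of that dtype;
# the cast below only pins down the static shape type:

from typing import assert_type, cast
import numpy as np

y1d = cast("np.ndarray[tuple[int], np.dtype[np.float32]]", np.linspace(0, 1, 5, dtype=np.float32))
assert_type(np.trapezoid(y1d), np.float32)  # strict 1d, known inexact scalar-type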
@overload # strict 2d, known inexact/timedelta64 scalar-type -def trapezoid( - y: _Array2D[_InexactTimeT], - x: _ArrayMax2D[_InexactTimeT] | _Seq2D[float] | _Seq1D[float] | None = None, +def trapezoid[ScalarT: np.inexact | np.timedelta64]( + y: _Array2D[ScalarT], + x: _ArrayMax2D[ScalarT] | _Seq2D[float] | _Seq1D[float] | None = None, dx: float = 1.0, axis: SupportsIndex = -1, -) -> _InexactTimeT: ... +) -> ScalarT: ... @overload # strict 2d, casts to float64 def trapezoid( y: _Array2D[_float64_co] | _Seq2D[float], @@ -2115,12 +2098,12 @@ def trapezoid( axis: SupportsIndex = -1, ) -> np.complex128: ... @overload -def trapezoid( - y: _ArrayLike[_InexactTimeT], - x: _ArrayLike[_InexactTimeT] | _ArrayLikeInt_co | None = None, +def trapezoid[ScalarT: np.inexact | np.timedelta64]( + y: _ArrayLike[ScalarT], + x: _ArrayLike[ScalarT] | _ArrayLikeInt_co | None = None, dx: complex = 1.0, axis: SupportsIndex = -1, -) -> NDArray[_InexactTimeT] | _InexactTimeT: ... +) -> NDArray[ScalarT] | ScalarT: ... @overload def trapezoid( y: _ArrayLike[_float64_co], @@ -2150,12 +2133,12 @@ def trapezoid( axis: SupportsIndex = -1, ) -> NDArray[np.object_] | Any: ... @overload -def trapezoid( - y: _Seq1D[_SupportsRMulFloat[_T]], - x: _Seq1D[_SupportsRMulFloat[_T] | _T] | None = None, +def trapezoid[T]( + y: _Seq1D[_SupportsRMulFloat[T]], + x: _Seq1D[_SupportsRMulFloat[T] | T] | None = None, dx: complex = 1.0, axis: SupportsIndex = -1, -) -> _T: ... +) -> T: ... @overload def trapezoid( y: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], @@ -2168,14 +2151,14 @@ def trapezoid( @overload # 0d def meshgrid(*, copy: bool = True, sparse: bool = False, indexing: _Indexing = "xy") -> tuple[()]: ... @overload # 1d, known scalar-type -def meshgrid( - x1: _ArrayLike[_ScalarT], +def meshgrid[ScalarT: np.generic]( + x1: _ArrayLike[ScalarT], /, *, copy: bool = True, sparse: bool = False, indexing: _Indexing = "xy", -) -> _Mesh1[_ScalarT]: ... +) -> _Mesh1[ScalarT]: ... @overload # 1d, unknown scalar-type def meshgrid( x1: ArrayLike, @@ -2186,35 +2169,35 @@ def meshgrid( indexing: _Indexing = "xy", ) -> _Mesh1[Any]: ... @overload # 2d, known scalar-types -def meshgrid( - x1: _ArrayLike[_ScalarT], - x2: _ArrayLike[_ScalarT1], +def meshgrid[ScalarT1: np.generic, ScalarT2: np.generic]( + x1: _ArrayLike[ScalarT1], + x2: _ArrayLike[ScalarT2], /, *, copy: bool = True, sparse: bool = False, indexing: _Indexing = "xy", -) -> _Mesh2[_ScalarT, _ScalarT1]: ... +) -> _Mesh2[ScalarT1, ScalarT2]: ... @overload # 2d, known/unknown scalar-types -def meshgrid( - x1: _ArrayLike[_ScalarT], +def meshgrid[ScalarT: np.generic]( + x1: _ArrayLike[ScalarT], x2: ArrayLike, /, *, copy: bool = True, sparse: bool = False, indexing: _Indexing = "xy", -) -> _Mesh2[_ScalarT, Any]: ... +) -> _Mesh2[ScalarT, Any]: ... @overload # 2d, unknown/known scalar-types -def meshgrid( +def meshgrid[ScalarT: np.generic]( x1: ArrayLike, - x2: _ArrayLike[_ScalarT], + x2: _ArrayLike[ScalarT], /, *, copy: bool = True, sparse: bool = False, indexing: _Indexing = "xy", -) -> _Mesh2[Any, _ScalarT]: ... +) -> _Mesh2[Any, ScalarT]: ... @overload # 2d, unknown scalar-types def meshgrid( x1: ArrayLike, @@ -2226,16 +2209,16 @@ def meshgrid( indexing: _Indexing = "xy", ) -> _Mesh2[Any, Any]: ... 
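# Editor's note (illustrative sketch, not part of the patch): two typed 1-D
# inputs produce a pair of 2-D grids, each carrying its input's dtype:

from typing import assert_type
import numpy as np

xg, yg = np.meshgrid(np.zeros(2, dtype=np.float32), np.zeros(3, dtype=np.int64))
assert_type(xg, np.ndarray[tuple[int, int], np.dtype[np.float32]])
assert_type(yg, np.ndarray[tuple[int, int], np.dtype[np.int64]])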
@overload # 3d, known scalar-types -def meshgrid( - x1: _ArrayLike[_ScalarT], - x2: _ArrayLike[_ScalarT1], - x3: _ArrayLike[_ScalarT2], +def meshgrid[ScalarT1: np.generic, ScalarT2: np.generic, ScalarT3: np.generic]( + x1: _ArrayLike[ScalarT1], + x2: _ArrayLike[ScalarT2], + x3: _ArrayLike[ScalarT3], /, *, copy: bool = True, sparse: bool = False, indexing: _Indexing = "xy", -) -> _Mesh3[_ScalarT, _ScalarT1, _ScalarT2]: ... +) -> _Mesh3[ScalarT1, ScalarT2, ScalarT3]: ... @overload # 3d, unknown scalar-types def meshgrid( x1: ArrayLike, @@ -2248,12 +2231,12 @@ def meshgrid( indexing: _Indexing = "xy", ) -> _Mesh3[Any, Any, Any]: ... @overload # ?d, known scalar-types -def meshgrid( - *xi: _ArrayLike[_ScalarT], +def meshgrid[ScalarT: np.generic]( + *xi: _ArrayLike[ScalarT], copy: bool = True, sparse: bool = False, indexing: _Indexing = "xy", -) -> tuple[NDArray[_ScalarT], ...]: ... +) -> tuple[NDArray[ScalarT], ...]: ... @overload # ?d, unknown scalar-types def meshgrid( *xi: ArrayLike, @@ -2267,11 +2250,11 @@ def place(arr: np.ndarray, mask: ConvertibleToInt | Sequence[ConvertibleToInt], # keep in sync with `insert` @overload # known scalar-type, axis=None (default) -def delete(arr: _ArrayLike[_ScalarT], obj: _IndexLike, axis: None = None) -> _Array1D[_ScalarT]: ... +def delete[ScalarT: np.generic](arr: _ArrayLike[ScalarT], obj: _IndexLike, axis: None = None) -> _Array1D[ScalarT]: ... @overload # known array-type, axis specified -def delete(arr: _ArrayT, obj: _IndexLike, axis: SupportsIndex) -> _ArrayT: ... +def delete[ArrayT: np.ndarray](arr: ArrayT, obj: _IndexLike, axis: SupportsIndex) -> ArrayT: ... @overload # known scalar-type, axis specified -def delete(arr: _ArrayLike[_ScalarT], obj: _IndexLike, axis: SupportsIndex) -> NDArray[_ScalarT]: ... +def delete[ScalarT: np.generic](arr: _ArrayLike[ScalarT], obj: _IndexLike, axis: SupportsIndex) -> NDArray[ScalarT]: ... @overload # known scalar-type, axis=None (default) def delete(arr: ArrayLike, obj: _IndexLike, axis: None = None) -> _Array1D[Any]: ... @overload # unknown scalar-type, axis specified @@ -2279,11 +2262,11 @@ def delete(arr: ArrayLike, obj: _IndexLike, axis: SupportsIndex) -> NDArray[Any] # keep in sync with `delete` @overload # known scalar-type, axis=None (default) -def insert(arr: _ArrayLike[_ScalarT], obj: _IndexLike, values: ArrayLike, axis: None = None) -> _Array1D[_ScalarT]: ... +def insert[ScalarT: np.generic](arr: _ArrayLike[ScalarT], obj: _IndexLike, values: ArrayLike, axis: None = None) -> _Array1D[ScalarT]: ... @overload # known array-type, axis specified -def insert(arr: _ArrayT, obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> _ArrayT: ... +def insert[ArrayT: np.ndarray](arr: ArrayT, obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> ArrayT: ... @overload # known scalar-type, axis specified -def insert(arr: _ArrayLike[_ScalarT], obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> NDArray[_ScalarT]: ... +def insert[ScalarT: np.generic](arr: _ArrayLike[ScalarT], obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> NDArray[ScalarT]: ... @overload # known scalar-type, axis=None (default) def insert(arr: ArrayLike, obj: _IndexLike, values: ArrayLike, axis: None = None) -> _Array1D[Any]: ... @overload # unknown scalar-type, axis specified @@ -2291,27 +2274,27 @@ def insert(arr: ArrayLike, obj: _IndexLike, values: ArrayLike, axis: SupportsInd # @overload # known array type, axis specified -def append(arr: _ArrayT, values: _ArrayT, axis: SupportsIndex) -> _ArrayT: ... 
+def append[ArrayT: np.ndarray](arr: ArrayT, values: ArrayT, axis: SupportsIndex) -> ArrayT: ... @overload # 1d, known scalar type, axis specified -def append(arr: _Seq1D[_ScalarT], values: _Seq1D[_ScalarT], axis: SupportsIndex) -> _Array1D[_ScalarT]: ... +def append[ScalarT: np.generic](arr: _Seq1D[ScalarT], values: _Seq1D[ScalarT], axis: SupportsIndex) -> _Array1D[ScalarT]: ... @overload # 2d, known scalar type, axis specified -def append(arr: _Seq2D[_ScalarT], values: _Seq2D[_ScalarT], axis: SupportsIndex) -> _Array2D[_ScalarT]: ... +def append[ScalarT: np.generic](arr: _Seq2D[ScalarT], values: _Seq2D[ScalarT], axis: SupportsIndex) -> _Array2D[ScalarT]: ... @overload # 3d, known scalar type, axis specified -def append(arr: _Seq3D[_ScalarT], values: _Seq3D[_ScalarT], axis: SupportsIndex) -> _Array3D[_ScalarT]: ... +def append[ScalarT: np.generic](arr: _Seq3D[ScalarT], values: _Seq3D[ScalarT], axis: SupportsIndex) -> _Array3D[ScalarT]: ... @overload # ?d, known scalar type, axis specified -def append(arr: _SeqND[_ScalarT], values: _SeqND[_ScalarT], axis: SupportsIndex) -> NDArray[_ScalarT]: ... +def append[ScalarT: np.generic](arr: _SeqND[ScalarT], values: _SeqND[ScalarT], axis: SupportsIndex) -> NDArray[ScalarT]: ... @overload # ?d, unknown scalar type, axis specified def append(arr: np.ndarray | _SeqND[_ScalarLike_co], values: _SeqND[_ScalarLike_co], axis: SupportsIndex) -> np.ndarray: ... @overload # known scalar type, axis=None -def append(arr: _ArrayLike[_ScalarT], values: _ArrayLike[_ScalarT], axis: None = None) -> _Array1D[_ScalarT]: ... +def append[ScalarT: np.generic](arr: _ArrayLike[ScalarT], values: _ArrayLike[ScalarT], axis: None = None) -> _Array1D[ScalarT]: ... @overload # unknown scalar type, axis=None def append(arr: ArrayLike, values: ArrayLike, axis: None = None) -> _Array1D[Any]: ... # @overload -def digitize( - x: _Array[_ShapeT, np.floating | np.integer], bins: _ArrayLikeFloat_co, right: bool = False -) -> _Array[_ShapeT, np.int_]: ... +def digitize[ShapeT: _Shape]( + x: _Array[ShapeT, np.floating | np.integer], bins: _ArrayLikeFloat_co, right: bool = False +) -> _Array[ShapeT, np.int_]: ... @overload def digitize(x: _FloatLike_co, bins: _ArrayLikeFloat_co, right: bool = False) -> np.int_: ... 
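# Editor's note (illustrative sketch, not part of the patch): a scalar input
# to `digitize` hits the `_FloatLike_co` overload above and yields a scalar
# bin index, while array input keeps its shape with an `np.int_` dtype:

from typing import assert_type
import numpy as np

assert_type(np.digitize(0.5, [0.0, 1.0]), np.int_)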
@overload diff --git a/numpy/lib/_histograms_impl.pyi b/numpy/lib/_histograms_impl.pyi index 72a31dcedc1f..0c4c673ef063 100644 --- a/numpy/lib/_histograms_impl.pyi +++ b/numpy/lib/_histograms_impl.pyi @@ -1,11 +1,11 @@ from collections.abc import Sequence -from typing import Any, Literal as L, SupportsIndex, TypeAlias +from typing import Any, Literal as L, SupportsIndex from numpy._typing import ArrayLike, NDArray __all__ = ["histogram", "histogramdd", "histogram_bin_edges"] -_BinKind: TypeAlias = L[ +type _BinKind = L[ "stone", "auto", "doane", diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index ff316f566993..97930196ecfd 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -48,14 +48,8 @@ __all__ = [ # noqa: RUF022 ### -_T = TypeVar("_T") -_TupleT = TypeVar("_TupleT", bound=tuple[Any, ...]) -_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) -_ScalarT = TypeVar("_ScalarT", bound=np.generic) _ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, default=Any, covariant=True) _BoolT_co = TypeVar("_BoolT_co", bound=bool, default=bool, covariant=True) - _AxisT_co = TypeVar("_AxisT_co", bound=int, default=L[0], covariant=True) _MatrixT_co = TypeVar("_MatrixT_co", bound=bool, default=L[False], covariant=True) _NDMinT_co = TypeVar("_NDMinT_co", bound=int, default=L[1], covariant=True) @@ -63,9 +57,12 @@ _Trans1DT_co = TypeVar("_Trans1DT_co", bound=int, default=L[-1], covariant=True) ### -class ndenumerate(Generic[_ScalarT_co]): +class ndenumerate(Generic[_ScalarT_co]): # noqa: UP046 @overload - def __init__(self: ndenumerate[_ScalarT], arr: _FiniteNestedSequence[_SupportsArray[np.dtype[_ScalarT]]]) -> None: ... + def __init__[ScalarT: np.generic]( + self: ndenumerate[ScalarT], + arr: _FiniteNestedSequence[_SupportsArray[np.dtype[ScalarT]]], + ) -> None: ... @overload def __init__(self: ndenumerate[np.str_], arr: str | _NestedSequence[str]) -> None: ... @overload @@ -156,26 +153,26 @@ class AxisConcatenator(Generic[_AxisT_co, _MatrixT_co, _NDMinT_co, _Trans1DT_co] # Keep in sync with _core.multiarray.concatenate @staticmethod @overload - def concatenate( - arrays: _ArrayLike[_ScalarT], + def concatenate[ScalarT: np.generic]( + arrays: _ArrayLike[ScalarT], /, axis: SupportsIndex | None = 0, out: None = None, *, dtype: None = None, casting: _CastingKind | None = "same_kind", - ) -> NDArray[_ScalarT]: ... + ) -> NDArray[ScalarT]: ... @staticmethod @overload - def concatenate( + def concatenate[ScalarT: np.generic]( arrays: SupportsLenAndGetItem[ArrayLike], /, axis: SupportsIndex | None = 0, out: None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], casting: _CastingKind | None = "same_kind", - ) -> NDArray[_ScalarT]: ... + ) -> NDArray[ScalarT]: ... @staticmethod @overload def concatenate( @@ -189,26 +186,26 @@ class AxisConcatenator(Generic[_AxisT_co, _MatrixT_co, _NDMinT_co, _Trans1DT_co] ) -> NDArray[Incomplete]: ... @staticmethod @overload - def concatenate( + def concatenate[OutT: np.ndarray]( arrays: SupportsLenAndGetItem[ArrayLike], /, axis: SupportsIndex | None = 0, *, - out: _ArrayT, + out: OutT, dtype: DTypeLike | None = None, casting: _CastingKind | None = "same_kind", - ) -> _ArrayT: ... + ) -> OutT: ... 
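# Editor's note (illustrative sketch, not part of the patch): these
# staticmethod overloads mirror `np.concatenate`, which resolves the same way:

from typing import assert_type
import numpy as np
import numpy.typing as npt

a = np.zeros(2, dtype=np.float32)
assert_type(np.concatenate([a, a]), npt.NDArray[np.float32])                    # dtype from inputs
assert_type(np.concatenate([a, a], dtype=np.float64), npt.NDArray[np.float64])  # dtype= overrides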
@staticmethod @overload - def concatenate( + def concatenate[OutT: np.ndarray]( arrays: SupportsLenAndGetItem[ArrayLike], /, axis: SupportsIndex | None, - out: _ArrayT, + out: OutT, *, dtype: DTypeLike | None = None, casting: _CastingKind | None = "same_kind", - ) -> _ArrayT: ... + ) -> OutT: ... @final class RClass(AxisConcatenator[L[0], L[False], L[1], L[-1]]): @@ -228,14 +225,14 @@ class IndexExpression(Generic[_BoolT_co]): maketuple: _BoolT_co def __init__(self, maketuple: _BoolT_co) -> None: ... @overload - def __getitem__(self, item: _TupleT) -> _TupleT: ... + def __getitem__[TupleT: tuple[Any, ...]](self, item: TupleT) -> TupleT: ... @overload - def __getitem__(self: IndexExpression[L[True]], item: _T) -> tuple[_T]: ... + def __getitem__[T](self: IndexExpression[L[True]], item: T) -> tuple[T]: ... @overload - def __getitem__(self: IndexExpression[L[False]], item: _T) -> _T: ... + def __getitem__[T](self: IndexExpression[L[False]], item: T) -> T: ... @overload -def ix_(*args: _FiniteNestedSequence[_HasDType[_DTypeT]]) -> tuple[np.ndarray[_AnyShape, _DTypeT], ...]: ... +def ix_[DTypeT: np.dtype](*args: _FiniteNestedSequence[_HasDType[DTypeT]]) -> tuple[np.ndarray[_AnyShape, DTypeT], ...]: ... @overload def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[np.str_], ...]: ... @overload diff --git a/numpy/lib/_iotools.pyi b/numpy/lib/_iotools.pyi index 3bf41fc0cdde..7baca9c78045 100644 --- a/numpy/lib/_iotools.pyi +++ b/numpy/lib/_iotools.pyi @@ -5,7 +5,6 @@ from typing import ( Final, Literal, TypedDict, - TypeVar, Unpack, overload, type_check_only, @@ -15,8 +14,6 @@ import numpy as np import numpy.typing as npt from numpy._typing._dtype_like import _DTypeLikeNested -_T = TypeVar("_T") - @type_check_only class _NameValidatorKwargs(TypedDict, total=False): excludelist: Iterable[str] | None @@ -46,7 +43,7 @@ class LineSplitter: encoding: str | None = None, ) -> None: ... def __call__(self, /, line: str | bytes) -> list[str]: ... - def autostrip(self, /, method: Callable[[_T], Iterable[str]]) -> Callable[[_T], list[str]]: ... + def autostrip[T](self, /, method: Callable[[T], Iterable[str]]) -> Callable[[T], list[str]]: ... class NameValidator: defaultexcludelist: ClassVar[Sequence[str]] = ... 
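Editor's note: the `_iotools` change above is representative of the whole
series; a helper that previously shared a module-level `_T = TypeVar("_T")`
now declares its type parameter inline on the one signature that uses it
(PEP 695, Python 3.12+). A minimal runnable sketch, using a hypothetical
free-function analogue of `LineSplitter.autostrip`:

from collections.abc import Callable, Iterable

def autostrip_like[T](method: Callable[[T], Iterable[str]]) -> Callable[[T], list[str]]:
    # wrap a tokenizer so that every token it yields is whitespace-stripped
    def wrapped(arg: T, /) -> list[str]:
        return [token.strip() for token in method(arg)]
    return wrapped

split_clean = autostrip_like(str.split)  # inferred as Callable[[str], list[str]]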
diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index 349edd06f57b..f13ee2e7e967 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -13,15 +13,14 @@ from typing import ( IO, Any, ClassVar, - Generic, Literal as L, Protocol, Self, - TypeAlias, overload, + override, type_check_only, ) -from typing_extensions import TypeVar, override +from typing_extensions import TypeVar import numpy as np from numpy._core.multiarray import packbits, unpackbits @@ -43,23 +42,20 @@ __all__ = [ "unpackbits", ] -_T = TypeVar("_T") -_T_co = TypeVar("_T_co", covariant=True) -_ScalarT = TypeVar("_ScalarT", bound=np.generic) _ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, default=Any, covariant=True) -_FName: TypeAlias = StrPath | Iterable[str] | Iterable[bytes] -_FNameRead: TypeAlias = StrPath | SupportsRead[str] | SupportsRead[bytes] -_FNameWriteBytes: TypeAlias = StrPath | SupportsWrite[bytes] -_FNameWrite: TypeAlias = _FNameWriteBytes | SupportsWrite[str] +type _FName = StrPath | Iterable[str] | Iterable[bytes] +type _FNameRead = StrPath | SupportsRead[str] | SupportsRead[bytes] +type _FNameWriteBytes = StrPath | SupportsWrite[bytes] +type _FNameWrite = _FNameWriteBytes | SupportsWrite[str] @type_check_only -class _SupportsReadSeek(SupportsRead[_T_co], Protocol[_T_co]): +class _SupportsReadSeek[T](SupportsRead[T], Protocol): def seek(self, offset: int, whence: int, /) -> object: ... -class BagObj(Generic[_T_co]): - def __init__(self, /, obj: SupportsKeysAndGetItem[str, _T_co]) -> None: ... - def __getattribute__(self, key: str, /) -> _T_co: ... +class BagObj[T]: + def __init__(self, /, obj: SupportsKeysAndGetItem[str, T]) -> None: ... + def __getattribute__(self, key: str, /) -> T: ... def __dir__(self) -> list[str]: ... class NpzFile(Mapping[str, NDArray[_ScalarT_co]]): @@ -98,7 +94,7 @@ class NpzFile(Mapping[str, NDArray[_ScalarT_co]]): @overload def get(self, key: str, default: None = None, /) -> NDArray[_ScalarT_co] | None: ... @overload - def get(self, key: str, default: NDArray[_ScalarT_co] | _T, /) -> NDArray[_ScalarT_co] | _T: ... # pyright: ignore[reportIncompatibleMethodOverride] + def get[T](self, key: str, default: NDArray[_ScalarT_co] | T, /) -> NDArray[_ScalarT_co] | T: ... # pyright: ignore[reportIncompatibleMethodOverride] # def close(self) -> None: ... @@ -139,9 +135,9 @@ def loadtxt( like: _SupportsArrayFunc | None = None, ) -> NDArray[np.float64]: ... @overload -def loadtxt( +def loadtxt[ScalarT: np.generic]( fname: _FName, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], comments: str | Sequence[str] | None = "#", delimiter: str | None = None, converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None, @@ -154,7 +150,7 @@ def loadtxt( *, quotechar: str | None = None, like: _SupportsArrayFunc | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def loadtxt( fname: _FName, @@ -186,12 +182,12 @@ def savetxt( ) -> None: ... @overload -def fromregex( +def fromregex[ScalarT: np.generic]( file: _FNameRead, regexp: str | bytes | Pattern[Any], - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], encoding: str | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def fromregex( file: _FNameRead, @@ -230,9 +226,9 @@ def genfromtxt( like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... 
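# Editor's note (illustrative sketch, not part of the patch): as with
# `genfromtxt` below, the `dtype=` argument selects the result's scalar type.
# "data.txt" is a stand-in path; only the static types matter here:

from typing import assert_type
import numpy as np
import numpy.typing as npt

assert_type(np.loadtxt("data.txt"), npt.NDArray[np.float64])                 # default dtype
assert_type(np.loadtxt("data.txt", dtype=np.int64), npt.NDArray[np.int64])   # ScalarT from dtype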
@overload -def genfromtxt( +def genfromtxt[ScalarT: np.generic]( fname: _FName, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], comments: str = "#", delimiter: str | int | Iterable[int] | None = None, skip_header: int = 0, @@ -257,7 +253,7 @@ def genfromtxt( *, ndmin: L[0, 1, 2] = 0, like: _SupportsArrayFunc | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def genfromtxt( fname: _FName, diff --git a/numpy/lib/_polynomial_impl.pyi b/numpy/lib/_polynomial_impl.pyi index 9c02a7f867c5..4899b868071c 100644 --- a/numpy/lib/_polynomial_impl.pyi +++ b/numpy/lib/_polynomial_impl.pyi @@ -1,13 +1,4 @@ -from typing import ( - Any, - Literal as L, - NoReturn, - SupportsIndex, - SupportsInt, - TypeAlias, - TypeVar, - overload, -) +from typing import Any, Literal as L, NoReturn, SupportsIndex, SupportsInt, overload import numpy as np from numpy import ( @@ -33,16 +24,10 @@ from numpy._typing import ( _ArrayLikeUInt_co, ) -_T = TypeVar("_T") +type _2Tup[T] = tuple[T, T] +type _5Tup[T] = tuple[T, NDArray[float64], NDArray[int32], NDArray[float64], NDArray[float64]] -_2Tup: TypeAlias = tuple[_T, _T] -_5Tup: TypeAlias = tuple[ - _T, - NDArray[float64], - NDArray[int32], - NDArray[float64], - NDArray[float64], -] +### __all__ = [ "poly", diff --git a/numpy/lib/_shape_base_impl.pyi b/numpy/lib/_shape_base_impl.pyi index ec4cd2c595ac..8037a01ac998 100644 --- a/numpy/lib/_shape_base_impl.pyi +++ b/numpy/lib/_shape_base_impl.pyi @@ -1,14 +1,5 @@ from collections.abc import Callable, Sequence -from typing import ( - Any, - Concatenate, - ParamSpec, - Protocol, - SupportsIndex, - TypeVar, - overload, - type_check_only, -) +from typing import Any, Concatenate, Protocol, SupportsIndex, overload, type_check_only from typing_extensions import deprecated import numpy as np @@ -16,7 +7,6 @@ from numpy import ( _CastingKind, complexfloating, floating, - generic, integer, object_, signedinteger, @@ -55,9 +45,6 @@ __all__ = [ "put_along_axis", ] -_P = ParamSpec("_P") -_ScalarT = TypeVar("_ScalarT", bound=generic) - # Signature of `__array_wrap__` @type_check_only class _ArrayWrap(Protocol): @@ -76,52 +63,46 @@ class _SupportsArrayWrap(Protocol): ### -def take_along_axis( - arr: _ScalarT | NDArray[_ScalarT], +def take_along_axis[ScalarT: np.generic]( + arr: ScalarT | NDArray[ScalarT], indices: NDArray[integer], axis: int | None = -1, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... -def put_along_axis( - arr: NDArray[_ScalarT], +def put_along_axis[ScalarT: np.generic]( + arr: NDArray[ScalarT], indices: NDArray[integer], values: ArrayLike, axis: int | None, ) -> None: ... @overload -def apply_along_axis( - func1d: Callable[Concatenate[NDArray[Any], _P], _ArrayLike[_ScalarT]], +def apply_along_axis[**Tss, ScalarT: np.generic]( + func1d: Callable[Concatenate[np.ndarray, Tss], _ArrayLike[ScalarT]], axis: SupportsIndex, arr: ArrayLike, - *args: _P.args, - **kwargs: _P.kwargs, -) -> NDArray[_ScalarT]: ... + *args: Tss.args, + **kwargs: Tss.kwargs, +) -> NDArray[ScalarT]: ... @overload -def apply_along_axis( - func1d: Callable[Concatenate[NDArray[Any], _P], Any], +def apply_along_axis[**Tss]( + func1d: Callable[Concatenate[NDArray[Any], Tss], Any], axis: SupportsIndex, arr: ArrayLike, - *args: _P.args, - **kwargs: _P.kwargs, + *args: Tss.args, + **kwargs: Tss.kwargs, ) -> NDArray[Any]: ... 
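# Editor's note (illustrative sketch, not part of the patch): the `**Tss`
# parameter above forwards the callback's extra parameters, so the *args
# passed to `apply_along_axis` are checked against the callback's signature:

from typing import assert_type
import numpy as np
import numpy.typing as npt

def row_mean(row: np.ndarray, scale: float) -> npt.NDArray[np.float64]:
    # hypothetical 1-D callback: mean of the slice, rescaled, as a 1-element array
    return np.asarray([float(row.mean()) * scale])

assert_type(np.apply_along_axis(row_mean, 0, [[1.0, 2.0]], 2.0), npt.NDArray[np.float64])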
-def apply_over_axes( - func: Callable[[NDArray[Any], int], NDArray[_ScalarT]], +def apply_over_axes[ScalarT: np.generic]( + func: Callable[[np.ndarray, int], NDArray[ScalarT]], a: ArrayLike, axes: int | Sequence[int], -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def expand_dims( - a: _ArrayLike[_ScalarT], - axis: _ShapeLike, -) -> NDArray[_ScalarT]: ... +def expand_dims[ScalarT: np.generic](a: _ArrayLike[ScalarT], axis: _ShapeLike) -> NDArray[ScalarT]: ... @overload -def expand_dims( - a: ArrayLike, - axis: _ShapeLike, -) -> NDArray[Any]: ... +def expand_dims(a: ArrayLike, axis: _ShapeLike) -> NDArray[Any]: ... # Deprecated in NumPy 2.0, 2023-08-18 @deprecated("`row_stack` alias is deprecated. Use `np.vstack` directly.") @@ -134,22 +115,22 @@ def row_stack( # keep in sync with `numpy.ma.extras.column_stack` @overload -def column_stack(tup: Sequence[_ArrayLike[_ScalarT]]) -> NDArray[_ScalarT]: ... +def column_stack[ScalarT: np.generic](tup: Sequence[_ArrayLike[ScalarT]]) -> NDArray[ScalarT]: ... @overload def column_stack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ... # keep in sync with `numpy.ma.extras.dstack` @overload -def dstack(tup: Sequence[_ArrayLike[_ScalarT]]) -> NDArray[_ScalarT]: ... +def dstack[ScalarT: np.generic](tup: Sequence[_ArrayLike[ScalarT]]) -> NDArray[ScalarT]: ... @overload def dstack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ... @overload -def array_split( - ary: _ArrayLike[_ScalarT], +def array_split[ScalarT: np.generic]( + ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike, axis: SupportsIndex = 0, -) -> list[NDArray[_ScalarT]]: ... +) -> list[NDArray[ScalarT]]: ... @overload def array_split( ary: ArrayLike, @@ -158,11 +139,11 @@ def array_split( ) -> list[NDArray[Any]]: ... @overload -def split( - ary: _ArrayLike[_ScalarT], +def split[ScalarT: np.generic]( + ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike, axis: SupportsIndex = 0, -) -> list[NDArray[_ScalarT]]: ... +) -> list[NDArray[ScalarT]]: ... @overload def split( ary: ArrayLike, @@ -172,10 +153,10 @@ def split( # keep in sync with `numpy.ma.extras.hsplit` @overload -def hsplit( - ary: _ArrayLike[_ScalarT], +def hsplit[ScalarT: np.generic]( + ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike, -) -> list[NDArray[_ScalarT]]: ... +) -> list[NDArray[ScalarT]]: ... @overload def hsplit( ary: ArrayLike, @@ -183,10 +164,10 @@ def hsplit( ) -> list[NDArray[Any]]: ... @overload -def vsplit( - ary: _ArrayLike[_ScalarT], +def vsplit[ScalarT: np.generic]( + ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike, -) -> list[NDArray[_ScalarT]]: ... +) -> list[NDArray[ScalarT]]: ... @overload def vsplit( ary: ArrayLike, @@ -194,10 +175,10 @@ def vsplit( ) -> list[NDArray[Any]]: ... @overload -def dsplit( - ary: _ArrayLike[_ScalarT], +def dsplit[ScalarT: np.generic]( + ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike, -) -> list[NDArray[_ScalarT]]: ... +) -> list[NDArray[ScalarT]]: ... @overload def dsplit( ary: ArrayLike, @@ -225,10 +206,10 @@ def kron(a: _ArrayLikeObject_co, b: Any) -> NDArray[object_]: ... def kron(a: Any, b: _ArrayLikeObject_co) -> NDArray[object_]: ... @overload -def tile( - A: _ArrayLike[_ScalarT], +def tile[ScalarT: np.generic]( + A: _ArrayLike[ScalarT], reps: int | Sequence[int], -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... 
@overload def tile( A: ArrayLike, diff --git a/numpy/lib/_stride_tricks_impl.pyi b/numpy/lib/_stride_tricks_impl.pyi index 008f2d544414..77b9d60b9d7f 100644 --- a/numpy/lib/_stride_tricks_impl.pyi +++ b/numpy/lib/_stride_tricks_impl.pyi @@ -1,13 +1,11 @@ from collections.abc import Iterable -from typing import Any, SupportsIndex, TypeVar, overload +from typing import Any, SupportsIndex, overload -from numpy import generic +import numpy as np from numpy._typing import ArrayLike, NDArray, _AnyShape, _ArrayLike, _ShapeLike __all__ = ["broadcast_to", "broadcast_arrays", "broadcast_shapes"] -_ScalarT = TypeVar("_ScalarT", bound=generic) - class DummyArray: __array_interface__: dict[str, Any] base: NDArray[Any] | None @@ -18,13 +16,13 @@ class DummyArray: ) -> None: ... @overload -def as_strided( - x: _ArrayLike[_ScalarT], +def as_strided[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], shape: Iterable[int] | None = None, strides: Iterable[int] | None = None, subok: bool = False, writeable: bool = True, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def as_strided( x: ArrayLike, @@ -35,14 +33,14 @@ def as_strided( ) -> NDArray[Any]: ... @overload -def sliding_window_view( - x: _ArrayLike[_ScalarT], +def sliding_window_view[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], window_shape: int | Iterable[int], axis: SupportsIndex | None = None, *, subok: bool = False, writeable: bool = False, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def sliding_window_view( x: ArrayLike, @@ -54,11 +52,11 @@ def sliding_window_view( ) -> NDArray[Any]: ... @overload -def broadcast_to( - array: _ArrayLike[_ScalarT], +def broadcast_to[ScalarT: np.generic]( + array: _ArrayLike[ScalarT], shape: int | Iterable[int], subok: bool = False, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def broadcast_to( array: ArrayLike, diff --git a/numpy/lib/_twodim_base_impl.pyi b/numpy/lib/_twodim_base_impl.pyi index 58582119429a..63f5f4cdc9c0 100644 --- a/numpy/lib/_twodim_base_impl.pyi +++ b/numpy/lib/_twodim_base_impl.pyi @@ -1,15 +1,6 @@ from _typeshed import Incomplete from collections.abc import Callable, Sequence -from typing import ( - Any, - Literal as L, - Never, - Protocol, - TypeAlias, - TypeVar, - overload, - type_check_only, -) +from typing import Any, Literal as L, Never, Protocol, overload, type_check_only import numpy as np from numpy import _OrderCF @@ -45,37 +36,28 @@ __all__ = [ ### -_T = TypeVar("_T") -_ArrayT = TypeVar("_ArrayT", bound=np.ndarray) -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_ComplexT = TypeVar("_ComplexT", bound=np.complexfloating) -_InexactT = TypeVar("_InexactT", bound=np.inexact) -_NumberT = TypeVar("_NumberT", bound=np.number) -_NumberObjectT = TypeVar("_NumberObjectT", bound=np.number | np.object_) -_NumberCoT = TypeVar("_NumberCoT", bound=_Number_co) - -_Int_co: TypeAlias = np.integer | np.bool -_Float_co: TypeAlias = np.floating | _Int_co -_Number_co: TypeAlias = np.number | np.bool +type _Int_co = np.integer | np.bool +type _Float_co = np.floating | _Int_co +type _Number_co = np.number | np.bool -_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] -_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] # Workaround for mypy's and pyright's lack of compliance with the typing spec for # overloads for gradual types. 
This works because only `Any` and `Never` are assignable # to `Never`. -_ArrayNoD: TypeAlias = np.ndarray[tuple[Never] | tuple[Never, Never], np.dtype[_ScalarT]] +type _ArrayNoD[ScalarT: np.generic] = np.ndarray[tuple[Never] | tuple[Never, Never], np.dtype[ScalarT]] -_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_ScalarT]] | Sequence[_ScalarT] -_ArrayLike1DInt_co: TypeAlias = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co] -_ArrayLike1DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co] -_ArrayLike2DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[_ArrayLike1DFloat_co] -_ArrayLike1DNumber_co: TypeAlias = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co] +type _ArrayLike1D[ScalarT: np.generic] = _SupportsArray[np.dtype[ScalarT]] | Sequence[ScalarT] +type _ArrayLike1DInt_co = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co] +type _ArrayLike1DFloat_co = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co] +type _ArrayLike2DFloat_co = _SupportsArray[np.dtype[_Float_co]] | Sequence[_ArrayLike1DFloat_co] +type _ArrayLike1DNumber_co = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co] # The returned arrays dtype must be compatible with `np.equal` -_MaskFunc: TypeAlias = Callable[[NDArray[np.int_], _T], NDArray[_Number_co | np.timedelta64 | np.datetime64 | np.object_]] +type _MaskFunc[_T] = Callable[[NDArray[np.int_], _T], NDArray[_Number_co | np.timedelta64 | np.datetime64 | np.object_]] -_Indices2D: TypeAlias = tuple[_Array1D[np.intp], _Array1D[np.intp]] -_Histogram2D: TypeAlias = tuple[_Array1D[np.float64], _Array1D[_ScalarT], _Array1D[_ScalarT]] +type _Indices2D = tuple[_Array1D[np.intp], _Array1D[np.intp]] +type _Histogram2D[ScalarT: np.generic] = tuple[_Array1D[np.float64], _Array1D[ScalarT], _Array1D[ScalarT]] @type_check_only class _HasShapeAndNDim(Protocol): @@ -88,17 +70,17 @@ class _HasShapeAndNDim(Protocol): # keep in sync with `flipud` @overload -def fliplr(m: _ArrayT) -> _ArrayT: ... +def fliplr[ArrayT: np.ndarray](m: ArrayT) -> ArrayT: ... @overload -def fliplr(m: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +def fliplr[ScalarT: np.generic](m: _ArrayLike[ScalarT]) -> NDArray[ScalarT]: ... @overload def fliplr(m: ArrayLike) -> NDArray[Any]: ... # keep in sync with `fliplr` @overload -def flipud(m: _ArrayT) -> _ArrayT: ... +def flipud[ArrayT: np.ndarray](m: ArrayT) -> ArrayT: ... @overload -def flipud(m: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +def flipud[ScalarT: np.generic](m: _ArrayLike[ScalarT]) -> NDArray[ScalarT]: ... @overload def flipud(m: ArrayLike) -> NDArray[Any]: ... @@ -115,27 +97,27 @@ def eye( like: _SupportsArrayFunc | None = None, ) -> _Array2D[np.float64]: ... @overload -def eye( +def eye[ScalarT: np.generic]( N: int, M: int | None, k: int, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> _Array2D[_ScalarT]: ... +) -> _Array2D[ScalarT]: ... @overload -def eye( +def eye[ScalarT: np.generic]( N: int, M: int | None = None, k: int = 0, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderCF = "C", device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> _Array2D[_ScalarT]: ... +) -> _Array2D[ScalarT]: ... 
@overload def eye( N: int, @@ -150,23 +132,23 @@ def eye( # @overload -def diag(v: _ArrayNoD[_ScalarT] | Sequence[Sequence[_ScalarT]], k: int = 0) -> NDArray[_ScalarT]: ... +def diag[ScalarT: np.generic](v: _ArrayNoD[ScalarT] | Sequence[Sequence[ScalarT]], k: int = 0) -> NDArray[ScalarT]: ... @overload -def diag(v: _Array2D[_ScalarT] | Sequence[Sequence[_ScalarT]], k: int = 0) -> _Array1D[_ScalarT]: ... +def diag[ScalarT: np.generic](v: _Array2D[ScalarT] | Sequence[Sequence[ScalarT]], k: int = 0) -> _Array1D[ScalarT]: ... @overload -def diag(v: _Array1D[_ScalarT] | Sequence[_ScalarT], k: int = 0) -> _Array2D[_ScalarT]: ... +def diag[ScalarT: np.generic](v: _Array1D[ScalarT] | Sequence[ScalarT], k: int = 0) -> _Array2D[ScalarT]: ... @overload def diag(v: Sequence[Sequence[_ScalarLike_co]], k: int = 0) -> _Array1D[Incomplete]: ... @overload def diag(v: Sequence[_ScalarLike_co], k: int = 0) -> _Array2D[Incomplete]: ... @overload -def diag(v: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... +def diag[ScalarT: np.generic](v: _ArrayLike[ScalarT], k: int = 0) -> NDArray[ScalarT]: ... @overload def diag(v: ArrayLike, k: int = 0) -> NDArray[Incomplete]: ... # keep in sync with `numpy.ma.extras.diagflat` @overload -def diagflat(v: _ArrayLike[_ScalarT], k: int = 0) -> _Array2D[_ScalarT]: ... +def diagflat[ScalarT: np.generic](v: _ArrayLike[ScalarT], k: int = 0) -> _Array2D[ScalarT]: ... @overload def diagflat(v: ArrayLike, k: int = 0) -> _Array2D[Incomplete]: ... @@ -181,23 +163,23 @@ def tri( like: _SupportsArrayFunc | None = None ) -> _Array2D[np.float64]: ... @overload -def tri( +def tri[ScalarT: np.generic]( N: int, M: int | None, k: int, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], *, like: _SupportsArrayFunc | None = None -) -> _Array2D[_ScalarT]: ... +) -> _Array2D[ScalarT]: ... @overload -def tri( +def tri[ScalarT: np.generic]( N: int, M: int | None = None, k: int = 0, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], like: _SupportsArrayFunc | None = None -) -> _Array2D[_ScalarT]: ... +) -> _Array2D[ScalarT]: ... @overload def tri( N: int, @@ -210,23 +192,23 @@ def tri( # keep in sync with `triu` @overload -def tril(m: _ArrayT, k: int = 0) -> _ArrayT: ... +def tril[ArrayT: np.ndarray](m: ArrayT, k: int = 0) -> ArrayT: ... @overload -def tril(m: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... +def tril[ScalarT: np.generic](m: _ArrayLike[ScalarT], k: int = 0) -> NDArray[ScalarT]: ... @overload def tril(m: ArrayLike, k: int = 0) -> NDArray[Any]: ... # keep in sync with `tril` @overload -def triu(m: _ArrayT, k: int = 0) -> _ArrayT: ... +def triu[ArrayT: np.ndarray](m: ArrayT, k: int = 0) -> ArrayT: ... @overload -def triu(m: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... +def triu[ScalarT: np.generic](m: _ArrayLike[ScalarT], k: int = 0) -> NDArray[ScalarT]: ... @overload def triu(m: ArrayLike, k: int = 0) -> NDArray[Any]: ... # we use `list` (invariant) instead of `Sequence` (covariant) to avoid overlap @overload -def vander(x: _ArrayLike1D[_NumberObjectT], N: int | None = None, increasing: bool = False) -> _Array2D[_NumberObjectT]: ... +def vander[ScalarT: np.number | np.object_](x: _ArrayLike1D[ScalarT], N: int | None = None, increasing: bool = False) -> _Array2D[ScalarT]: ... @overload def vander(x: _ArrayLike1D[np.bool] | list[int], N: int | None = None, increasing: bool = False) -> _Array2D[np.int_]: ... 
@overload @@ -238,41 +220,41 @@ def vander(x: Sequence[_NumberLike_co], N: int | None = None, increasing: bool = # @overload -def histogram2d( - x: _ArrayLike1D[_ComplexT], - y: _ArrayLike1D[_ComplexT | _Float_co], +def histogram2d[ScalarT: np.complexfloating]( + x: _ArrayLike1D[ScalarT], + y: _ArrayLike1D[ScalarT | _Float_co], bins: int | Sequence[int] = 10, range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> _Histogram2D[_ComplexT]: ... +) -> _Histogram2D[ScalarT]: ... @overload -def histogram2d( - x: _ArrayLike1D[_ComplexT | _Float_co], - y: _ArrayLike1D[_ComplexT], +def histogram2d[ScalarT: np.complexfloating]( + x: _ArrayLike1D[ScalarT | _Float_co], + y: _ArrayLike1D[ScalarT], bins: int | Sequence[int] = 10, range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> _Histogram2D[_ComplexT]: ... +) -> _Histogram2D[ScalarT]: ... @overload -def histogram2d( - x: _ArrayLike1D[_InexactT], - y: _ArrayLike1D[_InexactT | _Int_co], +def histogram2d[ScalarT: np.inexact]( + x: _ArrayLike1D[ScalarT], + y: _ArrayLike1D[ScalarT | _Int_co], bins: int | Sequence[int] = 10, range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> _Histogram2D[_InexactT]: ... +) -> _Histogram2D[ScalarT]: ... @overload -def histogram2d( - x: _ArrayLike1D[_InexactT | _Int_co], - y: _ArrayLike1D[_InexactT], +def histogram2d[ScalarT: np.inexact]( + x: _ArrayLike1D[ScalarT | _Int_co], + y: _ArrayLike1D[ScalarT], bins: int | Sequence[int] = 10, range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> _Histogram2D[_InexactT]: ... +) -> _Histogram2D[ScalarT]: ... @overload def histogram2d( x: _ArrayLike1DInt_co | Sequence[float], @@ -292,41 +274,41 @@ def histogram2d( weights: _ArrayLike1DFloat_co | None = None, ) -> _Histogram2D[np.complex128 | Any]: ... @overload -def histogram2d( +def histogram2d[ScalarT: _Number_co]( x: _ArrayLike1DNumber_co, y: _ArrayLike1DNumber_co, - bins: _ArrayLike1D[_NumberCoT] | Sequence[_ArrayLike1D[_NumberCoT]], + bins: _ArrayLike1D[ScalarT] | Sequence[_ArrayLike1D[ScalarT]], range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> _Histogram2D[_NumberCoT]: ... +) -> _Histogram2D[ScalarT]: ... @overload -def histogram2d( - x: _ArrayLike1D[_InexactT], - y: _ArrayLike1D[_InexactT], - bins: Sequence[_ArrayLike1D[_NumberCoT] | int], +def histogram2d[ScalarT: np.inexact, BinsScalarT: _Number_co]( + x: _ArrayLike1D[ScalarT], + y: _ArrayLike1D[ScalarT], + bins: Sequence[_ArrayLike1D[BinsScalarT] | int], range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> _Histogram2D[_InexactT | _NumberCoT]: ... +) -> _Histogram2D[ScalarT | BinsScalarT]: ... @overload -def histogram2d( - x: _ArrayLike1D[_InexactT], - y: _ArrayLike1D[_InexactT], +def histogram2d[ScalarT: np.inexact]( + x: _ArrayLike1D[ScalarT], + y: _ArrayLike1D[ScalarT], bins: Sequence[_ArrayLike1DNumber_co | int], range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> _Histogram2D[_InexactT | Any]: ... +) -> _Histogram2D[ScalarT | Any]: ... 
@overload -def histogram2d( +def histogram2d[ScalarT: _Number_co]( x: _ArrayLike1DInt_co | Sequence[float], y: _ArrayLike1DInt_co | Sequence[float], - bins: Sequence[_ArrayLike1D[_NumberCoT] | int], + bins: Sequence[_ArrayLike1D[ScalarT] | int], range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> _Histogram2D[np.float64 | _NumberCoT]: ... +) -> _Histogram2D[np.float64 | ScalarT]: ... @overload def histogram2d( x: _ArrayLike1DInt_co | Sequence[float], @@ -337,14 +319,14 @@ def histogram2d( weights: _ArrayLike1DFloat_co | None = None, ) -> _Histogram2D[np.float64 | Any]: ... @overload -def histogram2d( +def histogram2d[ScalarT: _Number_co]( x: Sequence[complex], y: Sequence[complex], - bins: Sequence[_ArrayLike1D[_NumberCoT] | int], + bins: Sequence[_ArrayLike1D[ScalarT] | int], range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> _Histogram2D[np.complex128 | _NumberCoT]: ... +) -> _Histogram2D[np.complex128 | ScalarT]: ... @overload def histogram2d( x: Sequence[complex], @@ -397,7 +379,7 @@ def histogram2d( @overload def mask_indices(n: int, mask_func: _MaskFunc[int], k: int = 0) -> _Indices2D: ... @overload -def mask_indices(n: int, mask_func: _MaskFunc[_T], k: _T) -> _Indices2D: ... +def mask_indices[T](n: int, mask_func: _MaskFunc[T], k: T) -> _Indices2D: ... # def tril_indices(n: int, k: int = 0, m: int | None = None) -> _Indices2D: ... diff --git a/numpy/lib/_type_check_impl.pyi b/numpy/lib/_type_check_impl.pyi index dec5fc1dfaa4..b75f010d3550 100644 --- a/numpy/lib/_type_check_impl.pyi +++ b/numpy/lib/_type_check_impl.pyi @@ -1,7 +1,6 @@ from _typeshed import Incomplete from collections.abc import Container, Iterable -from typing import Any, Literal as L, Protocol, TypeAlias, overload, type_check_only -from typing_extensions import TypeVar +from typing import Any, Literal as L, Protocol, overload, type_check_only import numpy as np from numpy._typing import ( @@ -30,33 +29,28 @@ __all__ = [ "typename", ] -_T = TypeVar("_T") -_T_co = TypeVar("_T_co", covariant=True) -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) -_RealT = TypeVar("_RealT", bound=np.floating | np.integer | np.bool) - -_FloatMax32: TypeAlias = np.float32 | np.float16 -_ComplexMax128: TypeAlias = np.complex128 | np.complex64 -_RealMax64: TypeAlias = np.float64 | np.float32 | np.float16 | np.integer -_Real: TypeAlias = np.floating | np.integer -_InexactMax32: TypeAlias = np.inexact[_32Bit] | np.float16 -_NumberMax64: TypeAlias = np.number[_64Bit] | np.number[_32Bit] | np.number[_16Bit] | np.integer +type _FloatMax32 = np.float32 | np.float16 +type _ComplexMax128 = np.complex128 | np.complex64 +type _RealMax64 = np.float64 | np.float32 | np.float16 | np.integer +type _Real = np.floating | np.integer +type _ToReal = _Real | np.bool +type _InexactMax32 = np.inexact[_32Bit] | np.float16 +type _NumberMax64 = np.number[_64Bit] | np.number[_32Bit] | np.number[_16Bit] | np.integer @type_check_only -class _HasReal(Protocol[_T_co]): +class _HasReal[T](Protocol): @property - def real(self, /) -> _T_co: ... + def real(self, /) -> T: ... @type_check_only -class _HasImag(Protocol[_T_co]): +class _HasImag[T](Protocol): @property - def imag(self, /) -> _T_co: ... + def imag(self, /) -> T: ... 
 @type_check_only
-class _HasDType(Protocol[_ScalarT_co]):
+class _HasDType[ScalarT: np.generic](Protocol):
     @property
-    def dtype(self, /) -> np.dtype[_ScalarT_co]: ...
+    def dtype(self, /) -> np.dtype[ScalarT]: ...
 
 ###
 
@@ -64,17 +58,17 @@ def mintypecode(typechars: Iterable[str | ArrayLike], typeset: str | Container[s
 
 #
 @overload
-def real(val: _HasReal[_T]) -> _T: ...
+def real[T](val: _HasReal[T]) -> T: ...
 @overload
-def real(val: _ArrayLike[_RealT]) -> NDArray[_RealT]: ...
+def real[RealT: _ToReal](val: _ArrayLike[RealT]) -> NDArray[RealT]: ...
 @overload
 def real(val: ArrayLike) -> NDArray[Any]: ...
 
 #
 @overload
-def imag(val: _HasImag[_T]) -> _T: ...
+def imag[T](val: _HasImag[T]) -> T: ...
 @overload
-def imag(val: _ArrayLike[_RealT]) -> NDArray[_RealT]: ...
+def imag[RealT: _ToReal](val: _ArrayLike[RealT]) -> NDArray[RealT]: ...
 @overload
 def imag(val: ArrayLike) -> NDArray[Any]: ...
 
@@ -100,29 +94,29 @@ def isrealobj(x: _HasDType[Any] | ArrayLike) -> bool: ...
 
 #
 @overload
-def nan_to_num(
-    x: _ScalarT,
+def nan_to_num[ScalarT: np.generic](
+    x: ScalarT,
     copy: bool = True,
     nan: float = 0.0,
     posinf: float | None = None,
     neginf: float | None = None,
-) -> _ScalarT: ...
+) -> ScalarT: ...
 @overload
-def nan_to_num(
-    x: NDArray[_ScalarT] | _NestedSequence[_ArrayLike[_ScalarT]],
+def nan_to_num[ScalarT: np.generic](
+    x: NDArray[ScalarT] | _NestedSequence[_ArrayLike[ScalarT]],
     copy: bool = True,
     nan: float = 0.0,
     posinf: float | None = None,
     neginf: float | None = None,
-) -> NDArray[_ScalarT]: ...
+) -> NDArray[ScalarT]: ...
 @overload
-def nan_to_num(
-    x: _SupportsArray[np.dtype[_ScalarT]],
+def nan_to_num[ScalarT: np.generic](
+    x: _SupportsArray[np.dtype[ScalarT]],
     copy: bool = True,
     nan: float = 0.0,
     posinf: float | None = None,
     neginf: float | None = None,
-) -> _ScalarT | NDArray[_ScalarT]: ...
+) -> ScalarT | NDArray[ScalarT]: ...
 @overload
 def nan_to_num(
     x: _NestedSequence[ArrayLike],
@@ -143,12 +137,12 @@ def nan_to_num(
 # NOTE: The [overload-overlap] mypy error is a false positive
 @overload
 def real_if_close(a: _ArrayLike[np.complex64], tol: float = 100) -> NDArray[np.float32 | np.complex64]: ...
 @overload
 def real_if_close(a: _ArrayLike[np.complex128], tol: float = 100) -> NDArray[np.float64 | np.complex128]: ...
 @overload
 def real_if_close(a: _ArrayLike[np.clongdouble], tol: float = 100) -> NDArray[np.longdouble | np.clongdouble]: ...
 @overload
-def real_if_close(a: _ArrayLike[_RealT], tol: float = 100) -> NDArray[_RealT]: ...
+def real_if_close[RealT: _ToReal](a: _ArrayLike[RealT], tol: float = 100) -> NDArray[RealT]: ...
 @overload
 def real_if_close(a: ArrayLike, tol: float = 100) -> NDArray[Any]: ...
 
diff --git a/numpy/lib/_ufunclike_impl.pyi b/numpy/lib/_ufunclike_impl.pyi
index c16ce811e28a..d48557a7b5d7 100644
--- a/numpy/lib/_ufunclike_impl.pyi
+++ b/numpy/lib/_ufunclike_impl.pyi
@@ -1,8 +1,7 @@
-from typing import Any, TypeVar, overload
+from typing import overload
 from typing_extensions import deprecated
 
 import numpy as np
-from numpy import floating, object_
 from numpy._typing import (
     NDArray,
     _ArrayLikeFloat_co,
@@ -12,49 +11,31 @@ from numpy._typing import (
 
 __all__ = ["fix", "isneginf", "isposinf"]
 
-_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any])
-
 @overload
 @deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning)
-def fix(x: _FloatLike_co, out: None = None) -> floating: ...
+def fix(x: _FloatLike_co, out: None = None) -> np.floating: ... @overload @deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) -def fix(x: _ArrayLikeFloat_co, out: None = None) -> NDArray[floating]: ... +def fix(x: _ArrayLikeFloat_co, out: None = None) -> NDArray[np.floating]: ... @overload @deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) -def fix(x: _ArrayLikeObject_co, out: None = None) -> NDArray[object_]: ... +def fix(x: _ArrayLikeObject_co, out: None = None) -> NDArray[np.object_]: ... @overload @deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) -def fix(x: _ArrayLikeFloat_co | _ArrayLikeObject_co, out: _ArrayT) -> _ArrayT: ... +def fix[ArrayT: np.ndarray](x: _ArrayLikeFloat_co | _ArrayLikeObject_co, out: ArrayT) -> ArrayT: ... +# @overload -def isposinf( - x: _FloatLike_co, - out: None = None, -) -> np.bool: ... +def isposinf(x: _FloatLike_co, out: None = None) -> np.bool: ... @overload -def isposinf( - x: _ArrayLikeFloat_co, - out: None = None, -) -> NDArray[np.bool]: ... +def isposinf(x: _ArrayLikeFloat_co, out: None = None) -> NDArray[np.bool]: ... @overload -def isposinf( - x: _ArrayLikeFloat_co, - out: _ArrayT, -) -> _ArrayT: ... +def isposinf[ArrayT: np.ndarray](x: _ArrayLikeFloat_co, out: ArrayT) -> ArrayT: ... +# @overload -def isneginf( - x: _FloatLike_co, - out: None = None, -) -> np.bool: ... +def isneginf(x: _FloatLike_co, out: None = None) -> np.bool: ... @overload -def isneginf( - x: _ArrayLikeFloat_co, - out: None = None, -) -> NDArray[np.bool]: ... +def isneginf(x: _ArrayLikeFloat_co, out: None = None) -> NDArray[np.bool]: ... @overload -def isneginf( - x: _ArrayLikeFloat_co, - out: _ArrayT, -) -> _ArrayT: ... +def isneginf[ArrayT: np.ndarray](x: _ArrayLikeFloat_co, out: ArrayT) -> ArrayT: ... 
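The hunks above all apply the same mechanical rewrite: a module-level TypeVar whose only job is to tie one parameter to the return type becomes an inline PEP 695 type parameter (Python 3.12+), scoped to the single signature that uses it. A minimal sketch of the before/after; the `passthrough_old`/`passthrough_new` names and the `_ArrayT` variable are illustrative, not from this patch:

    # Before: the TypeVar is declared once at module scope and shared by name.
    from typing import Any, TypeVar

    import numpy as np
    from numpy.typing import NDArray

    _ArrayT = TypeVar("_ArrayT", bound=NDArray[Any])

    def passthrough_old(out: _ArrayT) -> _ArrayT:
        return out

    # After (Python 3.12+): the parameter and its bound are declared inline,
    # so the signature is self-contained and no module-level name is needed.
    def passthrough_new[ArrayT: np.ndarray](out: ArrayT) -> ArrayT:
        return out

Both spellings are equivalent to a type checker; the inline form only removes the shared module-level name, which is why the `_ArrayT = TypeVar(...)` declarations can be deleted wholesale in these stubs.
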
diff --git a/numpy/lib/_user_array_impl.pyi b/numpy/lib/_user_array_impl.pyi index f2e34eacc5a6..661dc97f224c 100644 --- a/numpy/lib/_user_array_impl.pyi +++ b/numpy/lib/_user_array_impl.pyi @@ -1,7 +1,7 @@ from _typeshed import Incomplete from types import EllipsisType -from typing import Any, Generic, Self, SupportsIndex, TypeAlias, overload -from typing_extensions import TypeVar, deprecated, override +from typing import Any, Generic, Self, SupportsIndex, overload, override +from typing_extensions import TypeVar, deprecated import numpy as np import numpy.typing as npt @@ -11,30 +11,25 @@ from numpy._typing import ( _ArrayLikeBool_co, _ArrayLikeInt_co, _DTypeLike, + _Shape, ) ### -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) _ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], default=_AnyShape, covariant=True) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) _DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) -_BoolArrayT = TypeVar("_BoolArrayT", bound=container[Any, np.dtype[np.bool]]) # type: ignore[deprecated] -_IntegralArrayT = TypeVar("_IntegralArrayT", bound=container[Any, np.dtype[np.bool | np.integer | np.object_]]) # type: ignore[deprecated] -_RealContainerT = TypeVar( - "_RealContainerT", - bound=container[Any, np.dtype[np.bool | np.integer | np.floating | np.timedelta64 | np.object_]], # type: ignore[deprecated] -) -_NumericContainerT = TypeVar("_NumericContainerT", bound=container[Any, np.dtype[np.number | np.timedelta64 | np.object_]]) # type: ignore[deprecated] +type _ArrayInt_co = npt.NDArray[np.integer | np.bool] -_ArrayInt_co: TypeAlias = npt.NDArray[np.integer | np.bool] +type _BoolContainer = container[Any, np.dtype[np.bool]] +type _IntegralContainer = container[Any, np.dtype[np.bool | np.integer | np.object_]] +type _RealContainer = container[Any, np.dtype[np.bool | np.integer | np.floating | np.timedelta64 | np.object_]] +type _NumericContainer = container[Any, np.dtype[np.number | np.timedelta64 | np.object_]] -_ToIndexSlice: TypeAlias = slice | EllipsisType | _ArrayInt_co | None -_ToIndexSlices: TypeAlias = _ToIndexSlice | tuple[_ToIndexSlice, ...] -_ToIndex: TypeAlias = SupportsIndex | _ToIndexSlice -_ToIndices: TypeAlias = _ToIndex | tuple[_ToIndex, ...] +type _ToIndexSlice = slice | EllipsisType | _ArrayInt_co | None +type _ToIndexSlices = _ToIndexSlice | tuple[_ToIndexSlice, ...] +type _ToIndex = SupportsIndex | _ToIndexSlice +type _ToIndices = _ToIndex | tuple[_ToIndex, ...] ### # pyright: reportDeprecated = false @@ -52,19 +47,19 @@ class container(Generic[_ShapeT_co, _DTypeT_co]): copy: bool = True, ) -> None: ... @overload - def __init__( - self: container[Any, np.dtype[_ScalarT]], + def __init__[ScalarT: np.generic]( + self: container[Any, np.dtype[ScalarT]], /, - data: _ArrayLike[_ScalarT], + data: _ArrayLike[ScalarT], dtype: None = None, copy: bool = True, ) -> None: ... @overload - def __init__( - self: container[Any, np.dtype[_ScalarT]], + def __init__[ScalarT: np.generic]( + self: container[Any, np.dtype[ScalarT]], /, data: npt.ArrayLike, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], copy: bool = True, ) -> None: ... @overload @@ -112,20 +107,28 @@ class container(Generic[_ShapeT_co, _DTypeT_co]): # keep in sync with np.ndarray @overload - def __abs__(self: container[_ShapeT, np.dtype[np.complex64]], /) -> container[_ShapeT, np.dtype[np.float32]]: ... 
+ def __abs__[ShapeT: _Shape]( + self: container[ShapeT, np.dtype[np.complex64]], / + ) -> container[ShapeT, np.dtype[np.float32]]: ... @overload - def __abs__(self: container[_ShapeT, np.dtype[np.complex128]], /) -> container[_ShapeT, np.dtype[np.float64]]: ... + def __abs__[ShapeT: _Shape]( + self: container[ShapeT, np.dtype[np.complex128]], / + ) -> container[ShapeT, np.dtype[np.float64]]: ... @overload - def __abs__(self: container[_ShapeT, np.dtype[np.complex192]], /) -> container[_ShapeT, np.dtype[np.float96]]: ... + def __abs__[ShapeT: _Shape]( + self: container[ShapeT, np.dtype[np.complex192]], / + ) -> container[ShapeT, np.dtype[np.float96]]: ... @overload - def __abs__(self: container[_ShapeT, np.dtype[np.complex256]], /) -> container[_ShapeT, np.dtype[np.float128]]: ... + def __abs__[ShapeT: _Shape]( + self: container[ShapeT, np.dtype[np.complex256]], / + ) -> container[ShapeT, np.dtype[np.float128]]: ... @overload - def __abs__(self: _RealContainerT, /) -> _RealContainerT: ... + def __abs__[ContainerT: _RealContainer](self: ContainerT, /) -> ContainerT: ... # - def __neg__(self: _NumericContainerT, /) -> _NumericContainerT: ... # noqa: PYI019 - def __pos__(self: _NumericContainerT, /) -> _NumericContainerT: ... # noqa: PYI019 - def __invert__(self: _IntegralArrayT, /) -> _IntegralArrayT: ... # noqa: PYI019 + def __neg__[ContainerT: _NumericContainer](self: ContainerT, /) -> ContainerT: ... # noqa: PYI019 + def __pos__[ContainerT: _NumericContainer](self: ContainerT, /) -> ContainerT: ... # noqa: PYI019 + def __invert__[ContainerT: _IntegralContainer](self: ContainerT, /) -> ContainerT: ... # noqa: PYI019 # TODO(jorenham): complete these binary ops @@ -170,40 +173,34 @@ class container(Generic[_ShapeT_co, _DTypeT_co]): # @overload - def __and__( - self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, / - ) -> container[_AnyShape, np.dtype[np.bool]]: ... + def __and__(self: _BoolContainer, other: _ArrayLikeBool_co, /) -> container[_AnyShape, np.dtype[np.bool]]: ... @overload def __and__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ... __rand__ = __and__ @overload - def __iand__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __iand__[ContainerT: _BoolContainer](self: ContainerT, other: _ArrayLikeBool_co, /) -> ContainerT: ... @overload def __iand__(self, other: _ArrayLikeInt_co, /) -> Self: ... # @overload - def __xor__( - self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, / - ) -> container[_AnyShape, np.dtype[np.bool]]: ... + def __xor__(self: _BoolContainer, other: _ArrayLikeBool_co, /) -> container[_AnyShape, np.dtype[np.bool]]: ... @overload def __xor__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ... __rxor__ = __xor__ @overload - def __ixor__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __ixor__[ContainerT: _BoolContainer](self: ContainerT, other: _ArrayLikeBool_co, /) -> ContainerT: ... @overload def __ixor__(self, other: _ArrayLikeInt_co, /) -> Self: ... # @overload - def __or__( - self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, / - ) -> container[_AnyShape, np.dtype[np.bool]]: ... + def __or__(self: _BoolContainer, other: _ArrayLikeBool_co, /) -> container[_AnyShape, np.dtype[np.bool]]: ... @overload def __or__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ... 
__ror__ = __or__ @overload - def __ior__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __ior__[ContainerT: _BoolContainer](self: ContainerT, other: _ArrayLikeBool_co, /) -> ContainerT: ... @overload def __ior__(self, other: _ArrayLikeInt_co, /) -> Self: ... @@ -211,16 +208,18 @@ class container(Generic[_ShapeT_co, _DTypeT_co]): @overload def __array__(self, /, t: None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __array__(self, /, t: _DTypeT) -> np.ndarray[_ShapeT_co, _DTypeT]: ... + def __array__[DTypeT: np.dtype](self, /, t: DTypeT) -> np.ndarray[_ShapeT_co, DTypeT]: ... # @overload def __array_wrap__(self, arg0: npt.ArrayLike, /) -> container[_ShapeT_co, _DTypeT_co]: ... @overload - def __array_wrap__(self, a: np.ndarray[_ShapeT, _DTypeT], c: Any = ..., s: Any = ..., /) -> container[_ShapeT, _DTypeT]: ... + def __array_wrap__[ShapeT: _Shape, DTypeT: np.dtype]( + self, a: np.ndarray[ShapeT, DTypeT], c: Any = ..., s: Any = ..., / + ) -> container[ShapeT, DTypeT]: ... # def copy(self, /) -> Self: ... def tobytes(self, /) -> bytes: ... def byteswap(self, /) -> Self: ... - def astype(self, /, typecode: _DTypeLike[_ScalarT]) -> container[_ShapeT_co, np.dtype[_ScalarT]]: ... + def astype[ScalarT: np.generic](self, /, typecode: _DTypeLike[ScalarT]) -> container[_ShapeT_co, np.dtype[ScalarT]]: ... diff --git a/numpy/lib/_utils_impl.pyi b/numpy/lib/_utils_impl.pyi index e73ba659a31c..87fbc3aa5c4c 100644 --- a/numpy/lib/_utils_impl.pyi +++ b/numpy/lib/_utils_impl.pyi @@ -1,22 +1,18 @@ from _typeshed import SupportsWrite from typing import LiteralString -from typing_extensions import TypeVar import numpy as np __all__ = ["get_include", "info", "show_runtime"] -_ScalarOrArrayT = TypeVar("_ScalarOrArrayT", bound=np.generic | np.ndarray) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) - -### - def get_include() -> LiteralString: ... def show_runtime() -> None: ... def info( object: object = None, maxwidth: int = 76, output: SupportsWrite[str] | None = None, toplevel: str = "numpy" ) -> None: ... -def drop_metadata(dtype: _DTypeT, /) -> _DTypeT: ... +def drop_metadata[DTypeT: np.dtype](dtype: DTypeT, /) -> DTypeT: ... # used internally by `lib._function_base_impl._median` -def _median_nancheck(data: np.ndarray, result: _ScalarOrArrayT, axis: int) -> _ScalarOrArrayT: ... +def _median_nancheck[ScalarOrArrayT: np.generic | np.ndarray]( + data: np.ndarray, result: ScalarOrArrayT, axis: int +) -> ScalarOrArrayT: ... 
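As context for the `type` statements used throughout this series: a PEP 695 alias is evaluated lazily, so its right-hand side may refer to names defined later in the module without quoting, and it can declare its own (bounded) type parameters, which is what lets the paired `TypeAlias`/`TypeVar` declarations be dropped. A minimal sketch under those assumptions; the `_Pair`/`Pair`/`as_pair` names are illustrative, not from this patch or from NumPy:

    # Before: a parametrized alias needs a module-level TypeVar plus TypeAlias.
    from typing import TypeAlias, TypeVar

    import numpy as np

    _ScalarT = TypeVar("_ScalarT", bound=np.generic)
    _Pair: TypeAlias = tuple[_ScalarT, _ScalarT]

    # After (Python 3.12+): the alias owns its bounded parameter, and the
    # right-hand side is evaluated lazily rather than at definition time.
    type Pair[ScalarT: np.generic] = tuple[ScalarT, ScalarT]

    def as_pair[ScalarT: np.generic](x: ScalarT) -> Pair[ScalarT]:
        return (x, x)

Stub files are never executed, so both spellings tolerate forward references there; in runtime modules, only the `type` statement defers evaluation of its right-hand side.
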
diff --git a/numpy/lib/recfunctions.pyi b/numpy/lib/recfunctions.pyi index 33713cf16331..3ba63bdb91dd 100644 --- a/numpy/lib/recfunctions.pyi +++ b/numpy/lib/recfunctions.pyi @@ -1,11 +1,11 @@ from _typeshed import Incomplete from collections.abc import Callable, Iterable, Mapping, Sequence -from typing import Any, Literal, TypeAlias, overload -from typing_extensions import TypeVar +from typing import Any, Literal, overload import numpy as np import numpy.typing as npt -from numpy._typing import _AnyShape, _DTypeLike, _DTypeLikeVoid +from numpy import _CastingKind +from numpy._typing import _AnyShape, _DTypeLike, _DTypeLikeVoid, _Shape from numpy.ma.mrecords import MaskedRecords __all__ = [ @@ -32,26 +32,18 @@ __all__ = [ "unstructured_to_structured", ] -_T = TypeVar("_T") -_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) -_ArrayT = TypeVar("_ArrayT", bound=npt.NDArray[Any]) -_VoidArrayT = TypeVar("_VoidArrayT", bound=npt.NDArray[np.void]) -_NonVoidDTypeT = TypeVar("_NonVoidDTypeT", bound=_NonVoidDType) +type _OneOrMany[T] = T | Iterable[T] +type _BuiltinSequence[T] = tuple[T, ...] | list[T] -_OneOrMany: TypeAlias = _T | Iterable[_T] -_BuiltinSequence: TypeAlias = tuple[_T, ...] | list[_T] +type _NestedNames = tuple[str | _NestedNames, ...] +type _NonVoid = np.bool | np.number | np.character | np.datetime64 | np.timedelta64 | np.object_ +type _NonVoidDType = np.dtype[_NonVoid] | np.dtypes.StringDType -_NestedNames: TypeAlias = tuple[str | _NestedNames, ...] -_NonVoid: TypeAlias = np.bool | np.number | np.character | np.datetime64 | np.timedelta64 | np.object_ -_NonVoidDType: TypeAlias = np.dtype[_NonVoid] | np.dtypes.StringDType - -_JoinType: TypeAlias = Literal["inner", "outer", "leftouter"] +type _JoinType = Literal["inner", "outer", "leftouter"] ### -def recursive_fill_fields(input: npt.NDArray[np.void], output: _VoidArrayT) -> _VoidArrayT: ... +def recursive_fill_fields[VoidArrayT: npt.NDArray[np.void]](input: npt.NDArray[np.void], output: VoidArrayT) -> VoidArrayT: ... # def get_names(adtype: np.dtype[np.void]) -> _NestedNames: ... @@ -59,7 +51,7 @@ def get_names_flat(adtype: np.dtype[np.void]) -> tuple[str, ...]: ... # @overload -def flatten_descr(ndtype: _NonVoidDTypeT) -> tuple[tuple[Literal[""], _NonVoidDTypeT]]: ... +def flatten_descr[NonVoidDTypeT: _NonVoidDType](ndtype: NonVoidDTypeT) -> tuple[tuple[Literal[""], NonVoidDTypeT]]: ... @overload def flatten_descr(ndtype: np.dtype[np.void]) -> tuple[tuple[str, np.dtype]]: ... @@ -72,13 +64,13 @@ def get_fieldstructure( # @overload -def merge_arrays( - seqarrays: Sequence[np.ndarray[_ShapeT, np.dtype]] | np.ndarray[_ShapeT, np.dtype], +def merge_arrays[ShapeT: _Shape]( + seqarrays: Sequence[np.ndarray[ShapeT, np.dtype]] | np.ndarray[ShapeT, np.dtype], fill_value: float = -1, flatten: bool = False, usemask: bool = False, asrecarray: bool = False, -) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... @overload def merge_arrays( seqarrays: Sequence[npt.ArrayLike] | np.void, @@ -90,64 +82,64 @@ def merge_arrays( # @overload -def drop_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def drop_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], drop_names: str | Iterable[str], usemask: bool = True, asrecarray: Literal[False] = False, -) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... 
@overload -def drop_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def drop_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], drop_names: str | Iterable[str], usemask: bool, asrecarray: Literal[True], -) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... @overload -def drop_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def drop_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], drop_names: str | Iterable[str], usemask: bool = True, *, asrecarray: Literal[True], -) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... # @overload -def rename_fields( - base: MaskedRecords[_ShapeT, np.dtype[np.void]], +def rename_fields[ShapeT: _Shape]( + base: MaskedRecords[ShapeT, np.dtype[np.void]], namemapper: Mapping[str, str], -) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... +) -> MaskedRecords[ShapeT, np.dtype[np.void]]: ... @overload -def rename_fields( - base: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], +def rename_fields[ShapeT: _Shape]( + base: np.ma.MaskedArray[ShapeT, np.dtype[np.void]], namemapper: Mapping[str, str], -) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ma.MaskedArray[ShapeT, np.dtype[np.void]]: ... @overload -def rename_fields( - base: np.recarray[_ShapeT, np.dtype[np.void]], +def rename_fields[ShapeT: _Shape]( + base: np.recarray[ShapeT, np.dtype[np.void]], namemapper: Mapping[str, str], -) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... @overload -def rename_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def rename_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], namemapper: Mapping[str, str], -) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... # @overload -def append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None, fill_value: int, usemask: Literal[False], asrecarray: Literal[False] = False, -) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... @overload -def append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None = None, @@ -155,20 +147,20 @@ def append_fields( *, usemask: Literal[False], asrecarray: Literal[False] = False, -) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... @overload -def append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None, fill_value: int, usemask: Literal[False], asrecarray: Literal[True], -) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... 
@overload -def append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None = None, @@ -176,30 +168,30 @@ def append_fields( *, usemask: Literal[False], asrecarray: Literal[True], -) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... @overload -def append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None = None, fill_value: int = -1, usemask: Literal[True] = True, asrecarray: Literal[False] = False, -) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ma.MaskedArray[ShapeT, np.dtype[np.void]]: ... @overload -def append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None, fill_value: int, usemask: Literal[True], asrecarray: Literal[True], -) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... +) -> MaskedRecords[ShapeT, np.dtype[np.void]]: ... @overload -def append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None = None, @@ -207,46 +199,46 @@ def append_fields( usemask: Literal[True] = True, *, asrecarray: Literal[True], -) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... +) -> MaskedRecords[ShapeT, np.dtype[np.void]]: ... # -def rec_drop_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def rec_drop_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], drop_names: str | Iterable[str], -) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... # -def rec_append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def rec_append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None = None, -) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ma.MaskedArray[ShapeT, np.dtype[np.void]]: ... # TODO(jorenham): Stop passing `void` directly once structured dtypes are implemented, # e.g. using a `TypeVar` with constraints. # https://github.com/numpy/numtype/issues/92 @overload -def repack_fields(a: _DTypeT, align: bool = False, recurse: bool = False) -> _DTypeT: ... +def repack_fields[DTypeT: np.dtype](a: DTypeT, align: bool = False, recurse: bool = False) -> DTypeT: ... @overload -def repack_fields(a: _ScalarT, align: bool = False, recurse: bool = False) -> _ScalarT: ... +def repack_fields[ScalarT: np.generic](a: ScalarT, align: bool = False, recurse: bool = False) -> ScalarT: ... @overload -def repack_fields(a: _ArrayT, align: bool = False, recurse: bool = False) -> _ArrayT: ... +def repack_fields[ArrayT: np.ndarray](a: ArrayT, align: bool = False, recurse: bool = False) -> ArrayT: ... 
# TODO(jorenham): Attempt shape-typing (return type has ndim == arr.ndim + 1) @overload -def structured_to_unstructured( +def structured_to_unstructured[ScalarT: np.generic]( arr: npt.NDArray[np.void], - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], copy: bool = False, - casting: np._CastingKind = "unsafe", -) -> npt.NDArray[_ScalarT]: ... + casting: _CastingKind = "unsafe", +) -> npt.NDArray[ScalarT]: ... @overload def structured_to_unstructured( arr: npt.NDArray[np.void], dtype: npt.DTypeLike | None = None, copy: bool = False, - casting: np._CastingKind = "unsafe", + casting: _CastingKind = "unsafe", ) -> npt.NDArray[Any]: ... # @@ -280,29 +272,29 @@ def unstructured_to_structured( ) -> npt.NDArray[np.void]: ... # -def apply_along_fields( - func: Callable[[np.ndarray[_ShapeT, Any]], npt.NDArray[Any]], - arr: np.ndarray[_ShapeT, np.dtype[np.void]], -) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +def apply_along_fields[ShapeT: _Shape]( + func: Callable[[np.ndarray[ShapeT]], np.ndarray], + arr: np.ndarray[ShapeT, np.dtype[np.void]], +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... # def assign_fields_by_name(dst: npt.NDArray[np.void], src: npt.NDArray[np.void], zero_unassigned: bool = True) -> None: ... # -def require_fields( - array: np.ndarray[_ShapeT, np.dtype[np.void]], +def require_fields[ShapeT: _Shape]( + array: np.ndarray[ShapeT, np.dtype[np.void]], required_dtype: _DTypeLikeVoid, -) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... # TODO(jorenham): Attempt shape-typing @overload -def stack_arrays( - arrays: _ArrayT, +def stack_arrays[ArrayT: np.ndarray]( + arrays: ArrayT, defaults: Mapping[str, object] | None = None, usemask: bool = True, asrecarray: bool = False, autoconvert: bool = False, -) -> _ArrayT: ... +) -> ArrayT: ... @overload def stack_arrays( arrays: Sequence[npt.NDArray[Any]], @@ -357,27 +349,27 @@ def stack_arrays( # @overload -def find_duplicates( - a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], +def find_duplicates[ShapeT: _Shape]( + a: np.ma.MaskedArray[ShapeT, np.dtype[np.void]], key: str | None = None, ignoremask: bool = True, return_index: Literal[False] = False, -) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ma.MaskedArray[ShapeT, np.dtype[np.void]]: ... @overload -def find_duplicates( - a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], +def find_duplicates[ShapeT: _Shape]( + a: np.ma.MaskedArray[ShapeT, np.dtype[np.void]], key: str | None, ignoremask: bool, return_index: Literal[True], -) -> tuple[np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], np.ndarray[_ShapeT, np.dtype[np.int_]]]: ... +) -> tuple[np.ma.MaskedArray[ShapeT, np.dtype[np.void]], np.ndarray[ShapeT, np.dtype[np.int_]]]: ... @overload -def find_duplicates( - a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], +def find_duplicates[ShapeT: _Shape]( + a: np.ma.MaskedArray[ShapeT, np.dtype[np.void]], key: str | None = None, ignoremask: bool = True, *, return_index: Literal[True], -) -> tuple[np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], np.ndarray[_ShapeT, np.dtype[np.int_]]]: ... +) -> tuple[np.ma.MaskedArray[ShapeT, np.dtype[np.void]], np.ndarray[ShapeT, np.dtype[np.int_]]]: ... 
 #
 @overload
diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi
index b2b8ada44419..6cbd3b43a07a 100644
--- a/numpy/linalg/_linalg.pyi
+++ b/numpy/linalg/_linalg.pyi
@@ -6,8 +6,6 @@ from typing import (
     Never,
     SupportsIndex,
     SupportsInt,
-    TypeAlias,
-    TypeVar,
     overload,
 )
 
@@ -79,10 +77,7 @@ __all__ = [
     "vecdot",
 ]
 
-_NumberT = TypeVar("_NumberT", bound=np.number)
-_NumericScalarT = TypeVar("_NumericScalarT", bound=np.number | np.timedelta64 | np.object_)
-
-_ModeKind: TypeAlias = L["reduced", "complete", "r", "raw"]
+type _ModeKind = L["reduced", "complete", "r", "raw"]
 
 ###
 
@@ -102,7 +97,7 @@ class QRResult(NamedTuple):
 
 class SlogdetResult(NamedTuple):
     # TODO: `sign` and `logabsdet` are scalars for input 2D arrays and
-    # a `(x.ndim - 2)`` dimensionl arrays otherwise
+    # `(x.ndim - 2)`-dimensional arrays otherwise
     sign: Any
     logabsdet: Any
 
@@ -188,7 +183,7 @@ def outer(x1: _ArrayLike[Never], x2: _ArrayLike[Never], /) -> NDArray[Any]: ...
 @overload
 def outer(x1: _ArrayLikeBool_co, x2: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...
 @overload
-def outer(x1: _ArrayLike[_NumberT], x2: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ...
+def outer[ScalarT: np.number](x1: _ArrayLike[ScalarT], x2: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ...
 @overload
 def outer(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...
 @overload
@@ -351,11 +346,11 @@ def pinv(
 ) -> NDArray[complexfloating]: ...
 
 # TODO: Returns a 2-tuple of scalars for 2D arrays and
-# a 2-tuple of `(a.ndim - 2)`` dimensionl arrays otherwise
+# a 2-tuple of `(a.ndim - 2)`-dimensional arrays otherwise
 def slogdet(a: _ArrayLikeComplex_co) -> SlogdetResult: ...
 
 # TODO: Returns a 2-tuple of scalars for 2D arrays and
-# a 2-tuple of `(a.ndim - 2)`` dimensionl arrays otherwise
+# a 2-tuple of `(a.ndim - 2)`-dimensional arrays otherwise
 def det(a: _ArrayLikeComplex_co) -> Any: ...
 
 @overload
@@ -441,13 +436,13 @@ def vector_norm(
 
 # keep in sync with numpy._core.numeric.tensordot (ignoring `/, *`)
 @overload
-def tensordot(
-    a: _ArrayLike[_NumericScalarT],
-    b: _ArrayLike[_NumericScalarT],
+def tensordot[ScalarT: np.number | np.timedelta64 | np.object_](
+    a: _ArrayLike[ScalarT],
+    b: _ArrayLike[ScalarT],
     /,
     *,
     axes: int | tuple[_ShapeLike, _ShapeLike] = 2,
-) -> NDArray[_NumericScalarT]: ...
+) -> NDArray[ScalarT]: ...
 @overload
 def tensordot(
     a: _ArrayLikeBool_co,
@@ -537,7 +532,7 @@ def cross(
 ) -> NDArray[complexfloating]: ...
 
 @overload
-def matmul(x1: _ArrayLike[_NumberT], x2: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ...
+def matmul[ScalarT: np.number](x1: _ArrayLike[ScalarT], x2: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ...
 @overload
 def matmul(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...
@overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 67d6b3591a4b..a5e8e41b7709 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -4,7 +4,7 @@ import datetime as dt import types from _typeshed import Incomplete -from collections.abc import Callable, Sequence +from collections.abc import Buffer, Callable, Sequence from typing import ( Any, Concatenate, @@ -18,16 +18,15 @@ from typing import ( SupportsFloat, SupportsIndex, SupportsInt, - TypeAlias, Unpack, final, overload, + override, ) -from typing_extensions import Buffer, ParamSpec, TypeIs, TypeVar, override +from typing_extensions import TypeIs, TypeVar import numpy as np from numpy import ( - _AnyShapeT, _HasDType, _HasDTypeWithRealAndImag, _ModeKind, @@ -41,16 +40,11 @@ from numpy import ( amin, bool_, bytes_, - character, complex128, complexfloating, datetime64, dtype, - dtypes, expand_dims, - flexible, - float16, - float32, float64, floating, generic, @@ -66,9 +60,7 @@ from numpy import ( signedinteger, str_, timedelta64, - ufunc, unsignedinteger, - void, ) from numpy._core.fromnumeric import _UFuncKwargs # type-check only from numpy._globals import _NoValueType @@ -292,59 +284,44 @@ __all__ = [ "zeros_like", ] -_ShapeT = TypeVar("_ShapeT", bound=_Shape) -_ShapeOrAnyT = TypeVar("_ShapeOrAnyT", bound=_Shape, default=_AnyShape) _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) -_DTypeT = TypeVar("_DTypeT", bound=dtype) -_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) -_ArrayT = TypeVar("_ArrayT", bound=ndarray[Any, Any]) -_MArrayT = TypeVar("_MArrayT", bound=MaskedArray[Any, Any]) -_ScalarT = TypeVar("_ScalarT", bound=generic) -_ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) -_NumberT = TypeVar("_NumberT", bound=number) -_RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) -_ArangeScalarT = TypeVar("_ArangeScalarT", bound=_ArangeScalar) -_UFuncT_co = TypeVar( - "_UFuncT_co", - # the `| Callable` simplifies self-binding to the ufunc's callable signature - bound=np.ufunc | Callable[..., object], - default=np.ufunc, - covariant=True, -) -_Pss = ParamSpec("_Pss") -_T = TypeVar("_T") +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) +# the additional `Callable[...]` bound simplifies self-binding to the ufunc's callable signature +_UFuncT_co = TypeVar("_UFuncT_co", bound=np.ufunc | Callable[..., object], default=np.ufunc, covariant=True) + +type _RealNumber = np.floating | np.integer -_Ignored: TypeAlias = object +type _Ignored = object # A subset of `MaskedArray` that can be parametrized w.r.t. 
`np.generic` -_MaskedArray: TypeAlias = MaskedArray[_AnyShape, dtype[_ScalarT]] -_Masked1D: TypeAlias = MaskedArray[tuple[int], dtype[_ScalarT]] - -_MaskedArrayUInt_co: TypeAlias = _MaskedArray[unsignedinteger | np.bool] -_MaskedArrayInt_co: TypeAlias = _MaskedArray[integer | np.bool] -_MaskedArrayFloat64_co: TypeAlias = _MaskedArray[floating[_64Bit] | float32 | float16 | integer | np.bool] -_MaskedArrayFloat_co: TypeAlias = _MaskedArray[floating | integer | np.bool] -_MaskedArrayComplex128_co: TypeAlias = _MaskedArray[number[_64Bit] | number[_32Bit] | float16 | integer | np.bool] -_MaskedArrayComplex_co: TypeAlias = _MaskedArray[inexact | integer | np.bool] -_MaskedArrayNumber_co: TypeAlias = _MaskedArray[number | np.bool] -_MaskedArrayTD64_co: TypeAlias = _MaskedArray[timedelta64 | integer | np.bool] - -_ArrayInt_co: TypeAlias = NDArray[integer | bool_] -_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] - -_ConvertibleToInt: TypeAlias = SupportsInt | SupportsIndex | _CharLike_co -_ConvertibleToFloat: TypeAlias = SupportsFloat | SupportsIndex | _CharLike_co -_ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co -_ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None -_ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None -_ArangeScalar: TypeAlias = floating | integer | datetime64 | timedelta64 - -_NoMaskType: TypeAlias = np.bool_[Literal[False]] # type of `np.False_` -_MaskArray: TypeAlias = np.ndarray[_ShapeOrAnyT, np.dtype[np.bool_]] - -_FillValue: TypeAlias = complex | None # int | float | complex | None -_FillValueCallable: TypeAlias = Callable[[np.dtype | ArrayLike], _FillValue] -_DomainCallable: TypeAlias = Callable[..., NDArray[np.bool_]] +type _MaskedArray[ScalarT: np.generic] = MaskedArray[_AnyShape, np.dtype[ScalarT]] +type _Masked1D[ScalarT: np.generic] = MaskedArray[tuple[int], np.dtype[ScalarT]] + +type _MaskedArrayUInt_co = _MaskedArray[np.unsignedinteger | np.bool] +type _MaskedArrayInt_co = _MaskedArray[np.integer | np.bool] +type _MaskedArrayFloat64_co = _MaskedArray[np.floating[_64Bit] | np.float32 | np.float16 | np.integer | np.bool] +type _MaskedArrayFloat_co = _MaskedArray[np.floating | np.integer | np.bool] +type _MaskedArrayComplex128_co = _MaskedArray[np.number[_64Bit] | np.number[_32Bit] | np.float16 | np.integer | np.bool] +type _MaskedArrayComplex_co = _MaskedArray[np.inexact | np.integer | np.bool] +type _MaskedArrayNumber_co = _MaskedArray[np.number | np.bool] +type _MaskedArrayTD64_co = _MaskedArray[np.timedelta64 | np.integer | np.bool] + +type _ArrayInt_co = NDArray[np.integer | np.bool] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] + +type _ConvertibleToInt = SupportsInt | SupportsIndex | _CharLike_co +type _ConvertibleToFloat = SupportsFloat | SupportsIndex | _CharLike_co +type _ConvertibleToComplex = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co +type _ConvertibleToTD64 = dt.timedelta | int | _CharLike_co | np.character | np.number | np.timedelta64 | np.bool | None +type _ConvertibleToDT64 = dt.date | int | _CharLike_co | np.character | np.number | np.datetime64 | np.bool | None +type _ArangeScalar = _RealNumber | np.datetime64 | np.timedelta64 + +type _NoMaskType = np.bool_[Literal[False]] # type of `np.False_` +type _MaskArray[ShapeT: _Shape] = np.ndarray[ShapeT, np.dtype[np.bool]] + +type _FillValue = complex | None # int | 
float | complex | None +type _FillValueCallable = Callable[[np.dtype | ArrayLike], _FillValue] +type _DomainCallable = Callable[..., NDArray[np.bool]] ### @@ -374,13 +351,13 @@ class _MaskedUnaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): # https://github.com/microsoft/pyright/issues/10849 # https://github.com/microsoft/pyright/issues/10899 # https://github.com/microsoft/pyright/issues/11049 - def __call__( - self: _MaskedUnaryOperation[Callable[Concatenate[Any, _Pss], _T]], + def __call__[**Tss, T]( + self: _MaskedUnaryOperation[Callable[Concatenate[Any, Tss], T]], /, a: ArrayLike, - *args: _Pss.args, - **kwargs: _Pss.kwargs, - ) -> _T: ... + *args: Tss.args, + **kwargs: Tss.kwargs, + ) -> T: ... # not generic at runtime class _MaskedBinaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): @@ -390,14 +367,14 @@ class _MaskedBinaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): def __init__(self, /, mbfunc: _UFuncT_co, fillx: _FillValue = 0, filly: _FillValue = 0) -> None: ... # NOTE: See the comment in `_MaskedUnaryOperation.__call__` - def __call__( - self: _MaskedBinaryOperation[Callable[Concatenate[Any, Any, _Pss], _T]], + def __call__[**Tss, T]( + self: _MaskedBinaryOperation[Callable[Concatenate[Any, Any, Tss], T]], /, a: ArrayLike, b: ArrayLike, - *args: _Pss.args, - **kwargs: _Pss.kwargs, - ) -> _T: ... + *args: Tss.args, + **kwargs: Tss.kwargs, + ) -> T: ... # NOTE: We cannot meaningfully annotate the return (d)types of these methods until # the signatures of the corresponding `numpy.ufunc` methods are specified. @@ -421,14 +398,14 @@ class _DomainedBinaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): ) -> None: ... # NOTE: See the comment in `_MaskedUnaryOperation.__call__` - def __call__( - self: _DomainedBinaryOperation[Callable[Concatenate[Any, Any, _Pss], _T]], + def __call__[**Tss, T]( + self: _DomainedBinaryOperation[Callable[Concatenate[Any, Any, Tss], T]], /, a: ArrayLike, b: ArrayLike, - *args: _Pss.args, - **kwargs: _Pss.kwargs, - ) -> _T: ... + *args: Tss.args, + **kwargs: Tss.kwargs, + ) -> T: ... # not generic at runtime class _extrema_operation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): @@ -446,7 +423,7 @@ class _extrema_operation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): # NOTE: This class is only used internally for `maximum` and `minimum`, so we are # able to annotate the `__call__` method specifically for those two functions. @overload - def __call__(self, /, a: _ArrayLike[_ScalarT], b: _ArrayLike[_ScalarT]) -> _MaskedArray[_ScalarT]: ... + def __call__[ScalarT: np.generic](self, /, a: _ArrayLike[ScalarT], b: _ArrayLike[ScalarT]) -> _MaskedArray[ScalarT]: ... @overload def __call__(self, /, a: ArrayLike, b: ArrayLike) -> _MaskedArray[Incomplete]: ... @@ -530,7 +507,7 @@ def maximum_fill_value(obj: object) -> Any: ... # @overload # returns `a.fill_value` if `a` is a `MaskedArray` -def get_fill_value(a: _MaskedArray[_ScalarT]) -> _ScalarT: ... +def get_fill_value[ScalarT: np.generic](a: _MaskedArray[ScalarT]) -> ScalarT: ... @overload # otherwise returns `default_fill_value(a)` def get_fill_value(a: object) -> Any: ... @@ -540,33 +517,36 @@ def set_fill_value(a: MaskedArray, fill_value: _ScalarLike_co) -> None: ... # the return type depends on the *values* of `a` and `b` (which cannot be known # statically), which is why we need to return an awkward `_ | None` @overload -def common_fill_value(a: _MaskedArray[_ScalarT], b: MaskedArray) -> _ScalarT | None: ... 
+def common_fill_value[ScalarT: np.generic](a: _MaskedArray[ScalarT], b: MaskedArray) -> ScalarT | None: ... @overload def common_fill_value(a: object, b: object) -> Any: ... # keep in sync with `fix_invalid`, but return `ndarray` instead of `MaskedArray` @overload -def filled(a: ndarray[_ShapeT, _DTypeT], fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT, _DTypeT]: ... +def filled[ShapeT: _Shape, DTypeT: np.dtype]( + a: ndarray[ShapeT, DTypeT], + fill_value: _ScalarLike_co | None = None, +) -> ndarray[ShapeT, DTypeT]: ... @overload -def filled(a: _ArrayLike[_ScalarT], fill_value: _ScalarLike_co | None = None) -> NDArray[_ScalarT]: ... +def filled[ScalarT: np.generic](a: _ArrayLike[ScalarT], fill_value: _ScalarLike_co | None = None) -> NDArray[ScalarT]: ... @overload def filled(a: ArrayLike, fill_value: _ScalarLike_co | None = None) -> NDArray[Incomplete]: ... # keep in sync with `filled`, but return `MaskedArray` instead of `ndarray` @overload -def fix_invalid( - a: np.ndarray[_ShapeT, _DTypeT], +def fix_invalid[ShapeT: _Shape, DTypeT: np.dtype]( + a: np.ndarray[ShapeT, DTypeT], mask: _ArrayLikeBool_co = nomask, copy: bool = True, fill_value: _ScalarLike_co | None = None, -) -> MaskedArray[_ShapeT, _DTypeT]: ... +) -> MaskedArray[ShapeT, DTypeT]: ... @overload -def fix_invalid( - a: _ArrayLike[_ScalarT], +def fix_invalid[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], mask: _ArrayLikeBool_co = nomask, copy: bool = True, fill_value: _ScalarLike_co | None = None, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload def fix_invalid( a: ArrayLike, @@ -580,9 +560,12 @@ def get_masked_subclass(*arrays: object) -> type[MaskedArray]: ... # @overload -def getdata(a: np.ndarray[_ShapeT, _DTypeT], subok: bool = True) -> np.ndarray[_ShapeT, _DTypeT]: ... +def getdata[ShapeT: _Shape, DTypeT: np.dtype]( + a: np.ndarray[ShapeT, DTypeT], + subok: bool = True, +) -> np.ndarray[ShapeT, DTypeT]: ... @overload -def getdata(a: _ArrayLike[_ScalarT], subok: bool = True) -> NDArray[_ScalarT]: ... +def getdata[ScalarT: np.generic](a: _ArrayLike[ScalarT], subok: bool = True) -> NDArray[ScalarT]: ... @overload def getdata(a: ArrayLike, subok: bool = True) -> NDArray[Incomplete]: ... @@ -592,9 +575,9 @@ get_data = getdata @overload def getmask(a: _ScalarLike_co) -> _NoMaskType: ... @overload -def getmask(a: MaskedArray[_ShapeT, Any]) -> _MaskArray[_ShapeT] | _NoMaskType: ... +def getmask[ShapeT: _Shape](a: MaskedArray[ShapeT, Any]) -> _MaskArray[ShapeT] | _NoMaskType: ... @overload -def getmask(a: ArrayLike) -> _MaskArray | _NoMaskType: ... +def getmask(a: ArrayLike) -> _MaskArray[_AnyShape] | _NoMaskType: ... get_mask = getmask @@ -602,7 +585,7 @@ get_mask = getmask @overload def getmaskarray(arr: _ScalarLike_co) -> _MaskArray[tuple[()]]: ... @overload -def getmaskarray(arr: np.ndarray[_ShapeT, Any]) -> _MaskArray[_ShapeT]: ... +def getmaskarray[ShapeT: _Shape](arr: np.ndarray[ShapeT, Any]) -> _MaskArray[ShapeT]: ... # It's sufficient for `m` to have dtype with type: `type[np.bool_]`, # which isn't necessarily a ndarray. Please open an issue if this causes issues. @@ -623,35 +606,35 @@ def make_mask( dtype: _DTypeLikeBool = ..., ) -> _NoMaskType: ... @overload # m: ndarray, shrink=True (default), dtype: bool-like (default) -def make_mask( - m: np.ndarray[_ShapeT], +def make_mask[ShapeT: _Shape]( + m: np.ndarray[ShapeT], copy: bool = False, shrink: Literal[True] = True, dtype: _DTypeLikeBool = ..., -) -> _MaskArray[_ShapeT] | _NoMaskType: ... 
+) -> _MaskArray[ShapeT] | _NoMaskType: ... @overload # m: ndarray, shrink=False (kwarg), dtype: bool-like (default) -def make_mask( - m: np.ndarray[_ShapeT], +def make_mask[ShapeT: _Shape]( + m: np.ndarray[ShapeT], copy: bool = False, *, shrink: Literal[False], dtype: _DTypeLikeBool = ..., -) -> _MaskArray[_ShapeT]: ... +) -> _MaskArray[ShapeT]: ... @overload # m: ndarray, dtype: void-like -def make_mask( - m: np.ndarray[_ShapeT], +def make_mask[ShapeT: _Shape]( + m: np.ndarray[ShapeT], copy: bool = False, shrink: bool = True, *, dtype: _DTypeLikeVoid, -) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... @overload # m: array-like, shrink=True (default), dtype: bool-like (default) def make_mask( m: ArrayLike, copy: bool = False, shrink: Literal[True] = True, dtype: _DTypeLikeBool = ..., -) -> _MaskArray | _NoMaskType: ... +) -> _MaskArray[_AnyShape] | _NoMaskType: ... @overload # m: array-like, shrink=False (kwarg), dtype: bool-like (default) def make_mask( m: ArrayLike, @@ -659,7 +642,7 @@ def make_mask( *, shrink: Literal[False], dtype: _DTypeLikeBool = ..., -) -> _MaskArray: ... +) -> _MaskArray[_AnyShape]: ... @overload # m: array-like, dtype: void-like def make_mask( m: ArrayLike, @@ -679,11 +662,11 @@ def make_mask( # @overload # known shape, dtype: unstructured (default) -def make_mask_none(newshape: _ShapeT, dtype: np.dtype | type | str | None = None) -> _MaskArray[_ShapeT]: ... +def make_mask_none[ShapeT: _Shape](newshape: ShapeT, dtype: np.dtype | type | str | None = None) -> _MaskArray[ShapeT]: ... @overload # known shape, dtype: structured -def make_mask_none(newshape: _ShapeT, dtype: _VoidDTypeLike) -> np.ndarray[_ShapeT, dtype[np.void]]: ... +def make_mask_none[ShapeT: _Shape](newshape: ShapeT, dtype: _VoidDTypeLike) -> np.ndarray[ShapeT, dtype[np.void]]: ... @overload # unknown shape, dtype: unstructured (default) -def make_mask_none(newshape: _ShapeLike, dtype: np.dtype | type | str | None = None) -> _MaskArray: ... +def make_mask_none(newshape: _ShapeLike, dtype: np.dtype | type | str | None = None) -> _MaskArray[_AnyShape]: ... @overload # unknown shape, dtype: structured def make_mask_none(newshape: _ShapeLike, dtype: _VoidDTypeLike) -> NDArray[np.void]: ... @@ -719,141 +702,206 @@ def mask_or( shrink: Literal[False], ) -> _MaskArray[tuple[()]]: ... @overload # ndarray, ndarray | nomask, shrink=True (default) -def mask_or( - m1: np.ndarray[_ShapeT, np.dtype[_ScalarT]], - m2: np.ndarray[_ShapeT, np.dtype[_ScalarT]] | _NoMaskType | Literal[False], +def mask_or[ShapeT: _Shape, ScalarT: np.generic]( + m1: np.ndarray[ShapeT, np.dtype[ScalarT]], + m2: np.ndarray[ShapeT, np.dtype[ScalarT]] | _NoMaskType | Literal[False], copy: bool = False, shrink: Literal[True] = True, -) -> _MaskArray[_ShapeT] | _NoMaskType: ... +) -> _MaskArray[ShapeT] | _NoMaskType: ... @overload # ndarray, ndarray | nomask, shrink=False (kwarg) -def mask_or( - m1: np.ndarray[_ShapeT, np.dtype[_ScalarT]], - m2: np.ndarray[_ShapeT, np.dtype[_ScalarT]] | _NoMaskType | Literal[False], +def mask_or[ShapeT: _Shape, ScalarT: np.generic]( + m1: np.ndarray[ShapeT, np.dtype[ScalarT]], + m2: np.ndarray[ShapeT, np.dtype[ScalarT]] | _NoMaskType | Literal[False], copy: bool = False, *, shrink: Literal[False], -) -> _MaskArray[_ShapeT]: ... +) -> _MaskArray[ShapeT]: ... 
@overload # ndarray | nomask, ndarray, shrink=True (default) -def mask_or( - m1: np.ndarray[_ShapeT, np.dtype[_ScalarT]] | _NoMaskType | Literal[False], - m2: np.ndarray[_ShapeT, np.dtype[_ScalarT]], +def mask_or[ShapeT: _Shape, ScalarT: np.generic]( + m1: np.ndarray[ShapeT, np.dtype[ScalarT]] | _NoMaskType | Literal[False], + m2: np.ndarray[ShapeT, np.dtype[ScalarT]], copy: bool = False, shrink: Literal[True] = True, -) -> _MaskArray[_ShapeT] | _NoMaskType: ... +) -> _MaskArray[ShapeT] | _NoMaskType: ... @overload # ndarray | nomask, ndarray, shrink=False (kwarg) -def mask_or( - m1: np.ndarray[_ShapeT, np.dtype[_ScalarT]] | _NoMaskType | Literal[False], - m2: np.ndarray[_ShapeT, np.dtype[_ScalarT]], +def mask_or[ShapeT: _Shape, ScalarT: np.generic]( + m1: np.ndarray[ShapeT, np.dtype[ScalarT]] | _NoMaskType | Literal[False], + m2: np.ndarray[ShapeT, np.dtype[ScalarT]], copy: bool = False, *, shrink: Literal[False], -) -> _MaskArray[_ShapeT]: ... +) -> _MaskArray[ShapeT]: ... # @overload -def flatten_mask(mask: np.ndarray[_ShapeT]) -> _MaskArray[_ShapeT]: ... +def flatten_mask[ShapeT: _Shape](mask: np.ndarray[ShapeT]) -> _MaskArray[ShapeT]: ... @overload -def flatten_mask(mask: ArrayLike) -> _MaskArray: ... +def flatten_mask(mask: ArrayLike) -> _MaskArray[_AnyShape]: ... # NOTE: we currently don't know the field types of `void` dtypes, so it's not possible # to know the output dtype of the returned array. @overload -def flatten_structured_array(a: MaskedArray[_ShapeT, np.dtype[np.void]]) -> MaskedArray[_ShapeT]: ... +def flatten_structured_array[ShapeT: _Shape](a: MaskedArray[ShapeT, np.dtype[np.void]]) -> MaskedArray[ShapeT]: ... @overload -def flatten_structured_array(a: np.ndarray[_ShapeT, np.dtype[np.void]]) -> np.ndarray[_ShapeT]: ... +def flatten_structured_array[ShapeT: _Shape](a: np.ndarray[ShapeT, np.dtype[np.void]]) -> np.ndarray[ShapeT]: ... @overload # for some reason this accepts unstructured array-likes, hence this fallback overload def flatten_structured_array(a: ArrayLike) -> np.ndarray: ... # keep in sync with other the `masked_*` functions @overload # known array with known shape and dtype -def masked_invalid(a: ndarray[_ShapeT, _DTypeT], copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +def masked_invalid[ShapeT: _Shape, DTypeT: np.dtype]( + a: ndarray[ShapeT, DTypeT], + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload # array-like of known scalar-type -def masked_invalid(a: _ArrayLike[_ScalarT], copy: bool = True) -> _MaskedArray[_ScalarT]: ... +def masked_invalid[ScalarT: np.generic](a: _ArrayLike[ScalarT], copy: bool = True) -> _MaskedArray[ScalarT]: ... @overload # unknown array-like def masked_invalid(a: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... # keep in sync with other the `masked_*` functions @overload # array-like of known scalar-type -def masked_where( - condition: _ArrayLikeBool_co, a: ndarray[_ShapeT, _DTypeT], copy: bool = True -) -> MaskedArray[_ShapeT, _DTypeT]: ... +def masked_where[ShapeT: _Shape, DTypeT: np.dtype]( + condition: _ArrayLikeBool_co, + a: ndarray[ShapeT, DTypeT], + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload # array-like of known scalar-type -def masked_where(condition: _ArrayLikeBool_co, a: _ArrayLike[_ScalarT], copy: bool = True) -> _MaskedArray[_ScalarT]: ... +def masked_where[ScalarT: np.generic]( + condition: _ArrayLikeBool_co, + a: _ArrayLike[ScalarT], + copy: bool = True, +) -> _MaskedArray[ScalarT]: ... 
@overload # unknown array-like def masked_where(condition: _ArrayLikeBool_co, a: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... # keep in sync with other the `masked_*` functions @overload # known array with known shape and dtype -def masked_greater(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +def masked_greater[ShapeT: _Shape, DTypeT: np.dtype]( + x: ndarray[ShapeT, DTypeT], + value: ArrayLike, + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload # array-like of known scalar-type -def masked_greater(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +def masked_greater[ScalarT: np.generic](x: _ArrayLike[ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[ScalarT]: ... @overload # unknown array-like def masked_greater(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... # keep in sync with other the `masked_*` functions @overload # known array with known shape and dtype -def masked_greater_equal(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +def masked_greater_equal[ShapeT: _Shape, DTypeT: np.dtype]( + x: ndarray[ShapeT, DTypeT], + value: ArrayLike, + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload # array-like of known scalar-type -def masked_greater_equal(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +def masked_greater_equal[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], + value: ArrayLike, + copy: bool = True, +) -> _MaskedArray[ScalarT]: ... @overload # unknown array-like def masked_greater_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... # keep in sync with other the `masked_*` functions @overload # known array with known shape and dtype -def masked_less(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +def masked_less[ShapeT: _Shape, DTypeT: np.dtype]( + x: ndarray[ShapeT, DTypeT], + value: ArrayLike, + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload # array-like of known scalar-type -def masked_less(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +def masked_less[ScalarT: np.generic](x: _ArrayLike[ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[ScalarT]: ... @overload # unknown array-like def masked_less(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... # keep in sync with other the `masked_*` functions @overload # known array with known shape and dtype -def masked_less_equal(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +def masked_less_equal[ShapeT: _Shape, DTypeT: np.dtype]( + x: ndarray[ShapeT, DTypeT], + value: ArrayLike, + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload # array-like of known scalar-type -def masked_less_equal(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +def masked_less_equal[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], + value: ArrayLike, + copy: bool = True, +) -> _MaskedArray[ScalarT]: ... @overload # unknown array-like def masked_less_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... 
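[Note: the masked_greater/masked_greater_equal/masked_less/masked_less_equal stubs above, and the masked_* functions that follow, all repeat one three-rung overload ladder. A minimal, self-contained sketch of that ladder (Python 3.12+; `masked_like` and the `list[ScalarT]` input are hypothetical simplifications, not numpy's actual stubs):

    from typing import Any, overload

    import numpy as np

    @overload  # rung 1: an exact ndarray keeps both its shape and its dtype
    def masked_like[ShapeT: tuple[int, ...], DTypeT: np.dtype](
        x: np.ndarray[ShapeT, DTypeT],
    ) -> np.ndarray[ShapeT, DTypeT]: ...
    @overload  # rung 2: an array-like of known scalars keeps only the scalar type
    def masked_like[ScalarT: np.generic](x: list[ScalarT]) -> np.ndarray[Any, np.dtype[ScalarT]]: ...
    @overload  # rung 3: anything else falls back to an unparametrized result
    def masked_like(x: object) -> np.ndarray[Any, np.dtype[Any]]: ...
    def masked_like(x: object) -> np.ndarray[Any, np.dtype[Any]]:
        # The runtime body is irrelevant here; the ladder above is what a
        # type checker consumes.
        return np.asarray(x)

Because each type parameter is scoped to its own overload (PEP 695) instead of being a module-level TypeVar, every rung documents its own constraints in place.]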
# keep in sync with other the `masked_*` functions @overload # known array with known shape and dtype -def masked_not_equal(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +def masked_not_equal[ShapeT: _Shape, DTypeT: np.dtype]( + x: ndarray[ShapeT, DTypeT], + value: ArrayLike, + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload # array-like of known scalar-type -def masked_not_equal(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +def masked_not_equal[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], + value: ArrayLike, + copy: bool = True, +) -> _MaskedArray[ScalarT]: ... @overload # unknown array-like def masked_not_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... # keep in sync with other the `masked_*` functions @overload # known array with known shape and dtype -def masked_equal(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +def masked_equal[ShapeT: _Shape, DTypeT: np.dtype]( + x: ndarray[ShapeT, DTypeT], + value: ArrayLike, + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload # array-like of known scalar-type -def masked_equal(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +def masked_equal[ScalarT: np.generic](x: _ArrayLike[ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[ScalarT]: ... @overload # unknown array-like def masked_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... # keep in sync with other the `masked_*` functions @overload # known array with known shape and dtype -def masked_inside(x: ndarray[_ShapeT, _DTypeT], v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +def masked_inside[ShapeT: _Shape, DTypeT: np.dtype]( + x: ndarray[ShapeT, DTypeT], + v1: ArrayLike, + v2: ArrayLike, + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload # array-like of known scalar-type -def masked_inside(x: _ArrayLike[_ScalarT], v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +def masked_inside[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], + v1: ArrayLike, + v2: ArrayLike, + copy: bool = True, +) -> _MaskedArray[ScalarT]: ... @overload # unknown array-like def masked_inside(x: ArrayLike, v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... # keep in sync with other the `masked_*` functions @overload # known array with known shape and dtype -def masked_outside(x: ndarray[_ShapeT, _DTypeT], v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +def masked_outside[ShapeT: _Shape, DTypeT: np.dtype]( + x: ndarray[ShapeT, DTypeT], + v1: ArrayLike, + v2: ArrayLike, + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload # array-like of known scalar-type -def masked_outside(x: _ArrayLike[_ScalarT], v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +def masked_outside[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], + v1: ArrayLike, + v2: ArrayLike, + copy: bool = True, +) -> _MaskedArray[ScalarT]: ... @overload # unknown array-like def masked_outside(x: ArrayLike, v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... 
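[Note: how the ladder resolves in practice, as a hedged usage sketch. The inferred types shown in the comments are what a PEP 695-aware checker (e.g. recent mypy or pyright) should report against the overloads above, assuming these stubs:

    import numpy as np

    arr2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] = np.zeros((2, 3))

    # First overload: shape and dtype both survive, i.e.
    # MaskedArray[tuple[int, int], dtype[float64]].
    m1 = np.ma.masked_inside(arr2d, 0.1, 0.9)

    # _ArrayLike[ScalarT] overload: only the scalar type survives, i.e.
    # MaskedArray[_AnyShape, dtype[float64]].
    m2 = np.ma.masked_values([np.float64(1.0), np.float64(2.0)], 2.0)

    # Fallback overload: a plain list of Python ints is an unknown
    # array-like, so the element type degrades to Incomplete.
    m3 = np.ma.masked_equal([[1, 2], [3, 4]], 2)
]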
# only intended for object arrays, so we assume that's how it's always used in practice @overload -def masked_object( - x: np.ndarray[_ShapeT, np.dtype[np.object_]], +def masked_object[ShapeT: _Shape]( + x: np.ndarray[ShapeT, np.dtype[np.object_]], value: object, copy: bool = True, shrink: bool = True, -) -> MaskedArray[_ShapeT, np.dtype[np.object_]]: ... +) -> MaskedArray[ShapeT, np.dtype[np.object_]]: ... @overload def masked_object( x: _ArrayLikeObject_co, @@ -864,23 +912,23 @@ def masked_object( # keep roughly in sync with `filled` @overload -def masked_values( - x: np.ndarray[_ShapeT, _DTypeT], +def masked_values[ShapeT: _Shape, DTypeT: np.dtype]( + x: np.ndarray[ShapeT, DTypeT], value: _ScalarLike_co, rtol: float = 1e-5, atol: float = 1e-8, copy: bool = True, - shrink: bool = True -) -> MaskedArray[_ShapeT, _DTypeT]: ... + shrink: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload -def masked_values( - x: _ArrayLike[_ScalarT], +def masked_values[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], value: _ScalarLike_co, rtol: float = 1e-5, atol: float = 1e-8, copy: bool = True, - shrink: bool = True -) -> _MaskedArray[_ScalarT]: ... + shrink: bool = True, +) -> _MaskedArray[ScalarT]: ... @overload def masked_values( x: ArrayLike, @@ -888,7 +936,7 @@ def masked_values( rtol: float = 1e-5, atol: float = 1e-8, copy: bool = True, - shrink: bool = True + shrink: bool = True, ) -> _MaskedArray[Incomplete]: ... # TODO: Support non-boolean mask dtypes, such as `np.void`. This will require adding an @@ -914,7 +962,7 @@ class MaskedIterator(Generic[_ShapeT_co, _DTypeT_co]): # Similar to `ndarray.__setitem__` but without the `void` case. @overload # flexible | object_ | bool def __setitem__( - self: MaskedIterator[Any, dtype[flexible | object_ | np.bool] | dtypes.StringDType], + self: MaskedIterator[Any, dtype[np.flexible | object_ | np.bool] | np.dtypes.StringDType], index: _ToIndices, value: object, /, @@ -958,15 +1006,15 @@ class MaskedIterator(Generic[_ShapeT_co, _DTypeT_co]): def __setitem__(self, index: _ToIndices, value: ArrayLike, /) -> None: ... # TODO: Returns `mvoid[(), _DTypeT_co]` for masks with `np.void` dtype. - def __next__(self: MaskedIterator[Any, np.dtype[_ScalarT]]) -> _ScalarT: ... + def __next__[ScalarT: np.generic](self: MaskedIterator[Any, np.dtype[ScalarT]]) -> ScalarT: ... class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): __array_priority__: Final[Literal[15]] = 15 @overload - def __new__( + def __new__[ScalarT: np.generic]( cls, - data: _ArrayLike[_ScalarT], + data: _ArrayLike[ScalarT], mask: _ArrayLikeBool_co = nomask, dtype: None = None, copy: bool = False, @@ -977,13 +1025,13 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): hard_mask: bool | None = None, shrink: bool = True, order: _OrderKACF | None = None, - ) -> _MaskedArray[_ScalarT]: ... + ) -> _MaskedArray[ScalarT]: ... @overload - def __new__( + def __new__[ScalarT: np.generic]( cls, data: object, mask: _ArrayLikeBool_co, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], copy: bool = False, subok: bool = True, ndmin: int = 0, @@ -992,14 +1040,14 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): hard_mask: bool | None = None, shrink: bool = True, order: _OrderKACF | None = None, - ) -> _MaskedArray[_ScalarT]: ... + ) -> _MaskedArray[ScalarT]: ... 
@overload - def __new__( + def __new__[ScalarT: np.generic]( cls, data: object, mask: _ArrayLikeBool_co = nomask, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], copy: bool = False, subok: bool = True, ndmin: int = 0, @@ -1008,7 +1056,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): hard_mask: bool | None = None, shrink: bool = True, order: _OrderKACF | None = None, - ) -> _MaskedArray[_ScalarT]: ... + ) -> _MaskedArray[ScalarT]: ... @overload def __new__( cls, @@ -1025,44 +1073,56 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): order: _OrderKACF | None = None, ) -> _MaskedArray[Any]: ... - def __array_wrap__( + def __array_wrap__[ShapeT: _Shape, DTypeT: np.dtype]( self, - obj: ndarray[_ShapeT, _DTypeT], - context: tuple[ufunc, tuple[Any, ...], int] | None = None, + obj: ndarray[ShapeT, DTypeT], + context: tuple[np.ufunc, tuple[Any, ...], int] | None = None, return_scalar: bool = False, - ) -> MaskedArray[_ShapeT, _DTypeT]: ... + ) -> MaskedArray[ShapeT, DTypeT]: ... @overload # type: ignore[override] # () def view(self, /, dtype: None = None, type: None = None, fill_value: _ScalarLike_co | None = None) -> Self: ... @overload # (dtype: DTypeT) - def view( + def view[DTypeT: np.dtype]( self, /, - dtype: _DTypeT | _HasDType[_DTypeT], + dtype: DTypeT | _HasDType[DTypeT], type: None = None, - fill_value: _ScalarLike_co | None = None - ) -> MaskedArray[_ShapeT_co, _DTypeT]: ... + fill_value: _ScalarLike_co | None = None, + ) -> MaskedArray[_ShapeT_co, DTypeT]: ... @overload # (dtype: dtype[ScalarT]) - def view( + def view[ScalarT: np.generic]( self, /, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], type: None = None, - fill_value: _ScalarLike_co | None = None - ) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... + fill_value: _ScalarLike_co | None = None, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... @overload # ([dtype: _, ]*, type: ArrayT) - def view( + def view[ArrayT: np.ndarray]( self, /, dtype: DTypeLike | None = None, *, - type: type[_ArrayT], - fill_value: _ScalarLike_co | None = None - ) -> _ArrayT: ... + type: type[ArrayT], + fill_value: _ScalarLike_co | None = None, + ) -> ArrayT: ... @overload # (dtype: _, type: ArrayT) - def view(self, /, dtype: DTypeLike | None, type: type[_ArrayT], fill_value: _ScalarLike_co | None = None) -> _ArrayT: ... + def view[ArrayT: np.ndarray]( + self, + /, + dtype: DTypeLike | None, + type: type[ArrayT], + fill_value: _ScalarLike_co | None = None, + ) -> ArrayT: ... @overload # (dtype: ArrayT, /) - def view(self, /, dtype: type[_ArrayT], type: None = None, fill_value: _ScalarLike_co | None = None) -> _ArrayT: ... + def view[ArrayT: np.ndarray]( + self, + /, + dtype: type[ArrayT], + type: None = None, + fill_value: _ScalarLike_co | None = None, + ) -> ArrayT: ... @overload # (dtype: ?) def view( self, @@ -1071,8 +1131,8 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # overlaps with previous overloads. dtype: _VoidDTypeLike | str | None, type: None = None, - fill_value: _ScalarLike_co | None = None - ) -> MaskedArray[_ShapeT_co, dtype]: ... + fill_value: _ScalarLike_co | None = None, + ) -> MaskedArray[_ShapeT_co, np.dtype]: ... # Keep in sync with `ndarray.__getitem__` @overload @@ -1082,14 +1142,14 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __getitem__(self, key: _ToIndices, /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... @overload - def __getitem__(self: _MaskedArray[void], indx: str, /) -> MaskedArray[_ShapeT_co, dtype]: ... 
+ def __getitem__(self: _MaskedArray[np.void], indx: str, /) -> MaskedArray[_ShapeT_co]: ... @overload - def __getitem__(self: _MaskedArray[void], indx: list[str], /) -> MaskedArray[_ShapeT_co, dtype[void]]: ... + def __getitem__(self: _MaskedArray[np.void], indx: list[str], /) -> MaskedArray[_ShapeT_co, dtype[np.void]]: ... @property def shape(self) -> _ShapeT_co: ... - @shape.setter - def shape(self: MaskedArray[_ShapeT, Any], shape: _ShapeT, /) -> None: ... + @shape.setter # type: ignore[override] + def shape[ShapeT: _Shape](self: MaskedArray[ShapeT, Any], shape: ShapeT, /) -> None: ... def __setmask__(self, mask: _ArrayLikeBool_co, copy: bool = False) -> None: ... @property @@ -1123,11 +1183,11 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def flat(self, value: ArrayLike, /) -> None: ... @property - def fill_value(self: _MaskedArray[_ScalarT]) -> _ScalarT: ... + def fill_value[ScalarT: np.generic](self: _MaskedArray[ScalarT]) -> ScalarT: ... @fill_value.setter def fill_value(self, value: _ScalarLike_co | None = None, /) -> None: ... - def get_fill_value(self: _MaskedArray[_ScalarT]) -> _ScalarT: ... + def get_fill_value[ScalarT: np.generic](self: _MaskedArray[ScalarT]) -> ScalarT: ... def set_fill_value(self, /, value: _ScalarLike_co | None = None) -> None: ... def filled(self, /, fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @@ -1135,33 +1195,33 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # keep roughly in sync with `ma.core.compress`, but swap the first two arguments @overload # type: ignore[override] - def compress( + def compress[ArrayT: np.ndarray]( self, condition: _ArrayLikeBool_co, axis: _ShapeLike | None, - out: _ArrayT - ) -> _ArrayT: ... + out: ArrayT, + ) -> ArrayT: ... @overload - def compress( + def compress[ArrayT: np.ndarray]( self, condition: _ArrayLikeBool_co, axis: _ShapeLike | None = None, *, - out: _ArrayT - ) -> _ArrayT: ... + out: ArrayT, + ) -> ArrayT: ... @overload def compress( self, condition: _ArrayLikeBool_co, axis: None = None, - out: None = None + out: None = None, ) -> MaskedArray[tuple[int], _DTypeT_co]: ... @overload def compress( self, condition: _ArrayLikeBool_co, axis: _ShapeLike | None = None, - out: None = None + out: None = None, ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... # TODO: How to deal with the non-commutative nature of `==` and `!=`? @@ -1176,13 +1236,17 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__add__` @overload # type: ignore[override] - def __add__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + def __add__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __add__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... + def __add__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload def __add__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... @overload - def __add__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... + def __add__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... @overload def __add__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... 
@overload @@ -1213,10 +1277,10 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __add__(self: _MaskedArray[str_], other: _ArrayLikeStr_co, /) -> _MaskedArray[str_]: ... @overload def __add__( - self: MaskedArray[Any, dtypes.StringDType], + self: MaskedArray[Any, np.dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, /, - ) -> MaskedArray[_AnyShape, dtypes.StringDType]: ... + ) -> MaskedArray[_AnyShape, np.dtypes.StringDType]: ... @overload def __add__(self: _MaskedArray[object_], other: Any, /) -> Any: ... @overload @@ -1224,13 +1288,17 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__radd__` @overload # type: ignore[override] # signature equivalent to __add__ - def __radd__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + def __radd__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __radd__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... + def __radd__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload def __radd__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... @overload - def __radd__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... + def __radd__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... @overload def __radd__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1261,10 +1329,10 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __radd__(self: _MaskedArray[str_], other: _ArrayLikeStr_co, /) -> _MaskedArray[str_]: ... @overload def __radd__( - self: MaskedArray[Any, dtypes.StringDType], + self: MaskedArray[Any, np.dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, /, - ) -> MaskedArray[_AnyShape, dtypes.StringDType]: ... + ) -> MaskedArray[_AnyShape, np.dtypes.StringDType]: ... @overload def __radd__(self: _MaskedArray[object_], other: Any, /) -> Any: ... @overload @@ -1272,13 +1340,17 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__sub__` @overload # type: ignore[override] - def __sub__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + def __sub__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __sub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... + def __sub__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload def __sub__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __sub__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... + def __sub__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... @overload def __sub__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... 
@overload @@ -1310,13 +1382,17 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__rsub__` @overload # type: ignore[override] - def __rsub__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + def __rsub__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __rsub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... + def __rsub__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload def __rsub__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __rsub__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... + def __rsub__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... @overload def __rsub__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1348,13 +1424,17 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__mul__` @overload # type: ignore[override] - def __mul__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + def __mul__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __mul__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... + def __mul__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload def __mul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... @overload - def __mul__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... + def __mul__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... @overload def __mul__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1379,7 +1459,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __mul__(self: _MaskedArrayFloat_co, other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... @overload def __mul__( - self: MaskedArray[Any, dtype[character] | dtypes.StringDType], + self: MaskedArray[Any, dtype[np.character] | np.dtypes.StringDType], other: _ArrayLikeInt, /, ) -> MaskedArray[tuple[Any, ...], _DTypeT_co]: ... @@ -1390,13 +1470,17 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__rmul__` @overload # type: ignore[override] # signature equivalent to __mul__ - def __rmul__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + def __rmul__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __rmul__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... + def __rmul__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload def __rmul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... 
@overload - def __rmul__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... + def __rmul__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... @overload def __rmul__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1421,7 +1505,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __rmul__(self: _MaskedArrayFloat_co, other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... @overload def __rmul__( - self: MaskedArray[Any, dtype[character] | dtypes.StringDType], + self: MaskedArray[Any, dtype[np.character] | np.dtypes.StringDType], other: _ArrayLikeInt, /, ) -> MaskedArray[tuple[Any, ...], _DTypeT_co]: ... @@ -1494,13 +1578,21 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__floordiv__` @overload # type: ignore[override] - def __floordiv__(self: _MaskedArray[_RealNumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_RealNumberT]]: ... + def __floordiv__[ScalarT: _RealNumber]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __floordiv__(self: _MaskedArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_RealNumberT]: ... + def __floordiv__[ScalarT: _RealNumber](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload def __floordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... @overload - def __floordiv__(self: _MaskedArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> _MaskedArray[_RealNumberT]: ... + def __floordiv__[ScalarT: _RealNumber]( + self: _MaskedArray[np.bool], + other: _ArrayLike[ScalarT], + /, + ) -> _MaskedArray[ScalarT]: ... @overload def __floordiv__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1524,13 +1616,25 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__rfloordiv__` @overload # type: ignore[override] - def __rfloordiv__(self: _MaskedArray[_RealNumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_RealNumberT]]: ... + def __rfloordiv__[ScalarT: _RealNumber]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __rfloordiv__(self: _MaskedArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_RealNumberT]: ... + def __rfloordiv__[ScalarT: _RealNumber]( + self: _MaskedArray[ScalarT], + other: _ArrayLikeBool_co, + /, + ) -> _MaskedArray[ScalarT]: ... @overload def __rfloordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... @overload - def __rfloordiv__(self: _MaskedArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> _MaskedArray[_RealNumberT]: ... + def __rfloordiv__[ScalarT: _RealNumber]( + self: _MaskedArray[np.bool], + other: _ArrayLike[ScalarT], + /, + ) -> _MaskedArray[ScalarT]: ... @overload def __rfloordiv__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1552,13 +1656,17 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__pow__` (minus the `mod` parameter) @overload # type: ignore[override] - def __pow__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... 
+ def __pow__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __pow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... + def __pow__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload def __pow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... @overload - def __pow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... + def __pow__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... @overload def __pow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1584,13 +1692,17 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__rpow__` (minus the `mod` parameter) @overload # type: ignore[override] - def __rpow__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + def __rpow__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __rpow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... + def __rpow__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... @overload - def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... + def __rpow__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... @overload def __rpow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1616,13 +1728,25 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # @property # type: ignore[misc] - def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... # type: ignore[override] - def get_imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... + def imag[ScalarT: np.generic]( # type: ignore[override] + self: _HasDTypeWithRealAndImag[object, ScalarT], + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + def get_imag[ScalarT: np.generic]( + self: _HasDTypeWithRealAndImag[object, ScalarT], + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... # @property # type: ignore[misc] - def real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... # type: ignore[override] - def get_real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... + def real[ScalarT: np.generic]( # type: ignore[override] + self: _HasDTypeWithRealAndImag[ScalarT, object], + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + def get_real[ScalarT: np.generic]( + self: _HasDTypeWithRealAndImag[ScalarT, object], + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... # keep in sync with `np.ma.count` @overload @@ -1639,7 +1763,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload # (None) def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: bool | None = None) -> Self: ... 
@overload # (empty_sequence) - def reshape( # mypy false positive + def reshape( self, shape: Sequence[Never], /, @@ -1648,14 +1772,14 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): copy: bool | None = None, ) -> MaskedArray[tuple[()], _DTypeT_co]: ... @overload # (() | (int) | (int, int) | ....) # up to 8-d - def reshape( + def reshape[ShapeT: _Shape]( self, - shape: _AnyShapeT, + shape: ShapeT, /, *, order: _OrderACF = "C", copy: bool | None = None, - ) -> MaskedArray[_AnyShapeT, _DTypeT_co]: ... + ) -> MaskedArray[ShapeT, _DTypeT_co]: ... @overload # (index) def reshape( self, @@ -1753,20 +1877,20 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> bool_ | _MaskedArray[bool_]: ... @overload - def all( + def all[ArrayT: np.ndarray]( self, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def all( + def all[ArrayT: np.ndarray]( self, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... # Keep in sync with `ma.core.any` @overload # type: ignore[override] @@ -1799,20 +1923,20 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> bool_ | _MaskedArray[bool_]: ... @overload - def any( + def any[ArrayT: np.ndarray]( self, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def any( + def any[ArrayT: np.ndarray]( self, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... # Keep in sync with `ndarray.trace` and `ma.core.trace` @overload @@ -1825,30 +1949,30 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): out: None = None, ) -> Any: ... @overload - def trace( + def trace[ArrayT: np.ndarray]( self, # >= 2D MaskedArray offset: SupportsIndex = 0, axis1: SupportsIndex = 0, axis2: SupportsIndex = 1, dtype: DTypeLike | None = None, *, - out: _ArrayT, - ) -> _ArrayT: ... + out: ArrayT, + ) -> ArrayT: ... @overload - def trace( + def trace[ArrayT: np.ndarray]( self, # >= 2D MaskedArray offset: SupportsIndex, axis1: SupportsIndex, axis2: SupportsIndex, dtype: DTypeLike | None, - out: _ArrayT, - ) -> _ArrayT: ... + out: ArrayT, + ) -> ArrayT: ... # This differs from `ndarray.dot`, in that 1D dot 1D returns a 0D array. @overload def dot(self, b: ArrayLike, out: None = None, strict: bool = False) -> _MaskedArray[Any]: ... @overload - def dot(self, b: ArrayLike, out: _ArrayT, strict: bool = False) -> _ArrayT: ... + def dot[ArrayT: np.ndarray](self, b: ArrayLike, out: ArrayT, strict: bool = False) -> ArrayT: ... # Keep in sync with `ma.core.sum` @overload # type: ignore[override] @@ -1861,32 +1985,39 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload - def sum( + def sum[ArrayT: np.ndarray]( self, /, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def sum( + def sum[ArrayT: np.ndarray]( self, /, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... 
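[Note: the paired `out=` overloads in `sum` above recur throughout the reductions below (cumsum, prod, mean, var, std, ...): one `Any`-returning overload for `out=None`, one that accepts `out` positionally, and a keyword-only twin so `out=` can follow defaulted parameters, with both returning exactly the array type handed in. A hedged stand-in (`reduce_sum` is hypothetical, not a numpy API):

    from typing import Any, overload

    import numpy as np

    @overload
    def reduce_sum(a: np.ndarray, axis: int | None = None, out: None = None) -> Any: ...
    @overload
    def reduce_sum[ArrayT: np.ndarray](a: np.ndarray, axis: int | None, out: ArrayT) -> ArrayT: ...
    @overload
    def reduce_sum[ArrayT: np.ndarray](a: np.ndarray, axis: int | None = None, *, out: ArrayT) -> ArrayT: ...
    def reduce_sum(a: np.ndarray, axis: int | None = None, out: np.ndarray | None = None) -> Any:
        # Delegate to numpy; the overloads exist purely for the checker.
        return np.sum(a, axis=axis, out=out)

Returning `ArrayT` rather than re-deriving a dtype is what lets `reduce_sum(a, 0, out=buf)` come back typed as whatever ndarray subclass `buf` is.]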
# Keep in sync with `ndarray.cumsum` and `ma.core.cumsum` @overload # out: None (default) - def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> _MaskedArray[Any]: ... + def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> MaskedArray: ... @overload # out: ndarray - def cumsum(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + def cumsum[ArrayT: np.ndarray](self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ... @overload - def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + def cumsum[ArrayT: np.ndarray]( + self, + /, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ) -> ArrayT: ... # Keep in sync with `ma.core.prod` @overload # type: ignore[override] @@ -1899,34 +2030,41 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload - def prod( + def prod[ArrayT: np.ndarray]( self, /, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def prod( + def prod[ArrayT: np.ndarray]( self, /, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... product = prod # Keep in sync with `ndarray.cumprod` and `ma.core.cumprod` @overload # out: None (default) - def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> _MaskedArray[Any]: ... + def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> MaskedArray: ... @overload # out: ndarray - def cumprod(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + def cumprod[ArrayT: np.ndarray](self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ... @overload - def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + def cumprod[ArrayT: np.ndarray]( + self, + /, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ) -> ArrayT: ... # Keep in sync with `ma.core.mean` @overload # type: ignore[override] @@ -1938,24 +2076,24 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload - def mean( + def mean[ArrayT: np.ndarray]( self, /, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def mean( + def mean[ArrayT: np.ndarray]( self, /, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... # keep roughly in sync with `ma.core.anom` @overload @@ -1977,26 +2115,26 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): mean: _ArrayLikeNumber_co | _NoValueType = ..., ) -> Any: ... 
@overload - def var( + def var[ArrayT: np.ndarray]( self, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def var( + def var[ArrayT: np.ndarray]( self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... # keep in sync with `var` and `ma.core.std` @overload # type: ignore[override] @@ -2010,34 +2148,34 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): mean: _ArrayLikeNumber_co | _NoValueType = ..., ) -> Any: ... @overload - def std( + def std[ArrayT: np.ndarray]( self, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def std( + def std[ArrayT: np.ndarray]( self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... # Keep in sync with `ndarray.round` @overload # out=None (default) def round(self, /, decimals: SupportsIndex = 0, out: None = None) -> Self: ... @overload # out=ndarray - def round(self, /, decimals: SupportsIndex, out: _ArrayT) -> _ArrayT: ... + def round[ArrayT: np.ndarray](self, /, decimals: SupportsIndex, out: ArrayT) -> ArrayT: ... @overload - def round(self, /, decimals: SupportsIndex = 0, *, out: _ArrayT) -> _ArrayT: ... + def round[ArrayT: np.ndarray](self, /, decimals: SupportsIndex = 0, *, out: ArrayT) -> ArrayT: ... def argsort( # type: ignore[override] self, @@ -2070,23 +2208,23 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload - def argmin( + def argmin[ArrayT: np.ndarray]( self, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def argmin( + def argmin[ArrayT: np.ndarray]( self, axis: SupportsIndex | None, fill_value: _ScalarLike_co | None, - out: _ArrayT, + out: ArrayT, *, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... # Keep in-sync with np.ma.argmax @overload # type: ignore[override] @@ -2108,23 +2246,23 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload - def argmax( + def argmax[ArrayT: np.ndarray]( self, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def argmax( + def argmax[ArrayT: np.ndarray]( self, axis: SupportsIndex | None, fill_value: _ScalarLike_co | None, - out: _ArrayT, + out: ArrayT, *, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... 
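[Note: the sort/min/max/ptp/take block that follows leans on one more idiom worth isolating: a method-scoped type parameter bound through an explicit `self` annotation, so a full reduction over MaskedArray[..., dtype[ScalarT]] returns the scalar itself while axis-wise reductions stay arrays. A toy, hedged stand-in (`Arr` is hypothetical, not the real class hierarchy):

    from typing import overload

    import numpy as np

    class Arr[DTypeT: np.dtype]:
        @overload  # full reduction: unwrap the dtype down to its scalar
        def min[ScalarT: np.generic](self: "Arr[np.dtype[ScalarT]]", axis: None = None) -> ScalarT: ...
        @overload  # axis-wise reduction: stay an Arr of the same dtype
        def min(self, axis: int) -> "Arr[DTypeT]": ...
        def min(self, axis: int | None = None) -> object:
            raise NotImplementedError  # declaration-only sketch

    x: Arr[np.dtype[np.float64]] = Arr()
    # x.min() is inferred as np.float64; x.min(axis=0) as Arr[np.dtype[np.float64]].
]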
# def sort( # type: ignore[override] @@ -2140,13 +2278,13 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # @overload # type: ignore[override] - def min( - self: _MaskedArray[_ScalarT], + def min[ScalarT: np.generic]( + self: _MaskedArray[ScalarT], axis: None = None, out: None = None, fill_value: _ScalarLike_co | None = None, keepdims: Literal[False] | _NoValueType = ..., - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload def min( self, @@ -2156,32 +2294,32 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ... ) -> Any: ... @overload - def min( + def min[ArrayT: np.ndarray]( self, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def min( + def min[ArrayT: np.ndarray]( self, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... # @overload # type: ignore[override] - def max( - self: _MaskedArray[_ScalarT], + def max[ScalarT: np.generic]( + self: _MaskedArray[ScalarT], axis: None = None, out: None = None, fill_value: _ScalarLike_co | None = None, keepdims: Literal[False] | _NoValueType = ..., - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload def max( self, @@ -2191,32 +2329,32 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ... ) -> Any: ... @overload - def max( + def max[ArrayT: np.ndarray]( self, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def max( + def max[ArrayT: np.ndarray]( self, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... # @overload - def ptp( - self: _MaskedArray[_ScalarT], + def ptp[ScalarT: np.generic]( + self: _MaskedArray[ScalarT], axis: None = None, out: None = None, fill_value: _ScalarLike_co | None = None, keepdims: Literal[False] = False, - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload def ptp( self, @@ -2226,22 +2364,22 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool = False, ) -> Any: ... @overload - def ptp( + def ptp[ArrayT: np.ndarray]( self, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool = False, - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def ptp( + def ptp[ArrayT: np.ndarray]( self, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool = False, - ) -> _ArrayT: ... + ) -> ArrayT: ... # @overload @@ -2285,38 +2423,38 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in-sync with np.ma.take @overload # type: ignore[override] - def take( - self: _MaskedArray[_ScalarT], + def take[ScalarT: np.generic]( + self: _MaskedArray[ScalarT], indices: _IntLike_co, axis: None = None, out: None = None, mode: _ModeKind = "raise" - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload - def take( - self: _MaskedArray[_ScalarT], + def take[ScalarT: np.generic]( + self: _MaskedArray[ScalarT], indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None, mode: _ModeKind = "raise", - ) -> _MaskedArray[_ScalarT]: ... + ) -> _MaskedArray[ScalarT]: ... 
@overload - def take( + def take[ArrayT: np.ndarray]( self, indices: _ArrayLikeInt_co, axis: SupportsIndex | None, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = "raise", - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def take( + def take[ArrayT: np.ndarray]( self, indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = "raise", - ) -> _ArrayT: ... + ) -> ArrayT: ... # keep in sync with `ndarray.diagonal` @override @@ -2370,19 +2508,23 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # keep in sync with `ndarray.tolist` @override @overload - def tolist(self: MaskedArray[tuple[Never], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None) -> Any: ... + def tolist[T](self: MaskedArray[tuple[Never], dtype[generic[T]]], /, fill_value: _ScalarLike_co | None = None) -> Any: ... @overload - def tolist(self: MaskedArray[tuple[()], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None) -> _T: ... + def tolist[T](self: MaskedArray[tuple[()], dtype[generic[T]]], /, fill_value: _ScalarLike_co | None = None) -> T: ... @overload - def tolist(self: MaskedArray[tuple[int], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None) -> list[_T]: ... + def tolist[T](self: _Masked1D[np.generic[T]], /, fill_value: _ScalarLike_co | None = None) -> list[T]: ... @overload - def tolist( - self: MaskedArray[tuple[int, int], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None - ) -> list[list[_T]]: ... + def tolist[T]( + self: MaskedArray[tuple[int, int], dtype[generic[T]]], + /, + fill_value: _ScalarLike_co | None = None, + ) -> list[list[T]]: ... @overload - def tolist( - self: MaskedArray[tuple[int, int, int], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None - ) -> list[list[list[_T]]]: ... + def tolist[T]( + self: MaskedArray[tuple[int, int, int], dtype[generic[T]]], + /, + fill_value: _ScalarLike_co | None = None, + ) -> list[list[list[T]]]: ... @overload def tolist(self, /, fill_value: _ScalarLike_co | None = None) -> Any: ... @@ -2398,7 +2540,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @property def dtype(self) -> _DTypeT_co: ... @dtype.setter - def dtype(self: MaskedArray[_AnyShape, _DTypeT], dtype: _DTypeT, /) -> None: ... + def dtype[DTypeT: np.dtype](self: MaskedArray[_AnyShape, DTypeT], dtype: DTypeT, /) -> None: ... class mvoid(MaskedArray[_ShapeT_co, _DTypeT_co]): def __new__( @@ -2447,12 +2589,12 @@ class MaskedConstant(MaskedArray[tuple[()], dtype[float64]]): masked: Final[MaskedConstant] = ... masked_singleton: Final[MaskedConstant] = ... -masked_array: TypeAlias = MaskedArray +type masked_array = MaskedArray # keep in sync with `MaskedArray.__new__` @overload -def array( - data: _ArrayLike[_ScalarT], +def array[ScalarT: np.generic]( + data: _ArrayLike[ScalarT], dtype: None = None, copy: bool = False, order: _OrderKACF | None = None, @@ -2463,11 +2605,11 @@ def array( shrink: bool = True, subok: bool = True, ndmin: int = 0, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload -def array( +def array[ScalarT: np.generic]( data: object, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], copy: bool = False, order: _OrderKACF | None = None, mask: _ArrayLikeBool_co = nomask, @@ -2477,9 +2619,9 @@ def array( shrink: bool = True, subok: bool = True, ndmin: int = 0, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... 
@overload -def array( +def array[ScalarT: np.generic]( data: object, dtype: DTypeLike | None = None, copy: bool = False, @@ -2491,37 +2633,61 @@ def array( shrink: bool = True, subok: bool = True, ndmin: int = 0, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... # keep in sync with `array` @overload -def asarray(a: _ArrayLike[_ScalarT], dtype: None = None, order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +def asarray[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + dtype: None = None, + order: _OrderKACF | None = None, +) -> _MaskedArray[ScalarT]: ... @overload -def asarray(a: object, dtype: _DTypeLike[_ScalarT], order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +def asarray[ScalarT: np.generic]( + a: object, + dtype: _DTypeLike[ScalarT], + order: _OrderKACF | None = None, +) -> _MaskedArray[ScalarT]: ... @overload -def asarray(a: object, dtype: DTypeLike | None = None, order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +def asarray[ScalarT: np.generic]( + a: object, + dtype: DTypeLike | None = None, + order: _OrderKACF | None = None, +) -> _MaskedArray[ScalarT]: ... # keep in sync with `asarray` (but note the additional first overload) @overload -def asanyarray(a: _MArrayT, dtype: None = None, order: _OrderKACF | None = None) -> _MArrayT: ... +def asanyarray[MArrayT: MaskedArray](a: MArrayT, dtype: None = None, order: _OrderKACF | None = None) -> MArrayT: ... @overload -def asanyarray(a: _ArrayLike[_ScalarT], dtype: None = None, order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +def asanyarray[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + dtype: None = None, + order: _OrderKACF | None = None, +) -> _MaskedArray[ScalarT]: ... @overload -def asanyarray(a: object, dtype: _DTypeLike[_ScalarT], order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +def asanyarray[ScalarT: np.generic]( + a: object, + dtype: _DTypeLike[ScalarT], + order: _OrderKACF | None = None, +) -> _MaskedArray[ScalarT]: ... @overload -def asanyarray(a: object, dtype: DTypeLike | None = None, order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +def asanyarray[ScalarT: np.generic]( + a: object, + dtype: DTypeLike | None = None, + order: _OrderKACF | None = None, +) -> _MaskedArray[ScalarT]: ... # def is_masked(x: object) -> bool: ... @overload -def min( - obj: _ArrayLike[_ScalarT], +def min[ScalarT: np.generic]( + obj: _ArrayLike[ScalarT], axis: None = None, out: None = None, fill_value: _ScalarLike_co | None = None, keepdims: Literal[False] | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload def min( obj: ArrayLike, @@ -2531,31 +2697,31 @@ def min( keepdims: bool | _NoValueType = ... ) -> Any: ... @overload -def min( +def min[ArrayT: np.ndarray]( obj: ArrayLike, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def min( +def min[ArrayT: np.ndarray]( obj: ArrayLike, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def max( - obj: _ArrayLike[_ScalarT], +def max[ScalarT: np.generic]( + obj: _ArrayLike[ScalarT], axis: None = None, out: None = None, fill_value: _ScalarLike_co | None = None, keepdims: Literal[False] | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... 
@overload def max( obj: ArrayLike, @@ -2565,31 +2731,31 @@ def max( keepdims: bool | _NoValueType = ... ) -> Any: ... @overload -def max( +def max[ArrayT: np.ndarray]( obj: ArrayLike, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def max( +def max[ArrayT: np.ndarray]( obj: ArrayLike, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def ptp( - obj: _ArrayLike[_ScalarT], +def ptp[ScalarT: np.generic]( + obj: _ArrayLike[ScalarT], axis: None = None, out: None = None, fill_value: _ScalarLike_co | None = None, keepdims: Literal[False] | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload def ptp( obj: ArrayLike, @@ -2599,22 +2765,22 @@ def ptp( keepdims: bool | _NoValueType = ... ) -> Any: ... @overload -def ptp( +def ptp[ArrayT: np.ndarray]( obj: ArrayLike, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def ptp( +def ptp[ArrayT: np.ndarray]( obj: ArrayLike, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # we cannot meaningfully annotate `frommethod` further, because the callable signature # of the return type fully depends on the *value* of `methodname` and `reversed` in @@ -2625,44 +2791,47 @@ def _frommethod(methodname: str, reversed: bool = False) -> types.FunctionType: # since their use-cases are specific to masks, they only accept `MaskedArray` inputs. # keep in sync with `MaskedArray.harden_mask` -def harden_mask(a: _MArrayT) -> _MArrayT: ... +def harden_mask[MArrayT: MaskedArray](a: MArrayT) -> MArrayT: ... # keep in sync with `MaskedArray.soften_mask` -def soften_mask(a: _MArrayT) -> _MArrayT: ... +def soften_mask[MArrayT: MaskedArray](a: MArrayT) -> MArrayT: ... # keep in sync with `MaskedArray.shrink_mask` -def shrink_mask(a: _MArrayT) -> _MArrayT: ... +def shrink_mask[MArrayT: MaskedArray](a: MArrayT) -> MArrayT: ... # keep in sync with `MaskedArray.ids` def ids(a: ArrayLike) -> tuple[int, int]: ... # keep in sync with `ndarray.nonzero` -def nonzero(a: ArrayLike) -> tuple[ndarray[tuple[int], np.dtype[intp]], ...]: ... +def nonzero(a: ArrayLike) -> tuple[_Array1D[np.intp], ...]: ... # keep first overload in sync with `MaskedArray.ravel` @overload -def ravel(a: np.ndarray[Any, _DTypeT], order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT]: ... +def ravel[DTypeT: np.dtype](a: np.ndarray[Any, DTypeT], order: _OrderKACF = "C") -> MaskedArray[tuple[int], DTypeT]: ... @overload -def ravel(a: _ArrayLike[_ScalarT], order: _OrderKACF = "C") -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... +def ravel[ScalarT: np.generic](a: _ArrayLike[ScalarT], order: _OrderKACF = "C") -> _Masked1D[ScalarT]: ... @overload def ravel(a: ArrayLike, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... # keep roughly in sync with `lib._function_base_impl.copy` @overload -def copy(a: _MArrayT, order: _OrderKACF = "C") -> _MArrayT: ... +def copy[MArrayT: MaskedArray](a: MArrayT, order: _OrderKACF = "C") -> MArrayT: ... @overload -def copy(a: np.ndarray[_ShapeT, _DTypeT], order: _OrderKACF = "C") -> MaskedArray[_ShapeT, _DTypeT]: ... 
+def copy[ShapeT: _Shape, DTypeT: np.dtype]( + a: np.ndarray[ShapeT, DTypeT], + order: _OrderKACF = "C", +) -> MaskedArray[ShapeT, DTypeT]: ... @overload -def copy(a: _ArrayLike[_ScalarT], order: _OrderKACF = "C") -> _MaskedArray[_ScalarT]: ... +def copy[ScalarT: np.generic](a: _ArrayLike[ScalarT], order: _OrderKACF = "C") -> _MaskedArray[ScalarT]: ... @overload def copy(a: ArrayLike, order: _OrderKACF = "C") -> _MaskedArray[Incomplete]: ... # keep in sync with `_core.fromnumeric.diagonal` @overload -def diagonal( - a: _ArrayLike[_ScalarT], +def diagonal[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], offset: SupportsIndex = 0, axis1: SupportsIndex = 0, axis2: SupportsIndex = 1, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def diagonal( a: ArrayLike, @@ -2673,32 +2842,49 @@ def diagonal( # keep in sync with `_core.fromnumeric.repeat` @overload -def repeat(a: _ArrayLike[_ScalarT], repeats: _ArrayLikeInt_co, axis: None = None) -> MaskedArray[tuple[int], dtype[_ScalarT]]: ... +def repeat[ScalarT: np.generic](a: _ArrayLike[ScalarT], repeats: _ArrayLikeInt_co, axis: None = None) -> _Masked1D[ScalarT]: ... @overload -def repeat(a: _ArrayLike[_ScalarT], repeats: _ArrayLikeInt_co, axis: SupportsIndex) -> _MaskedArray[_ScalarT]: ... +def repeat[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + repeats: _ArrayLikeInt_co, + axis: SupportsIndex, +) -> _MaskedArray[ScalarT]: ... @overload -def repeat(a: ArrayLike, repeats: _ArrayLikeInt_co, axis: None = None) -> MaskedArray[tuple[int], dtype[Incomplete]]: ... +def repeat(a: ArrayLike, repeats: _ArrayLikeInt_co, axis: None = None) -> _Masked1D[Incomplete]: ... @overload def repeat(a: ArrayLike, repeats: _ArrayLikeInt_co, axis: SupportsIndex) -> _MaskedArray[Incomplete]: ... # keep in sync with `_core.fromnumeric.swapaxes` @overload -def swapaxes(a: _MArrayT, axis1: SupportsIndex, axis2: SupportsIndex) -> _MArrayT: ... +def swapaxes[MArrayT: MaskedArray](a: MArrayT, axis1: SupportsIndex, axis2: SupportsIndex) -> MArrayT: ... @overload -def swapaxes(a: _ArrayLike[_ScalarT], axis1: SupportsIndex, axis2: SupportsIndex) -> _MaskedArray[_ScalarT]: ... +def swapaxes[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis1: SupportsIndex, + axis2: SupportsIndex, +) -> _MaskedArray[ScalarT]: ... @overload def swapaxes(a: ArrayLike, axis1: SupportsIndex, axis2: SupportsIndex) -> _MaskedArray[Incomplete]: ... # NOTE: The `MaskedArray.anom` definition is specific to `MaskedArray`, so we need # additional overloads to cover the array-like input here. @overload # a: MaskedArray, dtype=None -def anom(a: _MArrayT, axis: SupportsIndex | None = None, dtype: None = None) -> _MArrayT: ... +def anom[MArrayT: MaskedArray](a: MArrayT, axis: SupportsIndex | None = None, dtype: None = None) -> MArrayT: ... @overload # a: array-like, dtype=None -def anom(a: _ArrayLike[_ScalarT], axis: SupportsIndex | None = None, dtype: None = None) -> _MaskedArray[_ScalarT]: ... +def anom[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis: SupportsIndex | None = None, + dtype: None = None, +) -> _MaskedArray[ScalarT]: ... @overload # a: unknown array-like, dtype: dtype-like (positional) -def anom(a: ArrayLike, axis: SupportsIndex | None, dtype: _DTypeLike[_ScalarT]) -> _MaskedArray[_ScalarT]: ... +def anom[ScalarT: np.generic](a: ArrayLike, axis: SupportsIndex | None, dtype: _DTypeLike[ScalarT]) -> _MaskedArray[ScalarT]: ... 
@overload # a: unknown array-like, dtype: dtype-like (keyword) -def anom(a: ArrayLike, axis: SupportsIndex | None = None, *, dtype: _DTypeLike[_ScalarT]) -> _MaskedArray[_ScalarT]: ... +def anom[ScalarT: np.generic]( + a: ArrayLike, + axis: SupportsIndex | None = None, + *, + dtype: _DTypeLike[ScalarT], +) -> _MaskedArray[ScalarT]: ... @overload # a: unknown array-like, dtype: unknown dtype-like (positional) def anom(a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike) -> _MaskedArray[Incomplete]: ... @overload # a: unknown array-like, dtype: unknown dtype-like (keyword) @@ -2715,12 +2901,26 @@ def all(a: ArrayLike, axis: _ShapeLike | None, out: None, keepdims: Literal[True def all(a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, *, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... @overload def all( - a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, keepdims: bool | _NoValueType = ... + a: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., ) -> np.bool | _MaskedArray[np.bool]: ... @overload -def all(a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT, keepdims: bool | _NoValueType = ...) -> _ArrayT: ... +def all[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... @overload -def all(a: ArrayLike, axis: _ShapeLike | None = None, *, out: _ArrayT, keepdims: bool | _NoValueType = ...) -> _ArrayT: ... +def all[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... # Keep in sync with `all` and `MaskedArray.any` @overload @@ -2731,53 +2931,99 @@ def any(a: ArrayLike, axis: _ShapeLike | None, out: None, keepdims: Literal[True def any(a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, *, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... @overload def any( - a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, keepdims: bool | _NoValueType = ... + a: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., ) -> np.bool | _MaskedArray[np.bool]: ... @overload -def any(a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT, keepdims: bool | _NoValueType = ...) -> _ArrayT: ... +def any[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... @overload -def any(a: ArrayLike, axis: _ShapeLike | None = None, *, out: _ArrayT, keepdims: bool | _NoValueType = ...) -> _ArrayT: ... +def any[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: ArrayT, keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... # NOTE: The `MaskedArray.compress` definition uses its `DTypeT_co` type parameter, # which wouldn't work here for array-like inputs, so we need additional overloads. @overload -def compress( - condition: _ArrayLikeBool_co, a: _ArrayLike[_ScalarT], axis: None = None, out: None = None -) -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... +def compress[ScalarT: np.generic]( + condition: _ArrayLikeBool_co, + a: _ArrayLike[ScalarT], + axis: None = None, + out: None = None, +) -> _Masked1D[ScalarT]: ... @overload -def compress( - condition: _ArrayLikeBool_co, a: _ArrayLike[_ScalarT], axis: _ShapeLike | None = None, out: None = None -) -> MaskedArray[_AnyShape, np.dtype[_ScalarT]]: ... 
+def compress[ScalarT: np.generic]( + condition: _ArrayLikeBool_co, + a: _ArrayLike[ScalarT], + axis: _ShapeLike | None = None, + out: None = None, +) -> _MaskedArray[ScalarT]: ... @overload -def compress(condition: _ArrayLikeBool_co, a: ArrayLike, axis: None = None, out: None = None) -> MaskedArray[tuple[int]]: ... +def compress(condition: _ArrayLikeBool_co, a: ArrayLike, axis: None = None, out: None = None) -> _Masked1D[Incomplete]: ... @overload def compress( - condition: _ArrayLikeBool_co, a: ArrayLike, axis: _ShapeLike | None = None, out: None = None + condition: _ArrayLikeBool_co, + a: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, ) -> _MaskedArray[Incomplete]: ... @overload -def compress(condition: _ArrayLikeBool_co, a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... +def compress[ArrayT: np.ndarray](condition: _ArrayLikeBool_co, a: ArrayLike, axis: _ShapeLike | None, out: ArrayT) -> ArrayT: ... @overload -def compress(condition: _ArrayLikeBool_co, a: ArrayLike, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... +def compress[ArrayT: np.ndarray]( + condition: _ArrayLikeBool_co, + a: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: ArrayT, +) -> ArrayT: ... # Keep in sync with `cumprod` and `MaskedArray.cumsum` @overload # out: None (default) def cumsum( - a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None + a: ArrayLike, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: None = None, ) -> _MaskedArray[Incomplete]: ... @overload # out: ndarray (positional) -def cumsum(a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... +def cumsum[ArrayT: np.ndarray](a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ... @overload # out: ndarray (kwarg) -def cumsum(a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... +def cumsum[ArrayT: np.ndarray]( + a: ArrayLike, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, +) -> ArrayT: ... # Keep in sync with `cumsum` and `MaskedArray.cumsum` @overload # out: None (default) def cumprod( - a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None + a: ArrayLike, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: None = None, ) -> _MaskedArray[Incomplete]: ... @overload # out: ndarray (positional) -def cumprod(a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... +def cumprod[ArrayT: np.ndarray](a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ... @overload # out: ndarray (kwarg) -def cumprod(a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... +def cumprod[ArrayT: np.ndarray]( + a: ArrayLike, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, +) -> ArrayT: ... # Keep in sync with `sum`, `prod`, `product`, and `MaskedArray.mean` @overload @@ -2789,22 +3035,22 @@ def mean( keepdims: bool | _NoValueType = ..., ) -> Incomplete: ... @overload -def mean( +def mean[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... 
@overload -def mean( +def mean[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # Keep in sync with `mean`, `prod`, `product`, and `MaskedArray.sum` @overload @@ -2816,22 +3062,22 @@ def sum( keepdims: bool | _NoValueType = ..., ) -> Incomplete: ... @overload -def sum( +def sum[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def sum( +def sum[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # Keep in sync with `product` and `MaskedArray.prod` @overload @@ -2843,22 +3089,22 @@ def prod( keepdims: bool | _NoValueType = ..., ) -> Incomplete: ... @overload -def prod( +def prod[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def prod( +def prod[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # Keep in sync with `prod` and `MaskedArray.prod` @overload @@ -2870,22 +3116,22 @@ def product( keepdims: bool | _NoValueType = ..., ) -> Incomplete: ... @overload -def product( +def product[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def product( +def product[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # Keep in sync with `MaskedArray.trace` and `_core.fromnumeric.trace` @overload @@ -2898,24 +3144,24 @@ def trace( out: None = None, ) -> Incomplete: ... @overload -def trace( +def trace[ArrayT: np.ndarray]( a: ArrayLike, offset: SupportsIndex, axis1: SupportsIndex, axis2: SupportsIndex, dtype: DTypeLike | None, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... @overload -def trace( +def trace[ArrayT: np.ndarray]( a: ArrayLike, offset: SupportsIndex = 0, axis1: SupportsIndex = 0, axis2: SupportsIndex = 1, dtype: DTypeLike | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... # keep in sync with `std` and `MaskedArray.var` @overload @@ -2929,26 +3175,26 @@ def std( mean: _ArrayLikeNumber_co | _NoValueType = ..., ) -> Incomplete: ... @overload -def std( +def std[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def std( +def std[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... 
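
(Reviewer note: `mean`, `sum`, `prod`, `product`, `std`, and `var` above all use the
same three-rung overload ladder. A hedged, runnable sketch of that ladder follows —
`reduce_demo` is an illustrative stand-in, not part of NumPy: one overload covers
`out=None`, then positional and keyword-only `out` overloads bind the PEP 695
parameter so the `out` array's static type is returned.)

    from typing import Any, overload

    import numpy as np

    @overload
    def reduce_demo(a: np.ndarray, axis: int | None = None, out: None = None) -> Any: ...
    @overload
    def reduce_demo[ArrayT: np.ndarray](a: np.ndarray, axis: int | None, out: ArrayT) -> ArrayT: ...
    @overload
    def reduce_demo[ArrayT: np.ndarray](a: np.ndarray, axis: int | None = None, *, out: ArrayT) -> ArrayT: ...
    def reduce_demo(a, axis=None, out=None):
        # Delegates to a real reduction; the overloads above only refine its typing.
        return np.std(a, axis=axis, out=out)
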
# keep in sync with `std` and `MaskedArray.var` @overload @@ -2962,26 +3208,26 @@ def var( mean: _ArrayLikeNumber_co | _NoValueType = ..., ) -> Incomplete: ... @overload -def var( +def var[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def var( +def var[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # (a, b) minimum: _extrema_operation = ... @@ -3017,23 +3263,23 @@ def argmin( keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload -def argmin( +def argmin[ArrayT: np.ndarray]( a: ArrayLike, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def argmin( +def argmin[ArrayT: np.ndarray]( a: ArrayLike, axis: SupportsIndex | None, fill_value: _ScalarLike_co | None, - out: _ArrayT, + out: ArrayT, *, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # keep in sync with `argmin` @overload @@ -3055,40 +3301,40 @@ def argmax( keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload -def argmax( +def argmax[ArrayT: np.ndarray]( a: ArrayLike, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def argmax( +def argmax[ArrayT: np.ndarray]( a: ArrayLike, axis: SupportsIndex | None, fill_value: _ScalarLike_co | None, - out: _ArrayT, + out: ArrayT, *, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def take( - a: _ArrayLike[_ScalarT], +def take[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], indices: _IntLike_co, axis: None = None, out: None = None, - mode: _ModeKind = "raise" -) -> _ScalarT: ... + mode: _ModeKind = "raise", +) -> ScalarT: ... @overload -def take( - a: _ArrayLike[_ScalarT], +def take[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None, mode: _ModeKind = "raise", -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload def take( a: ArrayLike, @@ -3106,28 +3352,29 @@ def take( mode: _ModeKind = "raise", ) -> _MaskedArray[Any]: ... @overload -def take( +def take[ArrayT: np.ndarray]( a: ArrayLike, indices: _ArrayLikeInt_co, axis: SupportsIndex | None, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = "raise", -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def take( +def take[ArrayT: np.ndarray]( a: ArrayLike, indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = "raise", -) -> _ArrayT: ... +) -> ArrayT: ... def power(a, b, third=None): ... def argsort(a, axis=..., kind=None, order=None, endwith=True, fill_value=None, *, stable=None): ... + @overload -def sort( - a: _ArrayT, +def sort[ArrayT: np.ndarray]( + a: ArrayT, axis: SupportsIndex = -1, kind: _SortKind | None = None, order: str | Sequence[str] | None = None, @@ -3135,7 +3382,7 @@ def sort( fill_value: _ScalarLike_co | None = None, *, stable: Literal[False] | None = None, -) -> _ArrayT: ... 
+) -> ArrayT: ... @overload def sort( a: ArrayLike, @@ -3147,10 +3394,12 @@ def sort( *, stable: Literal[False] | None = None, ) -> NDArray[Any]: ... + @overload -def compressed(x: _ArrayLike[_ScalarT_co]) -> _Array1D[_ScalarT_co]: ... +def compressed[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> _Array1D[ScalarT]: ... @overload def compressed(x: ArrayLike) -> _Array1D[Any]: ... + def concatenate(arrays, axis=0): ... def diag(v, k=0): ... def left_shift(a, n): ... @@ -3197,18 +3446,18 @@ def _convert2ma( # keep in sync with `_core.multiarray.arange` @overload # dtype= -def arange( +def arange[ScalarT: _ArangeScalar]( start_or_stop: _ArangeScalar | float, /, stop: _ArangeScalar | float | None = None, step: _ArangeScalar | float | None = 1, *, - dtype: _DTypeLike[_ArangeScalarT], + dtype: _DTypeLike[ScalarT], device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> _Masked1D[_ArangeScalarT]: ... +) -> _Masked1D[ScalarT]: ... @overload # (int-like, int-like?, int-like?) def arange( start_or_stop: _IntLike_co, @@ -3303,8 +3552,8 @@ def arange( # based on `_core.fromnumeric.clip` @overload -def clip( - a: _ScalarT, +def clip[ScalarT: np.generic]( + a: ScalarT, a_min: ArrayLike | _NoValueType | None = ..., a_max: ArrayLike | _NoValueType | None = ..., out: None = None, @@ -3315,10 +3564,10 @@ def clip( hardmask: bool = False, dtype: None = None, **kwargs: Unpack[_UFuncKwargs], -) -> _ScalarT: ... +) -> ScalarT: ... @overload -def clip( - a: NDArray[_ScalarT], +def clip[ScalarT: np.generic]( + a: NDArray[ScalarT], a_min: ArrayLike | _NoValueType | None = ..., a_max: ArrayLike | _NoValueType | None = ..., out: None = None, @@ -3329,13 +3578,13 @@ def clip( hardmask: bool = False, dtype: None = None, **kwargs: Unpack[_UFuncKwargs], -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload -def clip( +def clip[MArrayT: MaskedArray]( a: ArrayLike, a_min: ArrayLike | None, a_max: ArrayLike | None, - out: _MArrayT, + out: MArrayT, *, min: ArrayLike | _NoValueType | None = ..., max: ArrayLike | _NoValueType | None = ..., @@ -3343,21 +3592,21 @@ def clip( hardmask: bool = False, dtype: DTypeLike | None = None, **kwargs: Unpack[_UFuncKwargs], -) -> _MArrayT: ... +) -> MArrayT: ... @overload -def clip( +def clip[MArrayT: MaskedArray]( a: ArrayLike, a_min: ArrayLike | _NoValueType | None = ..., a_max: ArrayLike | _NoValueType | None = ..., *, - out: _MArrayT, + out: MArrayT, min: ArrayLike | _NoValueType | None = ..., max: ArrayLike | _NoValueType | None = ..., fill_value: _FillValue | None = None, hardmask: bool = False, dtype: DTypeLike | None = None, **kwargs: Unpack[_UFuncKwargs], -) -> _MArrayT: ... +) -> MArrayT: ... @overload def clip( a: ArrayLike, @@ -3384,29 +3633,29 @@ def empty( like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[tuple[int], np.dtype[np.float64]]: ... +) -> _Masked1D[np.float64]: ... @overload -def empty( +def empty[DTypeT: np.dtype]( shape: SupportsIndex, - dtype: _DTypeT | _SupportsDType[_DTypeT], + dtype: DTypeT | _SupportsDType[DTypeT], order: _OrderCF = "C", *, device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[tuple[int], _DTypeT]: ... +) -> MaskedArray[tuple[int], DTypeT]: ... 
@overload -def empty( +def empty[ScalarT: np.generic]( shape: SupportsIndex, - dtype: type[_ScalarT], + dtype: type[ScalarT], order: _OrderCF = "C", *, device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... +) -> _Masked1D[ScalarT]: ... @overload def empty( shape: SupportsIndex, @@ -3417,10 +3666,10 @@ def empty( like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[tuple[int]]: ... +) -> _Masked1D[Any]: ... @overload # known shape -def empty( - shape: _AnyShapeT, +def empty[ShapeT: _Shape]( + shape: ShapeT, dtype: None = None, order: _OrderCF = "C", *, @@ -3428,32 +3677,32 @@ def empty( like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[_AnyShapeT, np.dtype[np.float64]]: ... +) -> MaskedArray[ShapeT, np.dtype[np.float64]]: ... @overload -def empty( - shape: _AnyShapeT, - dtype: _DTypeT | _SupportsDType[_DTypeT], +def empty[ShapeT: _Shape, DTypeT: np.dtype]( + shape: ShapeT, + dtype: DTypeT | _SupportsDType[DTypeT], order: _OrderCF = "C", *, device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[_AnyShapeT, _DTypeT]: ... +) -> MaskedArray[ShapeT, DTypeT]: ... @overload -def empty( - shape: _AnyShapeT, - dtype: type[_ScalarT], +def empty[ShapeT: _Shape, ScalarT: np.generic]( + shape: ShapeT, + dtype: type[ScalarT], order: _OrderCF = "C", *, device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[_AnyShapeT, np.dtype[_ScalarT]]: ... +) -> MaskedArray[ShapeT, np.dtype[ScalarT]]: ... @overload -def empty( - shape: _AnyShapeT, +def empty[ShapeT: _Shape]( + shape: ShapeT, dtype: DTypeLike | None = None, order: _OrderCF = "C", *, @@ -3461,9 +3710,9 @@ def empty( like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[_AnyShapeT]: ... +) -> MaskedArray[ShapeT]: ... @overload # unknown shape -def empty( +def empty[ShapeT: _Shape]( shape: _ShapeLike, dtype: None = None, order: _OrderCF = "C", @@ -3474,27 +3723,27 @@ def empty( hardmask: bool = False, ) -> _MaskedArray[np.float64]: ... @overload -def empty( +def empty[DTypeT: np.dtype]( shape: _ShapeLike, - dtype: _DTypeT | _SupportsDType[_DTypeT], + dtype: DTypeT | _SupportsDType[DTypeT], order: _OrderCF = "C", *, device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[_AnyShape, _DTypeT]: ... +) -> MaskedArray[_AnyShape, DTypeT]: ... @overload -def empty( +def empty[ScalarT: np.generic]( shape: _ShapeLike, - dtype: type[_ScalarT], + dtype: type[ScalarT], order: _OrderCF = "C", *, device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... 
@overload def empty( shape: _ShapeLike, @@ -3508,8 +3757,8 @@ def empty( # keep in sync with `_core.multiarray.empty_like` @overload -def empty_like( - a: _MArrayT, +def empty_like[MArrayT: MaskedArray]( + a: MArrayT, /, dtype: None = None, order: _OrderKACF = "K", @@ -3517,10 +3766,10 @@ def empty_like( shape: _ShapeLike | None = None, *, device: Literal["cpu"] | None = None, -) -> _MArrayT: ... +) -> MArrayT: ... @overload -def empty_like( - a: _ArrayLike[_ScalarT], +def empty_like[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], /, dtype: None = None, order: _OrderKACF = "K", @@ -3528,18 +3777,18 @@ def empty_like( shape: _ShapeLike | None = None, *, device: Literal["cpu"] | None = None, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload -def empty_like( +def empty_like[ScalarT: np.generic]( a: Incomplete, /, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = "K", subok: bool = True, shape: _ShapeLike | None = None, *, device: Literal["cpu"] | None = None, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload def empty_like( a: Incomplete, @@ -3571,14 +3820,14 @@ def frombuffer( like: _SupportsArrayFunc | None = None, ) -> _MaskedArray[np.float64]: ... @overload -def frombuffer( +def frombuffer[ScalarT: np.generic]( buffer: Buffer, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], count: SupportsIndex = -1, offset: SupportsIndex = 0, *, like: _SupportsArrayFunc | None = None, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload def frombuffer( buffer: Buffer, @@ -3590,14 +3839,14 @@ def frombuffer( ) -> _MaskedArray[Incomplete]: ... # keep roughly in sync with `_core.numeric.fromfunction` -def fromfunction( - function: Callable[..., np.ndarray[_ShapeT, _DTypeT]], +def fromfunction[ShapeT: _Shape, DTypeT: np.dtype]( + function: Callable[..., np.ndarray[ShapeT, DTypeT]], shape: Sequence[int], *, dtype: DTypeLike | None = float, like: _SupportsArrayFunc | None = None, **kwargs: object, -) -> MaskedArray[_ShapeT, _DTypeT]: ... +) -> MaskedArray[ShapeT, DTypeT]: ... # keep roughly in sync with `_core.numeric.identity` @overload @@ -3610,14 +3859,14 @@ def identity( hardmask: bool = False, ) -> MaskedArray[tuple[int, int], np.dtype[np.float64]]: ... @overload -def identity( +def identity[ScalarT: np.generic]( n: int, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], *, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[tuple[int, int], np.dtype[_ScalarT]]: ... +) -> MaskedArray[tuple[int, int], np.dtype[ScalarT]]: ... @overload def identity( n: int, @@ -3657,23 +3906,23 @@ def indices( hardmask: bool = False, ) -> tuple[_MaskedArray[np.intp], ...]: ... @overload -def indices( +def indices[ScalarT: np.generic]( dimensions: Sequence[int], - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], sparse: Literal[False] = False, *, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload -def indices( +def indices[ScalarT: np.generic]( dimensions: Sequence[int], - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], sparse: Literal[True], *, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> tuple[_MaskedArray[_ScalarT], ...]: ... +) -> tuple[_MaskedArray[ScalarT], ...]: ... 
@overload def indices( dimensions: Sequence[int], @@ -3704,13 +3953,13 @@ def indices( # keep roughly in sync with `_core.fromnumeric.squeeze` @overload -def squeeze( - a: _ArrayLike[_ScalarT], +def squeeze[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], axis: _ShapeLike | None = None, *, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload def squeeze( a: ArrayLike, diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 580dcd486c28..83f3bee761ce 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -1,6 +1,6 @@ from _typeshed import Incomplete from collections.abc import Sequence -from typing import SupportsIndex, TypeAlias, TypeVar, overload +from typing import SupportsIndex, overload import numpy as np from numpy import _CastingKind @@ -66,24 +66,21 @@ __all__ = [ "vstack", ] -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_ScalarT1 = TypeVar("_ScalarT1", bound=np.generic) -_ScalarT2 = TypeVar("_ScalarT2", bound=np.generic) -_MArrayT = TypeVar("_MArrayT", bound=MaskedArray) - -_MArray: TypeAlias = MaskedArray[_AnyShape, np.dtype[_ScalarT]] +type _MArray[ScalarT: np.generic] = MaskedArray[_AnyShape, np.dtype[ScalarT]] ### # keep in sync with `numpy._core.shape_base.atleast_1d` @overload -def atleast_1d(a0: _ArrayLike[_ScalarT], /) -> _MArray[_ScalarT]: ... +def atleast_1d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> _MArray[ScalarT]: ... @overload -def atleast_1d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[_MArray[_ScalarT1], _MArray[_ScalarT2]]: ... +def atleast_1d[ScalarT1: np.generic, ScalarT2: np.generic]( + a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], / +) -> tuple[_MArray[ScalarT1], _MArray[ScalarT2]]: ... @overload -def atleast_1d( - a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT] -) -> tuple[_MArray[_ScalarT], ...]: ... +def atleast_1d[ScalarT: np.generic]( + a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT] +) -> tuple[_MArray[ScalarT], ...]: ... @overload def atleast_1d(a0: ArrayLike, /) -> _MArray[Incomplete]: ... @overload @@ -93,13 +90,15 @@ def atleast_1d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[_MArray # keep in sync with `numpy._core.shape_base.atleast_2d` @overload -def atleast_2d(a0: _ArrayLike[_ScalarT], /) -> _MArray[_ScalarT]: ... +def atleast_2d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> _MArray[ScalarT]: ... @overload -def atleast_2d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[_MArray[_ScalarT1], _MArray[_ScalarT2]]: ... +def atleast_2d[ScalarT1: np.generic, ScalarT2: np.generic]( + a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], / +) -> tuple[_MArray[ScalarT1], _MArray[ScalarT2]]: ... @overload -def atleast_2d( - a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT] -) -> tuple[_MArray[_ScalarT], ...]: ... +def atleast_2d[ScalarT: np.generic]( + a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT] +) -> tuple[_MArray[ScalarT], ...]: ... @overload def atleast_2d(a0: ArrayLike, /) -> _MArray[Incomplete]: ... @overload @@ -109,13 +108,15 @@ def atleast_2d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[_MArray # keep in sync with `numpy._core.shape_base.atleast_2d` @overload -def atleast_3d(a0: _ArrayLike[_ScalarT], /) -> _MArray[_ScalarT]: ... +def atleast_3d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> _MArray[ScalarT]: ... 
@overload -def atleast_3d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[_MArray[_ScalarT1], _MArray[_ScalarT2]]: ... +def atleast_3d[ScalarT1: np.generic, ScalarT2: np.generic]( + a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], / +) -> tuple[_MArray[ScalarT1], _MArray[ScalarT2]]: ... @overload -def atleast_3d( - a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT] -) -> tuple[_MArray[_ScalarT], ...]: ... +def atleast_3d[ScalarT: np.generic]( + a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT] +) -> tuple[_MArray[ScalarT], ...]: ... @overload def atleast_3d(a0: ArrayLike, /) -> _MArray[Incomplete]: ... @overload @@ -125,19 +126,19 @@ def atleast_3d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[_MArray # keep in sync with `numpy._core.shape_base.vstack` @overload -def vstack( - tup: Sequence[_ArrayLike[_ScalarT]], +def vstack[ScalarT: np.generic]( + tup: Sequence[_ArrayLike[ScalarT]], *, dtype: None = None, casting: _CastingKind = "same_kind" -) -> _MArray[_ScalarT]: ... +) -> _MArray[ScalarT]: ... @overload -def vstack( +def vstack[ScalarT: np.generic]( tup: Sequence[ArrayLike], *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], casting: _CastingKind = "same_kind" -) -> _MArray[_ScalarT]: ... +) -> _MArray[ScalarT]: ... @overload def vstack( tup: Sequence[ArrayLike], @@ -150,19 +151,19 @@ row_stack = vstack # keep in sync with `numpy._core.shape_base.hstack` @overload -def hstack( - tup: Sequence[_ArrayLike[_ScalarT]], +def hstack[ScalarT: np.generic]( + tup: Sequence[_ArrayLike[ScalarT]], *, dtype: None = None, casting: _CastingKind = "same_kind" -) -> _MArray[_ScalarT]: ... +) -> _MArray[ScalarT]: ... @overload -def hstack( +def hstack[ScalarT: np.generic]( tup: Sequence[ArrayLike], *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], casting: _CastingKind = "same_kind" -) -> _MArray[_ScalarT]: ... +) -> _MArray[ScalarT]: ... @overload def hstack( tup: Sequence[ArrayLike], @@ -173,35 +174,35 @@ def hstack( # keep in sync with `numpy._core.shape_base_impl.column_stack` @overload -def column_stack(tup: Sequence[_ArrayLike[_ScalarT]]) -> _MArray[_ScalarT]: ... +def column_stack[ScalarT: np.generic](tup: Sequence[_ArrayLike[ScalarT]]) -> _MArray[ScalarT]: ... @overload def column_stack(tup: Sequence[ArrayLike]) -> _MArray[Incomplete]: ... # keep in sync with `numpy._core.shape_base_impl.dstack` @overload -def dstack(tup: Sequence[_ArrayLike[_ScalarT]]) -> _MArray[_ScalarT]: ... +def dstack[ScalarT: np.generic](tup: Sequence[_ArrayLike[ScalarT]]) -> _MArray[ScalarT]: ... @overload def dstack(tup: Sequence[ArrayLike]) -> _MArray[Incomplete]: ... # keep in sync with `numpy._core.shape_base.stack` @overload -def stack( - arrays: Sequence[_ArrayLike[_ScalarT]], +def stack[ScalarT: np.generic]( + arrays: Sequence[_ArrayLike[ScalarT]], axis: SupportsIndex = 0, out: None = None, *, dtype: None = None, casting: _CastingKind = "same_kind" -) -> _MArray[_ScalarT]: ... +) -> _MArray[ScalarT]: ... @overload -def stack( +def stack[ScalarT: np.generic]( arrays: Sequence[ArrayLike], axis: SupportsIndex = 0, out: None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], casting: _CastingKind = "same_kind" -) -> _MArray[_ScalarT]: ... +) -> _MArray[ScalarT]: ... @overload def stack( arrays: Sequence[ArrayLike], @@ -212,33 +213,33 @@ def stack( casting: _CastingKind = "same_kind" ) -> _MArray[Incomplete]: ... 
@overload -def stack( +def stack[MArrayT: MaskedArray]( arrays: Sequence[ArrayLike], axis: SupportsIndex, - out: _MArrayT, + out: MArrayT, *, dtype: DTypeLike | None = None, casting: _CastingKind = "same_kind", -) -> _MArrayT: ... +) -> MArrayT: ... @overload -def stack( +def stack[MArrayT: MaskedArray]( arrays: Sequence[ArrayLike], axis: SupportsIndex = 0, *, - out: _MArrayT, + out: MArrayT, dtype: DTypeLike | None = None, casting: _CastingKind = "same_kind", -) -> _MArrayT: ... +) -> MArrayT: ... # keep in sync with `numpy._core.shape_base_impl.hsplit` @overload -def hsplit(ary: _ArrayLike[_ScalarT], indices_or_sections: _ShapeLike) -> list[_MArray[_ScalarT]]: ... +def hsplit[ScalarT: np.generic](ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike) -> list[_MArray[ScalarT]]: ... @overload def hsplit(ary: ArrayLike, indices_or_sections: _ShapeLike) -> list[_MArray[Incomplete]]: ... # keep in sync with `numpy._core.twodim_base_impl.hsplit` @overload -def diagflat(v: _ArrayLike[_ScalarT], k: int = 0) -> _MArray[_ScalarT]: ... +def diagflat[ScalarT: np.generic](v: _ArrayLike[ScalarT], k: int = 0) -> _MArray[ScalarT]: ... @overload def diagflat(v: ArrayLike, k: int = 0) -> _MArray[Incomplete]: ... diff --git a/numpy/matlib.pyi b/numpy/matlib.pyi index 53ac58c85d2e..f446dbf1c4b9 100644 --- a/numpy/matlib.pyi +++ b/numpy/matlib.pyi @@ -1,4 +1,4 @@ -from typing import Any, Literal, TypeAlias, TypeVar, overload +from typing import Any, Literal, overload import numpy as np import numpy.typing as npt @@ -505,9 +505,8 @@ __all__ += np.__all__ ### -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_Matrix: TypeAlias = np.matrix[tuple[int, int], np.dtype[_ScalarT]] -_Order: TypeAlias = Literal["C", "F"] +type _Matrix[ScalarT: np.generic] = np.matrix[tuple[int, int], np.dtype[ScalarT]] +type _Order = Literal["C", "F"] ### @@ -515,7 +514,7 @@ _Order: TypeAlias = Literal["C", "F"] @overload def empty(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... @overload -def empty(shape: int | tuple[int, int], dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... +def empty[ScalarT: np.generic](shape: int | tuple[int, int], dtype: _DTypeLike[ScalarT], order: _Order = "C") -> _Matrix[ScalarT]: ... @overload def empty(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... @@ -523,7 +522,7 @@ def empty(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C @overload def ones(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... @overload -def ones(shape: int | tuple[int, int], dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... +def ones[ScalarT: np.generic](shape: int | tuple[int, int], dtype: _DTypeLike[ScalarT], order: _Order = "C") -> _Matrix[ScalarT]: ... @overload def ones(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... @@ -531,7 +530,7 @@ def ones(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C" @overload def zeros(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... @overload -def zeros(shape: int | tuple[int, int], dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... +def zeros[ScalarT: np.generic](shape: int | tuple[int, int], dtype: _DTypeLike[ScalarT], order: _Order = "C") -> _Matrix[ScalarT]: ... 
@overload def zeros(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... @@ -539,7 +538,7 @@ def zeros(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C @overload def identity(n: int, dtype: None = None) -> _Matrix[np.float64]: ... @overload -def identity(n: int, dtype: _DTypeLike[_ScalarT]) -> _Matrix[_ScalarT]: ... +def identity[ScalarT: np.generic](n: int, dtype: _DTypeLike[ScalarT]) -> _Matrix[ScalarT]: ... @overload def identity(n: int, dtype: npt.DTypeLike | None = None) -> _Matrix[Any]: ... @@ -553,9 +552,9 @@ def eye( order: _Order = "C", ) -> _Matrix[np.float64]: ... @overload -def eye(n: int, M: int | None, k: int, dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... +def eye[ScalarT: np.generic](n: int, M: int | None, k: int, dtype: _DTypeLike[ScalarT], order: _Order = "C") -> _Matrix[ScalarT]: ... @overload -def eye(n: int, M: int | None = None, k: int = 0, *, dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... +def eye[ScalarT: np.generic](n: int, M: int | None = None, k: int = 0, *, dtype: _DTypeLike[ScalarT], order: _Order = "C") -> _Matrix[ScalarT]: ... @overload def eye(n: int, M: int | None = None, k: int = 0, dtype: npt.DTypeLike | None = ..., order: _Order = "C") -> _Matrix[Any]: ... @@ -573,8 +572,8 @@ def randn(arg: int, /, *args: int) -> _Matrix[np.float64]: ... # @overload -def repmat(a: _Matrix[_ScalarT], m: int, n: int) -> _Matrix[_ScalarT]: ... +def repmat[ScalarT: np.generic](a: _Matrix[ScalarT], m: int, n: int) -> _Matrix[ScalarT]: ... @overload -def repmat(a: _ArrayLike[_ScalarT], m: int, n: int) -> npt.NDArray[_ScalarT]: ... +def repmat[ScalarT: np.generic](a: _ArrayLike[ScalarT], m: int, n: int) -> npt.NDArray[ScalarT]: ... @overload def repmat(a: npt.ArrayLike, m: int, n: int) -> npt.NDArray[Any]: ... 
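
(Reviewer note: matlib.pyi now declares `_Matrix` with the PEP 695 `type` statement
instead of `TypeAlias` plus a separate TypeVar. A minimal sketch of that construct,
using an illustrative `Matrix2D` alias rather than the private `_Matrix`: the bound
is spelled inline, and the alias body is evaluated lazily, so forward references
need no quoting.)

    import numpy as np

    type Matrix2D[ScalarT: np.generic] = np.matrix[tuple[int, int], np.dtype[ScalarT]]

    def zeros_matrix(rows: int, cols: int) -> Matrix2D[np.float64]:
        # np.asmatrix always yields a 2-D matrix, matching the alias shape.
        return np.asmatrix(np.zeros((rows, cols)))
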
diff --git a/numpy/matrixlib/defmatrix.pyi b/numpy/matrixlib/defmatrix.pyi index 40c747d1ae3d..55b9d795078c 100644 --- a/numpy/matrixlib/defmatrix.pyi +++ b/numpy/matrixlib/defmatrix.pyi @@ -1,7 +1,7 @@ from _typeshed import Incomplete from collections.abc import Mapping, Sequence from types import EllipsisType -from typing import Any, ClassVar, Literal as L, Self, SupportsIndex, TypeAlias, overload +from typing import Any, ClassVar, Literal as L, Self, SupportsIndex, overload from typing_extensions import TypeVar import numpy as np @@ -17,17 +17,15 @@ from numpy._typing import ( __all__ = ["asmatrix", "bmat", "matrix"] -_T = TypeVar("_T") -_ArrayT = TypeVar("_ArrayT", bound=np.ndarray) -_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[np.integer | np.bool]) -_ScalarT = TypeVar("_ScalarT", bound=np.generic) _ShapeT_co = TypeVar("_ShapeT_co", bound=_2D, default=_2D, covariant=True) _DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) -_2D: TypeAlias = tuple[int, int] -_Matrix: TypeAlias = matrix[_2D, np.dtype[_ScalarT]] -_ToIndex1: TypeAlias = slice | EllipsisType | NDArray[np.integer | np.bool] | _NestedSequence[int] | None -_ToIndex2: TypeAlias = tuple[_ToIndex1, _ToIndex1 | SupportsIndex] | tuple[_ToIndex1 | SupportsIndex, _ToIndex1] +type _2D = tuple[int, int] +type _Matrix[ScalarT: np.generic] = matrix[_2D, np.dtype[ScalarT]] +type _ToIndex1 = slice | EllipsisType | NDArray[np.integer | np.bool] | _NestedSequence[int] | None +type _ToIndex2 = tuple[_ToIndex1, _ToIndex1 | SupportsIndex] | tuple[_ToIndex1 | SupportsIndex, _ToIndex1] + +### class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): __array_priority__: ClassVar[float] = 10.0 # pyright: ignore[reportIncompatibleMethodOverride] @@ -57,7 +55,7 @@ class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): # def __pow__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - def __rpow__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __rpow__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # keep in sync with `prod` and `mean` @overload # type: ignore[override] @@ -65,9 +63,9 @@ class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): @overload def sum(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> _Matrix[Incomplete]: ... @overload - def sum(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + def sum[OutT: np.ndarray](self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: OutT) -> OutT: ... @overload - def sum(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + def sum[OutT: np.ndarray](self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] # keep in sync with `sum` and `mean` @overload # type: ignore[override] @@ -75,9 +73,9 @@ class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): @overload def prod(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> _Matrix[Incomplete]: ... @overload - def prod(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + def prod[OutT: np.ndarray](self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: OutT) -> OutT: ... 
@overload - def prod(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + def prod[OutT: np.ndarray](self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] # keep in sync with `sum` and `prod` @overload # type: ignore[override] @@ -85,9 +83,9 @@ class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): @overload def mean(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> _Matrix[Incomplete]: ... @overload - def mean(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + def mean[OutT: np.ndarray](self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: OutT) -> OutT: ... @overload - def mean(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + def mean[OutT: np.ndarray](self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] # keep in sync with `var` @overload # type: ignore[override] @@ -95,11 +93,11 @@ class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): @overload def std(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> _Matrix[Incomplete]: ... @overload - def std(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0) -> _ArrayT: ... + def std[OutT: np.ndarray](self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: OutT, ddof: float = 0) -> OutT: ... @overload - def std( # pyright: ignore[reportIncompatibleMethodOverride] - self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0 - ) -> _ArrayT: ... + def std[OutT: np.ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: OutT, ddof: float = 0 + ) -> OutT: ... # keep in sync with `std` @overload # type: ignore[override] @@ -107,11 +105,11 @@ class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): @overload def var(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> _Matrix[Incomplete]: ... @overload - def var(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0) -> _ArrayT: ... + def var[OutT: np.ndarray](self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: OutT, ddof: float = 0) -> OutT: ... @overload - def var( # pyright: ignore[reportIncompatibleMethodOverride] - self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0 - ) -> _ArrayT: ... + def var[OutT: np.ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: OutT, ddof: float = 0 + ) -> OutT: ... # keep in sync with `all` @overload # type: ignore[override] @@ -119,9 +117,9 @@ class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): @overload def any(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.bool]: ... @overload - def any(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + def any[OutT: np.ndarray](self, axis: _ShapeLike | None, out: OutT) -> OutT: ... @overload - def any(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... 
# pyright: ignore[reportIncompatibleMethodOverride] + def any[OutT: np.ndarray](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] # keep in sync with `any` @overload # type: ignore[override] @@ -129,70 +127,70 @@ class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): @overload def all(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.bool]: ... @overload - def all(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + def all[OutT: np.ndarray](self, axis: _ShapeLike | None, out: OutT) -> OutT: ... @overload - def all(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + def all[OutT: np.ndarray](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] # keep in sync with `min` and `ptp` @overload # type: ignore[override] - def max(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... + def max[ScalarT: np.generic](self: NDArray[ScalarT], axis: None = None, out: None = None) -> ScalarT: ... @overload def max(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... @overload - def max(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + def max[OutT: np.ndarray](self, axis: _ShapeLike | None, out: OutT) -> OutT: ... @overload - def max(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + def max[OutT: np.ndarray](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] # keep in sync with `max` and `ptp` @overload # type: ignore[override] - def min(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... + def min[ScalarT: np.generic](self: NDArray[ScalarT], axis: None = None, out: None = None) -> ScalarT: ... @overload def min(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... @overload - def min(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + def min[OutT: np.ndarray](self, axis: _ShapeLike | None, out: OutT) -> OutT: ... @overload - def min(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + def min[OutT: np.ndarray](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] # keep in sync with `max` and `min` @overload - def ptp(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... + def ptp[ScalarT: np.generic](self: NDArray[ScalarT], axis: None = None, out: None = None) -> ScalarT: ... @overload def ptp(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... @overload - def ptp(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + def ptp[OutT: np.ndarray](self, axis: _ShapeLike | None, out: OutT) -> OutT: ... @overload - def ptp(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + def ptp[OutT: np.ndarray](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] # keep in sync with `argmin` @overload # type: ignore[override] - def argmax(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> np.intp: ... + def argmax[ScalarT: np.generic](self: NDArray[ScalarT], axis: None = None, out: None = None) -> np.intp: ... 
@overload def argmax(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.intp]: ... @overload - def argmax(self, axis: _ShapeLike | None, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... + def argmax[OutT: NDArray[np.integer | np.bool]](self, axis: _ShapeLike | None, out: OutT) -> OutT: ... @overload - def argmax(self, axis: _ShapeLike | None = None, *, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + def argmax[OutT: NDArray[np.integer | np.bool]](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] # keep in sync with `argmax` @overload # type: ignore[override] - def argmin(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> np.intp: ... + def argmin[ScalarT: np.generic](self: NDArray[ScalarT], axis: None = None, out: None = None) -> np.intp: ... @overload def argmin(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.intp]: ... @overload - def argmin(self, axis: _ShapeLike | None, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... + def argmin[OutT: NDArray[np.integer | np.bool]](self, axis: _ShapeLike | None, out: OutT) -> OutT: ... @overload - def argmin(self, axis: _ShapeLike | None = None, *, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + def argmin[OutT: NDArray[np.integer | np.bool]](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] # the second overload handles the (rare) case that the matrix is not 2-d @overload - def tolist(self: _Matrix[np.generic[_T]]) -> list[list[_T]]: ... # pyright: ignore[reportIncompatibleMethodOverride] + def tolist[T](self: _Matrix[np.generic[T]]) -> list[list[T]]: ... # pyright: ignore[reportIncompatibleMethodOverride] @overload def tolist(self) -> Incomplete: ... # pyright: ignore[reportIncompatibleMethodOverride] # these three methods will at least return a `2-d` array of shape (1, n) def squeeze(self, /, axis: _ShapeLike | None = None) -> matrix[_2D, _DTypeT_co]: ... - def ravel(self, /, order: L["K", "A", "C", "F"] | None = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - def flatten(self, /, order: L["K", "A", "C", "F"] | None = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def ravel(self, /, order: L["K", "A", "C", "F"] | None = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] + def flatten(self, /, order: L["K", "A", "C", "F"] | None = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] # matrix.T is inherited from _ScalarOrArrayCommon def getT(self) -> Self: ... 
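[Editor's note] The `defmatrix.pyi` rewrite above is one pattern applied repeatedly: the module-level `_T`/`_ArrayT`/`_BoolOrIntArrayT`/`_ScalarT` TypeVars deleted at the top of the file return as PEP 695 type parameters scoped to the individual method, so each `out=` overload can still promise to return exactly the array subtype it received. A minimal runtime sketch of that idiom, assuming Python >= 3.12 (`Box` and `fill` are invented names, not part of the patch):

    import numpy as np

    class Box:
        # The type parameter lives on the method rather than at module
        # scope: whatever ndarray subtype is passed as `out` is also the
        # declared return type.
        def fill[OutT: np.ndarray](self, value: float, out: OutT) -> OutT:
            out[...] = value  # write in place, then hand back the same object
            return out

    arr = np.zeros((2, 2))
    filled = Box().fill(1.0, out=arr)  # statically inferred as the type of `arr`
    assert filled is arr

Scoping the parameter this way also lets each group of overloads state its own bound inline -- `np.ndarray` for the reductions, `NDArray[np.integer | np.bool]` for `argmax`/`argmin` -- instead of sharing one module-level TypeVar declaration per bound.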
diff --git a/numpy/polynomial/_polybase.pyi b/numpy/polynomial/_polybase.pyi index b16b06c8a734..2fdfd24db7a9 100644 --- a/numpy/polynomial/_polybase.pyi +++ b/numpy/polynomial/_polybase.pyi @@ -1,16 +1,7 @@ import abc import decimal from collections.abc import Iterator, Sequence -from typing import ( - Any, - ClassVar, - Generic, - Literal, - Self, - SupportsIndex, - TypeAlias, - overload, -) +from typing import Any, ClassVar, Generic, Literal, Self, SupportsIndex, overload from typing_extensions import TypeIs, TypeVar import numpy as np @@ -38,10 +29,12 @@ from ._polytypes import ( __all__ = ["ABCPolyBase"] _NameT_co = TypeVar("_NameT_co", bound=str | None, default=str | None, covariant=True) -_PolyT = TypeVar("_PolyT", bound=ABCPolyBase) -_AnyOther: TypeAlias = ABCPolyBase | _CoefLike_co | _SeriesLikeCoef_co -class ABCPolyBase(Generic[_NameT_co], abc.ABC): +type _AnyOther = ABCPolyBase | _CoefLike_co | _SeriesLikeCoef_co + +### + +class ABCPolyBase(Generic[_NameT_co], abc.ABC): # noqa: UP046 __hash__: ClassVar[None] = None # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] __array_ufunc__: ClassVar[None] = None maxpower: ClassVar[Literal[100]] = 100 @@ -76,7 +69,7 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): # @overload - def __call__(self, /, arg: _PolyT) -> _PolyT: ... + def __call__[PolyT: ABCPolyBase](self, /, arg: PolyT) -> PolyT: ... @overload def __call__(self, /, arg: _FloatLike_co | decimal.Decimal) -> np.float64 | Any: ... @overload @@ -134,22 +127,22 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): # @overload - def convert( + def convert[PolyT: ABCPolyBase]( self, /, domain: _SeriesLikeCoef_co | None, - kind: type[_PolyT], + kind: type[PolyT], window: _SeriesLikeCoef_co | None = None, - ) -> _PolyT: ... + ) -> PolyT: ... @overload - def convert( + def convert[PolyT: ABCPolyBase]( self, /, domain: _SeriesLikeCoef_co | None = None, *, - kind: type[_PolyT], + kind: type[PolyT], window: _SeriesLikeCoef_co | None = None, - ) -> _PolyT: ... + ) -> PolyT: ... @overload def convert( self, diff --git a/numpy/polynomial/_polytypes.pyi b/numpy/polynomial/_polytypes.pyi index b5a603b6ca85..46d17ac6353c 100644 --- a/numpy/polynomial/_polytypes.pyi +++ b/numpy/polynomial/_polytypes.pyi @@ -1,5 +1,3 @@ -# ruff: noqa: PYI046 - from collections.abc import Sequence from typing import ( Any, @@ -9,8 +7,6 @@ from typing import ( Self, SupportsIndex, SupportsInt, - TypeAlias, - TypeVar, overload, type_check_only, ) @@ -19,75 +15,75 @@ import numpy as np import numpy.typing as npt from numpy._typing import ( _ArrayLikeComplex_co, - # array-likes _ArrayLikeFloat_co, _ArrayLikeNumber_co, _ComplexLike_co, _FloatLike_co, - # scalar-likes _IntLike_co, _NestedSequence, _NumberLike_co, _SupportsArray, ) -_T = TypeVar("_T") -_T_contra = TypeVar("_T_contra", contravariant=True) -_ScalarT = TypeVar("_ScalarT", bound=np.number | np.bool | np.object_) - # compatible with e.g. int, float, complex, Decimal, Fraction, and ABCPolyBase @type_check_only -class _SupportsCoefOps(Protocol[_T_contra]): +class _SupportsCoefOps[T](Protocol): def __eq__(self, x: object, /) -> bool: ... def __ne__(self, x: object, /) -> bool: ... def __neg__(self, /) -> Self: ... def __pos__(self, /) -> Self: ... - def __add__(self, x: _T_contra, /) -> Self: ... - def __sub__(self, x: _T_contra, /) -> Self: ... - def __mul__(self, x: _T_contra, /) -> Self: ... - def __pow__(self, x: _T_contra, /) -> Self | float: ... - def __radd__(self, x: _T_contra, /) -> Self: ... 
- def __rsub__(self, x: _T_contra, /) -> Self: ... - def __rmul__(self, x: _T_contra, /) -> Self: ... + def __add__(self, x: T, /) -> Self: ... + def __sub__(self, x: T, /) -> Self: ... + def __mul__(self, x: T, /) -> Self: ... + def __pow__(self, x: T, /) -> Self | float: ... + def __radd__(self, x: T, /) -> Self: ... + def __rsub__(self, x: T, /) -> Self: ... + def __rmul__(self, x: T, /) -> Self: ... -_Series: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] +type _PolyScalar = np.bool | np.number | np.object_ -_FloatSeries: TypeAlias = _Series[np.floating] -_ComplexSeries: TypeAlias = _Series[np.complexfloating] -_ObjectSeries: TypeAlias = _Series[np.object_] -_CoefSeries: TypeAlias = _Series[np.inexact | np.object_] +type _Series[ScalarT: _PolyScalar] = np.ndarray[tuple[int], np.dtype[ScalarT]] -_FloatArray: TypeAlias = npt.NDArray[np.floating] -_ComplexArray: TypeAlias = npt.NDArray[np.complexfloating] -_ObjectArray: TypeAlias = npt.NDArray[np.object_] -_CoefArray: TypeAlias = npt.NDArray[np.inexact | np.object_] +type _FloatSeries = _Series[np.floating] +type _ComplexSeries = _Series[np.complexfloating] +type _ObjectSeries = _Series[np.object_] +type _CoefSeries = _Series[np.inexact | np.object_] -_Tuple2: TypeAlias = tuple[_T, _T] -_Array1: TypeAlias = np.ndarray[tuple[Literal[1]], np.dtype[_ScalarT]] -_Array2: TypeAlias = np.ndarray[tuple[Literal[2]], np.dtype[_ScalarT]] +type _FloatArray = npt.NDArray[np.floating] +type _ComplexArray = npt.NDArray[np.complexfloating] +type _ObjectArray = npt.NDArray[np.object_] +type _CoefArray = npt.NDArray[np.inexact | np.object_] -_AnyInt: TypeAlias = SupportsInt | SupportsIndex +type _Tuple2[_T] = tuple[_T, _T] +type _Array1[ScalarT: _PolyScalar] = np.ndarray[tuple[Literal[1]], np.dtype[ScalarT]] +type _Array2[ScalarT: _PolyScalar] = np.ndarray[tuple[Literal[2]], np.dtype[ScalarT]] -_CoefObjectLike_co: TypeAlias = np.object_ | _SupportsCoefOps[Any] -_CoefLike_co: TypeAlias = _NumberLike_co | _CoefObjectLike_co +type _AnyInt = SupportsInt | SupportsIndex + +type _CoefObjectLike_co = np.object_ | _SupportsCoefOps[Any] +type _CoefLike_co = _NumberLike_co | _CoefObjectLike_co # The term "series" is used here to refer to 1-d arrays of numeric scalars. 
-_SeriesLikeBool_co: TypeAlias = _SupportsArray[np.dtype[np.bool]] | Sequence[bool | np.bool] -_SeriesLikeInt_co: TypeAlias = _SupportsArray[np.dtype[np.integer | np.bool]] | Sequence[_IntLike_co] -_SeriesLikeFloat_co: TypeAlias = _SupportsArray[np.dtype[np.floating | np.integer | np.bool]] | Sequence[_FloatLike_co] -_SeriesLikeComplex_co: TypeAlias = _SupportsArray[np.dtype[np.number | np.bool]] | Sequence[_ComplexLike_co] -_SeriesLikeObject_co: TypeAlias = _SupportsArray[np.dtype[np.object_]] | Sequence[_CoefObjectLike_co] -_SeriesLikeCoef_co: TypeAlias = _SupportsArray[np.dtype[np.number | np.bool | np.object_]] | Sequence[_CoefLike_co] +type _SeriesLikeBool_co = _SupportsArray[np.dtype[np.bool]] | Sequence[bool | np.bool] +type _SeriesLikeInt_co = _SupportsArray[np.dtype[np.integer | np.bool]] | Sequence[_IntLike_co] +type _SeriesLikeFloat_co = _SupportsArray[np.dtype[np.floating | np.integer | np.bool]] | Sequence[_FloatLike_co] +type _SeriesLikeComplex_co = _SupportsArray[np.dtype[np.number | np.bool]] | Sequence[_ComplexLike_co] +type _SeriesLikeObject_co = _SupportsArray[np.dtype[np.object_]] | Sequence[_CoefObjectLike_co] +type _SeriesLikeCoef_co = _SupportsArray[np.dtype[_PolyScalar]] | Sequence[_CoefLike_co] + +type _ArrayLikeCoefObject_co = _CoefObjectLike_co | _SeriesLikeObject_co | _NestedSequence[_SeriesLikeObject_co] +type _ArrayLikeCoef_co = npt.NDArray[_PolyScalar] | _ArrayLikeNumber_co | _ArrayLikeCoefObject_co -_ArrayLikeCoefObject_co: TypeAlias = _CoefObjectLike_co | _SeriesLikeObject_co | _NestedSequence[_SeriesLikeObject_co] -_ArrayLikeCoef_co: TypeAlias = npt.NDArray[np.number | np.bool | np.object_] | _ArrayLikeNumber_co | _ArrayLikeCoefObject_co +type _Line[ScalarT: _PolyScalar] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Companion[ScalarT: _PolyScalar] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] -_Line: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] +type _AnyDegrees = Sequence[SupportsIndex] +type _FullFitResult = Sequence[np.inexact | np.int32] @type_check_only class _FuncLine(Protocol): @overload - def __call__(self, /, off: _ScalarT, scl: _ScalarT) -> _Line[_ScalarT]: ... + def __call__[ScalarT: _PolyScalar](self, /, off: ScalarT, scl: ScalarT) -> _Line[ScalarT]: ... @overload def __call__(self, /, off: int, scl: int) -> _Line[np.int_]: ... @overload @@ -308,8 +304,6 @@ class _FuncVander(Protocol): @overload def __call__(self, /, x: npt.ArrayLike, deg: SupportsIndex) -> _CoefArray: ... -_AnyDegrees: TypeAlias = Sequence[SupportsIndex] - @type_check_only class _FuncVander2D(Protocol): @overload @@ -360,8 +354,6 @@ class _FuncVander3D(Protocol): deg: _AnyDegrees, ) -> _CoefArray: ... -_FullFitResult: TypeAlias = Sequence[np.inexact | np.int32] - @type_check_only class _FuncFit(Protocol): @overload @@ -476,8 +468,6 @@ class _FuncRoots(Protocol): @overload def __call__(self, /, c: _SeriesLikeCoef_co) -> _ObjectSeries: ... 
-_Companion: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] - @type_check_only class _FuncCompanion(Protocol): @overload diff --git a/numpy/polynomial/chebyshev.pyi b/numpy/polynomial/chebyshev.pyi index 1cfb27829b2e..157b0e5d0f46 100644 --- a/numpy/polynomial/chebyshev.pyi +++ b/numpy/polynomial/chebyshev.pyi @@ -1,15 +1,6 @@ from _typeshed import ConvertibleToInt from collections.abc import Callable, Iterable -from typing import ( - Any, - ClassVar, - Concatenate, - Final, - Literal as L, - Self, - TypeVar, - overload, -) +from typing import Any, ClassVar, Concatenate, Final, Literal as L, Self, overload import numpy as np import numpy.typing as npt @@ -81,15 +72,14 @@ __all__ = [ "chebinterpolate", ] -_NumberOrObjectT = TypeVar("_NumberOrObjectT", bound=np.number | np.object_) -_CoefScalarT = TypeVar("_CoefScalarT", bound=np.number | np.bool | np.object_) +### -def _cseries_to_zseries(c: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... -def _zseries_to_cseries(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... -def _zseries_mul(z1: npt.NDArray[_NumberOrObjectT], z2: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... -def _zseries_div(z1: npt.NDArray[_NumberOrObjectT], z2: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... -def _zseries_der(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... -def _zseries_int(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... +def _cseries_to_zseries[ScalarT: np.number | np.object_](c: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... +def _zseries_to_cseries[ScalarT: np.number | np.object_](zs: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... +def _zseries_mul[ScalarT: np.number | np.object_](z1: npt.NDArray[ScalarT], z2: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... +def _zseries_div[ScalarT: np.number | np.object_](z1: npt.NDArray[ScalarT], z2: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... +def _zseries_der[ScalarT: np.number | np.object_](zs: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... +def _zseries_int[ScalarT: np.number | np.object_](zs: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... poly2cheb: Final[_FuncPoly2Ortho] = ... cheb2poly: Final[_FuncUnOp] = ... @@ -133,17 +123,17 @@ def chebinterpolate( args: tuple[()] = (), ) -> npt.NDArray[np.float64 | np.complex128 | np.object_]: ... @overload -def chebinterpolate( - func: Callable[[npt.NDArray[np.float64]], _CoefScalarT], +def chebinterpolate[CoefScalarT: np.number | np.bool | np.object_]( + func: Callable[[npt.NDArray[np.float64]], CoefScalarT], deg: _IntLike_co, args: tuple[()] = (), -) -> npt.NDArray[_CoefScalarT]: ... +) -> npt.NDArray[CoefScalarT]: ... @overload -def chebinterpolate( - func: Callable[Concatenate[npt.NDArray[np.float64], ...], _CoefScalarT], +def chebinterpolate[CoefScalarT: np.number | np.bool | np.object_]( + func: Callable[Concatenate[npt.NDArray[np.float64], ...], CoefScalarT], deg: _IntLike_co, args: Iterable[Any], -) -> npt.NDArray[_CoefScalarT]: ... +) -> npt.NDArray[CoefScalarT]: ... 
class Chebyshev(ABCPolyBase[L["T"]]): basis_name: ClassVar[L["T"]] = "T" # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/hermite.pyi b/numpy/polynomial/hermite.pyi index 04b8238735dd..60f4af5a1fd7 100644 --- a/numpy/polynomial/hermite.pyi +++ b/numpy/polynomial/hermite.pyi @@ -1,4 +1,4 @@ -from typing import Any, ClassVar, Final, Literal as L, TypeVar +from typing import Any, ClassVar, Final, Literal as L import numpy as np from numpy._typing import _Shape @@ -63,8 +63,6 @@ __all__ = [ "hermweight", ] -_ShapeT = TypeVar("_ShapeT", bound=_Shape) - poly2herm: Final[_FuncPoly2Ortho] = ... herm2poly: Final[_FuncUnOp] = ... @@ -95,7 +93,10 @@ hermfit: Final[_FuncFit] = ... hermcompanion: Final[_FuncCompanion] = ... hermroots: Final[_FuncRoots] = ... -def _normed_hermite_n(x: np.ndarray[_ShapeT, np.dtype[np.float64]], n: int) -> np.ndarray[_ShapeT, np.dtype[np.float64]]: ... +def _normed_hermite_n[ShapeT: _Shape]( + x: np.ndarray[ShapeT, np.dtype[np.float64]], + n: int, +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... hermgauss: Final[_FuncGauss] = ... hermweight: Final[_FuncWeight] = ... diff --git a/numpy/polynomial/hermite_e.pyi b/numpy/polynomial/hermite_e.pyi index b996de52c6da..6997c8a381ef 100644 --- a/numpy/polynomial/hermite_e.pyi +++ b/numpy/polynomial/hermite_e.pyi @@ -1,4 +1,4 @@ -from typing import Any, ClassVar, Final, Literal as L, TypeVar +from typing import Any, ClassVar, Final, Literal as L import numpy as np from numpy._typing import _Shape @@ -63,8 +63,6 @@ __all__ = [ "hermeweight", ] -_ShapeT = TypeVar("_ShapeT", bound=_Shape) - poly2herme: Final[_FuncPoly2Ortho] = ... herme2poly: Final[_FuncUnOp] = ... @@ -95,7 +93,10 @@ hermefit: Final[_FuncFit] = ... hermecompanion: Final[_FuncCompanion] = ... hermeroots: Final[_FuncRoots] = ... -def _normed_hermite_e_n(x: np.ndarray[_ShapeT, np.dtype[np.float64]], n: int) -> np.ndarray[_ShapeT, np.dtype[np.float64]]: ... +def _normed_hermite_e_n[ShapeT: _Shape]( + x: np.ndarray[ShapeT, np.dtype[np.float64]], + n: int, +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... hermegauss: Final[_FuncGauss] = ... hermeweight: Final[_FuncWeight] = ... diff --git a/numpy/polynomial/polyutils.pyi b/numpy/polynomial/polyutils.pyi index 79ca317c12b0..fbaaf7d22880 100644 --- a/numpy/polynomial/polyutils.pyi +++ b/numpy/polynomial/polyutils.pyi @@ -1,14 +1,5 @@ from collections.abc import Callable, Iterable, Sequence -from typing import ( - Final, - Literal, - Protocol, - SupportsIndex, - TypeAlias, - TypeVar, - overload, - type_check_only, -) +from typing import Final, Literal, Protocol, SupportsIndex, overload, type_check_only import numpy as np import numpy.typing as npt @@ -44,16 +35,13 @@ from ._polytypes import ( __all__ = ["as_series", "format_float", "getdomain", "mapdomain", "mapparms", "trimcoef", "trimseq"] -_T = TypeVar("_T") -_SeqT = TypeVar("_SeqT", bound=_CoefArray | Sequence[_CoefLike_co]) - -_AnyLineF: TypeAlias = Callable[[float, float], _CoefArray] -_AnyMulF: TypeAlias = Callable[[np.ndarray | list[int], np.ndarray], _CoefArray] -_AnyVanderF: TypeAlias = Callable[[np.ndarray, int], _CoefArray] +type _AnyLineF = Callable[[float, float], _CoefArray] +type _AnyMulF = Callable[[np.ndarray | list[int], np.ndarray], _CoefArray] +type _AnyVanderF = Callable[[np.ndarray, int], _CoefArray] @type_check_only -class _ValFunc(Protocol[_T]): - def __call__(self, x: np.ndarray, c: _T, /, *, tensor: bool = True) -> _T: ... 
+class _ValFunc[T](Protocol): + def __call__(self, x: np.ndarray, c: T, /, *, tensor: bool = True) -> T: ... ### @@ -77,7 +65,7 @@ def as_series(alist: Iterable[_SeriesLikeComplex_co | complex], trim: bool = Tru def as_series(alist: Iterable[_SeriesLikeCoef_co | object], trim: bool = True) -> list[_ObjectSeries]: ... # -def trimseq(seq: _SeqT) -> _SeqT: ... +def trimseq[SeqT: _CoefArray | Sequence[_CoefLike_co]](seq: SeqT) -> SeqT: ... # @overload @@ -219,10 +207,10 @@ def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeObject_co) def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeCoef_co) -> _CoefSeries: ... # keep in sync with `_gridnd` -def _valnd(val_f: _ValFunc[_T], c: _T, *args: npt.ArrayLike) -> _T: ... +def _valnd[T](val_f: _ValFunc[T], c: T, *args: npt.ArrayLike) -> T: ... # keep in sync with `_valnd` -def _gridnd(val_f: _ValFunc[_T], c: _T, *args: npt.ArrayLike) -> _T: ... +def _gridnd[T](val_f: _ValFunc[T], c: T, *args: npt.ArrayLike) -> T: ... # keep in sync with `_polytypes._FuncBinOp` @overload diff --git a/numpy/random/_common.pyi b/numpy/random/_common.pyi index b667fd1c82eb..417387612014 100644 --- a/numpy/random/_common.pyi +++ b/numpy/random/_common.pyi @@ -1,11 +1,12 @@ +from _typeshed import Incomplete from collections.abc import Callable -from typing import Any, NamedTuple, TypeAlias +from typing import NamedTuple import numpy as np -__all__: list[str] = ["interface"] +__all__ = ["interface"] -_CDataVoidPointer: TypeAlias = Any +type _CDataVoidPointer = Incomplete # currently not expressible class interface(NamedTuple): state_address: int diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index 9e857fb000ef..9b0e5c331b57 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -1,8 +1,8 @@ from collections.abc import Callable, MutableSequence -from typing import Any, Literal, TypeAlias, TypeVar, overload +from typing import Any, Literal, overload import numpy as np -from numpy import dtype, float32, float64, int64 +from numpy import float32, float64, int64 from numpy._typing import ( ArrayLike, DTypeLike, @@ -23,7 +23,6 @@ from numpy._typing import ( _IntPCodes, _ShapeLike, _SingleCodes, - _SupportsDType, _UInt8Codes, _UInt16Codes, _UInt32Codes, @@ -32,24 +31,10 @@ from numpy._typing import ( ) from numpy.random import BitGenerator, RandomState, SeedSequence -_IntegerT = TypeVar("_IntegerT", bound=np.integer) +type _DTypeLikeFloat32 = _DTypeLike[np.float32] | _Float32Codes | _SingleCodes +type _DTypeLikeFloat64 = type[float] | _DTypeLike[np.float64] | _Float64Codes | _DoubleCodes -_DTypeLikeFloat32: TypeAlias = ( - dtype[float32] - | _SupportsDType[dtype[float32]] - | type[float32] - | _Float32Codes - | _SingleCodes -) - -_DTypeLikeFloat64: TypeAlias = ( - dtype[float64] - | _SupportsDType[dtype[float64]] - | type[float] - | type[float64] - | _Float64Codes - | _DoubleCodes -) +### class Generator: def __init__(self, bit_generator: BitGenerator) -> None: ... @@ -57,10 +42,7 @@ class Generator: def __str__(self) -> str: ... def __getstate__(self) -> None: ... def __setstate__(self, state: dict[str, Any] | None) -> None: ... - def __reduce__(self) -> tuple[ - Callable[[BitGenerator], Generator], - tuple[BitGenerator], - None]: ... + def __reduce__(self) -> tuple[Callable[[BitGenerator], Generator], tuple[BitGenerator], None]: ... @property def bit_generator(self) -> BitGenerator: ... def spawn(self, n_children: int) -> list[Generator]: ... 
@@ -238,15 +220,15 @@ class Generator: endpoint: bool = False, ) -> np.bool: ... @overload - def integers( + def integers[ScalarT: np.integer]( self, low: int, high: int | None = None, size: None = None, *, - dtype: _DTypeLike[_IntegerT], + dtype: _DTypeLike[ScalarT], endpoint: bool = False, - ) -> _IntegerT: ... + ) -> ScalarT: ... @overload def integers( self, @@ -267,15 +249,15 @@ class Generator: endpoint: bool = False, ) -> NDArray[np.bool]: ... @overload - def integers( + def integers[ScalarT: np.integer]( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, *, - dtype: _DTypeLike[_IntegerT], + dtype: _DTypeLike[ScalarT], endpoint: bool = False, - ) -> NDArray[_IntegerT]: ... + ) -> NDArray[ScalarT]: ... @overload def integers( self, diff --git a/numpy/random/_pickle.pyi b/numpy/random/_pickle.pyi index b8b1b7bcf63b..b0aa143801ba 100644 --- a/numpy/random/_pickle.pyi +++ b/numpy/random/_pickle.pyi @@ -1,5 +1,5 @@ from collections.abc import Callable -from typing import Final, Literal, TypedDict, TypeVar, overload, type_check_only +from typing import Final, Literal, TypedDict, overload, type_check_only from numpy.random._generator import Generator from numpy.random._mt19937 import MT19937 @@ -9,8 +9,6 @@ from numpy.random._sfc64 import SFC64 from numpy.random.bit_generator import BitGenerator from numpy.random.mtrand import RandomState -_T = TypeVar("_T", bound=BitGenerator) - @type_check_only class _BitGenerators(TypedDict): MT19937: type[MT19937] @@ -19,6 +17,8 @@ class _BitGenerators(TypedDict): Philox: type[Philox] SFC64: type[SFC64] +### + BitGenerators: Final[_BitGenerators] = ... @overload @@ -32,7 +32,7 @@ def __bit_generator_ctor(bit_generator: Literal["Philox"]) -> Philox: ... @overload def __bit_generator_ctor(bit_generator: Literal["SFC64"]) -> SFC64: ... @overload -def __bit_generator_ctor(bit_generator: type[_T]) -> _T: ... +def __bit_generator_ctor[BitGeneratorT: BitGenerator](bit_generator: type[BitGeneratorT]) -> BitGeneratorT: ... 
def __generator_ctor( bit_generator_name: str | type[BitGenerator] | BitGenerator = "MT19937", bit_generator_ctor: Callable[[str | type[BitGenerator]], BitGenerator] = ..., diff --git a/numpy/random/bit_generator.pyi b/numpy/random/bit_generator.pyi index ee4499dee1f3..3c2069aba408 100644 --- a/numpy/random/bit_generator.pyi +++ b/numpy/random/bit_generator.pyi @@ -8,7 +8,6 @@ from typing import ( Literal, NamedTuple, Self, - TypeAlias, TypedDict, overload, type_check_only, @@ -29,7 +28,7 @@ __all__ = ["BitGenerator", "SeedSequence"] ### -_DTypeLikeUint_: TypeAlias = _DTypeLike[np.uint32 | np.uint64] | _UInt32Codes | _UInt64Codes +type _DTypeLikeUint_ = _DTypeLike[np.uint32 | np.uint64] | _UInt32Codes | _UInt64Codes @type_check_only class _SeedSeqState(TypedDict): diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index 016bbecf4604..5cb7f746380d 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -16,11 +16,8 @@ from typing import ( Generic, Literal as L, NoReturn, - ParamSpec, Self, SupportsIndex, - TypeAlias, - TypeVarTuple, overload, type_check_only, ) @@ -91,25 +88,20 @@ __all__ = [ # noqa: RUF022 ### -_T = TypeVar("_T") -_Ts = TypeVarTuple("_Ts") -_Tss = ParamSpec("_Tss") -_ET = TypeVar("_ET", bound=BaseException, default=BaseException) -_FT = TypeVar("_FT", bound=Callable[..., Any]) _W_co = TypeVar("_W_co", bound=_WarnLog | None, default=_WarnLog | None, covariant=True) -_StrLike: TypeAlias = str | bytes -_RegexLike: TypeAlias = _StrLike | Pattern[Any] -_NumericArrayLike: TypeAlias = _ArrayLikeNumber_co | _ArrayLikeObject_co +type _StrLike = str | bytes +type _RegexLike = _StrLike | Pattern[Any] +type _NumericArrayLike = _ArrayLikeNumber_co | _ArrayLikeObject_co -_ExceptionSpec: TypeAlias = type[_ET] | tuple[type[_ET], ...] -_WarningSpec: TypeAlias = type[Warning] -_WarnLog: TypeAlias = list[warnings.WarningMessage] -_ToModules: TypeAlias = Iterable[types.ModuleType] +type _ExceptionSpec[ExceptionT: BaseException] = type[ExceptionT] | tuple[type[ExceptionT], ...] +type _WarningSpec = type[Warning] +type _WarnLog = list[warnings.WarningMessage] +type _ToModules = Iterable[types.ModuleType] # Must return a bool or an ndarray/generic type that is supported by `np.logical_and.reduce` -_ComparisonFunc: TypeAlias = Callable[ - [NDArray[Any], NDArray[Any]], +type _ComparisonFunc = Callable[ + [np.ndarray, np.ndarray], bool | np.bool | np.number | NDArray[np.bool | np.number | np.object_], ] @@ -158,7 +150,7 @@ class suppress_warnings: def __init__(self, /, forwarding_rule: L["always", "module", "once", "location"] = "always") -> None: ... def __enter__(self) -> Self: ... def __exit__(self, cls: type[BaseException] | None, exc: BaseException | None, tb: types.TracebackType | None, /) -> None: ... - def __call__(self, /, func: _FT) -> _FT: ... + def __call__[FuncT: Callable[..., Any]](self, /, func: FuncT) -> FuncT: ... # def filter(self, /, category: type[Warning] = ..., message: str = "", module: types.ModuleType | None = None) -> None: ... @@ -289,36 +281,36 @@ def assert_string_equal(actual: str, desired: str) -> None: ... # @overload -def assert_raises( - exception_class: _ExceptionSpec[_ET], +def assert_raises[ExceptionT: BaseException]( + exception_class: _ExceptionSpec[ExceptionT], /, *, msg: str | None = None, -) -> unittest.case._AssertRaisesContext[_ET]: ... +) -> unittest.case._AssertRaisesContext[ExceptionT]: ... 
@overload -def assert_raises( - exception_class: _ExceptionSpec, - callable: Callable[_Tss, Any], +def assert_raises[**Tss]( + exception_class: _ExceptionSpec[BaseException], + callable: Callable[Tss, Any], /, - *args: _Tss.args, - **kwargs: _Tss.kwargs, + *args: Tss.args, + **kwargs: Tss.kwargs, ) -> None: ... # @overload -def assert_raises_regex( - exception_class: _ExceptionSpec[_ET], +def assert_raises_regex[ExceptionT: BaseException]( + exception_class: _ExceptionSpec[ExceptionT], expected_regexp: _RegexLike, *, msg: str | None = None, -) -> unittest.case._AssertRaisesContext[_ET]: ... +) -> unittest.case._AssertRaisesContext[ExceptionT]: ... @overload -def assert_raises_regex( - exception_class: _ExceptionSpec, +def assert_raises_regex[**Tss]( + exception_class: _ExceptionSpec[BaseException], expected_regexp: _RegexLike, - callable: Callable[_Tss, Any], - *args: _Tss.args, - **kwargs: _Tss.kwargs, + callable: Callable[Tss, Any], + *args: Tss.args, + **kwargs: Tss.kwargs, ) -> None: ... # @@ -368,19 +360,24 @@ def assert_array_max_ulp( def assert_warns(warning_class: _WarningSpec) -> _GeneratorContextManager[None]: ... @overload @deprecated("Please use warnings.catch_warnings or pytest.warns instead") -def assert_warns(warning_class: _WarningSpec, func: Callable[_Tss, _T], *args: _Tss.args, **kwargs: _Tss.kwargs) -> _T: ... +def assert_warns[**Tss, ReturnT]( + warning_class: _WarningSpec, + func: Callable[Tss, ReturnT], + *args: Tss.args, + **kwargs: Tss.kwargs, +) -> ReturnT: ... # @overload def assert_no_warnings() -> _GeneratorContextManager[None]: ... @overload -def assert_no_warnings(func: Callable[_Tss, _T], /, *args: _Tss.args, **kwargs: _Tss.kwargs) -> _T: ... +def assert_no_warnings[**Tss, ReturnT](func: Callable[Tss, ReturnT], /, *args: Tss.args, **kwargs: Tss.kwargs) -> ReturnT: ... # @overload def assert_no_gc_cycles() -> _GeneratorContextManager[None]: ... @overload -def assert_no_gc_cycles(func: Callable[_Tss, Any], /, *args: _Tss.args, **kwargs: _Tss.kwargs) -> None: ... +def assert_no_gc_cycles[**Tss](func: Callable[Tss, Any], /, *args: Tss.args, **kwargs: Tss.kwargs) -> None: ... ### @@ -479,23 +476,23 @@ def run_threaded( prepare_args: None = None, ) -> None: ... @overload -def run_threaded( - func: Callable[[*_Ts], None], +def run_threaded[*Ts]( + func: Callable[[*Ts], None], max_workers: int, pass_count: bool, pass_barrier: bool, outer_iterations: int, - prepare_args: tuple[*_Ts], + prepare_args: tuple[*Ts], ) -> None: ... @overload -def run_threaded( - func: Callable[[*_Ts], None], +def run_threaded[*Ts]( + func: Callable[[*Ts], None], max_workers: int = 8, pass_count: bool = False, pass_barrier: bool = False, outer_iterations: int = 1, *, - prepare_args: tuple[*_Ts], + prepare_args: tuple[*Ts], ) -> None: ... 
# diff --git a/numpy/typing/mypy_plugin.py b/numpy/typing/mypy_plugin.py index fb78eb077c44..21aca2bc69ef 100644 --- a/numpy/typing/mypy_plugin.py +++ b/numpy/typing/mypy_plugin.py @@ -39,7 +39,7 @@ """ from collections.abc import Callable, Iterable -from typing import TYPE_CHECKING, Final, TypeAlias, cast +from typing import TYPE_CHECKING, Final, cast import numpy as np @@ -115,7 +115,7 @@ def plugin(version: str) -> type: else: - _HookFunc: TypeAlias = Callable[[AnalyzeTypeContext], mypy.types.Type] + type _HookFunc = Callable[[AnalyzeTypeContext], mypy.types.Type] def _hook(ctx: AnalyzeTypeContext) -> mypy.types.Type: """Replace a type-alias with a concrete ``NBitBase`` subclass.""" diff --git a/numpy/typing/tests/data/fail/ma.pyi b/numpy/typing/tests/data/fail/ma.pyi index 084ae971bdd0..06159f6e979e 100644 --- a/numpy/typing/tests/data/fail/ma.pyi +++ b/numpy/typing/tests/data/fail/ma.pyi @@ -1,16 +1,13 @@ -from typing import TypeAlias, TypeVar - import numpy as np import numpy.typing as npt from numpy._typing import _AnyShape -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -MaskedArray: TypeAlias = np.ma.MaskedArray[_AnyShape, np.dtype[_ScalarT]] +type _MArray[ScalarT: np.generic] = np.ma.MaskedArray[_AnyShape, np.dtype[ScalarT]] MAR_1d_f8: np.ma.MaskedArray[tuple[int], np.dtype[np.float64]] -MAR_b: MaskedArray[np.bool] -MAR_c: MaskedArray[np.complex128] -MAR_td64: MaskedArray[np.timedelta64] +MAR_b: _MArray[np.bool] +MAR_c: _MArray[np.complex128] +MAR_td64: _MArray[np.timedelta64] AR_b: npt.NDArray[np.bool] diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py index 3ccea66861eb..b5d6b0d38ef0 100644 --- a/numpy/typing/tests/data/pass/ma.py +++ b/numpy/typing/tests/data/pass/ma.py @@ -1,12 +1,11 @@ import datetime as dt -from typing import Any, TypeAlias, TypeVar, cast +from typing import Any, cast import numpy as np import numpy.typing as npt from numpy._typing import _Shape -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -MaskedArray: TypeAlias = np.ma.MaskedArray[_Shape, np.dtype[_ScalarT]] +type MaskedArray[ScalarT: np.generic] = np.ma.MaskedArray[_Shape, np.dtype[_ScalarT]] # mypy: disable-error-code=no-untyped-call diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index b7daf3397f9d..36440fca9487 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -1,17 +1,15 @@ -import sys from collections import deque from pathlib import Path -from typing import Any, Generic, TypeVar, assert_type +from typing import Any, assert_type import numpy as np import numpy.typing as npt +from numpy._typing import _AnyShape -_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) +class SubClass[ScalarT: np.generic](np.ndarray[_AnyShape, np.dtype[ScalarT]]): ... -class SubClass(npt.NDArray[_ScalarT_co]): ... - -class IntoSubClass(Generic[_ScalarT_co]): - def __array__(self) -> SubClass[_ScalarT_co]: ... +class IntoSubClass[ScalarT: np.generic]: + def __array__(self) -> SubClass[ScalarT]: ... i8: np.int64 @@ -268,10 +266,9 @@ assert_type(np.stack([A, A], out=B), SubClass[np.float64]) assert_type(np.block([[A, A], [A, A]]), npt.NDArray[Any]) # pyright correctly infers this as NDArray[float64] assert_type(np.block(C), npt.NDArray[Any]) -if sys.version_info >= (3, 12): - from collections.abc import Buffer +from collections.abc import Buffer - def create_array(obj: npt.ArrayLike) -> npt.NDArray[Any]: ... 
+def create_array(obj: npt.ArrayLike) -> npt.NDArray[Any]: ... - buffer: Buffer - assert_type(create_array(buffer), npt.NDArray[Any]) +buffer: Buffer +assert_type(create_array(buffer), npt.NDArray[Any]) diff --git a/numpy/typing/tests/data/reveal/bitwise_ops.pyi b/numpy/typing/tests/data/reveal/bitwise_ops.pyi index 49986bd5d12c..809f77d9736d 100644 --- a/numpy/typing/tests/data/reveal/bitwise_ops.pyi +++ b/numpy/typing/tests/data/reveal/bitwise_ops.pyi @@ -1,10 +1,10 @@ -from typing import Literal as L, TypeAlias, assert_type +from typing import Literal as L, assert_type import numpy as np import numpy.typing as npt -FalseType: TypeAlias = L[False] -TrueType: TypeAlias = L[True] +type FalseType = L[False] +type TrueType = L[True] i4: np.int32 i8: np.int64 diff --git a/numpy/typing/tests/data/reveal/char.pyi b/numpy/typing/tests/data/reveal/char.pyi index 2fba2feae385..b83ecc62221f 100644 --- a/numpy/typing/tests/data/reveal/char.pyi +++ b/numpy/typing/tests/data/reveal/char.pyi @@ -1,11 +1,11 @@ -from typing import TypeAlias, assert_type +from typing import assert_type import numpy as np import numpy._typing as np_t import numpy.typing as npt -AR_T_alias: TypeAlias = np.ndarray[np_t._AnyShape, np.dtypes.StringDType] -AR_TU_alias: TypeAlias = AR_T_alias | npt.NDArray[np.str_] +type AR_T_alias = np.ndarray[np_t._AnyShape, np.dtypes.StringDType] +type AR_TU_alias = AR_T_alias | npt.NDArray[np.str_] AR_U: npt.NDArray[np.str_] AR_S: npt.NDArray[np.bytes_] diff --git a/numpy/typing/tests/data/reveal/chararray.pyi b/numpy/typing/tests/data/reveal/chararray.pyi index 5c3dc85038db..b93826922662 100644 --- a/numpy/typing/tests/data/reveal/chararray.pyi +++ b/numpy/typing/tests/data/reveal/chararray.pyi @@ -1,10 +1,10 @@ -from typing import Any, TypeAlias, assert_type +from typing import Any, assert_type import numpy as np import numpy.typing as npt -_BytesCharArray: TypeAlias = np.char.chararray[tuple[Any, ...], np.dtype[np.bytes_]] -_StrCharArray: TypeAlias = np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] +type _BytesCharArray = np.char.chararray[tuple[Any, ...], np.dtype[np.bytes_]] +type _StrCharArray = np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] AR_U: _StrCharArray AR_S: _BytesCharArray diff --git a/numpy/typing/tests/data/reveal/dtype.pyi b/numpy/typing/tests/data/reveal/dtype.pyi index 7f670b3be51c..f8f939d4609f 100644 --- a/numpy/typing/tests/data/reveal/dtype.pyi +++ b/numpy/typing/tests/data/reveal/dtype.pyi @@ -2,13 +2,13 @@ import ctypes as ct import datetime as dt from decimal import Decimal from fractions import Fraction -from typing import Any, Literal, LiteralString, TypeAlias, assert_type +from typing import Any, Literal, LiteralString, assert_type import numpy as np from numpy.dtypes import StringDType # a combination of likely `object` dtype-like candidates (no `_co`) -_PyObjectLike: TypeAlias = Decimal | Fraction | dt.datetime | dt.timedelta +type _PyObjectLike = Decimal | Fraction | dt.datetime | dt.timedelta dtype_U: np.dtype[np.str_] dtype_V: np.dtype[np.void] diff --git a/numpy/typing/tests/data/reveal/flatiter.pyi b/numpy/typing/tests/data/reveal/flatiter.pyi index 98d61a6d3428..4907f8464cf2 100644 --- a/numpy/typing/tests/data/reveal/flatiter.pyi +++ b/numpy/typing/tests/data/reveal/flatiter.pyi @@ -1,10 +1,10 @@ -from typing import Any, TypeAlias, assert_type +from typing import Any, assert_type import numpy as np -_ArrayND: TypeAlias = np.ndarray[tuple[Any, ...], np.dtypes.StrDType] -_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtypes.BytesDType] 
-_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtypes.Int8DType] +type _ArrayND = np.ndarray[tuple[Any, ...], np.dtypes.StrDType] +type _Array1D = np.ndarray[tuple[int], np.dtypes.BytesDType] +type _Array2D = np.ndarray[tuple[int, int], np.dtypes.Int8DType] _a_nd: np.flatiter[_ArrayND] _a_1d: np.flatiter[_Array1D] diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 8eef32ddd593..a94d278e87f4 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -1,22 +1,20 @@ -from typing import Any, Generic, Literal, NoReturn, TypeAlias, TypeVar, assert_type +from typing import Any, Literal, NoReturn, assert_type import numpy as np -from numpy import dtype, generic from numpy._typing import NDArray, _AnyShape -_ScalarT = TypeVar("_ScalarT", bound=generic) -_ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) +type MaskedArray[ScalarT: np.generic] = np.ma.MaskedArray[_AnyShape, np.dtype[ScalarT]] +type _NoMaskType = np.bool[Literal[False]] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] -MaskedArray: TypeAlias = np.ma.MaskedArray[_AnyShape, dtype[_ScalarT]] -_NoMaskType: TypeAlias = np.bool[Literal[False]] -_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] +### -class MaskedArraySubclass(MaskedArray[_ScalarT_co]): ... +class MaskedArraySubclass[ScalarT: np.generic](np.ma.MaskedArray[_AnyShape, np.dtype[ScalarT]]): ... -class IntoMaskedArraySubClass(Generic[_ScalarT_co]): - def __array__(self) -> MaskedArraySubclass[_ScalarT_co]: ... +class IntoMaskedArraySubClass[ScalarT: np.generic]: + def __array__(self) -> MaskedArraySubclass[ScalarT]: ... -MaskedArraySubclassC: TypeAlias = MaskedArraySubclass[np.complex128] +type MaskedArraySubclassC = MaskedArraySubclass[np.complex128] AR_b: NDArray[np.bool] AR_f4: NDArray[np.float32] diff --git a/numpy/typing/tests/data/reveal/matrix.pyi b/numpy/typing/tests/data/reveal/matrix.pyi index 3a32b3d394f0..b76760d547b9 100644 --- a/numpy/typing/tests/data/reveal/matrix.pyi +++ b/numpy/typing/tests/data/reveal/matrix.pyi @@ -1,9 +1,9 @@ -from typing import Any, TypeAlias, assert_type +from typing import Any, assert_type import numpy as np import numpy.typing as npt -_Shape2D: TypeAlias = tuple[int, int] +type _Shape2D = tuple[int, int] mat: np.matrix[_Shape2D, np.dtype[np.int64]] ar_f8: npt.NDArray[np.float64] diff --git a/numpy/typing/tests/data/reveal/multiarray.pyi b/numpy/typing/tests/data/reveal/multiarray.pyi index 0fe907c0006b..ada8f7777696 100644 --- a/numpy/typing/tests/data/reveal/multiarray.pyi +++ b/numpy/typing/tests/data/reveal/multiarray.pyi @@ -1,12 +1,10 @@ import datetime as dt -from typing import Any, Literal, TypeVar, assert_type +from typing import Any, Literal, assert_type import numpy as np import numpy.typing as npt -_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) - -class SubClass(npt.NDArray[_ScalarT_co]): ... +class SubClass[ScalarT: np.generic](np.ndarray[tuple[Any, ...], np.dtype[ScalarT]]): ... 
subclass: SubClass[np.float64] diff --git a/numpy/typing/tests/data/reveal/nbit_base_example.pyi b/numpy/typing/tests/data/reveal/nbit_base_example.pyi index 66470b95bf15..c6e931eaca84 100644 --- a/numpy/typing/tests/data/reveal/nbit_base_example.pyi +++ b/numpy/typing/tests/data/reveal/nbit_base_example.pyi @@ -1,13 +1,10 @@ -from typing import TypeVar, assert_type +from typing import assert_type import numpy as np import numpy.typing as npt from numpy._typing import _32Bit, _64Bit -T1 = TypeVar("T1", bound=npt.NBitBase) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] -T2 = TypeVar("T2", bound=npt.NBitBase) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] - -def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: ... +def add[T1: npt.NBitBase, T2: npt.NBitBase](a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: ... # type: ignore[deprecated] i8: np.int64 i4: np.int32 diff --git a/numpy/typing/tests/data/reveal/ndarray_assignability.pyi b/numpy/typing/tests/data/reveal/ndarray_assignability.pyi index feaccf28f578..e8ccc573d642 100644 --- a/numpy/typing/tests/data/reveal/ndarray_assignability.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_assignability.pyi @@ -1,39 +1,36 @@ -from typing import Any, Protocol, TypeAlias, TypeVar, assert_type +from typing import Any, Protocol, assert_type import numpy as np from numpy._typing import _64Bit -_T = TypeVar("_T") -_T_co = TypeVar("_T_co", covariant=True) +class CanAbs[T](Protocol): + def __abs__(self, /) -> T: ... -class CanAbs(Protocol[_T_co]): - def __abs__(self, /) -> _T_co: ... +class CanInvert[T](Protocol): + def __invert__(self, /) -> T: ... -class CanInvert(Protocol[_T_co]): - def __invert__(self, /) -> _T_co: ... +class CanNeg[T](Protocol): + def __neg__(self, /) -> T: ... -class CanNeg(Protocol[_T_co]): - def __neg__(self, /) -> _T_co: ... +class CanPos[T](Protocol): + def __pos__(self, /) -> T: ... -class CanPos(Protocol[_T_co]): - def __pos__(self, /) -> _T_co: ... +def do_abs[T](x: CanAbs[T]) -> T: ... +def do_invert[T](x: CanInvert[T]) -> T: ... +def do_neg[T](x: CanNeg[T]) -> T: ... +def do_pos[T](x: CanPos[T]) -> T: ... -def do_abs(x: CanAbs[_T]) -> _T: ... -def do_invert(x: CanInvert[_T]) -> _T: ... -def do_neg(x: CanNeg[_T]) -> _T: ... -def do_pos(x: CanPos[_T]) -> _T: ... 
- -_Bool_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.bool]] -_UInt8_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.uint8]] -_Int16_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.int16]] -_LongLong_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.longlong]] -_Float32_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.float32]] -_Float64_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.float64]] -_LongDouble_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.longdouble]] -_Complex64_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex64]] -_Complex128_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex128]] -_CLongDouble_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.clongdouble]] -_Void_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.void]] +type _Bool_1d = np.ndarray[tuple[int], np.dtype[np.bool]] +type _UInt8_1d = np.ndarray[tuple[int], np.dtype[np.uint8]] +type _Int16_1d = np.ndarray[tuple[int], np.dtype[np.int16]] +type _LongLong_1d = np.ndarray[tuple[int], np.dtype[np.longlong]] +type _Float32_1d = np.ndarray[tuple[int], np.dtype[np.float32]] +type _Float64_1d = np.ndarray[tuple[int], np.dtype[np.float64]] +type _LongDouble_1d = np.ndarray[tuple[int], np.dtype[np.longdouble]] +type _Complex64_1d = np.ndarray[tuple[int], np.dtype[np.complex64]] +type _Complex128_1d = np.ndarray[tuple[int], np.dtype[np.complex128]] +type _CLongDouble_1d = np.ndarray[tuple[int], np.dtype[np.clongdouble]] +type _Void_1d = np.ndarray[tuple[int], np.dtype[np.void]] b1_1d: _Bool_1d u1_1d: _UInt8_1d diff --git a/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi b/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi index 0ce599a40310..6bbe057ff5b7 100644 --- a/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi @@ -1,11 +1,11 @@ -from typing import TypeAlias, assert_type +from typing import assert_type import numpy as np import numpy.typing as npt -_ArrayND: TypeAlias = npt.NDArray[np.int64] -_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[np.int8]] -_Array3D: TypeAlias = np.ndarray[tuple[int, int, int], np.dtype[np.bool]] +type _ArrayND = npt.NDArray[np.int64] +type _Array2D = np.ndarray[tuple[int, int], np.dtype[np.int8]] +type _Array3D = np.ndarray[tuple[int, int, int], np.dtype[np.bool]] _nd: _ArrayND _2d: _Array2D diff --git a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi index 4c4899ad6308..faba91273c91 100644 --- a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi @@ -1,30 +1,29 @@ from collections.abc import Sequence from decimal import Decimal -from typing import Any, Literal as L, TypeAlias, TypeVar, assert_type +from typing import Any, Literal as L, assert_type import numpy as np import numpy.polynomial as npp import numpy.typing as npt -_Ar_x: TypeAlias = npt.NDArray[np.inexact | np.object_] -_Ar_f: TypeAlias = npt.NDArray[np.floating] -_Ar_c: TypeAlias = npt.NDArray[np.complexfloating] -_Ar_O: TypeAlias = npt.NDArray[np.object_] +type _Ar_x = npt.NDArray[np.inexact | np.object_] +type _Ar_f = npt.NDArray[np.floating] +type _Ar_c = npt.NDArray[np.complexfloating] +type _Ar_O = npt.NDArray[np.object_] -_Ar_x_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.inexact | np.object_]] -_Ar_f_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating]] -_Ar_c_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating]] 
-_Ar_O_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] +type _Ar_x_n = np.ndarray[tuple[int], np.dtype[np.inexact | np.object_]] +type _Ar_f_n = np.ndarray[tuple[int], np.dtype[np.floating]] +type _Ar_c_n = np.ndarray[tuple[int], np.dtype[np.complexfloating]] +type _Ar_O_n = np.ndarray[tuple[int], np.dtype[np.object_]] -_Ar_x_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.float64 | Any]] -_Ar_f_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.floating]] -_Ar_c_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.complexfloating]] -_Ar_O_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.object_]] +type _Ar_x_2 = np.ndarray[tuple[L[2]], np.dtype[np.float64 | Any]] +type _Ar_f_2 = np.ndarray[tuple[L[2]], np.dtype[np.floating]] +type _Ar_c_2 = np.ndarray[tuple[L[2]], np.dtype[np.complexfloating]] +type _Ar_O_2 = np.ndarray[tuple[L[2]], np.dtype[np.object_]] -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_Ar_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] +type _Ar_1d[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] -_BasisName: TypeAlias = L["X"] +type _BasisName = L["X"] SC_i: np.int_ SC_i_co: int | np.int_ diff --git a/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi b/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi index 07d6c9d1af65..9c5aff1117dc 100644 --- a/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi @@ -1,20 +1,20 @@ from collections.abc import Sequence from decimal import Decimal from fractions import Fraction -from typing import Literal as L, TypeAlias, assert_type +from typing import Literal as L, assert_type import numpy as np import numpy.polynomial.polyutils as pu import numpy.typing as npt from numpy.polynomial._polytypes import _Tuple2 -_ArrFloat1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating]] -_ArrComplex1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating]] -_ArrObject1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] +type _ArrFloat1D = np.ndarray[tuple[int], np.dtype[np.floating]] +type _ArrComplex1D = np.ndarray[tuple[int], np.dtype[np.complexfloating]] +type _ArrObject1D = np.ndarray[tuple[int], np.dtype[np.object_]] -_ArrFloat1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.float64]] -_ArrComplex1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.complex128]] -_ArrObject1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.object_]] +type _ArrFloat1D_2 = np.ndarray[tuple[L[2]], np.dtype[np.float64]] +type _ArrComplex1D_2 = np.ndarray[tuple[L[2]], np.dtype[np.complex128]] +type _ArrObject1D_2 = np.ndarray[tuple[L[2]], np.dtype[np.object_]] num_int: int num_float: float diff --git a/numpy/typing/tests/data/reveal/polynomial_series.pyi b/numpy/typing/tests/data/reveal/polynomial_series.pyi index 3188ad9a1239..b87ba4fb2677 100644 --- a/numpy/typing/tests/data/reveal/polynomial_series.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_series.pyi @@ -1,15 +1,15 @@ from collections.abc import Sequence -from typing import Any, TypeAlias, assert_type +from typing import Any, assert_type import numpy as np import numpy.polynomial as npp import numpy.typing as npt -_ArrFloat1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating]] -_ArrFloat1D64: TypeAlias = np.ndarray[tuple[int], np.dtype[np.float64]] -_ArrComplex1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating]] -_ArrComplex1D128: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex128]] -_ArrObject1D: TypeAlias = 
np.ndarray[tuple[int], np.dtype[np.object_]] +type _ArrFloat1D = np.ndarray[tuple[int], np.dtype[np.floating]] +type _ArrFloat1D64 = np.ndarray[tuple[int], np.dtype[np.float64]] +type _ArrComplex1D = np.ndarray[tuple[int], np.dtype[np.complexfloating]] +type _ArrComplex1D128 = np.ndarray[tuple[int], np.dtype[np.complex128]] +type _ArrObject1D = np.ndarray[tuple[int], np.dtype[np.object_]] AR_b: npt.NDArray[np.bool] AR_u4: npt.NDArray[np.uint32] diff --git a/numpy/typing/tests/data/reveal/rec.pyi b/numpy/typing/tests/data/reveal/rec.pyi index aacf217e4207..da66ab003078 100644 --- a/numpy/typing/tests/data/reveal/rec.pyi +++ b/numpy/typing/tests/data/reveal/rec.pyi @@ -1,10 +1,10 @@ import io -from typing import Any, TypeAlias, assert_type +from typing import Any, assert_type import numpy as np import numpy.typing as npt -_RecArray: TypeAlias = np.recarray[tuple[Any, ...], np.dtype[np.record]] +type _RecArray = np.recarray[tuple[Any, ...], np.dtype[np.record]] AR_i8: npt.NDArray[np.int64] REC_AR_V: _RecArray diff --git a/numpy/typing/tests/data/reveal/scalars.pyi b/numpy/typing/tests/data/reveal/scalars.pyi index ab926baa7f15..c56c8e88092c 100644 --- a/numpy/typing/tests/data/reveal/scalars.pyi +++ b/numpy/typing/tests/data/reveal/scalars.pyi @@ -1,9 +1,7 @@ -from typing import Any, Literal, TypeAlias, assert_type +from typing import Any, Literal, assert_type import numpy as np -_1: TypeAlias = Literal[1] - b: np.bool u8: np.uint64 i8: np.int64 @@ -198,17 +196,17 @@ assert_type(S.flatten(), np.ndarray[tuple[int], np.dtype[np.bytes_]]) assert_type(b.reshape(()), np.bool) assert_type(i8.reshape([]), np.int64) -assert_type(b.reshape(1), np.ndarray[tuple[_1], np.dtype[np.bool]]) -assert_type(i8.reshape(-1), np.ndarray[tuple[_1], np.dtype[np.int64]]) -assert_type(u8.reshape(1, 1), np.ndarray[tuple[_1, _1], np.dtype[np.uint64]]) -assert_type(f8.reshape(1, -1), np.ndarray[tuple[_1, _1], np.dtype[np.float64]]) -assert_type(c16.reshape(1, 1, 1), np.ndarray[tuple[_1, _1, _1], np.dtype[np.complex128]]) -assert_type(U.reshape(1, 1, 1, 1), np.ndarray[tuple[_1, _1, _1, _1], np.dtype[np.str_]]) +assert_type(b.reshape(1), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(i8.reshape(-1), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(u8.reshape(1, 1), np.ndarray[tuple[int, int], np.dtype[np.uint64]]) +assert_type(f8.reshape(1, -1), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(c16.reshape(1, 1, 1), np.ndarray[tuple[int, int, int], np.dtype[np.complex128]]) +assert_type(U.reshape(1, 1, 1, 1), np.ndarray[tuple[int, int, int, int], np.dtype[np.str_]]) assert_type( S.reshape(1, 1, 1, 1, 1), np.ndarray[ # len(shape) >= 5 - tuple[_1, _1, _1, _1, _1, *tuple[_1, ...]], + tuple[int, int, int, int, int, *tuple[int, ...]], np.dtype[np.bytes_], ], ) diff --git a/numpy/typing/tests/data/reveal/strings.pyi b/numpy/typing/tests/data/reveal/strings.pyi index 18bd252d5ff9..166481d80922 100644 --- a/numpy/typing/tests/data/reveal/strings.pyi +++ b/numpy/typing/tests/data/reveal/strings.pyi @@ -1,11 +1,11 @@ -from typing import TypeAlias, assert_type +from typing import assert_type import numpy as np import numpy._typing as np_t import numpy.typing as npt -AR_T_alias: TypeAlias = np.ndarray[np_t._AnyShape, np.dtypes.StringDType] -AR_TU_alias: TypeAlias = AR_T_alias | npt.NDArray[np.str_] +type AR_T_alias = np.ndarray[np_t._AnyShape, np.dtypes.StringDType] +type AR_TU_alias = AR_T_alias | npt.NDArray[np.str_] AR_U: npt.NDArray[np.str_] AR_S: npt.NDArray[np.bytes_] diff --git 
a/numpy/typing/tests/data/reveal/testing.pyi b/numpy/typing/tests/data/reveal/testing.pyi index 34fbc5feeb41..583ca60f90a7 100644 --- a/numpy/typing/tests/data/reveal/testing.pyi +++ b/numpy/typing/tests/data/reveal/testing.pyi @@ -6,7 +6,7 @@ import unittest import warnings from collections.abc import Callable from pathlib import Path -from typing import Any, TypeVar, assert_type +from typing import Any, assert_type import numpy as np import numpy.typing as npt @@ -16,7 +16,6 @@ AR_i8: npt.NDArray[np.int64] bool_obj: bool suppress_obj: np.testing.suppress_warnings # type: ignore[deprecated] # pyright: ignore[reportDeprecated] -FT = TypeVar("FT", bound=Callable[..., Any]) def func() -> int: ... @@ -148,7 +147,7 @@ assert_type(np.testing.assert_raises_regex(RuntimeWarning, re.compile(b"test"), class Test: ... -def decorate(a: FT) -> FT: +def decorate[FT: Callable[..., Any]](a: FT) -> FT: return a assert_type(np.testing.decorate_methods(Test, decorate), None) diff --git a/numpy/typing/tests/data/reveal/twodim_base.pyi b/numpy/typing/tests/data/reveal/twodim_base.pyi index 30d4f408f1a9..8cafe729a943 100644 --- a/numpy/typing/tests/data/reveal/twodim_base.pyi +++ b/numpy/typing/tests/data/reveal/twodim_base.pyi @@ -1,15 +1,13 @@ -from typing import Any, TypeAlias, TypeVar, assert_type, type_check_only +from typing import Any, assert_type, type_check_only import numpy as np import numpy.typing as npt -_ScalarT = TypeVar("_ScalarT", bound=np.generic) +type _1D = tuple[int] +type _2D = tuple[int, int] +type _ND = tuple[Any, ...] -_1D: TypeAlias = tuple[int] -_2D: TypeAlias = tuple[int, int] -_ND: TypeAlias = tuple[Any, ...] - -_Indices2D: TypeAlias = tuple[ +type _Indices2D = tuple[ np.ndarray[_1D, np.dtype[np.intp]], np.ndarray[_1D, np.dtype[np.intp]], ] @@ -33,7 +31,7 @@ _to_1d_f64: list[float] _to_1d_c128: list[complex] @type_check_only -def func1(ar: npt.NDArray[_ScalarT], a: int) -> npt.NDArray[_ScalarT]: ... +def func1[ScalarT: np.generic](ar: npt.NDArray[ScalarT], a: int) -> npt.NDArray[ScalarT]: ... @type_check_only def func2(ar: npt.NDArray[np.number], a: str) -> npt.NDArray[np.float64]: ... 
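[Editor's note] Everything in this patch is the same mechanical translation to the PEP 695 syntax available from Python 3.12. For readers skimming the series, an illustrative before/after with invented names (a sketch, not code taken from the patch):

    from typing import Protocol, TypeAlias, TypeVar

    import numpy as np

    # Old spelling: a module-level TypeVar plus a TypeAlias annotation.
    _ScalarT = TypeVar("_ScalarT", bound=np.generic)
    _Vec: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]]

    def head(v: _Vec[_ScalarT]) -> _ScalarT:
        return v[0]

    # New spelling: the parameter is declared where it is used, bound inline,
    # and the alias becomes a lazily evaluated `type` statement.
    type Vec[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]]

    def head_695[ScalarT: np.generic](v: Vec[ScalarT]) -> ScalarT:
        return v[0]

    # Generic classes and protocols drop the explicit Generic[...] base;
    # variance is inferred rather than spelled out.
    class CanNeg[T](Protocol):
        def __neg__(self, /) -> T: ...

Note the deliberate leftovers: `_ShapeT_co`, `_DTypeT_co`, `_NameT_co`, and `_W_co` keep the `typing_extensions.TypeVar` spelling because they carry a `default=`, and defaults for inline type parameters (PEP 696) only arrived in Python 3.13 -- hence the retained `Generic[_NameT_co]` base with its `# noqa: UP046` in `_polybase.pyi`.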
From 4fa8035e37bcc8e6c92ee69c0a97721cebec7411 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 28 Nov 2025 04:06:20 +0100 Subject: [PATCH 0964/1018] MAINT: Remove py311 special-casing in ``spin stubtest`` --- .spin/cmds.py | 8 ++------ tools/stubtest/allowlist.txt | 8 ++++++++ tools/stubtest/allowlist_py311.txt | 2 -- tools/stubtest/allowlist_py312.txt | 6 ------ 4 files changed, 10 insertions(+), 14 deletions(-) delete mode 100644 tools/stubtest/allowlist_py311.txt delete mode 100644 tools/stubtest/allowlist_py312.txt diff --git a/.spin/cmds.py b/.spin/cmds.py index ea62717e4f78..e522108a5107 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -565,17 +565,13 @@ def stubtest(*, concise: bool, build_dir: str) -> None: stubtest_dir = curdir.parent / 'tools' / 'stubtest' mypy_config = stubtest_dir / 'mypy.ini' - allowlists = [stubtest_dir / 'allowlist.txt'] - if sys.version_info < (3, 12): - allowlists.append(stubtest_dir / 'allowlist_py311.txt') - else: - allowlists.append(stubtest_dir / 'allowlist_py312.txt') + allowlist = stubtest_dir / 'allowlist.txt' cmd = [ 'stubtest', '--ignore-disjoint-bases', f'--mypy-config-file={mypy_config}', - *(f'--allowlist={allowlist}' for allowlist in allowlists), + f'--allowlist={allowlist}', ] if concise: cmd.append('--concise') diff --git a/tools/stubtest/allowlist.txt b/tools/stubtest/allowlist.txt index 30f4cd120cc9..eab246271dd1 100644 --- a/tools/stubtest/allowlist.txt +++ b/tools/stubtest/allowlist.txt @@ -1,3 +1,6 @@ +# TODO: remove once distutils is gone +numpy\.f2py\._backends\._distutils + # intentional type-check-only deviations from runtime numpy\._typing.* numpy\.polynomial\._polytypes @@ -59,6 +62,11 @@ numpy\.core\.shape_base.* numpy\.core\.umath.* numpy\.typing\.mypy_plugin +# false positive "... is not a Union" errors +numpy\.typing\.ArrayLike +numpy\.typing\.DTypeLike +numpy\.typing\.NDArray + # ufuncs, see https://github.com/python/mypy/issues/20223 numpy\.(\w+\.)*abs numpy\.(\w+\.)*absolute diff --git a/tools/stubtest/allowlist_py311.txt b/tools/stubtest/allowlist_py311.txt deleted file mode 100644 index e6b2e364230e..000000000000 --- a/tools/stubtest/allowlist_py311.txt +++ /dev/null @@ -1,2 +0,0 @@ -# python == 3.11.* - diff --git a/tools/stubtest/allowlist_py312.txt b/tools/stubtest/allowlist_py312.txt deleted file mode 100644 index a0ee0edf2be2..000000000000 --- a/tools/stubtest/allowlist_py312.txt +++ /dev/null @@ -1,6 +0,0 @@ -# python >= 3.12 - -# false positive "...
is not a Union" errors -numpy\.typing\.ArrayLike -numpy\.typing\.DTypeLike - From 582922b56e99b64fd62424159e3da1bbfc8dd82a Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 28 Nov 2025 04:11:14 +0100 Subject: [PATCH 0965/1018] MAINT: remove ``typing_extensions`` as test dependency --- .github/workflows/linux_blas.yml | 2 +- environment.yml | 5 ++--- requirements/test_requirements.txt | 1 - 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index c4e2d55330c4..88c5322dd754 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -274,7 +274,7 @@ jobs: run: | # do not use test_requirements.txt, it includes coverage which requires # sqlite3, which is not available on OpenSUSE python - pip install --break-system-packages pytest pytest-xdist hypothesis typing_extensions pytest-timeout + pip install --break-system-packages pytest pytest-xdist hypothesis pytest-timeout spin test -j auto -- numpy/linalg --timeout=600 --durations=10 diff --git a/environment.yml b/environment.yml index 67b7cec26754..307dd4631012 100644 --- a/environment.yml +++ b/environment.yml @@ -7,7 +7,7 @@ name: numpy-dev channels: - conda-forge dependencies: - - python=3.12 # need to pin to avoid issues with builds + - python=3.12 # need to pin to avoid issues with builds - cython>=3.0 - compilers - openblas @@ -24,9 +24,8 @@ dependencies: - pytest-xdist - hypothesis # For type annotations - - typing_extensions>=4.5.0 - mypy=1.19.0 - - orjson # makes mypy faster + - orjson # makes mypy faster # For building docs - sphinx>=4.5.0 - sphinx-copybutton diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 66537dafdade..2e2f679cee72 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -13,7 +13,6 @@ pytest-timeout # - Mypy relies on C API features not present in PyPy # NOTE: Keep mypy in sync with environment.yml mypy==1.19.0; platform_python_implementation != "PyPy" -typing_extensions>=4.5.0 # for optional f2py encoding detection charset-normalizer tzdata From 75570dd993271ee90e2e8c4ee5b230a70aa369fb Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 28 Nov 2025 04:28:02 +0100 Subject: [PATCH 0966/1018] STY: appease ruff --- doc/source/conf.py | 2 -- numpy/_pytesttester.py | 1 - numpy/tests/test_public_api.py | 7 +++---- 3 files changed, 3 insertions(+), 7 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 302f1fd3731c..111a631d8ea1 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -145,8 +145,6 @@ class PyTypeObject(ctypes.Structure): # The reST default role (used for this markup: `text`) to use for all documents. default_role = "autolink" -nitpick_ignore = [] - # If true, '()' will be appended to :func: etc. cross-reference text. 
add_function_parentheses = False diff --git a/numpy/_pytesttester.py b/numpy/_pytesttester.py index cbb4191047ba..25f5300a74ac 100644 --- a/numpy/_pytesttester.py +++ b/numpy/_pytesttester.py @@ -123,7 +123,6 @@ def __call__(self, label='fast', verbose=1, extra_argv=None, True """ - import pytest module = sys.modules[self.module_name] diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index 1e26cb18b60f..3ccba81ebaff 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -142,6 +142,7 @@ def test_NPY_NO_EXPORT(): "version", ]] + PUBLIC_ALIASED_MODULES = [ "numpy.char", "numpy.emath", @@ -191,6 +192,7 @@ def test_NPY_NO_EXPORT(): "testing.print_coercion_tables", ]] + def is_unexpected(name): """Check if this needs to be considered.""" return ( @@ -201,9 +203,6 @@ def is_unexpected(name): ) -SKIP_LIST = [] - - def test_all_modules_are_expected(): """ Test that we don't add anything that looks like a new public module by @@ -214,7 +213,7 @@ def test_all_modules_are_expected(): for _, modname, ispkg in pkgutil.walk_packages(path=np.__path__, prefix=np.__name__ + '.', onerror=None): - if is_unexpected(modname) and modname not in SKIP_LIST: + if is_unexpected(modname): # We have a name that is new. If that's on purpose, add it to # PUBLIC_MODULES. We don't expect to have to add anything to # PRIVATE_BUT_PRESENT_MODULES. Use an underscore in the name! From 915c058a7bd211c5de04e43a61109983f600b9a5 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 28 Nov 2025 04:45:37 +0100 Subject: [PATCH 0967/1018] TYP: address new mypy test failures --- numpy/_core/fromnumeric.pyi | 41 +++++++++++-------- numpy/_typing/__init__.py | 4 +- .../tests/data/pass/array_constructors.py | 3 +- numpy/typing/tests/data/pass/array_like.py | 8 +--- numpy/typing/tests/data/pass/ma.py | 2 +- numpy/typing/tests/data/pass/ndarray_misc.py | 4 +- .../typing/tests/data/reveal/fromnumeric.pyi | 4 +- .../typing/tests/data/reveal/ndarray_misc.pyi | 6 +-- numpy/typing/tests/data/reveal/numeric.pyi | 2 +- numpy/typing/tests/test_runtime.py | 35 +++++++--------- 10 files changed, 51 insertions(+), 58 deletions(-) diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index b929328a9443..ce08335ac256 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -122,7 +122,13 @@ type _PyArray[_T] = list[_T] | tuple[_T, ...] # `int` also covers `bool` type _PyScalar = complex | bytes | str -type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _0D = tuple[()] +type _1D = tuple[int] +type _2D = tuple[int, int] +type _3D = tuple[int, int, int] +type _4D = tuple[int, int, int, int] + +type _Array1D[ScalarT: np.generic] = np.ndarray[_1D, np.dtype[ScalarT]] ### @@ -185,7 +191,7 @@ def reshape[ScalarT: np.generic]( order: _OrderACF = "C", *, copy: bool | None = None, -) -> np.ndarray[tuple[int], np.dtype[ScalarT]]: ... +) -> _Array1D[ScalarT]: ... @overload # shape: ~ShapeT def reshape[ScalarT: np.generic, ShapeT: _Shape]( a: _ArrayLike[ScalarT], @@ -212,7 +218,7 @@ def reshape( order: _OrderACF = "C", *, copy: bool | None = None, -) -> np.ndarray[tuple[int], np.dtype]: ... +) -> np.ndarray[_1D]: ... @overload # shape: ~ShapeT def reshape[ShapeT: _Shape]( a: ArrayLike, @@ -221,7 +227,7 @@ def reshape[ShapeT: _Shape]( order: _OrderACF = "C", *, copy: bool | None = None, -) -> np.ndarray[ShapeT, np.dtype]: ... +) -> np.ndarray[ShapeT]: ... 
@overload # shape: Sequence[index] def reshape( a: ArrayLike, @@ -267,7 +273,7 @@ def repeat[ScalarT: np.generic]( a: _ArrayLike[ScalarT], repeats: _ArrayLikeInt_co, axis: None = None, -) -> np.ndarray[tuple[int], np.dtype[ScalarT]]: ... +) -> _Array1D[ScalarT]: ... @overload def repeat[ScalarT: np.generic]( a: _ArrayLike[ScalarT], @@ -279,7 +285,7 @@ def repeat( a: ArrayLike, repeats: _ArrayLikeInt_co, axis: None = None, -) -> np.ndarray[tuple[int], np.dtype[Any]]: ... +) -> _Array1D[Any]: ... @overload def repeat( a: ArrayLike, @@ -465,19 +471,20 @@ def searchsorted( sorter: _ArrayLikeInt_co | None = None, # 1D int array ) -> NDArray[intp]: ... -# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 + @overload def resize[ScalarT: np.generic](a: _ArrayLike[ScalarT], new_shape: SupportsIndex | tuple[SupportsIndex]) -> _Array1D[ScalarT]: ... @overload -def resize[ScalarT: np.generic, ShapeT: _Shape]( - a: _ArrayLike[ScalarT], new_shape: ShapeT -) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... +def resize[ScalarT: np.generic, AnyShapeT: (_0D, _1D, _2D, _3D, _4D)]( + a: _ArrayLike[ScalarT], + new_shape: AnyShapeT, +) -> np.ndarray[AnyShapeT, np.dtype[ScalarT]]: ... @overload def resize[ScalarT: np.generic](a: _ArrayLike[ScalarT], new_shape: _ShapeLike) -> NDArray[ScalarT]: ... @overload -def resize(a: ArrayLike, new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype]: ... +def resize(a: ArrayLike, new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[_1D]: ... @overload -def resize[ShapeT: _Shape](a: ArrayLike, new_shape: ShapeT) -> np.ndarray[ShapeT, np.dtype]: ... +def resize[AnyShapeT: (_0D, _1D, _2D, _3D, _4D)](a: ArrayLike, new_shape: AnyShapeT) -> np.ndarray[AnyShapeT]: ... @overload def resize(a: ArrayLike, new_shape: _ShapeLike) -> NDArray[Any]: ... @@ -550,9 +557,9 @@ def ravel(a: float | _NestedSequence[float], order: _OrderKACF = "C") -> _Array1 @overload def ravel(a: complex | _NestedSequence[complex], order: _OrderKACF = "C") -> _Array1D[np.complex128 | Any]: ... @overload -def ravel(a: ArrayLike, order: _OrderKACF = "C") -> np.ndarray[tuple[int], np.dtype]: ... +def ravel(a: ArrayLike, order: _OrderKACF = "C") -> np.ndarray[_1D]: ... -def nonzero(a: _ArrayLike[Any]) -> tuple[np.ndarray[tuple[int], np.dtype[intp]], ...]: ... +def nonzero(a: _ArrayLike[Any]) -> tuple[_Array1D[np.intp], ...]: ... # this prevents `Any` from being returned with Pyright @overload @@ -564,12 +571,12 @@ def shape(a: _PyScalar) -> tuple[()]: ... # `collections.abc.Sequence` can't be used here, since `bytes` and `str` are # subtypes of it, which would make the return types incompatible. @overload -def shape(a: _PyArray[_PyScalar]) -> tuple[int]: ... +def shape(a: _PyArray[_PyScalar]) -> _1D: ... @overload -def shape(a: _PyArray[_PyArray[_PyScalar]]) -> tuple[int, int]: ... +def shape(a: _PyArray[_PyArray[_PyScalar]]) -> _2D: ... # this overload will be skipped by typecheckers that don't support PEP 688 @overload -def shape(a: memoryview | bytearray) -> tuple[int]: ... +def shape(a: memoryview | bytearray) -> _1D: ... @overload def shape(a: ArrayLike) -> _AnyShape: ... 
diff --git a/numpy/_typing/__init__.py b/numpy/_typing/__init__.py index 82479a3fcb08..d6b6105e79b8 100644 --- a/numpy/_typing/__init__.py +++ b/numpy/_typing/__init__.py @@ -1,7 +1,7 @@ """Private counterpart of ``numpy.typing``.""" from ._array_like import ( - ArrayLike, + ArrayLike as ArrayLike, NDArray as NDArray, _ArrayLike as _ArrayLike, _ArrayLikeAnyString_co as _ArrayLikeAnyString_co, @@ -84,7 +84,7 @@ # from ._dtype_like import ( - DTypeLike, + DTypeLike as DTypeLike, _DTypeLike as _DTypeLike, _DTypeLikeBool as _DTypeLikeBool, _DTypeLikeBytes as _DTypeLikeBytes, diff --git a/numpy/typing/tests/data/pass/array_constructors.py b/numpy/typing/tests/data/pass/array_constructors.py index d91d257cb17c..27cbffa06a5c 100644 --- a/numpy/typing/tests/data/pass/array_constructors.py +++ b/numpy/typing/tests/data/pass/array_constructors.py @@ -1,7 +1,6 @@ from typing import Any import numpy as np -import numpy.typing as npt class Index: @@ -9,7 +8,7 @@ def __index__(self) -> int: return 0 -class SubClass(npt.NDArray[np.float64]): +class SubClass(np.ndarray[tuple[Any, ...], np.dtype[np.float64]]): pass diff --git a/numpy/typing/tests/data/pass/array_like.py b/numpy/typing/tests/data/pass/array_like.py index f922beae34ce..f1e09b03a4ec 100644 --- a/numpy/typing/tests/data/pass/array_like.py +++ b/numpy/typing/tests/data/pass/array_like.py @@ -1,11 +1,5 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING - import numpy as np - -if TYPE_CHECKING: - from numpy._typing import ArrayLike, NDArray, _SupportsArray +from numpy._typing import ArrayLike, NDArray, _SupportsArray x1: ArrayLike = True x2: ArrayLike = 5 diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py index b5d6b0d38ef0..72cbc5d9b98e 100644 --- a/numpy/typing/tests/data/pass/ma.py +++ b/numpy/typing/tests/data/pass/ma.py @@ -5,7 +5,7 @@ import numpy.typing as npt from numpy._typing import _Shape -type MaskedArray[ScalarT: np.generic] = np.ma.MaskedArray[_Shape, np.dtype[_ScalarT]] +type MaskedArray[ScalarT: np.generic] = np.ma.MaskedArray[_Shape, np.dtype[ScalarT]] # mypy: disable-error-code=no-untyped-call diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py index 40c84d8641bd..d50becb20ee4 100644 --- a/numpy/typing/tests/data/pass/ndarray_misc.py +++ b/numpy/typing/tests/data/pass/ndarray_misc.py @@ -15,8 +15,8 @@ import numpy.typing as npt -class SubClass(npt.NDArray[np.float64]): ... -class IntSubClass(npt.NDArray[np.intp]): ... +class SubClass(np.ndarray[tuple[Any, ...], np.dtype[np.float64]]): ... +class IntSubClass(np.ndarray[tuple[Any, ...], np.dtype[np.intp]]): ... i4 = np.int32(1) diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index 58ea2c5f8732..26de2c2b6e37 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -5,7 +5,7 @@ from typing import Any, assert_type import numpy as np import numpy.typing as npt -class NDArraySubclass(npt.NDArray[np.complex128]): ... +class NDArraySubclass(np.ndarray[tuple[Any, ...], np.dtype[np.complex128]]): ... AR_b: npt.NDArray[np.bool] AR_f4: npt.NDArray[np.float32] @@ -25,7 +25,7 @@ i8: np.int64 f: float # integer‑dtype subclass for argmin/argmax -class NDArrayIntSubclass(npt.NDArray[np.intp]): ... +class NDArrayIntSubclass(np.ndarray[tuple[Any, ...], np.dtype[np.intp]]): ... 
AR_sub_i: NDArrayIntSubclass assert_type(np.take(b, 0), np.bool) diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index 2972a58c328f..be0666f95fcb 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -16,7 +16,7 @@ from typing_extensions import CapsuleType import numpy as np import numpy.typing as npt -class SubClass(npt.NDArray[np.object_]): ... +class SubClass(np.ndarray[tuple[Any, ...], np.dtype[np.object_]]): ... f8: np.float64 i8: np.int64 @@ -62,12 +62,12 @@ assert_type(AR_f8.any(out=B), SubClass) assert_type(f8.argmax(), np.intp) assert_type(AR_f8.argmax(), np.intp) assert_type(AR_f8.argmax(axis=0), Any) -assert_type(AR_f8.argmax(out=AR_i8), npt.NDArray[np.intp]) +assert_type(AR_f8.argmax(out=AR_i8), npt.NDArray[np.int64]) assert_type(f8.argmin(), np.intp) assert_type(AR_f8.argmin(), np.intp) assert_type(AR_f8.argmin(axis=0), Any) -assert_type(AR_f8.argmin(out=AR_i8), npt.NDArray[np.intp]) +assert_type(AR_f8.argmin(out=AR_i8), npt.NDArray[np.int64]) assert_type(f8.argsort(), npt.NDArray[np.intp]) assert_type(AR_f8.argsort(), npt.NDArray[np.intp]) diff --git a/numpy/typing/tests/data/reveal/numeric.pyi b/numpy/typing/tests/data/reveal/numeric.pyi index 44192fd64331..7b3abc2d6761 100644 --- a/numpy/typing/tests/data/reveal/numeric.pyi +++ b/numpy/typing/tests/data/reveal/numeric.pyi @@ -10,7 +10,7 @@ from typing import Any, assert_type import numpy as np import numpy.typing as npt -class SubClass(npt.NDArray[np.int64]): ... +class SubClass(np.ndarray[tuple[Any, ...], np.dtype[np.int64]]): ... i8: np.int64 diff --git a/numpy/typing/tests/test_runtime.py b/numpy/typing/tests/test_runtime.py index 462fe4eabdc0..9db74c8ddc28 100644 --- a/numpy/typing/tests/test_runtime.py +++ b/numpy/typing/tests/test_runtime.py @@ -3,7 +3,8 @@ from typing import ( Any, NamedTuple, - Union, # pyright: ignore[reportDeprecated] + Self, + TypeAliasType, get_args, get_origin, get_type_hints, @@ -17,31 +18,23 @@ class TypeTup(NamedTuple): - typ: type - args: tuple[type, ...] - origin: type | None + typ: type # type expression + args: tuple[type, ...] # generic type parameters or arguments + origin: type | None # e.g. `UnionType` or `GenericAlias` + @classmethod + def from_type_alias(cls, alias: TypeAliasType, /) -> Self: + # PEP 695 `type _ = ...` aliases wrap the type expression as a + # `types.TypeAliasType` instance with a `__value__` attribute. 
+ tp = alias.__value__ + return cls(typ=tp, args=get_args(tp), origin=get_origin(tp)) -def _flatten_type_alias(t: Any) -> Any: - # "flattens" a TypeAliasType to its underlying type alias - return getattr(t, "__value__", t) - - -NDArrayTup = TypeTup(npt.NDArray, npt.NDArray.__args__, np.ndarray) TYPES = { - "ArrayLike": TypeTup( - _flatten_type_alias(npt.ArrayLike), - _flatten_type_alias(npt.ArrayLike).__args__, - Union, - ), - "DTypeLike": TypeTup( - _flatten_type_alias(npt.DTypeLike), - _flatten_type_alias(npt.DTypeLike).__args__, - Union, - ), + "ArrayLike": TypeTup.from_type_alias(npt.ArrayLike), + "DTypeLike": TypeTup.from_type_alias(npt.DTypeLike), "NBitBase": TypeTup(npt.NBitBase, (), None), # type: ignore[deprecated] # pyright: ignore[reportDeprecated] - "NDArray": NDArrayTup, + "NDArray": TypeTup.from_type_alias(npt.NDArray), } From 18b128a149501397e771ea180212bd44bd060252 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 28 Nov 2025 17:22:13 +0100 Subject: [PATCH 0968/1018] TYP: fix mypy_primer errors in `timedelta64` --- numpy/__init__.pyi | 64 +++++++++++++++++++++++----------------------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 8ca91fd5b007..b8ede6c2a85a 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -165,6 +165,7 @@ from typing import ( TypedDict, final, overload, + override, type_check_only, ) @@ -174,7 +175,6 @@ from typing import ( # https://github.com/python/typeshed/blob/main/stdlib/typing_extensions.pyi from _typeshed import Incomplete, StrOrBytesPath, SupportsFlush, SupportsLenAndGetItem, SupportsWrite from typing_extensions import CapsuleType, TypeVar, deprecated -from typing import override from numpy import ( char, @@ -2636,7 +2636,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def getfield(self, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> NDArray[Any]: ... def __index__(self: NDArray[integer], /) -> int: ... - def __complex__(self: NDArray[bool_ | number | object_], /) -> complex: ... + def __complex__(self: NDArray[number | np.bool | object_], /) -> complex: ... def __len__(self) -> int: ... def __contains__(self, value: object, /) -> builtins.bool: ... @@ -2727,13 +2727,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # TODO: Uncomment once https://github.com/python/mypy/issues/14070 is fixed # @overload - # def __abs__(self: ndarray[ShapeT, dtypes.Complex64DType], /) -> ndarray[ShapeT, dtypes.Float32DType]: ... + # def __abs__[ShapeT: _Shape](self: ndarray[ShapeT, dtypes.Complex64DType], /) -> ndarray[ShapeT, dtypes.Float32DType]: ... # @overload - # def __abs__(self: ndarray[ShapeT, dtypes.Complex128DType], /) -> ndarray[ShapeT, dtypes.Float64DType]: ... + # def __abs__[ShapeT: _Shape](self: ndarray[ShapeT, dtypes.Complex128DType], /) -> ndarray[ShapeT, dtypes.Float64DType]: ... # @overload - # def __abs__(self: ndarray[ShapeT, dtypes.CLongDoubleDType], /) -> ndarray[ShapeT, dtypes.LongDoubleDType]: ... + # def __abs__[ShapeT: _Shape](self: ndarray[ShapeT, dtypes.CLongDoubleDType], /) -> ndarray[ShapeT, dtypes.LongDoubleDType]: ... # @overload - # def __abs__(self: ndarray[ShapeT, dtype[complex128]], /) -> ndarray[ShapeT, dtype[float64]]: ... + # def __abs__[ShapeT: _Shape](self: ndarray[ShapeT, dtype[complex128]], /) -> ndarray[ShapeT, dtype[float64]]: ... 
@overload def __abs__[ShapeT: _Shape, NBitT: NBitBase]( self: ndarray[ShapeT, dtype[complexfloating[NBitT]]], / @@ -2753,7 +2753,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __matmul__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... + def __matmul__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __matmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3018,7 +3018,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __sub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __sub__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... # type: ignore[overload-overlap] + def __sub__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __sub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3056,7 +3056,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rsub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __rsub__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... # type: ignore[overload-overlap] + def __rsub__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __rsub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -4630,7 +4630,7 @@ int_ = intp long = signedinteger[_NBitLong] longlong = signedinteger[_NBitLongLong] -class unsignedinteger(integer[_NBitT1]): +class unsignedinteger(integer[_NBitT]): def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... # arithmetic ops @@ -4892,7 +4892,7 @@ class inexact(number[_NBitT, _InexactItemT_co], Generic[_NBitT, _InexactItemT_co @abstractmethod def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... -class floating(_RealMixin, _RoundMixin, inexact[_NBitT1, float]): +class floating(_RealMixin, _RoundMixin, inexact[_NBitT, float]): def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... # arithmetic ops @@ -5076,7 +5076,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __add__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __add__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... + def __add__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __add__(self, other: complex, /) -> float64 | complex128: ... @@ -5085,7 +5085,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __radd__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload - def __radd__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... + def __radd__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __radd__(self, other: complex, /) -> float64 | complex128: ... 
@@ -5094,7 +5094,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __sub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __sub__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... + def __sub__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __sub__(self, other: complex, /) -> float64 | complex128: ... @@ -5103,7 +5103,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rsub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload - def __rsub__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... + def __rsub__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __rsub__(self, other: complex, /) -> float64 | complex128: ... @@ -5112,7 +5112,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __mul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __mul__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... + def __mul__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __mul__(self, other: complex, /) -> float64 | complex128: ... @@ -5121,7 +5121,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rmul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload - def __rmul__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... + def __rmul__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __rmul__(self, other: complex, /) -> float64 | complex128: ... @@ -5130,7 +5130,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __truediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __truediv__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... + def __truediv__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __truediv__(self, other: complex, /) -> float64 | complex128: ... @@ -5139,7 +5139,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rtruediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload - def __rtruediv__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... + def __rtruediv__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __rtruediv__(self, other: complex, /) -> float64 | complex128: ... @@ -5148,7 +5148,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __floordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __floordiv__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... + def __floordiv__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... 
@overload def __floordiv__(self, other: complex, /) -> float64 | complex128: ... @@ -5157,7 +5157,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rfloordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __rfloordiv__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... + def __rfloordiv__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __rfloordiv__(self, other: complex, /) -> float64 | complex128: ... @@ -5166,9 +5166,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __pow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... @overload - def __pow__( - self, other: complexfloating[_NBitT1, _NBitT2], mod: None = None, / - ) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... + def __pow__[NBitT: NBitBase](self, other: complexfloating[NBitT], mod: None = None, /) -> complexfloating[NBitT | _64Bit]: ... @overload def __pow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... @@ -5177,9 +5175,9 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rpow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... # type: ignore[misc] @overload - def __rpow__( - self, other: complexfloating[_NBitT1, _NBitT2], mod: None = None, / - ) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... + def __rpow__[NBitT: NBitBase]( + self, other: complexfloating[NBitT], mod: None = None, / + ) -> complexfloating[NBitT | _64Bit]: ... @overload def __rpow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... @@ -5410,9 +5408,11 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __add__(self: timedelta64[int], x: timedelta64, /) -> timedelta64[int | None]: ... @overload - def __add__[DateOrTimeT: dt.date | dt.timedelta](self: timedelta64[dt.timedelta], x: DateOrTimeT, /) -> DateOrTimeT: ... + def __add__[AnyDateOrTimeT: (dt.datetime, dt.date, dt.timedelta)]( + self: timedelta64[dt.timedelta], x: AnyDateOrTimeT, / + ) -> AnyDateOrTimeT: ... @overload - def __add__[AnyItemT: (dt.timedelta, int, None)]( + def __add__[AnyItemT: (dt.timedelta, int, None, _TD64Item)]( self: timedelta64[AnyItemT], x: timedelta64[AnyItemT] | _IntLike_co, / ) -> timedelta64[AnyItemT]: ... __radd__ = __add__ @@ -5433,7 +5433,7 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __sub__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> dt.timedelta: ... @overload - def __sub__[AnyItemT: (dt.timedelta, int, None)]( + def __sub__[AnyItemT: (dt.timedelta, int, None, _TD64Item)]( self: timedelta64[AnyItemT], b: timedelta64[AnyItemT] | _IntLike_co, / ) -> timedelta64[AnyItemT]: ... @@ -5450,7 +5450,7 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] self: timedelta64[dt.timedelta], a: timedelta64[AnyItemT], / ) -> timedelta64[AnyItemT]: ... @overload - def __rsub__[AnyItemT: (dt.timedelta, int, None)]( # type: ignore[misc] + def __rsub__[AnyItemT: (dt.timedelta, int, None, _TD64Item)]( # type: ignore[misc] self: timedelta64[AnyItemT], a: timedelta64[AnyItemT] | _IntLike_co, / ) -> timedelta64[AnyItemT]: ... 
@overload @@ -5604,7 +5604,7 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __new__(cls, value: datetime64[_DT64ItemT_co], /) -> Self: ... @overload - def __new__[AnyItemT: (dt.date, dt.datetime, None)](cls, value: AnyItemT, /) -> datetime64[AnyItemT]: ... + def __new__[AnyItemT: (dt.datetime, dt.date, None)](cls, value: AnyItemT, /) -> datetime64[AnyItemT]: ... @overload def __new__(cls, value: _NaTValue | None = ..., format: _TimeUnitSpec[_TD64Unit] = ..., /) -> datetime64[None]: ... @overload From 288bc507f14166eac3a47ba157c3f5a5d84c6c30 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 2 Dec 2025 17:28:20 +0100 Subject: [PATCH 0969/1018] MAINT: remove unused stubtest allowlist entry --- tools/stubtest/allowlist.txt | 3 --- 1 file changed, 3 deletions(-) diff --git a/tools/stubtest/allowlist.txt b/tools/stubtest/allowlist.txt index eab246271dd1..58110d66046e 100644 --- a/tools/stubtest/allowlist.txt +++ b/tools/stubtest/allowlist.txt @@ -1,6 +1,3 @@ -# TODO: remove once distutils is gone -numpy\.f2py\._backends\._distutils - # intentional type-check-only deviations from runtime numpy\._typing.* numpy\.polynomial\._polytypes From 94845770d2db2296b4bc7c674f184a0fe1e25582 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 2 Dec 2025 18:39:08 +0100 Subject: [PATCH 0970/1018] STY: re-enable ruff/pyupgrade --- ruff.toml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ruff.toml b/ruff.toml index c02a90e610e6..ebbb29283622 100644 --- a/ruff.toml +++ b/ruff.toml @@ -36,8 +36,7 @@ extend-select = [ "W", # pycodestyle/warning "PGH", # pygrep-hooks "PLE", # pylint/error - # TODO: re-enable after merging https://github.com/numpy/numpy/pull/30319 - # "UP", # pyupgrade + "UP", # pyupgrade ] ignore = [ # flake8-bugbear From dcf055eb7c6cdbb5a93f4a4ec6071c22ffc3eb29 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 2 Dec 2025 18:51:42 +0100 Subject: [PATCH 0971/1018] MAINT: post-rebase correction --- doc/source/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 111a631d8ea1..238a7d11f8a4 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -608,7 +608,7 @@ class NumPyLexer(CLexer): breathe_default_members = ("members", "undoc-members", "protected-members") # See https://github.com/breathe-doc/breathe/issues/696 -nitpick_ignore += [ +nitpick_ignore = [ ('c:identifier', 'FILE'), ('c:identifier', 'size_t'), ('c:identifier', 'PyHeapTypeObject'), From b688a081dbcc35fe759dd49e609264425f2763fd Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 4 Dec 2025 15:28:35 +0100 Subject: [PATCH 0972/1018] DOC, TST, TYP: update `NDArray` doctest --- numpy/_typing/_add_docstring.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_typing/_add_docstring.py b/numpy/_typing/_add_docstring.py index 5330a6b3b715..883b890a1a16 100644 --- a/numpy/_typing/_add_docstring.py +++ b/numpy/_typing/_add_docstring.py @@ -137,10 +137,10 @@ def _parse_docstrings() -> str: >>> import numpy.typing as npt >>> print(npt.NDArray) - numpy.ndarray[tuple[typing.Any, ...], numpy.dtype[~_ScalarT]] + NDArray >>> print(npt.NDArray[np.float64]) - numpy.ndarray[tuple[typing.Any, ...], numpy.dtype[numpy.float64]] + NDArray[numpy.float64] >>> NDArrayInt = npt.NDArray[np.int_] >>> a: NDArrayInt = np.arange(10) From 688cd5c17690faddbb39252e137d46e37c5ab30e Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 5 Dec 2025 11:08:52 +0100 Subject: [PATCH 0973/1018] TYP: post-rebase corrections 
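
Two rebase leftovers, both visible in the diff below. ``_type_check_impl.pyi`` ended up with a stray bare duplicate of the first ``real_if_close`` overload:

    @overload
    def real_if_close(a: _ArrayLike[np.complex64], tol: float = 100) -> NDArray[np.float32 | np.complex64]: ...
    def real_if_close(a: _ArrayLike[np.complex64], tol: float = 100) -> NDArray[np.float32 | np.complex64]: ...

The second, undecorated re-definition shadows the overload chain, so it is removed. Additionally, the aliases in ``_user_array_impl.pyi`` that are built on the deprecated ``container`` class now need ``type: ignore[deprecated]`` markers.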
--- numpy/lib/_type_check_impl.pyi | 1 - numpy/lib/_user_array_impl.pyi | 8 ++++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/numpy/lib/_type_check_impl.pyi b/numpy/lib/_type_check_impl.pyi index b75f010d3550..dbef0ca87280 100644 --- a/numpy/lib/_type_check_impl.pyi +++ b/numpy/lib/_type_check_impl.pyi @@ -137,7 +137,6 @@ def nan_to_num( # NOTE: The [overload-overlap] mypy error is a false positive @overload def real_if_close(a: _ArrayLike[np.complex64], tol: float = 100) -> NDArray[np.float32 | np.complex64]: ... -def real_if_close(a: _ArrayLike[np.complex64], tol: float = 100) -> NDArray[np.float32 | np.complex64]: ... @overload def real_if_close(a: _ArrayLike[np.complex128], tol: float = 100) -> NDArray[np.float64 | np.complex128]: ... @overload diff --git a/numpy/lib/_user_array_impl.pyi b/numpy/lib/_user_array_impl.pyi index 661dc97f224c..4a6dfffbea92 100644 --- a/numpy/lib/_user_array_impl.pyi +++ b/numpy/lib/_user_array_impl.pyi @@ -21,10 +21,10 @@ _DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=T type _ArrayInt_co = npt.NDArray[np.integer | np.bool] -type _BoolContainer = container[Any, np.dtype[np.bool]] -type _IntegralContainer = container[Any, np.dtype[np.bool | np.integer | np.object_]] -type _RealContainer = container[Any, np.dtype[np.bool | np.integer | np.floating | np.timedelta64 | np.object_]] -type _NumericContainer = container[Any, np.dtype[np.number | np.timedelta64 | np.object_]] +type _BoolContainer = container[Any, np.dtype[np.bool]] # type: ignore[deprecated] +type _IntegralContainer = container[Any, np.dtype[np.bool | np.integer | np.object_]] # type: ignore[deprecated] +type _RealContainer = container[Any, np.dtype[np.bool | np.integer | np.floating | np.timedelta64 | np.object_]] # type: ignore[deprecated] +type _NumericContainer = container[Any, np.dtype[np.number | np.timedelta64 | np.object_]] # type: ignore[deprecated] type _ToIndexSlice = slice | EllipsisType | _ArrayInt_co | None type _ToIndexSlices = _ToIndexSlice | tuple[_ToIndexSlice, ...] From 60bf05c38a1059cef0d897a83c626bad702ae1ba Mon Sep 17 00:00:00 2001 From: Karan Singh Date: Mon, 8 Dec 2025 01:50:06 +0530 Subject: [PATCH 0974/1018] DOC: Clarify `np.negative` * DOC: Improve examples for np.negative and np.positive * DOC: Improve examples for np.negative * DOC: clarify unary examples [skip actions][skip cirrus] --- numpy/_core/code_generators/ufunc_docstrings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index d4707da906b2..fe07047a1758 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -3174,7 +3174,7 @@ def add_newdoc(place, name, doc): add_newdoc('numpy._core.umath', 'negative', """ - Numerical negative, element-wise. + Numerical negation, element-wise. Parameters ---------- From bab2cb21ee08c0fd3ac8ab79b24629885b88bfe5 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Mon, 8 Dec 2025 03:46:14 -0500 Subject: [PATCH 0975/1018] MAINT: Use RAII objects in unique.cpp to ensure safe resource management. (#30329) Use RAII scoped locking for string locking and GIL release in unique and fix a few smaller things. --- * BENCH: Add UniqueIntegers benchmark. * Remove incorrect call of Py_DECREF(res_obj). * Don't use std::invoke. * Simplify code in the unique_* functions--no need for a lambda. * BUG: Add missing Py_DECREF(arr) in array__unique_hash(). 
* BUG: Add missing Py_XDECREF(arr) if npy_parse_args() fails in array__unique_hash(). Unlikely to be needed, but might as well be thorough. --- benchmarks/benchmarks/bench_lib.py | 47 ++++ numpy/_core/src/common/raii_utils.hpp | 9 +- numpy/_core/src/multiarray/unique.cpp | 373 +++++++++++--------------- 3 files changed, 217 insertions(+), 212 deletions(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index 33e2871fc727..11d454ae41bf 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -2,6 +2,8 @@ import string +from asv_runner.benchmarks.mark import SkipNotImplemented + import numpy as np from .common import Benchmark @@ -189,6 +191,51 @@ def time_unique_all(self, array_size, percent_nans, return_inverse=True, return_counts=True) +class UniqueIntegers(Benchmark): + """Benchmark for np.unique with integer dtypes.""" + + param_names = ["array_size", "num_unique_values", "dtype"] + params = [ + # sizes of the 1D arrays + [200, 100000, 1000000], + # number of unique values in arrays + [25, 125, 5000, 50000, 250000], + # dtypes of the arrays + [np.uint8, np.int16, np.uint32, np.int64], + ] + + def setup(self, array_size, num_unique_values, dtype): + unique_array = np.arange(num_unique_values, dtype=dtype) + base_array = np.resize(unique_array, array_size) + rng = np.random.default_rng(121263137472525314065) + rng.shuffle(base_array) + self.arr = base_array + + def time_unique_values(self, array_size, num_unique_values, dtype): + if num_unique_values >= np.iinfo(dtype).max or num_unique_values > array_size: + raise SkipNotImplemented("skipped") + np.unique(self.arr, return_index=False, + return_inverse=False, return_counts=False) + + def time_unique_counts(self, array_size, num_unique_values, dtype): + if num_unique_values >= np.iinfo(dtype).max or num_unique_values > array_size: + raise SkipNotImplemented("skipped") + np.unique(self.arr, return_index=False, + return_inverse=False, return_counts=True,) + + def time_unique_inverse(self, array_size, num_unique_values, dtype): + if num_unique_values >= np.iinfo(dtype).max or num_unique_values > array_size: + raise SkipNotImplemented("skipped") + np.unique(self.arr, return_index=False, + return_inverse=True, return_counts=False) + + def time_unique_all(self, array_size, num_unique_values, dtype): + if num_unique_values >= np.iinfo(dtype).max or num_unique_values > array_size: + raise SkipNotImplemented("skipped") + np.unique(self.arr, return_index=True, + return_inverse=True, return_counts=True) + + class Isin(Benchmark): """Benchmarks for `numpy.isin`.""" diff --git a/numpy/_core/src/common/raii_utils.hpp b/numpy/_core/src/common/raii_utils.hpp index e92d0eae9269..1049e97387f0 100644 --- a/numpy/_core/src/common/raii_utils.hpp +++ b/numpy/_core/src/common/raii_utils.hpp @@ -126,9 +126,11 @@ class SaveThreadState // // Instead of // +// Py_INCREF(descr); // npy_string_allocator *allocator = NpyString_acquire_allocator(descr); // [code that uses allocator] // NpyString_release_allocator(allocator); +// Py_DECREF(descr); // // use // @@ -139,16 +141,19 @@ class SaveThreadState // class NpyStringAcquireAllocator { + PyArray_StringDTypeObject *_descr; npy_string_allocator *_allocator; public: - NpyStringAcquireAllocator(PyArray_StringDTypeObject *descr) { - _allocator = NpyString_acquire_allocator(descr); + NpyStringAcquireAllocator(PyArray_StringDTypeObject *descr) : _descr(descr) { + Py_INCREF(_descr); + _allocator = NpyString_acquire_allocator(_descr); } 
~NpyStringAcquireAllocator() { NpyString_release_allocator(_allocator); + Py_DECREF(_descr); } NpyStringAcquireAllocator(const NpyStringAcquireAllocator&) = delete; diff --git a/numpy/_core/src/multiarray/unique.cpp b/numpy/_core/src/multiarray/unique.cpp index 77083c04c519..8fc5b580961e 100644 --- a/numpy/_core/src/multiarray/unique.cpp +++ b/numpy/_core/src/multiarray/unique.cpp @@ -1,13 +1,13 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE -#define HASH_TABLE_INITIAL_BUCKETS 1024 #include <Python.h> #include <algorithm> #include <cstdint> #include <cstring> #include <functional> +#include <stdexcept> #include <unordered_set> #include <utility> #include <vector> @@ -15,6 +15,7 @@ #include <numpy/npy_common.h> #include "numpy/arrayobject.h" #include "gil_utils.h" +#include "raii_utils.hpp" extern "C" { #include "fnv.h" #include "npy_argparse.h" @@ -22,18 +23,42 @@ extern "C" { #include "numpy/halffloat.h" } -// This is to use RAII pattern to handle cpp exceptions while avoiding memory leaks. -// Adapted from https://stackoverflow.com/a/25510879/2536294 -template <typename F> -struct FinalAction { - FinalAction(F f) : clean_{f} {} - ~FinalAction() { clean_(); } - private: - F clean_; -}; -template <typename F> -FinalAction<F> finally(F f) { - return FinalAction<F>(f); +// HASH_TABLE_INITIAL_BUCKETS is the reserve hashset capacity used in the +// std::unordered_set instances in the various unique_* functions. +// We use min(input_size, HASH_TABLE_INITIAL_BUCKETS) as the initial bucket +// count: +// - Reserving for all elements (isize) may over-allocate when there are few +// unique values. +// - Using a moderate upper bound HASH_TABLE_INITIAL_BUCKETS(1024) keeps +// memory usage reasonable (4 KiB for pointers). +// See https://github.com/numpy/numpy/pull/28767#discussion_r2064267631 +const npy_intp HASH_TABLE_INITIAL_BUCKETS = 1024; + +// +// Create a 1-d array with the given length that has the same +// dtype as the input `arr`. +// +static inline PyArrayObject * +empty_array_like(PyArrayObject *arr, npy_intp length) +{ + PyArray_Descr *descr = PyArray_DESCR(arr); + Py_INCREF(descr); + + // Create the output array. + PyArrayObject *res_obj = + reinterpret_cast<PyArrayObject *>( + PyArray_NewFromDescr( + &PyArray_Type, + descr, + 1, // ndim + &length, // shape + NULL, // strides + NULL, // data + NPY_ARRAY_WRITEABLE, // flags + NULL // obj + ) + ); + return res_obj; } template <typename T> @@ -183,19 +208,10 @@ static PyObject* unique_numeric(PyArrayObject *self, npy_bool equal_nan) { /* - * Returns a new NumPy array containing the unique values of the input array of numeric (integer or complex). - * This function uses hashing to identify uniqueness efficiently. - */ - NPY_ALLOW_C_API_DEF; - NPY_ALLOW_C_API; - PyArray_Descr *descr = PyArray_DESCR(self); - Py_INCREF(descr); - NPY_DISABLE_C_API; - - PyThreadState *_save1 = PyEval_SaveThread(); - - // number of elements in the input array - npy_intp isize = PyArray_SIZE(self); + * Returns a new NumPy array containing the unique values of the input + * array of numeric (integer or complex). + * This function uses hashing to identify uniqueness efficiently. + */ auto hash = [equal_nan](const T *value) -> size_t { return hash_func(value, equal_nan); @@ -204,55 +220,38 @@ unique_numeric(PyArrayObject *self, npy_bool equal_nan) return equal_func(lhs, rhs, equal_nan); }; - // Reserve hashset capacity in advance to minimize reallocations and collisions. - // We use min(isize, HASH_TABLE_INITIAL_BUCKETS) as the initial bucket count: - // - Reserving for all elements (isize) may over-allocate when there are few unique values. - // - Using a moderate upper bound HASH_TABLE_INITIAL_BUCKETS(1024) keeps memory usage reasonable (4 KiB for pointers). - // See discussion: https://github.com/numpy/numpy/pull/28767#discussion_r2064267631 - std::unordered_set<T *, decltype(hash), decltype(equal)> hashset( - std::min(isize, (npy_intp)HASH_TABLE_INITIAL_BUCKETS), hash, equal - ); - - // Input array is one-dimensional, enabling efficient iteration using strides. - char *idata = PyArray_BYTES(self); - npy_intp istride = PyArray_STRIDES(self)[0]; - for (npy_intp i = 0; i < isize; i++, idata += istride) { - hashset.insert((T *)idata); - } + using set_type = std::unordered_set<T *, decltype(hash), decltype(equal)>; + + // number of elements in the input array + npy_intp isize = PyArray_SIZE(self); + set_type hashset(std::min(isize, HASH_TABLE_INITIAL_BUCKETS), hash, equal); + { + np::raii::SaveThreadState save_thread_state{}; + char *idata = PyArray_BYTES(self); + npy_intp istride = PyArray_STRIDES(self)[0]; + for (npy_intp i = 0; i < isize; i++, idata += istride) { + hashset.insert(reinterpret_cast<T *>(idata)); + } + } 
- npy_intp length = hashset.size(); - - PyEval_RestoreThread(_save1); - NPY_ALLOW_C_API; - PyObject *res_obj = PyArray_NewFromDescr( - &PyArray_Type, - descr, - 1, // ndim - &length, // shape - NULL, // strides - NULL, // data - // This flag is needed to be able to call .sort on it. - NPY_ARRAY_WRITEABLE, // flags - NULL // obj - ); + PyArrayObject *res_obj = empty_array_like(self, hashset.size()); if (res_obj == NULL) { return NULL; } - NPY_DISABLE_C_API; - PyThreadState *_save2 = PyEval_SaveThread(); - auto save2_dealloc = finally([&]() { - PyEval_RestoreThread(_save2); - }); - - char *odata = PyArray_BYTES((PyArrayObject *)res_obj); - npy_intp ostride = PyArray_STRIDES((PyArrayObject *)res_obj)[0]; - // Output array is one-dimensional, enabling efficient iteration using strides. - for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { - copy_func(odata, *it); + + { + np::raii::SaveThreadState save_thread_state{}; + + char *odata = PyArray_BYTES(res_obj); + npy_intp ostride = PyArray_STRIDES(res_obj)[0]; + for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { + copy_func(odata, *it); + } } - return res_obj; + return reinterpret_cast<PyObject *>(res_obj); } template <typename T> @@ -260,23 +259,16 @@ static PyObject* unique_string(PyArrayObject *self, npy_bool equal_nan) { /* - * Returns a new NumPy array containing the unique values of the input array of fixed size strings. - * This function uses hashing to identify uniqueness efficiently. - */ - NPY_ALLOW_C_API_DEF; - NPY_ALLOW_C_API; - PyArray_Descr *descr = PyArray_DESCR(self); - Py_INCREF(descr); - NPY_DISABLE_C_API; - - PyThreadState *_save1 = PyEval_SaveThread(); - - // number of elements in the input array - npy_intp isize = PyArray_SIZE(self); + * Returns a new NumPy array containing the unique values of the input + * array of fixed size strings. + * This function uses hashing to identify uniqueness efficiently. + */ + PyArray_Descr *descr = PyArray_DESCR(self); // variables for the string npy_intp itemsize = descr->elsize; npy_intp num_chars = itemsize / sizeof(T); + auto hash = [num_chars](const T *value) -> size_t { return npy_fnv1a(value, num_chars * sizeof(T)); }; @@ -284,77 +276,48 @@ unique_string(PyArrayObject *self, npy_bool equal_nan) return std::memcmp(lhs, rhs, itemsize) == 0; }; - // Reserve hashset capacity in advance to minimize reallocations and collisions. 
- // We use min(isize, HASH_TABLE_INITIAL_BUCKETS) as the initial bucket count: - // - Reserving for all elements (isize) may over-allocate when there are few unique values. - // - Using a moderate upper bound HASH_TABLE_INITIAL_BUCKETS(1024) keeps memory usage reasonable (4 KiB for pointers). - // See discussion: https://github.com/numpy/numpy/pull/28767#discussion_r2064267631 - std::unordered_set<T *, decltype(hash), decltype(equal)> hashset( - std::min(isize, (npy_intp)HASH_TABLE_INITIAL_BUCKETS), hash, equal - ); - - // Input array is one-dimensional, enabling efficient iteration using strides. - char *idata = PyArray_BYTES(self); - npy_intp istride = PyArray_STRIDES(self)[0]; - for (npy_intp i = 0; i < isize; i++, idata += istride) { - hashset.insert((T *)idata); - } + using set_type = std::unordered_set<T *, decltype(hash), decltype(equal)>; + + // number of elements in the input array + npy_intp isize = PyArray_SIZE(self); + set_type hashset(std::min(isize, HASH_TABLE_INITIAL_BUCKETS), hash, equal); - npy_intp length = hashset.size(); - - PyEval_RestoreThread(_save1); - NPY_ALLOW_C_API; - PyObject *res_obj = PyArray_NewFromDescr( - &PyArray_Type, - descr, - 1, // ndim - &length, // shape - NULL, // strides - NULL, // data - // This flag is needed to be able to call .sort on it. - NPY_ARRAY_WRITEABLE, // flags - NULL // obj - ); + { + np::raii::SaveThreadState save_thread_state{}; + char *idata = PyArray_BYTES(self); + npy_intp istride = PyArray_STRIDES(self)[0]; + for (npy_intp i = 0; i < isize; i++, idata += istride) { + hashset.insert(reinterpret_cast<T *>(idata)); + } + } + + PyArrayObject *res_obj = empty_array_like(self, hashset.size()); if (res_obj == NULL) { return NULL; } - NPY_DISABLE_C_API; - PyThreadState *_save2 = PyEval_SaveThread(); - auto save2_dealloc = finally([&]() { - PyEval_RestoreThread(_save2); - }); - - char *odata = PyArray_BYTES((PyArrayObject *)res_obj); - npy_intp ostride = PyArray_STRIDES((PyArrayObject *)res_obj)[0]; - // Output array is one-dimensional, enabling efficient iteration using strides. - for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { - std::memcpy(odata, *it, itemsize); + + { + np::raii::SaveThreadState save_thread_state{}; + + char *odata = PyArray_BYTES(res_obj); + npy_intp ostride = PyArray_STRIDES(res_obj)[0]; + for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { + std::memcpy(odata, *it, itemsize); + } } - return res_obj; + return reinterpret_cast<PyObject *>(res_obj); } static PyObject* unique_vstring(PyArrayObject *self, npy_bool equal_nan) { /* - * Returns a new NumPy array containing the unique values of the input array. - * This function uses hashing to identify uniqueness efficiently. - */ - NPY_ALLOW_C_API_DEF; - NPY_ALLOW_C_API; - PyArray_Descr *descr = PyArray_DESCR(self); - Py_INCREF(descr); - NPY_DISABLE_C_API; - - PyThreadState *_save1 = PyEval_SaveThread(); - - // number of elements in the input array - npy_intp isize = PyArray_SIZE(self); + * Returns a new NumPy array containing the unique values of the input array. + * This function uses hashing to identify uniqueness efficiently. + */ - // variables for the vstring - npy_string_allocator *in_allocator = NpyString_acquire_allocator((PyArray_StringDTypeObject *)descr); auto hash = [equal_nan](const npy_static_string *value) -> size_t { if (value->buf == NULL) { if (equal_nan) { @@ -382,83 +345,70 @@ unique_vstring(PyArrayObject *self, npy_bool equal_nan) return std::memcmp(lhs->buf, rhs->buf, lhs->size) == 0; }; - // Reserve hashset capacity in advance to minimize reallocations and collisions. 
- // We use min(isize, HASH_TABLE_INITIAL_BUCKETS) as the initial bucket count: - // - Reserving for all elements (isize) may over-allocate when there are few unique values. - // - Using a moderate upper bound HASH_TABLE_INITIAL_BUCKETS(1024) keeps memory usage reasonable (4 KiB for pointers). - // See discussion: https://github.com/numpy/numpy/pull/28767#discussion_r2064267631 - std::unordered_set<npy_static_string *, decltype(hash), decltype(equal)> hashset( - std::min(isize, (npy_intp)HASH_TABLE_INITIAL_BUCKETS), hash, equal - ); - - // Input array is one-dimensional, enabling efficient iteration using strides. - char *idata = PyArray_BYTES(self); - npy_intp istride = PyArray_STRIDES(self)[0]; - // unpacked_strings need to be allocated outside of the loop because of the lifetime problem. + npy_intp isize = PyArray_SIZE(self); + // unpacked_strings must live as long as hashset because hashset points + // to values in this vector. std::vector<npy_static_string> unpacked_strings(isize, {0, NULL}); - for (npy_intp i = 0; i < isize; i++, idata += istride) { - npy_packed_static_string *packed_string = (npy_packed_static_string *)idata; - int is_null = NpyString_load(in_allocator, packed_string, &unpacked_strings[i]); - if (is_null == -1) { - npy_gil_error(PyExc_RuntimeError, - "Failed to load string from packed static string. "); - return NULL; + + using set_type = std::unordered_set<npy_static_string *, decltype(hash), decltype(equal)>; + set_type hashset(std::min(isize, HASH_TABLE_INITIAL_BUCKETS), hash, equal); + + { + PyArray_StringDTypeObject *descr = + reinterpret_cast<PyArray_StringDTypeObject *>(PyArray_DESCR(self)); + np::raii::NpyStringAcquireAllocator alloc(descr); + np::raii::SaveThreadState save_thread_state{}; + + char *idata = PyArray_BYTES(self); + npy_intp istride = PyArray_STRIDES(self)[0]; + + for (npy_intp i = 0; i < isize; i++, idata += istride) { + npy_packed_static_string *packed_string = + reinterpret_cast<npy_packed_static_string *>(idata); + int is_null = NpyString_load(alloc.allocator(), packed_string, + &unpacked_strings[i]); + if (is_null == -1) { + // Unexpected error. Throw a C++ exception that will be caught + // by the caller of unique_vstring() and converted into a Python + // RuntimeError. + throw std::runtime_error("Failed to load string from packed " + "static string."); + } + hashset.insert(&unpacked_strings[i]); } - hashset.insert(&unpacked_strings[i]); } - NpyString_release_allocator(in_allocator); - - npy_intp length = hashset.size(); - - PyEval_RestoreThread(_save1); - NPY_ALLOW_C_API; - PyObject *res_obj = PyArray_NewFromDescr( - &PyArray_Type, - descr, - 1, // ndim - &length, // shape - NULL, // strides - NULL, // data - // This flag is needed to be able to call .sort on it. - NPY_ARRAY_WRITEABLE, // flags - NULL // obj - ); + PyArrayObject *res_obj = empty_array_like(self, hashset.size()); if (res_obj == NULL) { return NULL; } - PyArray_Descr *res_descr = PyArray_DESCR((PyArrayObject *)res_obj); - Py_INCREF(res_descr); - NPY_DISABLE_C_API; - - PyThreadState *_save2 = PyEval_SaveThread(); - auto save2_dealloc = finally([&]() { - PyEval_RestoreThread(_save2); - }); - - npy_string_allocator *out_allocator = NpyString_acquire_allocator((PyArray_StringDTypeObject *)res_descr); - auto out_allocator_dealloc = finally([&]() { - NpyString_release_allocator(out_allocator); - }); - - char *odata = PyArray_BYTES((PyArrayObject *)res_obj); - npy_intp ostride = PyArray_STRIDES((PyArrayObject *)res_obj)[0]; - // Output array is one-dimensional, enabling efficient iteration using strides. 
- for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { - npy_packed_static_string *packed_string = (npy_packed_static_string *)odata; - int pack_status = 0; - if ((*it)->buf == NULL) { - pack_status = NpyString_pack_null(out_allocator, packed_string); - } else { - pack_status = NpyString_pack(out_allocator, packed_string, (*it)->buf, (*it)->size); - } - if (pack_status == -1) { - // string packing failed - return NULL; + + { + PyArray_StringDTypeObject *res_descr = + reinterpret_cast<PyArray_StringDTypeObject *>(PyArray_DESCR(res_obj)); + np::raii::NpyStringAcquireAllocator alloc(res_descr); + np::raii::SaveThreadState save_thread_state{}; + + char *odata = PyArray_BYTES(res_obj); + npy_intp ostride = PyArray_STRIDES(res_obj)[0]; + for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { + npy_packed_static_string *packed_string = + reinterpret_cast<npy_packed_static_string *>(odata); + int pack_status = 0; + if ((*it)->buf == NULL) { + pack_status = NpyString_pack_null(alloc.allocator(), packed_string); + } else { + pack_status = NpyString_pack(alloc.allocator(), packed_string, + (*it)->buf, (*it)->size); + } + if (pack_status == -1) { + // string packing failed + return NULL; + } } } - - return res_obj; + return reinterpret_cast<PyObject *>(res_obj); } @@ -549,24 +499,27 @@ array__unique_hash(PyObject *NPY_UNUSED(module), NULL, NULL, NULL ) < 0 ) { + Py_XDECREF(arr); return NULL; } + PyObject *result = NULL; try { auto type = PyArray_TYPE(arr); // we only support data types present in our unique_funcs map if (unique_funcs.find(type) == unique_funcs.end()) { Py_RETURN_NOTIMPLEMENTED; } - - return unique_funcs[type](arr, equal_nan); + result = unique_funcs[type](arr, equal_nan); } catch (const std::bad_alloc &e) { PyErr_NoMemory(); - return NULL; + result = NULL; } catch (const std::exception &e) { PyErr_SetString(PyExc_RuntimeError, e.what()); - return NULL; + result = NULL; } + Py_DECREF(arr); + return result; } From 7eec5e2977871dfbfd363a828d5f11131b46d953 Mon Sep 17 00:00:00 2001 From: Arthur Lacote Date: Mon, 8 Dec 2025 09:55:27 +0100 Subject: [PATCH 0976/1018] BUG: fix reduction issue in weighted quantile (#30070) * TST: Test weighted quantile with multiple axes reduction * FIX: Fix weighted quantile reduction by handling weights in _ureduce --- numpy/lib/_function_base_impl.py | 20 ++++++++++++++------ numpy/lib/tests/test_function_base.py | 11 +++++++++++ 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 87a834850fda..e407598d62c6 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -3857,13 +3857,21 @@ def _ureduce(a, func, keepdims=False, **kwargs): if len(axis) == 1: kwargs['axis'] = axis[0] else: - keep = set(range(nd)) - set(axis) + keep = sorted(set(range(nd)) - set(axis)) nkeep = len(keep) - # swap axis that should not be reduced to front - for i, s in enumerate(sorted(keep)): - a = a.swapaxes(i, s) - # merge reduced axis - a = a.reshape(a.shape[:nkeep] + (-1,)) + + def reshape_arr(a): + # move axis that should not be reduced to front + a = np.moveaxis(a, keep, range(nkeep)) + # merge reduced axis + return a.reshape(a.shape[:nkeep] + (-1,)) + + a = reshape_arr(a) + + weights = kwargs.get("weights") + if weights is not None: + kwargs["weights"] = reshape_arr(weights) + + kwargs['axis'] = -1 elif keepdims and out is not None: index_out = (0, ) * nd diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 5b8b0adbdd0d..412f06d07e20 100644 
--- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -4228,6 +4228,17 @@ def test_quantile_with_weights_and_axis(self, method): ) assert_allclose(q, q_res) + # axis is a tuple of all axes + q = np.quantile(y, alpha, weights=w, method=method, axis=(0, 1, 2)) + q_res = np.quantile(y, alpha, weights=w, method=method, axis=None) + assert_allclose(q, q_res) + + q = np.quantile(y, alpha, weights=w, method=method, axis=(1, 2)) + q_res = np.zeros(shape=(2,)) + for i in range(2): + q_res[i] = np.quantile(y[i], alpha, weights=w[i], method=method) + assert_allclose(q, q_res) + @pytest.mark.parametrize("method", methods_supporting_weights) def test_quantile_weights_min_max(self, method): # Test weighted quantile at 0 and 1 with leading and trailing zero From 13f849dc9a49104c7fc8c6cd521323f3990b3b95 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 8 Dec 2025 18:31:23 +0100 Subject: [PATCH 0977/1018] TYP: ``random.Generator``: many fixes and improvements (#30391) --- numpy/random/_generator.pyi | 1364 ++++++++++----------- numpy/typing/tests/data/reveal/random.pyi | 415 ++----- 2 files changed, 751 insertions(+), 1028 deletions(-) diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index 9b0e5c331b57..f3f911b98ce7 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -1,255 +1,516 @@ from collections.abc import Callable, MutableSequence -from typing import Any, Literal, overload +from typing import Any, Literal, Self, overload import numpy as np -from numpy import float32, float64, int64 from numpy._typing import ( ArrayLike, DTypeLike, NDArray, + _ArrayLike, _ArrayLikeFloat_co, _ArrayLikeInt_co, - _BoolCodes, _DoubleCodes, _DTypeLike, - _DTypeLikeBool, _Float32Codes, _Float64Codes, _FloatLike_co, - _Int8Codes, - _Int16Codes, - _Int32Codes, _Int64Codes, - _IntPCodes, + _NestedSequence, _ShapeLike, _SingleCodes, - _UInt8Codes, - _UInt16Codes, - _UInt32Codes, - _UInt64Codes, - _UIntPCodes, ) -from numpy.random import BitGenerator, RandomState, SeedSequence -type _DTypeLikeFloat32 = _DTypeLike[np.float32] | _Float32Codes | _SingleCodes -type _DTypeLikeFloat64 = type[float] | _DTypeLike[np.float64] | _Float64Codes | _DoubleCodes +from .bit_generator import BitGenerator, SeedSequence +from .mtrand import RandomState + +type _ArrayF32 = NDArray[np.float32] +type _ArrayF64 = NDArray[np.float64] + +type _DTypeLikeI64 = _DTypeLike[np.int64] | _Int64Codes +type _DTypeLikeF32 = _DTypeLike[np.float32] | _Float32Codes | _SingleCodes +type _DTypeLikeF64 = type[float] | _DTypeLike[np.float64] | _Float64Codes | _DoubleCodes +# we use `str` to avoid type-checker performance issues because of the many `Literal` variants +type _DTypeLikeFloat = type[float] | _DTypeLike[np.float32 | np.float64] | str + +# Similar to `_ArrayLike{}_co`, but rejects scalars +type _NDArrayLikeInt = NDArray[np.generic[int]] | _NestedSequence[int] +type _NDArrayLikeFloat = NDArray[np.generic[float]] | _NestedSequence[float] + +type _MethodExp = Literal["zig", "inv"] ### class Generator: def __init__(self, bit_generator: BitGenerator) -> None: ... - def __repr__(self) -> str: ... - def __str__(self) -> str: ... - def __getstate__(self) -> None: ... def __setstate__(self, state: dict[str, Any] | None) -> None: ... def __reduce__(self) -> tuple[Callable[[BitGenerator], Generator], tuple[BitGenerator], None]: ... + + # @property def bit_generator(self) -> BitGenerator: ... - def spawn(self, n_children: int) -> list[Generator]: ... 
+    def spawn(self, n_children: int) -> list[Self]: ...
     def bytes(self, length: int) -> bytes: ...
+
+    # continuous distributions
+
+    #
+    @overload
     def standard_cauchy(self, size: None = None) -> float: ...
     @overload
+    def standard_cauchy(self, size: _ShapeLike) -> _ArrayF64: ...
+
+    #
+    @overload # size=None (default); NOTE: dtype is ignored
+    def random(self, size: None = None, dtype: _DTypeLikeFloat = ..., out: None = None) -> float: ...
+    @overload # size=, dtype=f64 (default)
+    def random(self, size: _ShapeLike, dtype: _DTypeLikeF64 = ..., out: None = None) -> _ArrayF64: ...
+    @overload # size=, dtype=f32
+    def random(self, size: _ShapeLike, dtype: _DTypeLikeF32, out: None = None) -> _ArrayF32: ...
+    @overload # out: f64 array (keyword)
+    def random[ArrayT: _ArrayF64](self, size: _ShapeLike | None = None, dtype: _DTypeLikeF64 = ..., *, out: ArrayT) -> ArrayT: ...
+    @overload # dtype: f32 (keyword), out: f32 array
+    def random[ArrayT: _ArrayF32](self, size: _ShapeLike | None = None, *, dtype: _DTypeLikeF32, out: ArrayT) -> ArrayT: ...
+    @overload # out: f64 array (positional)
+    def random[ArrayT: _ArrayF64](self, size: _ShapeLike | None, dtype: _DTypeLikeF64, out: ArrayT) -> ArrayT: ...
+    @overload # dtype: f32 (positional), out: f32 array
+    def random[ArrayT: _ArrayF32](self, size: _ShapeLike | None, dtype: _DTypeLikeF32, out: ArrayT) -> ArrayT: ...
+
+    #
+    @overload # size=None (default); NOTE: dtype is ignored
+    def standard_normal(self, size: None = None, dtype: _DTypeLikeFloat = ..., out: None = None) -> float: ...
+    @overload # size=, dtype: f64 (default)
+    def standard_normal(self, size: _ShapeLike, dtype: _DTypeLikeF64 = ..., out: None = None) -> _ArrayF64: ...
+    @overload # size=, dtype: f32
+    def standard_normal(self, size: _ShapeLike, dtype: _DTypeLikeF32, *, out: None = None) -> _ArrayF32: ...
+    @overload # dtype: f64 (default), out: f64 array (keyword)
+    def standard_normal[ArrayT: _ArrayF64](
+        self, size: _ShapeLike | None = None, dtype: _DTypeLikeF64 = ..., *, out: ArrayT
+    ) -> ArrayT: ...
+    @overload # dtype: f32 (keyword), out: f32 array
+    def standard_normal[ArrayT: _ArrayF32](
+        self, size: _ShapeLike | None = None, *, dtype: _DTypeLikeF32, out: ArrayT
+    ) -> ArrayT: ...
+    @overload # dtype: f32 (positional), out: f32 array
+    def standard_normal[ArrayT: _ArrayF32](self, size: _ShapeLike | None, dtype: _DTypeLikeF32, out: ArrayT) -> ArrayT: ...
+
+    #
+    @overload # size=None (default); NOTE: dtype is ignored
     def standard_exponential(
-        self,
-        size: None = None,
-        dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
-        method: Literal["zig", "inv"] = "zig",
-        out: None = None,
+        self, size: None = None, dtype: _DTypeLikeFloat = ..., method: _MethodExp = "zig", out: None = None
     ) -> float: ...
-    @overload
-    def standard_exponential(
-        self,
-        size: _ShapeLike | None = None,
-    ) -> NDArray[float64]: ...
-    @overload
-    def standard_exponential(
-        self,
-        *,
-        out: NDArray[float64] | None = None,
-    ) -> NDArray[float64]: ...
-    @overload
-    def standard_exponential(
-        self,
-        size: _ShapeLike | None = None,
-        *,
-        method: Literal["zig", "inv"] = "zig",
-        out: NDArray[float64] | None = None,
-    ) -> NDArray[float64]: ...
-    @overload
+    @overload # size=, dtype: f64 (default)
     def standard_exponential(
-        self,
-        size: _ShapeLike | None = None,
-        dtype: _DTypeLikeFloat32 = ...,
-        method: Literal["zig", "inv"] = "zig",
-        out: NDArray[float32] | None = None,
-    ) -> NDArray[float32]: ...
-    @overload
+        self, size: _ShapeLike, dtype: _DTypeLikeF64 = ..., method: _MethodExp = "zig", out: None = None
+    ) -> _ArrayF64: ...
+    @overload # size=, dtype: f32
     def standard_exponential(
-        self,
-        size: _ShapeLike | None = None,
-        dtype: _DTypeLikeFloat64 = ...,
-        method: Literal["zig", "inv"] = "zig",
-        out: NDArray[float64] | None = None,
-    ) -> NDArray[float64]: ...
-    @overload
-    def random(
-        self,
-        size: None = None,
-        dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
-        out: None = None,
-    ) -> float: ...
-    @overload
-    def random(
-        self,
-        *,
-        out: NDArray[float64] | None = None,
-    ) -> NDArray[float64]: ...
-    @overload
-    def random(
-        self,
-        size: _ShapeLike | None = None,
-        *,
-        out: NDArray[float64] | None = None,
-    ) -> NDArray[float64]: ...
-    @overload
-    def random(
-        self,
-        size: _ShapeLike | None = None,
-        dtype: _DTypeLikeFloat32 = ...,
-        out: NDArray[float32] | None = None,
-    ) -> NDArray[float32]: ...
-    @overload
-    def random(
-        self,
-        size: _ShapeLike | None = None,
-        dtype: _DTypeLikeFloat64 = ...,
-        out: NDArray[float64] | None = None,
-    ) -> NDArray[float64]: ...
-    @overload
-    def beta(
-        self,
-        a: _FloatLike_co,
-        b: _FloatLike_co,
-        size: None = None,
+        self, size: _ShapeLike, dtype: _DTypeLikeF32, method: _MethodExp = "zig", out: None = None
+    ) -> _ArrayF32: ...
+    @overload # dtype: f64 (default), out: f64 array (keyword)
+    def standard_exponential[ArrayT: _ArrayF64](
+        self, size: _ShapeLike | None = None, dtype: _DTypeLikeF64 = ..., method: _MethodExp = "zig", *, out: ArrayT
+    ) -> ArrayT: ...
+    @overload # dtype: f32 (keyword), out: f32 array
+    def standard_exponential[ArrayT: _ArrayF32](
+        self, size: _ShapeLike | None = None, *, dtype: _DTypeLikeF32, method: _MethodExp = "zig", out: ArrayT
+    ) -> ArrayT: ...
+    @overload # dtype: f32 (positional), out: f32 array (keyword)
+    def standard_exponential[ArrayT: _ArrayF32](
+        self, size: _ShapeLike | None, dtype: _DTypeLikeF32, method: _MethodExp = "zig", *, out: ArrayT
+    ) -> ArrayT: ...
+
+    #
+    @overload # 0d, size=None (default); NOTE: dtype is ignored
+    def standard_gamma(
+        self, shape: _FloatLike_co, size: None = None, dtype: _DTypeLikeFloat = ..., out: None = None
     ) -> float: ...
-    @overload
-    def beta(
-        self,
-        a: _ArrayLikeFloat_co,
-        b: _ArrayLikeFloat_co,
-        size: _ShapeLike | None = None
-    ) -> NDArray[float64]: ...
-    @overload
-    def exponential(self, scale: _FloatLike_co = 1.0, size: None = None) -> float: ...
- @overload - def exponential(self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None) -> NDArray[float64]: ... + @overload # >0d, dtype: f64 (default) + def standard_gamma( + self, shape: _NDArrayLikeFloat, size: None = None, dtype: _DTypeLikeF64 = ..., out: None = None + ) -> _ArrayF64: ... + @overload # >0d, dtype: f32 (keyword) + def standard_gamma( + self, shape: _NDArrayLikeFloat, size: None = None, *, dtype: _DTypeLikeF32, out: None = None + ) -> _ArrayF32: ... + @overload # >=0d, dtype: f64 (default) + def standard_gamma( + self, shape: _ArrayLikeFloat_co, size: None = None, dtype: _DTypeLikeF64 = ..., out: None = None + ) -> _ArrayF64 | Any: ... + @overload # >=0d, dtype: f32 (keyword) + def standard_gamma( + self, shape: _ArrayLikeFloat_co, size: None = None, *, dtype: _DTypeLikeF32, out: None = None + ) -> _ArrayF32 | Any: ... + @overload # >=0d, size=, dtype: f64 (default) + def standard_gamma( + self, shape: _ArrayLikeFloat_co, size: _ShapeLike, dtype: _DTypeLikeF64 = ..., out: None = None + ) -> _ArrayF64: ... + @overload # >=0d, size=, dtype: f32 + def standard_gamma( + self, shape: _ArrayLikeFloat_co, size: _ShapeLike, dtype: _DTypeLikeF32, *, out: None = None + ) -> _ArrayF32: ... + @overload # >=0d, dtype: f64 (default), out: f64 array (keyword) + def standard_gamma[ArrayT: _ArrayF64]( + self, shape: _ArrayLikeFloat_co, size: _ShapeLike | None = None, dtype: _DTypeLikeF64 = ..., *, out: ArrayT + ) -> ArrayT: ... + @overload # >=0d, dtype: f32 (keyword), out: f32 array + def standard_gamma[ArrayT: _ArrayF32]( + self, shape: _ArrayLikeFloat_co, size: _ShapeLike | None = None, *, dtype: _DTypeLikeF32, out: ArrayT + ) -> ArrayT: ... + @overload # >=0d, dtype: f32 (positional), out: f32 array + def standard_gamma[ArrayT: _ArrayF32]( + self, shape: _ArrayLikeFloat_co, size: _ShapeLike | None, dtype: _DTypeLikeF32, out: ArrayT + ) -> ArrayT: ... # - @overload + @overload # 0d + def power(self, /, a: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def power(self, /, a: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def power(self, /, a: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def power(self, /, a: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d + def pareto(self, /, a: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def pareto(self, /, a: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def pareto(self, /, a: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def pareto(self, /, a: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d + def weibull(self, /, a: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def weibull(self, /, a: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def weibull(self, /, a: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def weibull(self, /, a: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d + def standard_t(self, /, df: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def standard_t(self, /, df: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def standard_t(self, /, df: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def standard_t(self, /, df: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... 
+ + # + @overload # 0d + def chisquare(self, /, df: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def chisquare(self, /, df: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def chisquare(self, /, df: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def chisquare(self, /, df: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default) + def exponential(self, /, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (keyword) + def exponential(self, /, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # size= (positional) + def exponential(self, /, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def exponential(self, /, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def exponential(self, /, scale: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default) + def rayleigh(self, /, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (keyword) + def rayleigh(self, /, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # size= (positional) + def rayleigh(self, /, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def rayleigh(self, /, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def rayleigh(self, /, scale: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d + def noncentral_chisquare(self, /, df: _FloatLike_co, nonc: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def noncentral_chisquare(self, /, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d + def noncentral_chisquare(self, /, df: _ArrayLikeFloat_co, nonc: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def noncentral_chisquare(self, /, df: _NDArrayLikeFloat, nonc: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def noncentral_chisquare(self, /, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d + def f(self, /, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def f(self, /, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d + def f(self, /, dfnum: _ArrayLikeFloat_co, dfden: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def f(self, /, dfnum: _NDArrayLikeFloat, dfden: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d (fallback) + def f(self, /, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d + def vonmises(self, /, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def vonmises(self, /, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d + def vonmises(self, /, mu: _ArrayLikeFloat_co, kappa: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def vonmises(self, /, mu: _NDArrayLikeFloat, kappa: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64: ... 
+ @overload # >=0d, >=0d (fallback) + def vonmises(self, /, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d + def wald(self, /, mean: _FloatLike_co, scale: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def wald(self, /, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d + def wald(self, /, mean: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def wald(self, /, mean: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d (fallback) + def wald(self, /, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d + def beta(self, /, a: _FloatLike_co, b: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def beta(self, /, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d + def beta(self, /, a: _ArrayLikeFloat_co, b: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def beta(self, /, a: _NDArrayLikeFloat, b: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d (fallback) + def beta(self, /, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d (default) + def gamma(self, /, shape: _FloatLike_co, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def gamma(self, /, shape: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # size= (keyword) + def gamma(self, /, shape: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d + def gamma(self, /, shape: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def gamma(self, /, shape: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d (fallback) + def gamma(self, /, shape: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default), 0d (default) + def uniform(self, /, low: _FloatLike_co = 0.0, high: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # >=0d, >=0d, size= (positional) + def uniform(self, /, low: _ArrayLikeFloat_co, high: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (positional) + def uniform(self, /, low: _ArrayLikeFloat_co, high: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d, size= (keyword) + def uniform(self, /, low: _ArrayLikeFloat_co = 0.0, high: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (keyword) + def uniform(self, /, low: _ArrayLikeFloat_co = 0.0, *, high: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def uniform(self, /, low: _NDArrayLikeFloat, high: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d (fallback) + def uniform(self, /, low: _ArrayLikeFloat_co = 0.0, high: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64 | Any: ... 
+ + # + @overload # 0d (default), 0d (default) + def normal(self, /, loc: _FloatLike_co = 0.0, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def normal(self, /, loc: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (positional) + def normal(self, /, loc: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None) -> _ArrayF64: ... + @overload # size= (keyword) + def normal(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (keyword) + def normal(self, /, loc: _ArrayLikeFloat_co = 0.0, *, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def normal(self, /, loc: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def normal(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default), 0d (default) + def gumbel(self, /, loc: _FloatLike_co = 0.0, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def gumbel(self, /, loc: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (positional) + def gumbel(self, /, loc: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None) -> _ArrayF64: ... + @overload # size= (keyword) + def gumbel(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (keyword) + def gumbel(self, /, loc: _ArrayLikeFloat_co = 0.0, *, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def gumbel(self, /, loc: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def gumbel(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default), 0d (default) + def logistic(self, /, loc: _FloatLike_co = 0.0, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def logistic(self, /, loc: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (positional) + def logistic(self, /, loc: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None) -> _ArrayF64: ... + @overload # size= (keyword) + def logistic(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (keyword) + def logistic(self, /, loc: _ArrayLikeFloat_co = 0.0, *, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def logistic(self, /, loc: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def logistic( + self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, size: None = None + ) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default), 0d (default) + def laplace(self, /, loc: _FloatLike_co = 0.0, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def laplace(self, /, loc: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... 
+ @overload # >=0d, >0d (positional) + def laplace(self, /, loc: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None) -> _ArrayF64: ... + @overload # size= (keyword) + def laplace(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (keyword) + def laplace(self, /, loc: _ArrayLikeFloat_co = 0.0, *, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def laplace(self, /, loc: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def laplace( + self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, size: None = None + ) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default), 0d (default) + def lognormal(self, /, mean: _FloatLike_co = 0.0, sigma: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def lognormal(self, /, mean: _ArrayLikeFloat_co, sigma: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (positional) + def lognormal(self, /, mean: _ArrayLikeFloat_co, sigma: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # size= (keyword) + def lognormal(self, /, mean: _ArrayLikeFloat_co = 0.0, sigma: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (keyword) + def lognormal(self, /, mean: _ArrayLikeFloat_co = 0.0, *, sigma: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def lognormal(self, /, mean: _NDArrayLikeFloat, sigma: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def lognormal( + self, /, mean: _ArrayLikeFloat_co = 0.0, sigma: _ArrayLikeFloat_co = 1.0, size: None = None + ) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d, 0d + def triangular(self, /, left: _FloatLike_co, mode: _FloatLike_co, right: _FloatLike_co, size: None = None) -> float: ... + @overload # >=0d, >=0d, >=0d, size= + def triangular( + self, /, left: _ArrayLikeFloat_co, mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, size: _ShapeLike + ) -> _ArrayF64: ... + @overload # >=0d, >=0d, >0d + def triangular( + self, /, left: _ArrayLikeFloat_co, mode: _ArrayLikeFloat_co, right: _NDArrayLikeFloat, size: None = None + ) -> _ArrayF64: ... + @overload # >=0d, >0d, >=0d + def triangular( + self, /, left: _ArrayLikeFloat_co, mode: _NDArrayLikeFloat, right: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64: ... + @overload # >0d, >=0d, >=0d + def triangular( + self, /, left: _NDArrayLikeFloat, mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64: ... + @overload # >=0d, >=0d, >=0d (fallback) + def triangular( + self, /, left: _ArrayLikeFloat_co, mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d, 0d + def noncentral_f(self, /, dfnum: _FloatLike_co, dfden: _FloatLike_co, nonc: _FloatLike_co, size: None = None) -> float: ... + @overload # >=0d, >=0d, >=0d, size= + def noncentral_f( + self, /, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: _ShapeLike + ) -> _ArrayF64: ... + @overload # >=0d, >=0d, >0d + def noncentral_f( + self, /, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, nonc: _NDArrayLikeFloat, size: None = None + ) -> _ArrayF64: ... 
+ @overload # >=0d, >0d, >=0d + def noncentral_f( + self, /, dfnum: _ArrayLikeFloat_co, dfden: _NDArrayLikeFloat, nonc: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64: ... + @overload # >0d, >=0d, >=0d + def noncentral_f( + self, /, dfnum: _NDArrayLikeFloat, dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64: ... + @overload # >=0d, >=0d, >=0d (fallback) + def noncentral_f( + self, /, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64 | Any: ... + + ### + # discrete + + # + @overload # 0d bool | int + def integers[AnyIntT: (bool, int)]( + self, low: int, high: int | None = None, size: None = None, *, dtype: type[AnyIntT], endpoint: bool = False + ) -> AnyIntT: ... + @overload # 0d integer dtype + def integers[ScalarT: np.integer | np.bool]( + self, low: int, high: int | None = None, size: None = None, *, dtype: _DTypeLike[ScalarT], endpoint: bool = False + ) -> ScalarT: ... + @overload # 0d int64 (default) def integers( - self, - low: int, - high: int | None = None, - size: None = None, - dtype: _DTypeLike[np.int64] | _Int64Codes = ..., - endpoint: bool = False, + self, low: int, high: int | None = None, size: None = None, dtype: _DTypeLikeI64 = ..., endpoint: bool = False ) -> np.int64: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: type[bool], - endpoint: bool = False, - ) -> bool: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: type[int], - endpoint: bool = False, - ) -> int: ... - @overload + @overload # 0d unknown def integers( + self, low: int, high: int | None = None, size: None = None, dtype: DTypeLike | None = ..., endpoint: bool = False + ) -> Any: ... + @overload # integer dtype, size= + def integers[ScalarT: np.integer | np.bool]( self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _DTypeLike[np.bool] | _BoolCodes, - endpoint: bool = False, - ) -> np.bool: ... - @overload - def integers[ScalarT: np.integer]( - self, - low: int, - high: int | None = None, - size: None = None, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, *, + size: _ShapeLike, dtype: _DTypeLike[ScalarT], endpoint: bool = False, - ) -> ScalarT: ... - @overload + ) -> NDArray[ScalarT]: ... + @overload # int64 (default), size= def integers( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - dtype: _DTypeLike[np.int64] | _Int64Codes = ..., + *, + size: _ShapeLike, + dtype: _DTypeLikeI64 = ..., endpoint: bool = False, ) -> NDArray[np.int64]: ... - @overload + @overload # unknown, size= def integers( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, *, - dtype: _DTypeLikeBool, + size: _ShapeLike, + dtype: DTypeLike | None = ..., endpoint: bool = False, - ) -> NDArray[np.bool]: ... - @overload - def integers[ScalarT: np.integer]( + ) -> np.ndarray: ... + @overload # >=0d, integer dtype + def integers[ScalarT: np.integer | np.bool]( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, @@ -257,231 +518,202 @@ class Generator: *, dtype: _DTypeLike[ScalarT], endpoint: bool = False, - ) -> NDArray[ScalarT]: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _Int8Codes, - endpoint: bool = False, - ) -> np.int8: ... - @overload + ) -> NDArray[ScalarT] | Any: ... 
+ @overload # >=0d, int64 (default) def integers( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - *, - dtype: _Int8Codes, - endpoint: bool = False, - ) -> NDArray[np.int8]: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _UInt8Codes, + dtype: _DTypeLikeI64 = ..., endpoint: bool = False, - ) -> np.uint8: ... - @overload + ) -> NDArray[np.int64] | Any: ... + @overload # >=0d, unknown def integers( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - *, - dtype: _UInt8Codes, + dtype: DTypeLike | None = ..., endpoint: bool = False, - ) -> NDArray[np.uint8]: ... - @overload - def integers( + ) -> np.ndarray | Any: ... + + # + @overload # 0d + def zipf(self, /, a: _FloatLike_co, size: None = None) -> int: ... + @overload # size= + def zipf(self, /, a: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # >0d + def zipf(self, /, a: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d + def zipf(self, /, a: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d + def geometric(self, /, p: _FloatLike_co, size: None = None) -> int: ... + @overload # size= + def geometric(self, /, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # >0d + def geometric(self, /, p: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d + def geometric(self, /, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d + def logseries(self, /, p: _FloatLike_co, size: None = None) -> int: ... + @overload # size= + def logseries(self, /, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # >0d + def logseries(self, /, p: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d + def logseries(self, /, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d (default) + def poisson(self, /, lam: _FloatLike_co = 1.0, size: None = None) -> int: ... + @overload # size= (keyword) + def poisson(self, /, lam: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # size= (positional) + def poisson(self, /, lam: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # >0d + def poisson(self, /, lam: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d + def poisson(self, /, lam: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d, 0d + def binomial(self, /, n: int, p: _FloatLike_co, size: None = None) -> int: ... + @overload # size= + def binomial(self, /, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # >=0d, >0d + def binomial(self, /, n: _ArrayLikeInt_co, p: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >0d, >=0d + def binomial(self, /, n: _NDArrayLikeInt, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d, >=0d + def binomial(self, /, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d, 0d + def negative_binomial(self, /, n: _FloatLike_co, p: _FloatLike_co, size: None = None) -> int: ... 
+ @overload # size= + def negative_binomial(self, /, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # >=0d, >0d + def negative_binomial(self, /, n: _ArrayLikeFloat_co, p: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >0d, >=0d + def negative_binomial(self, /, n: _NDArrayLikeFloat, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d, >=0d + def negative_binomial( + self, /, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None = None + ) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d, 0d, 0d + def hypergeometric(self, /, ngood: int, nbad: int, nsample: int, size: None = None) -> int: ... + @overload # size= + def hypergeometric( + self, /, ngood: _ArrayLikeInt_co, nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, size: _ShapeLike + ) -> NDArray[np.int64]: ... + @overload # >=0d, >=0d, >0d + def hypergeometric( + self, /, ngood: _ArrayLikeInt_co, nbad: _ArrayLikeInt_co, nsample: _NDArrayLikeInt, size: None = None + ) -> NDArray[np.int64] | Any: ... + @overload # >=0d, >0d, >=0d + def hypergeometric( + self, /, ngood: _ArrayLikeInt_co, nbad: _NDArrayLikeInt, nsample: _ArrayLikeInt_co, size: None = None + ) -> NDArray[np.int64] | Any: ... + @overload # >0d, >=0d, >=0d + def hypergeometric( + self, /, ngood: _NDArrayLikeInt, nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, size: None = None + ) -> NDArray[np.int64] | Any: ... + @overload # >=0d, >=0d, >=0d + def hypergeometric( + self, /, ngood: _ArrayLikeInt_co, nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, size: None = None + ) -> NDArray[np.int64] | Any: ... + + ### + # multivariate + + # + def dirichlet(self, /, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> _ArrayF64: ... + + # + def multivariate_normal( self, - low: int, - high: int | None = None, - size: None = None, + /, + mean: _ArrayLikeFloat_co, + cov: _ArrayLikeFloat_co, + size: _ShapeLike | None = None, + check_valid: Literal["warn", "raise", "ignore"] = "warn", + tol: float = 1e-8, *, - dtype: _Int16Codes, - endpoint: bool = False, - ) -> np.int16: ... - @overload - def integers( + method: Literal["svd", "eigh", "cholesky"] = "svd", + ) -> _ArrayF64: ... + + # + def multinomial( + self, /, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: _ShapeLike | None = None + ) -> NDArray[np.int64]: ... + + # + def multivariate_hypergeometric( self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, + /, + colors: _ArrayLikeInt_co, + nsample: int, size: _ShapeLike | None = None, - *, - dtype: _Int16Codes, - endpoint: bool = False, - ) -> NDArray[np.int16]: ... + method: Literal["marginals", "count"] = "marginals", + ) -> NDArray[np.int64]: ... + + ### + # resampling + + # axis must be 0 for MutableSequence @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _UInt16Codes, - endpoint: bool = False, - ) -> np.uint16: ... - @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - *, - dtype: _UInt16Codes, - endpoint: bool = False, - ) -> NDArray[np.uint16]: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _Int32Codes, - endpoint: bool = False, - ) -> np.int32: ... 
- @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - *, - dtype: _Int32Codes, - endpoint: bool = False, - ) -> NDArray[np.int32]: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _UInt32Codes, - endpoint: bool = False, - ) -> np.uint32: ... - @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - *, - dtype: _UInt32Codes, - endpoint: bool = False, - ) -> NDArray[np.uint32]: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _UInt64Codes, - endpoint: bool = False, - ) -> np.uint64: ... - @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - *, - dtype: _UInt64Codes, - endpoint: bool = False, - ) -> NDArray[np.uint64]: ... + def shuffle(self, /, x: np.ndarray, axis: int = 0) -> None: ... @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _IntPCodes, - endpoint: bool = False, - ) -> np.intp: ... + def shuffle(self, /, x: MutableSequence[Any], axis: Literal[0] = 0) -> None: ... + + # @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - *, - dtype: _IntPCodes, - endpoint: bool = False, - ) -> NDArray[np.intp]: ... + def permutation(self, /, x: int, axis: int = 0) -> NDArray[np.int64]: ... @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _UIntPCodes, - endpoint: bool = False, - ) -> np.uintp: ... + def permutation(self, /, x: ArrayLike, axis: int = 0) -> np.ndarray: ... + + # @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - *, - dtype: _UIntPCodes, - endpoint: bool = False, - ) -> NDArray[np.uintp]: ... + def permuted[ArrayT: np.ndarray](self, /, x: ArrayT, *, axis: int | None = None, out: None = None) -> ArrayT: ... @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - dtype: DTypeLike | None = ..., - endpoint: bool = False, - ) -> Any: ... + def permuted(self, /, x: ArrayLike, *, axis: int | None = None, out: None = None) -> np.ndarray: ... @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - dtype: DTypeLike | None = ..., - endpoint: bool = False, - ) -> NDArray[Any]: ... + def permuted[ArrayT: np.ndarray](self, /, x: ArrayLike, *, axis: int | None = None, out: ArrayT) -> ArrayT: ... - # TODO: Use a TypeVar _T here to get away from Any output? - # Should be int->NDArray[int64], ArrayLike[_T] -> _T | NDArray[Any] - @overload + # + @overload # >=0d int, size=None (default) def choice( self, - a: int, + /, + a: int | _NestedSequence[int], size: None = None, replace: bool = True, p: _ArrayLikeFloat_co | None = None, axis: int = 0, shuffle: bool = True, ) -> int: ... - @overload - def choice( + @overload # >=0d known, size=None (default) + def choice[ScalarT: np.generic]( self, - a: int, - size: _ShapeLike | None = None, + /, + a: _ArrayLike[ScalarT], + size: None = None, replace: bool = True, p: _ArrayLikeFloat_co | None = None, axis: int = 0, shuffle: bool = True, - ) -> NDArray[int64]: ... - @overload + ) -> ScalarT: ... 
+ @overload # >=0d unknown, size=None (default) def choice( self, + /, a: ArrayLike, size: None = None, replace: bool = True, @@ -489,356 +721,38 @@ class Generator: axis: int = 0, shuffle: bool = True, ) -> Any: ... - @overload + @overload # >=0d int, size= def choice( self, - a: ArrayLike, - size: _ShapeLike | None = None, + /, + a: int | _NestedSequence[int], + size: _ShapeLike, replace: bool = True, p: _ArrayLikeFloat_co | None = None, axis: int = 0, shuffle: bool = True, - ) -> NDArray[Any]: ... - @overload - def uniform( - self, - low: _FloatLike_co = 0.0, - high: _FloatLike_co = 1.0, - size: None = None, - ) -> float: ... - @overload - def uniform( - self, - low: _ArrayLikeFloat_co = 0.0, - high: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def normal( - self, - loc: _FloatLike_co = 0.0, - scale: _FloatLike_co = 1.0, - size: None = None, - ) -> float: ... - @overload - def normal( - self, - loc: _ArrayLikeFloat_co = 0.0, - scale: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def standard_gamma( - self, - shape: _FloatLike_co, - size: None = None, - dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., - out: None = None, - ) -> float: ... - @overload - def standard_gamma( - self, - shape: _ArrayLikeFloat_co, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def standard_gamma( - self, - shape: _ArrayLikeFloat_co, - *, - out: NDArray[float64] | None = None, - ) -> NDArray[float64]: ... - @overload - def standard_gamma( - self, - shape: _ArrayLikeFloat_co, - size: _ShapeLike | None = None, - dtype: _DTypeLikeFloat32 = ..., - out: NDArray[float32] | None = None, - ) -> NDArray[float32]: ... - @overload - def standard_gamma( - self, - shape: _ArrayLikeFloat_co, - size: _ShapeLike | None = None, - dtype: _DTypeLikeFloat64 = ..., - out: NDArray[float64] | None = None, - ) -> NDArray[float64]: ... - @overload - def gamma( - self, shape: _FloatLike_co, scale: _FloatLike_co = 1.0, size: None = None - ) -> float: ... - @overload - def gamma( - self, - shape: _ArrayLikeFloat_co, - scale: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def f( - self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = None - ) -> float: ... - @overload - def f( - self, - dfnum: _ArrayLikeFloat_co, - dfden: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def noncentral_f( - self, - dfnum: _FloatLike_co, - dfden: _FloatLike_co, - nonc: _FloatLike_co, - size: None = None, - ) -> float: ... - @overload - def noncentral_f( - self, - dfnum: _ArrayLikeFloat_co, - dfden: _ArrayLikeFloat_co, - nonc: _ArrayLikeFloat_co, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def chisquare(self, df: _FloatLike_co, size: None = None) -> float: ... - @overload - def chisquare( - self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def noncentral_chisquare( - self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = None - ) -> float: ... - @overload - def noncentral_chisquare( - self, - df: _ArrayLikeFloat_co, - nonc: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def standard_t(self, df: _FloatLike_co, size: None = None) -> float: ... - @overload - def standard_t( - self, df: _ArrayLikeFloat_co, size: None = None - ) -> NDArray[float64]: ... 
- @overload - def standard_t( - self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def vonmises( - self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = None - ) -> float: ... - @overload - def vonmises( - self, - mu: _ArrayLikeFloat_co, - kappa: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def pareto(self, a: _FloatLike_co, size: None = None) -> float: ... - @overload - def pareto( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def weibull(self, a: _FloatLike_co, size: None = None) -> float: ... - @overload - def weibull( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def power(self, a: _FloatLike_co, size: None = None) -> float: ... - @overload - def power( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def standard_cauchy(self, size: None = None) -> float: ... - @overload - def standard_cauchy(self, size: _ShapeLike | None = None) -> NDArray[float64]: ... - @overload - def laplace( - self, - loc: _FloatLike_co = 0.0, - scale: _FloatLike_co = 1.0, - size: None = None, - ) -> float: ... - @overload - def laplace( - self, - loc: _ArrayLikeFloat_co = 0.0, - scale: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def gumbel( - self, - loc: _FloatLike_co = 0.0, - scale: _FloatLike_co = 1.0, - size: None = None, - ) -> float: ... - @overload - def gumbel( - self, - loc: _ArrayLikeFloat_co = 0.0, - scale: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def logistic( - self, - loc: _FloatLike_co = 0.0, - scale: _FloatLike_co = 1.0, - size: None = None, - ) -> float: ... - @overload - def logistic( - self, - loc: _ArrayLikeFloat_co = 0.0, - scale: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def lognormal( - self, - mean: _FloatLike_co = 0.0, - sigma: _FloatLike_co = 1.0, - size: None = None, - ) -> float: ... - @overload - def lognormal( - self, - mean: _ArrayLikeFloat_co = 0.0, - sigma: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def rayleigh(self, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... - @overload - def rayleigh( - self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def wald( - self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = None - ) -> float: ... - @overload - def wald( - self, - mean: _ArrayLikeFloat_co, - scale: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def triangular( - self, - left: _FloatLike_co, - mode: _FloatLike_co, - right: _FloatLike_co, - size: None = None, - ) -> float: ... - @overload - def triangular( - self, - left: _ArrayLikeFloat_co, - mode: _ArrayLikeFloat_co, - right: _ArrayLikeFloat_co, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def binomial(self, n: int, p: _FloatLike_co, size: None = None) -> int: ... - @overload - def binomial( - self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[int64]: ... - @overload - def negative_binomial( - self, n: _FloatLike_co, p: _FloatLike_co, size: None = None - ) -> int: ... 
- @overload - def negative_binomial( - self, - n: _ArrayLikeFloat_co, - p: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[int64]: ... - @overload - def poisson(self, lam: _FloatLike_co = 1.0, size: None = None) -> int: ... - @overload - def poisson( - self, lam: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None - ) -> NDArray[int64]: ... - @overload - def zipf(self, a: _FloatLike_co, size: None = None) -> int: ... - @overload - def zipf( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[int64]: ... - @overload - def geometric(self, p: _FloatLike_co, size: None = None) -> int: ... - @overload - def geometric( - self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[int64]: ... - @overload - def hypergeometric( - self, ngood: int, nbad: int, nsample: int, size: None = None - ) -> int: ... - @overload - def hypergeometric( - self, - ngood: _ArrayLikeInt_co, - nbad: _ArrayLikeInt_co, - nsample: _ArrayLikeInt_co, - size: _ShapeLike | None = None, - ) -> NDArray[int64]: ... - @overload - def logseries(self, p: _FloatLike_co, size: None = None) -> int: ... - @overload - def logseries( - self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[int64]: ... - def multivariate_normal( + ) -> NDArray[np.int64]: ... + @overload # >=0d known, size= + def choice[ScalarT: np.generic]( self, - mean: _ArrayLikeFloat_co, - cov: _ArrayLikeFloat_co, - size: _ShapeLike | None = None, - check_valid: Literal["warn", "raise", "ignore"] = "warn", - tol: float = 1e-8, - *, - method: Literal["svd", "eigh", "cholesky"] = "svd", - ) -> NDArray[float64]: ... - def multinomial( - self, n: _ArrayLikeInt_co, - pvals: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[int64]: ... - def multivariate_hypergeometric( + /, + a: _ArrayLike[ScalarT], + size: _ShapeLike, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, + ) -> NDArray[ScalarT]: ... + @overload # >=0d unknown, size= + def choice( self, - colors: _ArrayLikeInt_co, - nsample: int, - size: _ShapeLike | None = None, - method: Literal["marginals", "count"] = "marginals", - ) -> NDArray[int64]: ... - def dirichlet( - self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - def permuted( - self, x: ArrayLike, *, axis: int | None = None, out: NDArray[Any] | None = None - ) -> NDArray[Any]: ... - - # axis must be 0 for MutableSequence - @overload - def shuffle(self, /, x: np.ndarray, axis: int = 0) -> None: ... - @overload - def shuffle(self, /, x: MutableSequence[Any], axis: Literal[0] = 0) -> None: ... + /, + a: ArrayLike, + size: _ShapeLike, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, + ) -> np.ndarray: ... -def default_rng( - seed: _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator | RandomState | None = None -) -> Generator: ... +def default_rng(seed: _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator | RandomState | None = None) -> Generator: ... 
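Before the reveal tests below, a few spot checks, in the same style as the test file, of how the rewritten overloads are intended to resolve (an illustrative sketch, not part of the patch; typing.assert_type needs Python 3.11+ and is a no-op at runtime, it only matters to the type checker):

import numpy as np
import numpy.typing as npt
from typing import Any, assert_type

rng = np.random.default_rng()

assert_type(rng.normal(0.5, 0.5), float)                     # 0d args -> scalar
assert_type(rng.normal(0.5, 0.5, size=1), npt.NDArray[np.float64])      # size= -> array
assert_type(rng.normal(np.array([0.5]), 0.5), npt.NDArray[np.float64])  # >0d arg -> array
# A positional array scale without size= hits the fallback overload,
# because a 0-d array could still yield a scalar at runtime:
assert_type(rng.normal(0.5, np.array([0.5])), npt.NDArray[np.float64] | Any)
assert_type(rng.integers(0, 100), np.int64)                  # default dtype is int64
assert_type(rng.integers(0, 256, dtype=np.uint8), np.uint8)  # dtype-directed scalar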
diff --git a/numpy/typing/tests/data/reveal/random.pyi b/numpy/typing/tests/data/reveal/random.pyi index e188eb02893f..4d00ef0d99aa 100644 --- a/numpy/typing/tests/data/reveal/random.pyi +++ b/numpy/typing/tests/data/reveal/random.pyi @@ -349,11 +349,11 @@ assert_type(def_gen.gumbel(0.5, 0.5), float) assert_type(def_gen.gumbel(0.5, 0.5, size=None), float) assert_type(def_gen.gumbel(0.5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.gumbel(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.gumbel(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.gumbel(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.gumbel(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.gumbel(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.gumbel(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.gumbel(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) assert_type(def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) assert_type(def_gen.gumbel(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) @@ -363,11 +363,11 @@ assert_type(def_gen.laplace(0.5, 0.5), float) assert_type(def_gen.laplace(0.5, 0.5, size=None), float) assert_type(def_gen.laplace(0.5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.laplace(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.laplace(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.laplace(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.laplace(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.laplace(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.laplace(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.laplace(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) assert_type(def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) assert_type(def_gen.laplace(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) @@ -377,11 +377,11 @@ assert_type(def_gen.logistic(0.5, 0.5), float) assert_type(def_gen.logistic(0.5, 0.5, size=None), float) assert_type(def_gen.logistic(0.5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.logistic(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.logistic(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.logistic(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.logistic(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.logistic(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.logistic(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.logistic(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) assert_type(def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) assert_type(def_gen.logistic(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) @@ -419,11 +419,11 @@ assert_type(def_gen.normal(0.5, 0.5), float) assert_type(def_gen.normal(0.5, 0.5, size=None), float) assert_type(def_gen.normal(0.5, 0.5, size=1), 
npt.NDArray[np.float64]) assert_type(def_gen.normal(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.normal(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.normal(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.normal(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.normal(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.normal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.normal(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.normal(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.normal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) assert_type(def_gen.normal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) assert_type(def_gen.normal(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) @@ -488,14 +488,14 @@ assert_type(def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1), np assert_type(def_gen.hypergeometric(20, 20, 10), int) assert_type(def_gen.hypergeometric(20, 20, 10, size=None), int) assert_type(def_gen.hypergeometric(20, 20, 10, size=1), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(I_arr_20, 20, 10), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(20, I_arr_20, 10), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_20, 20, 10), npt.NDArray[np.int64] | Any) +assert_type(def_gen.hypergeometric(20, I_arr_20, 10), npt.NDArray[np.int64] | Any) assert_type(def_gen.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1), npt.NDArray[np.int64]) assert_type(def_gen.hypergeometric(20, I_arr_20, 10, size=1), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(I_arr_like_20, 20, I_arr_10), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(20, I_arr_like_20, 10), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(I_arr_20, I_arr_20, 10), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, 10), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_like_20, 20, I_arr_10), npt.NDArray[np.int64] | Any) +assert_type(def_gen.hypergeometric(20, I_arr_like_20, 10), npt.NDArray[np.int64] | Any) +assert_type(def_gen.hypergeometric(I_arr_20, I_arr_20, 10), npt.NDArray[np.int64] | Any) +assert_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, 10), npt.NDArray[np.int64] | Any) assert_type(def_gen.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1), npt.NDArray[np.int64]) assert_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1), npt.NDArray[np.int64]) @@ -503,8 +503,8 @@ I_int64_100: npt.NDArray[np.int64] = np.array([100], dtype=np.int64) assert_type(def_gen.integers(0, 100), np.int64) assert_type(def_gen.integers(100), np.int64) -assert_type(def_gen.integers([100]), npt.NDArray[np.int64]) -assert_type(def_gen.integers(0, [100]), npt.NDArray[np.int64]) +assert_type(def_gen.integers([100]), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(0, [100]), npt.NDArray[np.int64] | Any) I_bool_low: npt.NDArray[np.bool] = np.array([0], dtype=np.bool) I_bool_low_like: list[int] = [0] @@ -515,107 +515,59 @@ assert_type(def_gen.integers(2, dtype=bool), bool) assert_type(def_gen.integers(0, 2, dtype=bool), bool) assert_type(def_gen.integers(1, dtype=bool, endpoint=True), bool) assert_type(def_gen.integers(0, 1, dtype=bool, endpoint=True), bool) -assert_type(def_gen.integers(I_bool_low_like, 1, dtype=bool, endpoint=True), npt.NDArray[np.bool]) 
-assert_type(def_gen.integers(I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(0, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool]) +assert_type(def_gen.integers(I_bool_low_like, 1, dtype=bool, endpoint=True), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(I_bool_high_open, dtype=bool), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=bool), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(0, I_bool_high_open, dtype=bool), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[Any] | Any) assert_type(def_gen.integers(2, dtype=np.bool), np.bool) assert_type(def_gen.integers(0, 2, dtype=np.bool), np.bool) assert_type(def_gen.integers(1, dtype=np.bool, endpoint=True), np.bool) assert_type(def_gen.integers(0, 1, dtype=np.bool, endpoint=True), np.bool) -assert_type(def_gen.integers(I_bool_low_like, 1, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(0, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(0, I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) +assert_type(def_gen.integers(I_bool_low_like, 1, dtype=np.bool, endpoint=True), npt.NDArray[np.bool] | Any) +assert_type(def_gen.integers(I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool] | Any) +assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool] | Any) +assert_type(def_gen.integers(0, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool] | Any) +assert_type(def_gen.integers(I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool] | Any) +assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool] | Any) +assert_type(def_gen.integers(0, I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool] | Any) I_u1_low: npt.NDArray[np.uint8] = np.array([0], dtype=np.uint8) I_u1_low_like: list[int] = [0] I_u1_high_open: npt.NDArray[np.uint8] = np.array([255], dtype=np.uint8) I_u1_high_closed: npt.NDArray[np.uint8] = np.array([255], dtype=np.uint8) -assert_type(def_gen.integers(256, dtype="u1"), np.uint8) -assert_type(def_gen.integers(0, 256, dtype="u1"), np.uint8) -assert_type(def_gen.integers(255, dtype="u1", endpoint=True), np.uint8) -assert_type(def_gen.integers(0, 255, dtype="u1", endpoint=True), np.uint8) -assert_type(def_gen.integers(I_u1_low_like, 255, dtype="u1", 
endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) - -assert_type(def_gen.integers(256, dtype="uint8"), np.uint8) -assert_type(def_gen.integers(0, 256, dtype="uint8"), np.uint8) -assert_type(def_gen.integers(255, dtype="uint8", endpoint=True), np.uint8) -assert_type(def_gen.integers(0, 255, dtype="uint8", endpoint=True), np.uint8) -assert_type(def_gen.integers(I_u1_low_like, 255, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) - assert_type(def_gen.integers(256, dtype=np.uint8), np.uint8) assert_type(def_gen.integers(0, 256, dtype=np.uint8), np.uint8) assert_type(def_gen.integers(255, dtype=np.uint8, endpoint=True), np.uint8) assert_type(def_gen.integers(0, 255, dtype=np.uint8, endpoint=True), np.uint8) -assert_type(def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8] | Any) +assert_type(def_gen.integers(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8] | Any) +assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8] | Any) +assert_type(def_gen.integers(0, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8] | Any) +assert_type(def_gen.integers(I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8] | Any) +assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8] | Any) +assert_type(def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8] | Any) I_u2_low: npt.NDArray[np.uint16] = np.array([0], dtype=np.uint16) I_u2_low_like: list[int] = [0] I_u2_high_open: npt.NDArray[np.uint16] = np.array([65535], dtype=np.uint16) I_u2_high_closed: npt.NDArray[np.uint16] = np.array([65535], dtype=np.uint16) 
-assert_type(def_gen.integers(65536, dtype="u2"), np.uint16) -assert_type(def_gen.integers(0, 65536, dtype="u2"), np.uint16) -assert_type(def_gen.integers(65535, dtype="u2", endpoint=True), np.uint16) -assert_type(def_gen.integers(0, 65535, dtype="u2", endpoint=True), np.uint16) -assert_type(def_gen.integers(I_u2_low_like, 65535, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) - -assert_type(def_gen.integers(65536, dtype="uint16"), np.uint16) -assert_type(def_gen.integers(0, 65536, dtype="uint16"), np.uint16) -assert_type(def_gen.integers(65535, dtype="uint16", endpoint=True), np.uint16) -assert_type(def_gen.integers(0, 65535, dtype="uint16", endpoint=True), np.uint16) -assert_type(def_gen.integers(I_u2_low_like, 65535, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) - assert_type(def_gen.integers(65536, dtype=np.uint16), np.uint16) assert_type(def_gen.integers(0, 65536, dtype=np.uint16), np.uint16) assert_type(def_gen.integers(65535, dtype=np.uint16, endpoint=True), np.uint16) assert_type(def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True), np.uint16) -assert_type(def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16] | Any) +assert_type(def_gen.integers(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16] | Any) +assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16] | Any) +assert_type(def_gen.integers(0, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16] | Any) +assert_type(def_gen.integers(I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16] | Any) +assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, 
dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16] | Any) +assert_type(def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16] | Any) I_u4_low: npt.NDArray[np.uint32] = np.array([0], dtype=np.uint32) I_u4_low_like: list[int] = [0] @@ -626,266 +578,122 @@ assert_type(def_gen.integers(4294967296, dtype=np.int_), np.int_) assert_type(def_gen.integers(0, 4294967296, dtype=np.int_), np.int_) assert_type(def_gen.integers(4294967295, dtype=np.int_, endpoint=True), np.int_) assert_type(def_gen.integers(0, 4294967295, dtype=np.int_, endpoint=True), np.int_) -assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) -assert_type(def_gen.integers(I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) -assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) -assert_type(def_gen.integers(I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) -assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) - -assert_type(def_gen.integers(4294967296, dtype="u4"), np.uint32) -assert_type(def_gen.integers(0, 4294967296, dtype="u4"), np.uint32) -assert_type(def_gen.integers(4294967295, dtype="u4", endpoint=True), np.uint32) -assert_type(def_gen.integers(0, 4294967295, dtype="u4", endpoint=True), np.uint32) -assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) - -assert_type(def_gen.integers(4294967296, dtype="uint32"), np.uint32) -assert_type(def_gen.integers(0, 4294967296, dtype="uint32"), np.uint32) -assert_type(def_gen.integers(4294967295, dtype="uint32", endpoint=True), np.uint32) -assert_type(def_gen.integers(0, 4294967295, dtype="uint32", endpoint=True), np.uint32) -assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.int_, endpoint=True), npt.NDArray[np.int_] | Any) +assert_type(def_gen.integers(I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_] | Any) +assert_type(def_gen.integers(I_u4_low, 
I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_] | Any) +assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_] | Any) +assert_type(def_gen.integers(I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_] | Any) +assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_] | Any) +assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_] | Any) assert_type(def_gen.integers(4294967296, dtype=np.uint32), np.uint32) assert_type(def_gen.integers(0, 4294967296, dtype=np.uint32), np.uint32) assert_type(def_gen.integers(4294967295, dtype=np.uint32, endpoint=True), np.uint32) assert_type(def_gen.integers(0, 4294967295, dtype=np.uint32, endpoint=True), np.uint32) -assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32] | Any) assert_type(def_gen.integers(4294967296, dtype=np.uint), np.uint) assert_type(def_gen.integers(0, 4294967296, dtype=np.uint), np.uint) assert_type(def_gen.integers(4294967295, dtype=np.uint, endpoint=True), np.uint) assert_type(def_gen.integers(0, 4294967295, dtype=np.uint, endpoint=True), np.uint) -assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) -assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) -assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) -assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) -assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint, endpoint=True), npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint), 
npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint] | Any) I_u8_low: npt.NDArray[np.uint64] = np.array([0], dtype=np.uint64) I_u8_low_like: list[int] = [0] I_u8_high_open: npt.NDArray[np.uint64] = np.array([18446744073709551615], dtype=np.uint64) I_u8_high_closed: npt.NDArray[np.uint64] = np.array([18446744073709551615], dtype=np.uint64) -assert_type(def_gen.integers(18446744073709551616, dtype="u8"), np.uint64) -assert_type(def_gen.integers(0, 18446744073709551616, dtype="u8"), np.uint64) -assert_type(def_gen.integers(18446744073709551615, dtype="u8", endpoint=True), np.uint64) -assert_type(def_gen.integers(0, 18446744073709551615, dtype="u8", endpoint=True), np.uint64) -assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) - -assert_type(def_gen.integers(18446744073709551616, dtype="uint64"), np.uint64) -assert_type(def_gen.integers(0, 18446744073709551616, dtype="uint64"), np.uint64) -assert_type(def_gen.integers(18446744073709551615, dtype="uint64", endpoint=True), np.uint64) -assert_type(def_gen.integers(0, 18446744073709551615, dtype="uint64", endpoint=True), np.uint64) -assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) - assert_type(def_gen.integers(18446744073709551616, dtype=np.uint64), np.uint64) assert_type(def_gen.integers(0, 18446744073709551616, dtype=np.uint64), np.uint64) assert_type(def_gen.integers(18446744073709551615, dtype=np.uint64, endpoint=True), np.uint64) assert_type(def_gen.integers(0, 18446744073709551615, dtype=np.uint64, endpoint=True), np.uint64) -assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, 
I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(0, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64] | Any) I_i1_low: npt.NDArray[np.int8] = np.array([-128], dtype=np.int8) I_i1_low_like: list[int] = [-128] I_i1_high_open: npt.NDArray[np.int8] = np.array([127], dtype=np.int8) I_i1_high_closed: npt.NDArray[np.int8] = np.array([127], dtype=np.int8) -assert_type(def_gen.integers(128, dtype="i1"), np.int8) -assert_type(def_gen.integers(-128, 128, dtype="i1"), np.int8) -assert_type(def_gen.integers(127, dtype="i1", endpoint=True), np.int8) -assert_type(def_gen.integers(-128, 127, dtype="i1", endpoint=True), np.int8) -assert_type(def_gen.integers(I_i1_low_like, 127, dtype="i1", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) - -assert_type(def_gen.integers(128, dtype="int8"), np.int8) -assert_type(def_gen.integers(-128, 128, dtype="int8"), np.int8) -assert_type(def_gen.integers(127, dtype="int8", endpoint=True), np.int8) -assert_type(def_gen.integers(-128, 127, dtype="int8", endpoint=True), np.int8) -assert_type(def_gen.integers(I_i1_low_like, 127, dtype="int8", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) - assert_type(def_gen.integers(128, dtype=np.int8), np.int8) assert_type(def_gen.integers(-128, 128, dtype=np.int8), np.int8) assert_type(def_gen.integers(127, dtype=np.int8, endpoint=True), np.int8) assert_type(def_gen.integers(-128, 127, dtype=np.int8, 
endpoint=True), np.int8) -assert_type(def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(-128, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8] | Any) I_i2_low: npt.NDArray[np.int16] = np.array([-32768], dtype=np.int16) I_i2_low_like: list[int] = [-32768] I_i2_high_open: npt.NDArray[np.int16] = np.array([32767], dtype=np.int16) I_i2_high_closed: npt.NDArray[np.int16] = np.array([32767], dtype=np.int16) -assert_type(def_gen.integers(32768, dtype="i2"), np.int16) -assert_type(def_gen.integers(-32768, 32768, dtype="i2"), np.int16) -assert_type(def_gen.integers(32767, dtype="i2", endpoint=True), np.int16) -assert_type(def_gen.integers(-32768, 32767, dtype="i2", endpoint=True), np.int16) -assert_type(def_gen.integers(I_i2_low_like, 32767, dtype="i2", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) - -assert_type(def_gen.integers(32768, dtype="int16"), np.int16) -assert_type(def_gen.integers(-32768, 32768, dtype="int16"), np.int16) -assert_type(def_gen.integers(32767, dtype="int16", endpoint=True), np.int16) -assert_type(def_gen.integers(-32768, 32767, dtype="int16", endpoint=True), np.int16) -assert_type(def_gen.integers(I_i2_low_like, 32767, dtype="int16", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="int16", endpoint=True), 
npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16]) - assert_type(def_gen.integers(32768, dtype=np.int16), np.int16) assert_type(def_gen.integers(-32768, 32768, dtype=np.int16), np.int16) assert_type(def_gen.integers(32767, dtype=np.int16, endpoint=True), np.int16) assert_type(def_gen.integers(-32768, 32767, dtype=np.int16, endpoint=True), np.int16) -assert_type(def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(-32768, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16] | Any) I_i4_low: npt.NDArray[np.int32] = np.array([-2147483648], dtype=np.int32) I_i4_low_like: list[int] = [-2147483648] I_i4_high_open: npt.NDArray[np.int32] = np.array([2147483647], dtype=np.int32) I_i4_high_closed: npt.NDArray[np.int32] = np.array([2147483647], dtype=np.int32) -assert_type(def_gen.integers(2147483648, dtype="i4"), np.int32) -assert_type(def_gen.integers(-2147483648, 2147483648, dtype="i4"), np.int32) -assert_type(def_gen.integers(2147483647, dtype="i4", endpoint=True), np.int32) -assert_type(def_gen.integers(-2147483648, 2147483647, dtype="i4", endpoint=True), np.int32) -assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="i4", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) - -assert_type(def_gen.integers(2147483648, dtype="int32"), np.int32) -assert_type(def_gen.integers(-2147483648, 2147483648, dtype="int32"), np.int32) -assert_type(def_gen.integers(2147483647, dtype="int32", endpoint=True), np.int32) -assert_type(def_gen.integers(-2147483648, 2147483647, dtype="int32", endpoint=True), np.int32) 
-assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="int32", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) - assert_type(def_gen.integers(2147483648, dtype=np.int32), np.int32) assert_type(def_gen.integers(-2147483648, 2147483648, dtype=np.int32), np.int32) assert_type(def_gen.integers(2147483647, dtype=np.int32, endpoint=True), np.int32) assert_type(def_gen.integers(-2147483648, 2147483647, dtype=np.int32, endpoint=True), np.int32) -assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32] | Any) I_i8_low: npt.NDArray[np.int64] = np.array([-9223372036854775808], dtype=np.int64) I_i8_low_like: list[int] = [-9223372036854775808] I_i8_high_open: npt.NDArray[np.int64] = np.array([9223372036854775807], dtype=np.int64) I_i8_high_closed: npt.NDArray[np.int64] = np.array([9223372036854775807], dtype=np.int64) -assert_type(def_gen.integers(9223372036854775808, dtype="i8"), np.int64) -assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="i8"), np.int64) -assert_type(def_gen.integers(9223372036854775807, dtype="i8", endpoint=True), np.int64) -assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="i8", endpoint=True), np.int64) -assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="i8", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) 
-assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) - -assert_type(def_gen.integers(9223372036854775808, dtype="int64"), np.int64) -assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="int64"), np.int64) -assert_type(def_gen.integers(9223372036854775807, dtype="int64", endpoint=True), np.int64) -assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="int64", endpoint=True), np.int64) -assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="int64", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) - assert_type(def_gen.integers(9223372036854775808, dtype=np.int64), np.int64) assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype=np.int64), np.int64) assert_type(def_gen.integers(9223372036854775807, dtype=np.int64, endpoint=True), np.int64) assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype=np.int64, endpoint=True), np.int64) -assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64] | Any) assert_type(def_gen.bit_generator, np.random.BitGenerator) @@ 
-897,11 +705,12 @@ assert_type(def_gen.choice(5, 3, replace=True), npt.NDArray[np.int64]) assert_type(def_gen.choice(5, 3, p=[1 / 5] * 5), npt.NDArray[np.int64]) assert_type(def_gen.choice(5, 3, p=[1 / 5] * 5, replace=False), npt.NDArray[np.int64]) -assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"]), Any) -assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3), npt.NDArray[Any]) -assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4), npt.NDArray[Any]) -assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True), npt.NDArray[Any]) -assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4])), npt.NDArray[Any]) +str_list: list[str] +assert_type(def_gen.choice(str_list), Any) +assert_type(def_gen.choice(str_list, 3), npt.NDArray[Any]) +assert_type(def_gen.choice(str_list, 3, p=[1 / 4] * 4), npt.NDArray[Any]) +assert_type(def_gen.choice(str_list, 3, replace=True), npt.NDArray[Any]) +assert_type(def_gen.choice(str_list, 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4])), npt.NDArray[Any]) assert_type(def_gen.dirichlet([0.5, 0.5]), npt.NDArray[np.float64]) assert_type(def_gen.dirichlet(np.array([0.5, 0.5])), npt.NDArray[np.float64]) @@ -929,13 +738,13 @@ assert_type(def_gen.permutation(10), npt.NDArray[np.int64]) assert_type(def_gen.permutation([1, 2, 3, 4]), npt.NDArray[Any]) assert_type(def_gen.permutation(np.array([1, 2, 3, 4])), npt.NDArray[Any]) assert_type(def_gen.permutation(D_2D, axis=1), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D), npt.NDArray[Any]) +assert_type(def_gen.permuted(D_2D), npt.NDArray[np.float64]) assert_type(def_gen.permuted(D_2D_like), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D, axis=1), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D, out=D_2D), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D_like, out=D_2D), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D_like, out=D_2D), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D, axis=1, out=D_2D), npt.NDArray[Any]) +assert_type(def_gen.permuted(D_2D, axis=1), npt.NDArray[np.float64]) +assert_type(def_gen.permuted(D_2D, out=D_2D), npt.NDArray[np.float64]) +assert_type(def_gen.permuted(D_2D_like, out=D_2D), npt.NDArray[np.float64]) +assert_type(def_gen.permuted(D_2D_like, out=D_2D), npt.NDArray[np.float64]) +assert_type(def_gen.permuted(D_2D, axis=1, out=D_2D), npt.NDArray[np.float64]) assert_type(def_gen.shuffle(np.arange(10)), None) assert_type(def_gen.shuffle([1, 2, 3, 4, 5]), None) From 16d30474e5e6fce5b8c3b98aa5e55c4631b96368 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Dec 2025 19:21:25 +0100 Subject: [PATCH 0978/1018] MAINT: Bump astral-sh/setup-uv from 7.1.4 to 7.1.5 (#30395) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/mypy.yml | 2 +- .github/workflows/stubtest.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 1deb5ab82815..ac87ccacd846 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -61,7 +61,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4 + - uses: astral-sh/setup-uv@ed21f2f24f8dd64503750218de024bcf64c7250a # v7.1.5 with: python-version: ${{ 
matrix.os_python[1] }} activate-environment: true diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml index dcac63be8a0a..c91954403b14 100644 --- a/.github/workflows/stubtest.yml +++ b/.github/workflows/stubtest.yml @@ -43,7 +43,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4 + - uses: astral-sh/setup-uv@ed21f2f24f8dd64503750218de024bcf64c7250a # v7.1.5 with: python-version: ${{ matrix.py }} activate-environment: true From e113b65964643a2393edfb987b003e9b174d7a79 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Mon, 8 Dec 2025 20:39:42 +0200 Subject: [PATCH 0979/1018] SIMD, BLD: Fix Highway target attribute build failure on ppc64 (#30392) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On GCC < 13, compiler target attributes cause a build failure with the `power9/vsx3` feature enabled: ``` from ../numpy/_core/src/umath/loops_trigonometric.dispatch.cpp:5: hwy/ops/ppc_vsx-inl.h: In function ‘np::simd::{anonymous}::Vec np::simd::{anonymous}::LoadN(const TLane*, size_t) [with TLane = float]’: hwy/ops/ppc_vsx-inl.h:1200:19: error: inlining failed in call to ‘always_inline’ ‘hwy::N_PPC10::VFromD 1200 | HWY_API VFromD LoadN(D d, const T* HWY_RESTRICT p, ``` To fix this issue, a new option `HWY_DISABLE_ATTR` has been introduced in Highway to explicitly disable compiler target attributes, as we rely on a source-based CPU dispatcher. Therefore, this patch updates to the latest Highway main. This patch also extends the native CI ppc64 build to include Clang and pins testing to Python 3.12. --- .github/workflows/linux-ppc64le.yml | 26 +++++++++++++++++++++----- meson_cpu/ppc64/meson.build | 6 ++---- numpy/_core/src/highway | 2 +- pyproject.toml | 2 +- 4 files changed, 25 insertions(+), 11 deletions(-) diff --git a/.github/workflows/linux-ppc64le.yml b/.github/workflows/linux-ppc64le.yml index cff7fb1d5b89..7e75786b0bea 100644 --- a/.github/workflows/linux-ppc64le.yml +++ b/.github/workflows/linux-ppc64le.yml @@ -25,26 +25,42 @@ jobs: # For more details, see: https://github.com/numpy/numpy/issues/29125 if: github.repository == 'numpy/numpy' runs-on: ubuntu-24.04-ppc64le-p10 - name: "Native PPC64LE" + + strategy: + fail-fast: false + matrix: + config: + - name: "GCC" + args: "-Dallow-noblas=false" + - name: "clang" + args: "-Dallow-noblas=false" + + name: "${{ matrix.config.name }}" steps: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true - persist-credentials: false - + - name: Install dependencies run: | sudo apt update - sudo apt install -y python3 python3-pip python3-dev ninja-build gfortran \ + sudo apt install -y python3.12 python3-pip python3-dev ninja-build gfortran \ build-essential libopenblas-dev liblapack-dev pkg-config pip install --upgrade pip pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt echo "/home/runner/.local/bin" >> $GITHUB_PATH + - name: Install clang + if: matrix.config.name == 'clang' + run: | + sudo apt install -y clang + export CC=clang + export CXX=clang++ + - name: Meson Build run: | - spin build -- -Dallow-noblas=false + spin build -- ${{ matrix.config.args }} - name: Meson Log if: always() diff --git a/meson_cpu/ppc64/meson.build b/meson_cpu/ppc64/meson.build index 8f2e8373c77c..58690d1fa80a 100644 --- a/meson_cpu/ppc64/meson.build +++ b/meson_cpu/ppc64/meson.build @@ -3,15 +3,13 @@ mod_features =
import('features') compiler_id = meson.get_compiler('c').get_id() VSX = mod_features.new( - 'VSX', 1, args: '-mvsx', + 'VSX', 1, args: ['-mvsx', '-DHWY_COMPILE_ONLY_STATIC', '-DHWY_DISABLE_ATTR'] + + (compiler_id == 'clang' ? ['-maltivec'] : []), test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vsx.c')[0], extra_tests: { 'VSX_ASM': files(source_root + '/numpy/_core/src/_simd/checks/extra_vsx_asm.c')[0] } ) -if compiler_id == 'clang' - VSX.update(args: ['-mvsx', '-maltivec']) -endif VSX2 = mod_features.new( 'VSX2', 2, implies: VSX, args: {'val': '-mcpu=power8', 'match': '.*vsx'}, detect: {'val': 'VSX2', 'match': 'VSX'}, diff --git a/numpy/_core/src/highway b/numpy/_core/src/highway index 37c08e5528f6..ee36c8371293 160000 --- a/numpy/_core/src/highway +++ b/numpy/_core/src/highway @@ -1 +1 @@ -Subproject commit 37c08e5528f63ead9c7e4fd99ba454c1b1a3e3f7 +Subproject commit ee36c837129310be19c17c9108c6dc3f6ae06942 diff --git a/pyproject.toml b/pyproject.toml index a459ea2cc2ee..084ba993072e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -53,7 +53,7 @@ license-files = [ 'LICENSE.txt', # BSD-3-Clause 'numpy/_core/include/numpy/libdivide/LICENSE.txt', # Zlib 'numpy/_core/src/common/pythoncapi-compat/COPYING', # 0BSD - 'numpy/_core/src/highway/LICENSE-BSD3', # BSD-3-Clause + 'numpy/_core/src/highway/LICENSE', # Dual-licensed: Apache 2.0 or BSD 3-Clause 'numpy/_core/src/multiarray/dragon4_LICENSE.txt', # MIT 'numpy/_core/src/npysort/x86-simd-sort/LICENSE.md', # BSD-3-Clause 'numpy/_core/src/umath/svml/LICENSE', # BSD-3-Clause From cee14ff2a69c02fabc73e0809ce95cb03dc92567 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 8 Dec 2025 21:42:42 +0100 Subject: [PATCH 0980/1018] DOC: prefer passing scalar types to ``dtype=`` in examples (#30394) * DOC: prefer scalar types for ``dtype=`` * DOC: revert doc change in `mtrand.pyx` Co-authored-by: Charles Harris --------- Co-authored-by: Charles Harris --- doc/neps/nep-0050-scalar-promotion.rst | 4 +- doc/neps/nep-0055-string_dtype.rst | 4 +- doc/source/reference/arrays.classes.rst | 6 +- doc/source/reference/arrays.datetime.rst | 4 +- doc/source/reference/arrays.dtypes.rst | 2 +- doc/source/reference/arrays.promotion.rst | 6 +- doc/source/reference/c-api/array.rst | 6 +- doc/source/reference/thread_safety.rst | 6 +- doc/source/user/absolute_beginners.rst | 2 +- doc/source/user/basics.creation.rst | 48 +++++----- doc/source/user/basics.io.genfromtxt.rst | 24 ++--- doc/source/user/basics.subclassing.rst | 4 +- doc/source/user/basics.ufuncs.rst | 6 +- doc/source/user/c-info.ufunc-tutorial.rst | 2 +- doc/source/user/quickstart.rst | 8 +- numpy/_core/_add_newdocs.py | 90 +++++++++---------- .../_core/code_generators/ufunc_docstrings.py | 28 +++--- numpy/_core/fromnumeric.py | 16 ++-- numpy/_core/function_base.py | 6 +- numpy/_core/memmap.py | 12 +-- numpy/_core/multiarray.py | 8 +- numpy/_core/numeric.py | 28 +++--- numpy/_core/numerictypes.py | 2 +- numpy/ctypeslib/_ctypeslib.py | 2 +- numpy/doc/ufuncs.py | 2 +- numpy/fft/_helper.py | 4 +- numpy/fft/_pocketfft.py | 4 +- numpy/lib/_array_utils_impl.py | 2 +- numpy/lib/_function_base_impl.py | 6 +- numpy/lib/_index_tricks_impl.py | 2 +- numpy/lib/_npyio_impl.py | 2 +- numpy/lib/_twodim_base_impl.py | 4 +- numpy/lib/_type_check_impl.py | 10 +-- numpy/linalg/_linalg.py | 4 +- numpy/ma/core.py | 6 +- numpy/ma/extras.py | 6 +- numpy/matlib.py | 6 +- numpy/matrixlib/defmatrix.py | 6 +- numpy/random/_generator.pyx | 2 +- 39 files changed, 194 insertions(+), 196 deletions(-) diff --git 
a/doc/neps/nep-0050-scalar-promotion.rst b/doc/neps/nep-0050-scalar-promotion.rst index aa04dd2c740e..974f6691d363 100644 --- a/doc/neps/nep-0050-scalar-promotion.rst +++ b/doc/neps/nep-0050-scalar-promotion.rst @@ -509,9 +509,9 @@ will be ignored. This means, that operations will never silently use the The user will have to write one of:: np.array([3]) + np.array(2**100) - np.array([3]) + np.array(2**100, dtype=object) + np.array([3]) + np.array(2**100, dtype=np.object_) -As such implicit conversion to ``object`` should be rare and the work-around +As such implicit conversion to ``object_`` should be rare and the work-around is clear, we expect that the backwards compatibility concerns are fairly small. diff --git a/doc/neps/nep-0055-string_dtype.rst b/doc/neps/nep-0055-string_dtype.rst index 7e29e1425e8c..555052fa16f7 100644 --- a/doc/neps/nep-0055-string_dtype.rst +++ b/doc/neps/nep-0055-string_dtype.rst @@ -224,7 +224,7 @@ to fixed-width unicode arrays:: In [3]: data = [str(i) * 10 for i in range(100_000)] - In [4]: %timeit arr_object = np.array(data, dtype=object) + In [4]: %timeit arr_object = np.array(data, dtype=np.object_) 3.15 ms ± 74.4 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) In [5]: %timeit arr_stringdtype = np.array(data, dtype=StringDType()) @@ -242,7 +242,7 @@ for strings, the string loading performance of ``StringDType`` should improve. String operations have similar performance:: - In [7]: %timeit np.array([s.capitalize() for s in data], dtype=object) + In [7]: %timeit np.array([s.capitalize() for s in data], dtype=np.object_) 31.6 ms ± 728 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) In [8]: %timeit np.char.capitalize(arr_stringdtype) diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst index 80821c2c08fa..755a13ff7252 100644 --- a/doc/source/reference/arrays.classes.rst +++ b/doc/source/reference/arrays.classes.rst @@ -479,16 +479,16 @@ Example: >>> import numpy as np - >>> a = np.memmap('newfile.dat', dtype=float, mode='w+', shape=1000) + >>> a = np.memmap('newfile.dat', dtype=np.float64, mode='w+', shape=1000) >>> a[10] = 10.0 >>> a[30] = 30.0 >>> del a - >>> b = np.fromfile('newfile.dat', dtype=float) + >>> b = np.fromfile('newfile.dat', dtype=np.float64) >>> print(b[10], b[30]) 10.0 30.0 - >>> a = np.memmap('newfile.dat', dtype=float) + >>> a = np.memmap('newfile.dat', dtype=np.float64) >>> print(a[10], a[30]) 10.0 30.0 diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index c0ec6a572c83..9cb7f59db78b 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -124,10 +124,10 @@ datetime type with generic units. 
>>> import numpy as np - >>> np.array(['2007-07-13', '2006-01-13', '2010-08-13'], dtype='datetime64') + >>> np.array(['2007-07-13', '2006-01-13', '2010-08-13'], dtype=np.datetime64) array(['2007-07-13', '2006-01-13', '2010-08-13'], dtype='datetime64[D]') - >>> np.array(['2001-01-01T12:00', '2002-02-03T13:56:03.172'], dtype='datetime64') + >>> np.array(['2001-01-01T12:00', '2002-02-03T13:56:03.172'], dtype=np.datetime64) array(['2001-01-01T12:00:00.000', '2002-02-03T13:56:03.172'], dtype='datetime64[ms]') diff --git a/doc/source/reference/arrays.dtypes.rst b/doc/source/reference/arrays.dtypes.rst index fcb3e122e6de..262c22655c76 100644 --- a/doc/source/reference/arrays.dtypes.rst +++ b/doc/source/reference/arrays.dtypes.rst @@ -561,7 +561,7 @@ This equivalence can only be handled through ``==``, not through ``is``. >>> import numpy as np - >>> a = np.array([1, 2], dtype=float) + >>> a = np.array([1, 2], dtype=np.float64) >>> a.dtype == np.dtype(np.float64) True >>> a.dtype == np.float64 diff --git a/doc/source/reference/arrays.promotion.rst b/doc/source/reference/arrays.promotion.rst index d2dead0ce7b5..32e503383217 100644 --- a/doc/source/reference/arrays.promotion.rst +++ b/doc/source/reference/arrays.promotion.rst @@ -79,10 +79,10 @@ their precision when determining the result dtype. This is often convenient. For instance, when working with arrays of a low precision dtype, it is usually desirable for simple operations with Python scalars to preserve the dtype. - >>> arr_float32 = np.array([1, 2.5, 2.1], dtype="float32") + >>> arr_float32 = np.array([1, 2.5, 2.1], dtype=np.float32) >>> arr_float32 + 10.0 # undesirable to promote to float64 array([11. , 12.5, 12.1], dtype=float32) - >>> arr_int16 = np.array([3, 5, 7], dtype="int16") + >>> arr_int16 = np.array([3, 5, 7], dtype=np.int16) >>> arr_int16 + 10 # undesirable to promote to int64 array([13, 15, 17], dtype=int16) @@ -130,7 +130,7 @@ overflows: ... RuntimeWarning: overflow encountered in scalar add Note that NumPy warns when overflows occur for scalars, but not for arrays; -e.g., ``np.array(100, dtype="uint8") + 100`` will *not* warn. +e.g., ``np.array(100, dtype=np.uint8) + 100`` will *not* warn. Numerical promotion ------------------- diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index d07a00ebde73..885bdb17181e 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -1786,9 +1786,9 @@ the functions that must be implemented for each slot. - ``0.0`` is the default for ``sum([])``. But ``-0.0`` is the correct identity otherwise as it preserves the sign for ``sum([-0.0])``. - We use no identity for object, but return the default of ``0`` and - ``1`` for the empty ``sum([], dtype=object)`` and - ``prod([], dtype=object)``. - This allows ``np.sum(np.array(["a", "b"], dtype=object))`` to work. + ``1`` for the empty ``sum([], dtype=np.object_)`` and + ``prod([], dtype=np.object_)``. + This allows ``np.sum(np.array(["a", "b"], dtype=np.object_))`` to work. - ``-inf`` or ``INT_MIN`` for ``max`` is an identity, but at least ``INT_MIN`` not a good *default* when there are no items. diff --git a/doc/source/reference/thread_safety.rst b/doc/source/reference/thread_safety.rst index 84590bfac39c..6b6b9b0ea054 100644 --- a/doc/source/reference/thread_safety.rst +++ b/doc/source/reference/thread_safety.rst @@ -29,8 +29,8 @@ locking if you need to mutation and multithreading. 
Note that operations that *do not* release the GIL will see no performance gains from use of the `threading` module, and instead might be better served with -`multiprocessing`. In particular, operations on arrays with ``dtype=object`` do -not release the GIL. +`multiprocessing`. In particular, operations on arrays with ``dtype=np.object_`` +do not release the GIL. Free-threaded Python -------------------- @@ -47,5 +47,5 @@ Because free-threaded Python does not have a global interpreter lock to serialize access to Python objects, there are more opportunities for threads to mutate shared state and create thread safety issues. In addition to the limitations about locking of the ndarray object noted above, this also means -that arrays with ``dtype=object`` are not protected by the GIL, creating data +that arrays with ``dtype=np.object_`` are not protected by the GIL, creating data races for python objects that are not possible outside free-threaded python. diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst index 6909db8cb7e2..f1007db45acc 100644 --- a/doc/source/user/absolute_beginners.rst +++ b/doc/source/user/absolute_beginners.rst @@ -779,7 +779,7 @@ You can add the arrays together with the plus sign. :: >>> data = np.array([1, 2]) - >>> ones = np.ones(2, dtype=int) + >>> ones = np.ones(2, dtype=np.int_) >>> data + ones array([2, 3]) diff --git a/doc/source/user/basics.creation.rst b/doc/source/user/basics.creation.rst index 1a7707ee69c9..19fa737d5f8d 100644 --- a/doc/source/user/basics.creation.rst +++ b/doc/source/user/basics.creation.rst @@ -20,7 +20,7 @@ There are 6 general mechanisms for creating arrays: 6) Use of special library functions (e.g., random) You can use these methods to create ndarrays or :ref:`structured_arrays`. -This document will cover general methods for ndarray creation. +This document will cover general methods for ndarray creation. 1) Converting Python sequences to NumPy arrays ============================================== @@ -29,8 +29,8 @@ NumPy arrays can be defined using Python sequences such as lists and tuples. Lists and tuples are defined using ``[...]`` and ``(...)``, respectively. Lists and tuples can define ndarray creation: -* a list of numbers will create a 1D array, -* a list of lists will create a 2D array, +* a list of numbers will create a 1D array, +* a list of lists will create a 2D array, * further nested lists will create higher-dimensional arrays. In general, any array object is called an **ndarray** in NumPy. :: @@ -72,7 +72,7 @@ results, for example:: Notice when you perform operations with two arrays of the same ``dtype``: ``uint32``, the resulting array is the same type. When you -perform operations with different ``dtype``, NumPy will +perform operations with different ``dtype``, NumPy will assign a new type that satisfies all of the array elements involved in the computation, here ``uint32`` and ``int32`` can both be represented in as ``int64``. @@ -88,7 +88,7 @@ you create the array. .. 40 functions seems like a small number, but the routines.array-creation - has ~47. I'm sure there are more. + has ~47. I'm sure there are more. NumPy has over 40 built-in functions for creating arrays as laid out in the :ref:`Array creation routines `. @@ -104,7 +104,7 @@ dimension of the array they create: The 1D array creation functions e.g. :func:`numpy.linspace` and :func:`numpy.arange` generally need at least two inputs, ``start`` and -``stop``. +``stop``. 
:func:`numpy.arange` creates arrays with regularly incrementing values. Check the documentation for complete information and examples. A few @@ -113,7 +113,7 @@ examples are shown:: >>> import numpy as np >>> np.arange(10) array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - >>> np.arange(2, 10, dtype=float) + >>> np.arange(2, 10, dtype=np.float64) array([2., 3., 4., 5., 6., 7., 8., 9.]) >>> np.arange(2, 3, 0.1) array([2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9]) @@ -121,8 +121,8 @@ examples are shown:: Note: best practice for :func:`numpy.arange` is to use integer start, end, and step values. There are some subtleties regarding ``dtype``. In the second example, the ``dtype`` is defined. In the third example, the array is -``dtype=float`` to accommodate the step size of ``0.1``. Due to roundoff error, -the ``stop`` value is sometimes included. +``dtype=np.float64`` to accommodate the step size of ``0.1``. Due to roundoff error, +the ``stop`` value is sometimes included. :func:`numpy.linspace` will create arrays with a specified number of elements, and spaced equally between the specified beginning and end values. For @@ -140,7 +140,7 @@ number of elements and the starting and end point. The previous ------------------------------- The 2D array creation functions e.g. :func:`numpy.eye`, :func:`numpy.diag`, and :func:`numpy.vander` -define properties of special matrices represented as 2D arrays. +define properties of special matrices represented as 2D arrays. ``np.eye(n, m)`` defines a 2D identity matrix. The elements where i=j (row index and column index are equal) are 1 and the rest are 0, as such:: @@ -159,7 +159,7 @@ and the rest are 0, as such:: the diagonal *or* if given a 2D array returns a 1D array that is only the diagonal elements. The two array creation functions can be helpful while doing linear algebra, as such:: - + >>> import numpy as np >>> np.diag([1, 2, 3]) array([[1, 0, 0], @@ -197,7 +197,7 @@ routine is helpful in generating linear least squares models, as such:: [ 8, 4, 2, 1], [27, 9, 3, 1], [64, 16, 4, 1]]) - + 3 - general ndarray creation functions -------------------------------------- @@ -205,20 +205,20 @@ The ndarray creation functions e.g. :func:`numpy.ones`, :func:`numpy.zeros`, and :meth:`~numpy.random.Generator.random` define arrays based upon the desired shape. The ndarray creation functions can create arrays with any dimension by specifying how many dimensions -and length along that dimension in a tuple or list. +and length along that dimension in a tuple or list. :func:`numpy.zeros` will create an array filled with 0 values with the specified shape. The default dtype is ``float64``:: >>> import numpy as np >>> np.zeros((2, 3)) - array([[0., 0., 0.], + array([[0., 0., 0.], [0., 0., 0.]]) >>> np.zeros((2, 3, 2)) array([[[0., 0.], [0., 0.], [0., 0.]], - + [[0., 0.], [0., 0.], [0., 0.]]]) @@ -228,7 +228,7 @@ specified shape. The default dtype is ``float64``:: >>> import numpy as np >>> np.ones((2, 3)) - array([[1., 1., 1.], + array([[1., 1., 1.], [1., 1., 1.]]) >>> np.ones((2, 3, 2)) array([[[1., 1.], @@ -265,11 +265,11 @@ dimension:: >>> import numpy as np >>> np.indices((3,3)) - array([[[0, 0, 0], - [1, 1, 1], - [2, 2, 2]], - [[0, 1, 2], - [0, 1, 2], + array([[[0, 0, 0], + [1, 1, 1], + [2, 2, 2]], + [[0, 1, 2], + [0, 1, 2], [0, 1, 2]]]) This is particularly useful for evaluating functions of multiple dimensions on @@ -322,7 +322,7 @@ arrays into a 4-by-4 array using ``block``:: [ 0., 0., 0., -4.]]) Other routines use similar syntax to join ndarrays. 
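
For instance, a minimal sketch with two such joining routines,
:func:`numpy.vstack` and :func:`numpy.hstack` (chosen here only as
representative examples)::

    >>> a = np.array([1, 2])
    >>> b = np.array([3, 4])
    >>> np.vstack((a, b))
    array([[1, 2],
           [3, 4]])
    >>> np.hstack((a, b))
    array([1, 2, 3, 4])
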
Check the -routine's documentation for further examples and syntax. +routine's documentation for further examples and syntax. 4) Reading arrays from disk, either from standard or custom formats =================================================================== @@ -330,7 +330,7 @@ routine's documentation for further examples and syntax. This is the most common case of large array creation. The details depend greatly on the format of data on disk. This section gives general pointers on how to handle various formats. For more detailed examples of IO look at -:ref:`How to Read and Write files `. +:ref:`How to Read and Write files `. Standard binary formats ----------------------- @@ -397,4 +397,4 @@ knowledge to interface with C or C++. NumPy is the fundamental library for array containers in the Python Scientific Computing stack. Many Python libraries, including SciPy, Pandas, and OpenCV, use NumPy ndarrays as the common format for data exchange, These libraries can create, -operate on, and work with NumPy arrays. +operate on, and work with NumPy arrays. diff --git a/doc/source/user/basics.io.genfromtxt.rst b/doc/source/user/basics.io.genfromtxt.rst index d5b6bba8f28d..2a1523ba209b 100644 --- a/doc/source/user/basics.io.genfromtxt.rst +++ b/doc/source/user/basics.io.genfromtxt.rst @@ -201,16 +201,16 @@ The main way to control how the sequences of strings we have read from the file are converted to other types is to set the ``dtype`` argument. Acceptable values for this argument are: -* a single type, such as ``dtype=float``. +* a single type, such as ``dtype=np.float64``. The output will be 2D with the given dtype, unless a name has been associated with each column with the use of the ``names`` argument - (see below). Note that ``dtype=float`` is the default for + (see below). Note that ``dtype=np.float64`` is the default for :func:`~numpy.genfromtxt`. -* a sequence of types, such as ``dtype=(int, float, float)``. +* a sequence of types, such as ``dtype=(np.int_, np.float64, np.float64)``. * a comma-separated string, such as ``dtype="i4,f8,|U3"``. * a dictionary with two keys ``'names'`` and ``'formats'``. * a sequence of tuples ``(name, type)``, such as - ``dtype=[('A', int), ('B', float)]``. + ``dtype=[('A', np.int_), ('B', np.float64)]``. * an existing :class:`numpy.dtype` object. * the special value ``None``. In that case, the type of the columns will be determined from the data @@ -243,7 +243,7 @@ each column. 
A first possibility is to use an explicit structured dtype, as mentioned previously:: >>> data = StringIO("1 2 3\n 4 5 6") - >>> np.genfromtxt(data, dtype=[(_, int) for _ in "abc"]) + >>> np.genfromtxt(data, dtype=[(_, np.int_) for _ in "abc"]) array([(1, 2, 3), (4, 5, 6)], dtype=[('a', '>> data = StringIO("1 2 3\n 4 5 6") - >>> ndtype=[('a',int), ('b', float), ('c', int)] + >>> ndtype=[('a', np.int_), ('b', np.float64), ('c', np.int_)] >>> names = ["A", "B", "C"] >>> np.genfromtxt(data, names=names, dtype=ndtype) array([(1, 2., 3), (4, 5., 6)], @@ -289,7 +289,7 @@ with the standard NumPy default of ``"f%i"``, yielding names like ``f0``, ``f1`` and so forth:: >>> data = StringIO("1 2 3\n 4 5 6") - >>> np.genfromtxt(data, dtype=(int, float, int)) + >>> np.genfromtxt(data, dtype=(np.int_, np.float64, np.int_)) array([(1, 2., 3), (4, 5., 6)], dtype=[('f0', '>> data = StringIO("1 2 3\n 4 5 6") - >>> np.genfromtxt(data, dtype=(int, float, int), names="a") + >>> np.genfromtxt(data, dtype=(np.int_, np.float64, np.int_), names="a") array([(1, 2., 3), (4, 5., 6)], dtype=[('a', '>> data = StringIO("1 2 3\n 4 5 6") - >>> np.genfromtxt(data, dtype=(int, float, int), defaultfmt="var_%02i") + >>> np.genfromtxt(data, dtype=(np.int_, np.float64, np.int_), defaultfmt="var_%02i") array([(1, 2., 3), (4, 5., 6)], dtype=[('var_00', '>> data = "N/A, 2, 3\n4, ,???" >>> kwargs = dict(delimiter=",", - ... dtype=int, + ... dtype=np.int_, ... names="a,b,c", ... missing_values={0:"N/A", 'b':" ", 2:"???"}, ... filling_values={0:0, 'b':0, 2:-999}) diff --git a/doc/source/user/basics.subclassing.rst b/doc/source/user/basics.subclassing.rst index 2a369aaae17c..202561a958a8 100644 --- a/doc/source/user/basics.subclassing.rst +++ b/doc/source/user/basics.subclassing.rst @@ -346,7 +346,7 @@ Simple example - adding an extra attribute to ndarray class InfoArray(np.ndarray): - def __new__(subtype, shape, dtype=float, buffer=None, offset=0, + def __new__(subtype, shape, dtype=np.float64, buffer=None, offset=0, strides=None, order=None, info=None): # Create the ndarray instance of our type, given the usual # ndarray input arguments. This will call the standard @@ -779,5 +779,3 @@ your function's signature should accept ``**kwargs``. For example: This object is now compatible with ``np.sum`` again because any extraneous arguments (i.e. keywords that are not ``axis`` or ``dtype``) will be hidden away in the ``**unused_kwargs`` parameter. - - diff --git a/doc/source/user/basics.ufuncs.rst b/doc/source/user/basics.ufuncs.rst index 773fe86c21d2..8607c2abda9b 100644 --- a/doc/source/user/basics.ufuncs.rst +++ b/doc/source/user/basics.ufuncs.rst @@ -76,7 +76,7 @@ an integer (or Boolean) data-type and smaller than the size of the >>> x.dtype dtype('int64') - >>> np.multiply.reduce(x, dtype=float) + >>> np.multiply.reduce(x, dtype=np.float64) array([ 0., 28., 80.]) Finally, the *out* keyword allows you to @@ -84,10 +84,10 @@ provide an output array (or a tuple of output arrays for multi-output ufuncs). If *out* is given, the *dtype* argument is only used for the internal computations. 
Considering ``x`` from the previous example:: - >>> y = np.zeros(3, dtype=int) + >>> y = np.zeros(3, dtype=np.int_) >>> y array([0, 0, 0]) - >>> np.multiply.reduce(x, dtype=float, out=y) + >>> np.multiply.reduce(x, dtype=np.float64, out=y) array([ 0, 28, 80]) Ufuncs also have a fifth method, :func:`numpy.ufunc.at`, that allows in place diff --git a/doc/source/user/c-info.ufunc-tutorial.rst b/doc/source/user/c-info.ufunc-tutorial.rst index 09daa95b7875..e5773f8232b8 100644 --- a/doc/source/user/c-info.ufunc-tutorial.rst +++ b/doc/source/user/c-info.ufunc-tutorial.rst @@ -608,7 +608,7 @@ After the above has been installed, it can be imported and used as follows. >>> import npufunc >>> npufunc.logit(0.5) np.float64(0.0) ->>> a = np.linspace(0, 1, 5, dtype="f4") +>>> a = np.linspace(0, 1, 5, dtype=np.float32) >>> npufunc.logit(a) :1: RuntimeWarning: divide by zero encountered in logit array([ -inf, -1.0986123, 0. , 1.0986123, inf], diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst index ef4a0467c706..1208bd1a6347 100644 --- a/doc/source/user/quickstart.rst +++ b/doc/source/user/quickstart.rst @@ -163,7 +163,7 @@ The type of the array can also be explicitly specified at creation time: :: - >>> c = np.array([[1, 2], [3, 4]], dtype=complex) + >>> c = np.array([[1, 2], [3, 4]], dtype=np.complex128) >>> c array([[1.+0.j, 2.+0.j], [3.+0.j, 4.+0.j]]) @@ -346,7 +346,7 @@ existing array rather than create a new one. :: >>> rg = np.random.default_rng(1) # create instance of default random number generator - >>> a = np.ones((2, 3), dtype=int) + >>> a = np.ones((2, 3), dtype=np.int_) >>> b = rg.random((2, 3)) >>> a *= 3 >>> a @@ -535,7 +535,7 @@ are given in a tuple separated by commas:: >>> def f(x, y): ... return 10 * x + y ... - >>> b = np.fromfunction(f, (5, 4), dtype=int) + >>> b = np.fromfunction(f, (5, 4), dtype=np.int_) >>> b array([[ 0, 1, 2, 3], [10, 11, 12, 13], @@ -1256,7 +1256,7 @@ set `__: ... A, B = np.meshgrid(x, y) ... C = A + B*1j ... z = np.zeros_like(C) - ... divtime = maxit + np.zeros(z.shape, dtype=int) + ... divtime = maxit + np.zeros(z.shape, dtype=np.int_) ... ... for i in range(maxit): ... z = z**2 + C diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index eded752b2721..b37014b6a648 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -418,11 +418,11 @@ original data when the :meth:`~object.__exit__` function is called but not before: - >>> a = np.arange(6, dtype='i4')[::-2] + >>> a = np.arange(6, dtype=np.int32)[::-2] >>> with np.nditer(a, [], ... [['writeonly', 'updateifcopy']], ... casting='unsafe', - ... op_dtypes=[np.dtype('f4')]) as i: + ... op_dtypes=[np.dtype(np.float32)]) as i: ... x = i.operands[0] ... x[:] = [-1, -2, -3] ... # a still unchanged here @@ -939,7 +939,7 @@ ``NPY_MAXDIMS``). Setting ``ndmax`` stops recursion at the specified depth, preserving deeper nested structures as objects instead of promoting them to - higher-dimensional arrays. In this case, ``dtype=object`` is required. + higher-dimensional arrays. In this case, ``dtype=np.object_`` is required. .. 
versionadded:: 2.4.0 ${ARRAY_FUNCTION_LIKE} @@ -994,7 +994,7 @@ Type provided: - >>> np.array([1, 2, 3], dtype=complex) + >>> np.array([1, 2, 3], dtype=np.complex128) array([ 1.+0.j, 2.+0.j, 3.+0.j]) Data-type consisting of more than one element: @@ -1015,14 +1015,14 @@ Limiting the maximum dimensions with ``ndmax``: - >>> a = np.array([[1, 2], [3, 4]], dtype=object, ndmax=2) + >>> a = np.array([[1, 2], [3, 4]], dtype=np.object_, ndmax=2) >>> a array([[1, 2], [3, 4]], dtype=object) >>> a.shape (2, 2) - >>> b = np.array([[1, 2], [3, 4]], dtype=object, ndmax=1) + >>> b = np.array([[1, 2], [3, 4]], dtype=np.object_, ndmax=1) >>> b array([list([1, 2]), list([3, 4])], dtype=object) >>> b.shape @@ -1389,7 +1389,7 @@ array([[ -9.74499359e+001, 6.69583040e-309], [ 2.13182611e-314, 3.06959433e-309]]) #uninitialized - >>> np.empty([2, 2], dtype=int) + >>> np.empty([2, 2], dtype=np.int_) array([[-1073741821, -1067949133], [ 496041986, 19249760]]) #uninitialized @@ -1455,7 +1455,7 @@ >>> np.zeros(5) array([ 0., 0., 0., 0., 0.]) - >>> np.zeros((5,), dtype=int) + >>> np.zeros((5,), dtype=np.int_) array([0, 0, 0, 0, 0]) >>> np.zeros((2, 1)) @@ -1487,7 +1487,7 @@ fromstring(string, dtype=None, count=-1, *, sep, like=None) -- - fromstring(string, dtype=float, count=-1, *, sep, like=None) + fromstring(string, dtype=np.float64, count=-1, *, sep, like=None) A new 1-D array initialized from text data in a string. @@ -1538,9 +1538,9 @@ Examples -------- >>> import numpy as np - >>> np.fromstring('1 2', dtype=int, sep=' ') + >>> np.fromstring('1 2', dtype=np.int_, sep=' ') array([1, 2]) - >>> np.fromstring('1, 2', dtype=int, sep=',') + >>> np.fromstring('1, 2', dtype=np.int_, sep=',') array([1, 2]) """) @@ -1649,7 +1649,7 @@ fromfile(file, dtype=None, count=-1, sep='', offset=0, *, like=None) -- - fromfile(file, dtype=float, count=-1, sep='', offset=0, *, like=None) + fromfile(file, dtype=np.float64, count=-1, sep='', offset=0, *, like=None) Construct an array from data in a text or binary file. @@ -1737,7 +1737,7 @@ frombuffer(buffer, dtype=None, count=-1, offset=0, *, like=None) -- - frombuffer(buffer, dtype=float, count=-1, offset=0, *, like=None) + frombuffer(buffer, dtype=np.float64, count=-1, offset=0, *, like=None) Interpret a buffer as a 1-dimensional array. @@ -1770,7 +1770,7 @@ If the buffer has data that is not in machine byte-order, this should be specified as part of the data-type, e.g.:: - >>> dt = np.dtype(int) + >>> dt = np.dtype(np.int_) >>> dt = dt.newbyteorder('>') >>> np.frombuffer(buf, dtype=dt) # doctest: +SKIP @@ -1925,9 +1925,9 @@ `start` is much larger than `step`. This can lead to unexpected behaviour. For example:: - >>> np.arange(0, 5, 0.5, dtype=int) + >>> np.arange(0, 5, 0.5, dtype=np.int_) array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) - >>> np.arange(-3, 3, 0.5, dtype=int) + >>> np.arange(-3, 3, 0.5, dtype=np.int_) array([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8]) In such cases, the use of `numpy.linspace` should be preferred. 
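
A brief sketch of the pitfall warned about above (printed values assume the
default ``float64`` arithmetic and repr): with a fractional step, whether
``arange`` reaches the endpoint depends on rounding, whereas ``linspace``
fixes the number of samples up front::

    >>> np.arange(0, 1, 0.25)
    array([0.  , 0.25, 0.5 , 0.75])
    >>> np.linspace(0, 1, num=5)
    array([0.  , 0.25, 0.5 , 0.75, 1.  ])
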
@@ -2030,16 +2030,16 @@ Examples -------- >>> import numpy as np - >>> np.promote_types('f4', 'f8') + >>> np.promote_types(np.float32, np.float64) dtype('float64') - >>> np.promote_types('i8', 'f4') + >>> np.promote_types(np.int64, np.float32) dtype('float64') >>> np.promote_types('>i8', '>> np.promote_types('i4', 'S8') + >>> np.promote_types(np.int32, 'S8') dtype('S11') An example of a non-associative case: @@ -2376,7 +2376,7 @@ ndarray(shape, dtype=None, buffer=None, offset=0, strides=None, order=None) -- - ndarray(shape, dtype=float, buffer=None, offset=0, strides=None, order=None) + ndarray(shape, dtype=np.float64, buffer=None, offset=0, strides=None, order=None) An array object represents a multidimensional, homogeneous array of fixed-size items. An associated data-type object describes the @@ -2485,7 +2485,7 @@ First mode, `buffer` is None: >>> import numpy as np - >>> np.ndarray(shape=(2,2), dtype=float, order='F') + >>> np.ndarray(shape=(2,2), dtype=np.float64, order='F') array([[0.0e+000, 0.0e+000], # random [ nan, 2.5e-323]]) @@ -2493,7 +2493,7 @@ >>> np.ndarray((2,), buffer=np.array([1,2,3]), ... offset=np.int_().itemsize, - ... dtype=int) # offset = 1*itemsize, i.e. skip first element + ... dtype=np.int_) # offset = 1*itemsize, i.e. skip first element array([2, 3]) """) @@ -3573,15 +3573,15 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: >>> x array([1. , 2. , 2.5]) - >>> x.astype(int) + >>> x.astype(np.int_) array([1, 2, 2]) - >>> x.astype(int, casting="same_value") + >>> x.astype(np.int_, casting="same_value") Traceback (most recent call last): ... ValueError: could not cast 'same_value' double to long - >>> x[:2].astype(int, casting="same_value") + >>> x[:2].astype(np.int_, casting="same_value") array([1, 2]) """) @@ -3751,12 +3751,12 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: >>> y.flags['C_CONTIGUOUS'] True - For arrays containing Python objects (e.g. dtype=object), + For arrays containing Python objects (e.g. dtype=np.object_), the copy is a shallow one. The new array will contain the same object which may lead to surprises if that object can be modified (is mutable): - >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) + >>> a = np.array([1, 'm', [2, 3, 4]], dtype=np.object_) >>> b = a.copy() >>> b[2][0] = 10 >>> a @@ -3766,7 +3766,7 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: use `copy.deepcopy`: >>> import copy - >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) + >>> a = np.array([1, 'm', [2, 3, 4]], dtype=np.object_) >>> c = copy.deepcopy(a) >>> c[2][0] = 10 >>> c @@ -3868,7 +3868,7 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: to a single array element. The following is a rare example where this distinction is important: - >>> a = np.array([None, None], dtype=object) + >>> a = np.array([None, None], dtype=np.object_) >>> a[0] = np.array(3) >>> a array([array(3), None], dtype=object) @@ -4020,7 +4020,7 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: For an array with object dtype, elements are returned as-is. - >>> a = np.array([np.int64(1)], dtype=object) + >>> a = np.array([np.int64(1)], dtype=np.object_) >>> a.item() #return np.int64 np.int64(1) """) @@ -4780,7 +4780,7 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: .. note:: Passing None for ``dtype`` is different from omitting the parameter, since the former invokes ``dtype(None)`` which is an alias for - ``dtype('float64')``. + ``dtype(np.float64)``. 
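
The alias mentioned above is easy to confirm interactively (a minimal
doctest)::

    >>> np.dtype(None)
    dtype('float64')
    >>> np.dtype(None) == np.dtype(np.float64)
    True
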
Parameters ---------- @@ -5867,8 +5867,8 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: This API requires passing dtypes, define them for convenience: >>> import numpy as np - >>> int32 = np.dtype("int32") - >>> float32 = np.dtype("float32") + >>> int32 = np.dtype(np.int32) + >>> float32 = np.dtype(np.float32) The typical ufunc call does not pass an output dtype. `numpy.add` has two inputs and one output, so leave the output as ``None`` (not provided): @@ -6095,11 +6095,11 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: -------- >>> import numpy as np - >>> x = np.dtype('i4') + >>> x = np.dtype(np.int32) >>> x.alignment 4 - >>> x = np.dtype(float) + >>> x = np.dtype(np.float64) >>> x.alignment 8 @@ -6124,11 +6124,11 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: -------- >>> import numpy as np - >>> dt = np.dtype('i2') + >>> dt = np.dtype(np.int16) >>> dt.byteorder '=' >>> # endian is not relevant for 8 bit numbers - >>> np.dtype('i1').byteorder + >>> np.dtype(np.int8).byteorder '|' >>> # or ASCII strings >>> np.dtype('S2').byteorder @@ -6274,13 +6274,13 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: -------- >>> import numpy as np - >>> dt = np.dtype('i2') + >>> dt = np.dtype(np.int16) >>> dt.isbuiltin 1 - >>> dt = np.dtype('f8') + >>> dt = np.dtype(np.float64) >>> dt.isbuiltin 1 - >>> dt = np.dtype([('field1', 'f8')]) + >>> dt = np.dtype([('field1', np.float64)]) >>> dt.isbuiltin 0 @@ -6348,13 +6348,13 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: -------- >>> import numpy as np - >>> dt = np.dtype('i4') + >>> dt = np.dtype(np.int32) >>> dt.kind 'i' - >>> dt = np.dtype('f8') + >>> dt = np.dtype(np.float64) >>> dt.kind 'f' - >>> dt = np.dtype([('field1', 'f8')]) + >>> dt = np.dtype([('field1', np.float64)]) >>> dt.kind 'V' @@ -6520,7 +6520,7 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: >>> x.subdtype (dtype('float32'), (8,)) - >>> x = np.dtype('i2') + >>> x = np.dtype(np.int16) >>> x.subdtype >>> @@ -6542,7 +6542,7 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: >>> x.base dtype('float32') - >>> x = np.dtype('i2') + >>> x = np.dtype(np.int16) >>> x.base dtype('int16') diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index fe07047a1758..1630a9d6f136 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -917,7 +917,7 @@ def add_newdoc(place, name, doc): array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00]) >>> >>> # Example of providing the optional output parameter - >>> out1 = np.array([0], dtype='d') + >>> out1 = np.array([0], dtype=np.float64) >>> out2 = np.cos([0.1], out1) >>> out2 is out1 True @@ -1145,7 +1145,7 @@ def add_newdoc(place, name, doc): ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. $OUT_SCALAR_2 See Also @@ -1506,7 +1506,7 @@ def add_newdoc(place, name, doc): ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. $OUT_SCALAR_2 @@ -1545,7 +1545,7 @@ def add_newdoc(place, name, doc): ------- out : bool or ndarray of bool Output array, element-wise comparison of `x1` and `x2`. 
- Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. $OUT_SCALAR_2 See Also @@ -1957,7 +1957,7 @@ def add_newdoc(place, name, doc): ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. $OUT_SCALAR_2 See Also @@ -1994,7 +1994,7 @@ def add_newdoc(place, name, doc): ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. $OUT_SCALAR_2 See Also @@ -3255,7 +3255,7 @@ def add_newdoc(place, name, doc): ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. $OUT_SCALAR_2 See Also @@ -3373,9 +3373,9 @@ def add_newdoc(place, name, doc): >>> p array([nan, nan]) - To get complex results, give the argument ``dtype=complex``. + To get complex results, give the argument ``dtype=np.complex128``. - >>> np.power(x3, 1.5, dtype=complex) + >>> np.power(x3, 1.5, dtype=np.complex128) array([-1.83697020e-16-1.j, -1.46957616e-15-8.j]) """) @@ -3452,9 +3452,9 @@ def add_newdoc(place, name, doc): >>> p array([nan, nan]) - To get complex results, give the argument ``dtype=complex``. + To get complex results, give the argument ``dtype=np.complex128``. - >>> np.float_power(x3, 1.5, dtype=complex) + >>> np.float_power(x3, 1.5, dtype=np.complex128) array([-1.83697020e-16-1.j, -1.46957616e-15-8.j]) """) @@ -4055,7 +4055,7 @@ def add_newdoc(place, name, doc): >>> # Discrepancy due to vagaries of floating point arithmetic. >>> # Example of providing the optional output parameter - >>> out1 = np.array([0], dtype='d') + >>> out1 = np.array([0], dtype=np.float64) >>> out2 = np.sinh([0.1], out1) >>> out2 is out1 True @@ -4256,7 +4256,7 @@ def add_newdoc(place, name, doc): >>> >>> # Example of providing the optional output parameter illustrating >>> # that what is returned is a reference to said parameter - >>> out1 = np.array([0], dtype='d') + >>> out1 = np.array([0], dtype=np.float64) >>> out2 = np.cos([0.1], out1) >>> out2 is out1 True @@ -4309,7 +4309,7 @@ def add_newdoc(place, name, doc): >>> # Example of providing the optional output parameter illustrating >>> # that what is returned is a reference to said parameter - >>> out1 = np.array([0], dtype='d') + >>> out1 = np.array([0], dtype=np.float64) >>> out2 = np.tanh([0.1], out1) >>> out2 is out1 True diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 33fb9ec4b39f..dd94b4d0bed9 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -2026,7 +2026,7 @@ def nonzero(a): Notes ----- While the nonzero values can be obtained with ``a[nonzero(a)]``, it is - recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which + recommended to use ``x[x.astype(np.bool)]`` or ``x[x != 0]`` instead, which will correctly handle 0-d arrays. Examples @@ -2385,7 +2385,7 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, more precise approach to summation. Especially when summing a large number of lower precision floating point numbers, such as ``float32``, numerical errors can become significant. 
- In such cases it can be advisable to use `dtype="float64"` to use a higher + In such cases it can be advisable to use `dtype=np.float64` to use a higher precision for the output. Examples @@ -2721,7 +2721,7 @@ def cumulative_prod(x, /, *, axis=None, dtype=None, out=None, ... # total product 1*2*3 = 6 array([1, 2, 6]) >>> a = np.array([1, 2, 3, 4, 5, 6]) - >>> np.cumulative_prod(a, dtype=float) # specify type of output + >>> np.cumulative_prod(a, dtype=np.float64) # specify type of output array([ 1., 2., 6., 24., 120., 720.]) The cumulative product for each column (i.e., over the rows) of ``b``: @@ -2808,7 +2808,7 @@ def cumulative_sum(x, /, *, axis=None, dtype=None, out=None, array([1, 2, 3, 4, 5, 6]) >>> np.cumulative_sum(a) array([ 1, 3, 6, 10, 15, 21]) - >>> np.cumulative_sum(a, dtype=float) # specifies type of output value(s) + >>> np.cumulative_sum(a, dtype=np.float64) # specifies type of output value(s) array([ 1., 3., 6., 10., 15., 21.]) >>> b = np.array([[1, 2, 3], [4, 5, 6]]) @@ -2892,7 +2892,7 @@ def cumsum(a, axis=None, dtype=None, out=None): [4, 5, 6]]) >>> np.cumsum(a) array([ 1, 3, 6, 10, 15, 21]) - >>> np.cumsum(a, dtype=float) # specifies type of output value(s) + >>> np.cumsum(a, dtype=np.float64) # specifies type of output value(s) array([ 1., 3., 6., 10., 15., 21.]) >>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns @@ -3096,7 +3096,7 @@ def max(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, array([1, 3]) >>> np.max(a, where=[False, True], initial=-1, axis=0) array([-1, 3]) - >>> b = np.arange(5, dtype=float) + >>> b = np.arange(5, dtype=np.float64) >>> b[2] = np.nan >>> np.max(b) np.float64(nan) @@ -3235,7 +3235,7 @@ def min(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, >>> np.min(a, where=[False, True], initial=10, axis=0) array([10, 1]) - >>> b = np.arange(5, dtype=float) + >>> b = np.arange(5, dtype=np.float64) >>> b[2] = np.nan >>> np.min(b) np.float64(nan) @@ -3456,7 +3456,7 @@ def cumprod(a, axis=None, dtype=None, out=None): ... # total product 1*2*3 = 6 array([1, 2, 6]) >>> a = np.array([[1, 2, 3], [4, 5, 6]]) - >>> np.cumprod(a, dtype=float) # specify type of output + >>> np.cumprod(a, dtype=np.float64) # specify type of output array([ 1., 2., 6., 24., 120., 720.]) The cumulative product for each column (i.e., over the rows) of `a`: diff --git a/numpy/_core/function_base.py b/numpy/_core/function_base.py index 7fcc6b4f770a..b01ba108d2c4 100644 --- a/numpy/_core/function_base.py +++ b/numpy/_core/function_base.py @@ -38,7 +38,7 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, .. versionchanged:: 1.20.0 Values are rounded towards ``-inf`` instead of ``0`` when an integer ``dtype`` is specified. 
The old behavior can - still be obtained with ``np.linspace(start, stop, num).astype(int)`` + still be obtained with ``np.linspace(start, stop, num).astype(np.int_)`` Parameters ---------- @@ -375,9 +375,9 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): Note that the above may not produce exact integers: - >>> np.geomspace(1, 256, num=9, dtype=int) + >>> np.geomspace(1, 256, num=9, dtype=np.int_) array([ 1, 2, 4, 7, 16, 32, 63, 127, 256]) - >>> np.around(np.geomspace(1, 256, num=9)).astype(int) + >>> np.around(np.geomspace(1, 256, num=9)).astype(np.int_) array([ 1, 2, 4, 8, 16, 32, 64, 128, 256]) Negative, decreasing, and complex inputs are allowed: diff --git a/numpy/_core/memmap.py b/numpy/_core/memmap.py index 8cfa7f94a8da..e0b638c6f976 100644 --- a/numpy/_core/memmap.py +++ b/numpy/_core/memmap.py @@ -130,7 +130,7 @@ class memmap(ndarray): Examples -------- >>> import numpy as np - >>> data = np.arange(12, dtype='float32') + >>> data = np.arange(12, dtype=np.float32) >>> data.resize((3,4)) This example uses a temporary file so that doctest doesn't write @@ -142,7 +142,7 @@ class memmap(ndarray): Create a memmap with dtype and shape that matches our data: - >>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4)) + >>> fp = np.memmap(filename, dtype=np.float32, mode='w+', shape=(3,4)) >>> fp memmap([[0., 0., 0., 0.], [0., 0., 0., 0.], @@ -165,7 +165,7 @@ class memmap(ndarray): Load the memmap and verify data was stored: - >>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) + >>> newfp = np.memmap(filename, dtype=np.float32, mode='r', shape=(3,4)) >>> newfp memmap([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], @@ -173,13 +173,13 @@ class memmap(ndarray): Read-only memmap: - >>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) + >>> fpr = np.memmap(filename, dtype=np.float32, mode='r', shape=(3,4)) >>> fpr.flags.writeable False Copy-on-write memmap: - >>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4)) + >>> fpc = np.memmap(filename, dtype=np.float32, mode='c', shape=(3,4)) >>> fpc.flags.writeable True @@ -205,7 +205,7 @@ class memmap(ndarray): Offset into a memmap: - >>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16) + >>> fpo = np.memmap(filename, dtype=np.float32, mode='r', offset=16) >>> fpo memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32) diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index dd5a66a8785f..54d240c89e3e 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -700,7 +700,7 @@ def min_scalar_type(a, /): >>> np.min_scalar_type(1e50) dtype('float64') - >>> np.min_scalar_type(np.arange(4,dtype='f8')) + >>> np.min_scalar_type(np.arange(4, dtype=np.float64)) dtype('float64') """ @@ -732,10 +732,10 @@ def result_type(*arrays_and_dtypes): Examples -------- >>> import numpy as np - >>> np.result_type(3, np.arange(7, dtype='i1')) + >>> np.result_type(3, np.arange(7, dtype=np.int8)) dtype('int8') - >>> np.result_type('i4', 'c8') + >>> np.result_type(np.int32, np.complex64) dtype('complex128') >>> np.result_type(3.0, -2) @@ -961,7 +961,7 @@ def bincount(x, /, weights=None, minlength=0): The input array needs to be of integer dtype, otherwise a TypeError is raised: - >>> np.bincount(np.arange(5, dtype=float)) + >>> np.bincount(np.arange(5, dtype=np.float64)) Traceback (most recent call last): ... 
TypeError: Cannot cast array data from dtype('float64') to dtype('int64') diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index 11527c9de442..d4e1685501d7 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -151,7 +151,7 @@ def zeros_like( array([[0, 0, 0], [0, 0, 0]]) - >>> y = np.arange(3, dtype=float) + >>> y = np.arange(3, dtype=np.float64) >>> y array([0., 1., 2.]) >>> np.zeros_like(y) @@ -211,7 +211,7 @@ def ones(shape, dtype=None, order='C', *, device=None, like=None): >>> np.ones(5) array([1., 1., 1., 1., 1.]) - >>> np.ones((5,), dtype=int) + >>> np.ones((5,), dtype=np.int_) array([1, 1, 1, 1, 1]) >>> np.ones((2, 1)) @@ -300,7 +300,7 @@ def ones_like( array([[1, 1, 1], [1, 1, 1]]) - >>> y = np.arange(3, dtype=float) + >>> y = np.arange(3, dtype=np.float64) >>> y array([0., 1., 2.]) >>> np.ones_like(y) @@ -448,21 +448,21 @@ def full_like( Examples -------- >>> import numpy as np - >>> x = np.arange(6, dtype=int) + >>> x = np.arange(6, dtype=np.int_) >>> np.full_like(x, 1) array([1, 1, 1, 1, 1, 1]) >>> np.full_like(x, 0.1) array([0, 0, 0, 0, 0, 0]) - >>> np.full_like(x, 0.1, dtype=np.double) + >>> np.full_like(x, 0.1, dtype=np.float64) array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) - >>> np.full_like(x, np.nan, dtype=np.double) + >>> np.full_like(x, np.nan, dtype=np.float64) array([nan, nan, nan, nan, nan, nan]) - >>> y = np.arange(6, dtype=np.double) + >>> y = np.arange(6, dtype=np.float64) >>> np.full_like(y, 0.1) array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) - >>> y = np.zeros([2, 2, 3], dtype=int) + >>> y = np.zeros([2, 2, 3], dtype=np.int_) >>> np.full_like(y, [0, 0, 255]) array([[[ 0, 0, 255], [ 0, 0, 255]], @@ -982,7 +982,7 @@ def outer(a, b, out=None): An example using a "vector" of letters: - >>> x = np.array(['a', 'b', 'c'], dtype=object) + >>> x = np.array(['a', 'b', 'c'], dtype=np.object_) >>> np.outer(x, [1, 2, 3]) array([['a', 'aa', 'aaa'], ['b', 'bb', 'bbb'], @@ -1107,7 +1107,7 @@ def tensordot(a, b, axes=2): An extended example taking advantage of the overloading of + and \\*: >>> a = np.array(range(1, 9)).reshape((2, 2, 2)) - >>> A = np.array(('a', 'b', 'c', 'd'), dtype=object) + >>> A = np.array(('a', 'b', 'c', 'd'), dtype=np.object_) >>> A = A.reshape((2, 2)) >>> a; A array([[[1, 2], @@ -1922,20 +1922,20 @@ def fromfunction(function, shape, *, dtype=float, like=None, **kwargs): Examples -------- >>> import numpy as np - >>> np.fromfunction(lambda i, j: i, (2, 2), dtype=float) + >>> np.fromfunction(lambda i, j: i, (2, 2), dtype=np.float64) array([[0., 0.], [1., 1.]]) - >>> np.fromfunction(lambda i, j: j, (2, 2), dtype=float) + >>> np.fromfunction(lambda i, j: j, (2, 2), dtype=np.float64) array([[0., 1.], [0., 1.]]) - >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int) + >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=np.int_) array([[ True, False, False], [False, True, False], [False, False, True]]) - >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int) + >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=np.int_) array([[0, 1, 2], [1, 2, 3], [2, 3, 4]]) diff --git a/numpy/_core/numerictypes.py b/numpy/_core/numerictypes.py index 265ad4f8eb1f..70570c8c0f39 100644 --- a/numpy/_core/numerictypes.py +++ b/numpy/_core/numerictypes.py @@ -220,7 +220,7 @@ def issctype(rep): Strings are also a scalar type: - >>> issctype(np.dtype('str')) + >>> issctype(np.dtype(np.str_)) True """ diff --git a/numpy/ctypeslib/_ctypeslib.py b/numpy/ctypeslib/_ctypeslib.py index 9255603cd5d0..a18e11810418 100644 --- a/numpy/ctypeslib/_ctypeslib.py +++ 
b/numpy/ctypeslib/_ctypeslib.py @@ -502,7 +502,7 @@ def as_ctypes_type(dtype): -------- Converting a simple dtype: - >>> dt = np.dtype('int8') + >>> dt = np.dtype(np.int8) >>> ctype = np.ctypeslib.as_ctypes_type(dt) >>> ctype diff --git a/numpy/doc/ufuncs.py b/numpy/doc/ufuncs.py index 7324168e1dc8..f97e9ff3f80c 100644 --- a/numpy/doc/ufuncs.py +++ b/numpy/doc/ufuncs.py @@ -113,7 +113,7 @@ output argument is used, the ufunc still returns a reference to the result. >>> x = np.arange(2) - >>> np.add(np.arange(2, dtype=float), np.arange(2, dtype=float), x, + >>> np.add(np.arange(2, dtype=np.float64), np.arange(2, dtype=np.float64), x, ... casting='unsafe') array([0, 2]) >>> x diff --git a/numpy/fft/_helper.py b/numpy/fft/_helper.py index 77adeac9207f..b3598534bcdf 100644 --- a/numpy/fft/_helper.py +++ b/numpy/fft/_helper.py @@ -156,7 +156,7 @@ def fftfreq(n, d=1.0, device=None): Examples -------- >>> import numpy as np - >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float) + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=np.float64) >>> fourier = np.fft.fft(signal) >>> n = signal.size >>> timestep = 0.1 @@ -215,7 +215,7 @@ def rfftfreq(n, d=1.0, device=None): Examples -------- >>> import numpy as np - >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float) + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=np.float64) >>> fourier = np.fft.rfft(signal) >>> n = signal.size >>> sample_rate = 100 diff --git a/numpy/fft/_pocketfft.py b/numpy/fft/_pocketfft.py index 1ce7c76b8636..93f96c9a10b6 100644 --- a/numpy/fft/_pocketfft.py +++ b/numpy/fft/_pocketfft.py @@ -302,7 +302,7 @@ def ifft(a, n=None, axis=-1, norm=None, out=None): >>> import matplotlib.pyplot as plt >>> t = np.arange(400) - >>> n = np.zeros((400,), dtype=complex) + >>> n = np.zeros((400,), dtype=np.complex128) >>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,))) >>> s = np.fft.ifft(n) >>> plt.plot(t, s.real, label='real') @@ -1005,7 +1005,7 @@ def ifftn(a, s=None, axes=None, norm=None, out=None): Create and plot an image with band-limited frequency content: >>> import matplotlib.pyplot as plt - >>> n = np.zeros((200,200), dtype=complex) + >>> n = np.zeros((200,200), dtype=np.complex128) >>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20))) >>> im = np.fft.ifftn(n).real >>> plt.imshow(im) diff --git a/numpy/lib/_array_utils_impl.py b/numpy/lib/_array_utils_impl.py index c3996e1f2b92..25d78c1eb6a6 100644 --- a/numpy/lib/_array_utils_impl.py +++ b/numpy/lib/_array_utils_impl.py @@ -30,7 +30,7 @@ def byte_bounds(a): Examples -------- >>> import numpy as np - >>> I = np.eye(2, dtype='f'); I.dtype + >>> I = np.eye(2, dtype=np.float32); I.dtype dtype('float32') >>> low, high = np.lib.array_utils.byte_bounds(I) >>> high - low == I.size*I.itemsize diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index e407598d62c6..3e0005079104 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -654,7 +654,7 @@ class ndarray is returned. ``asarray_chkfinite`` is identical to ``asarray``. >>> a = [1, 2] - >>> np.asarray_chkfinite(a, dtype=float) + >>> np.asarray_chkfinite(a, dtype=np.float64) array([1., 2.]) Raises ValueError if array_like contains Nans or Infs. @@ -5239,7 +5239,7 @@ def delete(arr, obj, axis=None): Often it is preferable to use a boolean mask. 
For example: >>> arr = np.arange(12) + 1 - >>> mask = np.ones(len(arr), dtype=bool) + >>> mask = np.ones(len(arr), dtype=np.bool) >>> mask[[0,2,4]] = False >>> result = arr[mask,...] @@ -5622,7 +5622,7 @@ def append(arr, values, axis=None): the array at index 0 has 2 dimension(s) and the array at index 1 has 1 dimension(s) - >>> a = np.array([1, 2], dtype=int) + >>> a = np.array([1, 2], dtype=np.int_) >>> c = np.append(a, []) >>> c array([1., 2.]) diff --git a/numpy/lib/_index_tricks_impl.py b/numpy/lib/_index_tricks_impl.py index 40fef85b1853..5ee60d0fceaf 100644 --- a/numpy/lib/_index_tricks_impl.py +++ b/numpy/lib/_index_tricks_impl.py @@ -977,7 +977,7 @@ def diag_indices(n, ndim=2): And use it to set the diagonal of an array of zeros to 1: - >>> a = np.zeros((2, 2, 2), dtype=int) + >>> a = np.zeros((2, 2, 2), dtype=np.int_) >>> a[d3] = 1 >>> a array([[[1, 0], diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index 72e746f19eba..0e135917cd52 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -1343,7 +1343,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, single escaped character: >>> s = StringIO('"Hello, my name is ""Monty""!"') - >>> np.loadtxt(s, dtype="U", delimiter=",", quotechar='"') + >>> np.loadtxt(s, dtype=np.str_, delimiter=",", quotechar='"') array('Hello, my name is "Monty"!', dtype='>> import numpy as np - >>> np.eye(2, dtype=int) + >>> np.eye(2, dtype=np.int_) array([[1, 0], [0, 1]]) >>> np.eye(3, k=1) @@ -418,7 +418,7 @@ def tri(N, M=None, k=0, dtype=float, *, like=None): Examples -------- >>> import numpy as np - >>> np.tri(3, 5, 2, dtype=int) + >>> np.tri(3, 5, 2, dtype=np.int_) array([[1, 1, 1, 0, 0], [1, 1, 1, 1, 0], [1, 1, 1, 1, 1]]) diff --git a/numpy/lib/_type_check_impl.py b/numpy/lib/_type_check_impl.py index 584088cdc21d..37192043513f 100644 --- a/numpy/lib/_type_check_impl.py +++ b/numpy/lib/_type_check_impl.py @@ -240,26 +240,26 @@ def isreal(x): Examples -------- >>> import numpy as np - >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=complex) + >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=np.complex128) >>> np.isreal(a) array([False, True, True, True, True, False]) The function does not work on string arrays. - >>> a = np.array([2j, "a"], dtype="U") + >>> a = np.array([2j, "a"], dtype=np.str_) >>> np.isreal(a) # Warns about non-elementwise comparison False - Returns True for all elements in input array of ``dtype=object`` even if + Returns True for all elements in input array of ``dtype=np.object_`` even if any of the elements is complex. 
- >>> a = np.array([1, "2", 3+4j], dtype=object) + >>> a = np.array([1, "2", 3+4j], dtype=np.object_) >>> np.isreal(a) array([ True, True, True]) isreal should not be used with object arrays - >>> a = np.array([1+2j, 2+1j], dtype=object) + >>> a = np.array([1+2j, 2+1j], dtype=np.object_) >>> np.isreal(a) array([ True, True]) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index 6884c9b7ef8d..00e485346577 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -938,7 +938,7 @@ def outer(x1, x2, /): An example using a "vector" of letters: - >>> x = np.array(['a', 'b', 'c'], dtype=object) + >>> x = np.array(['a', 'b', 'c'], dtype=np.object_) >>> np.linalg.outer(x, [1, 2, 3]) array([['a', 'aa', 'aaa'], ['b', 'bb', 'bbb'], @@ -1764,7 +1764,7 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): ((9, 9), (6,), (6, 6)) >>> np.allclose(a, np.dot(U[:, :6] * S, Vh)) True - >>> smat = np.zeros((9, 6), dtype=complex) + >>> smat = np.zeros((9, 6), dtype=np.complex128) >>> smat[:6, :6] = np.diag(S) >>> np.allclose(a, np.dot(U, np.dot(smat, Vh))) True diff --git a/numpy/ma/core.py b/numpy/ma/core.py index e0ead317d9f6..f6d7b0a8c8e5 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -2277,7 +2277,7 @@ def masked_object(x, value, copy=True, shrink=True): -------- >>> import numpy as np >>> import numpy.ma as ma - >>> food = np.array(['green_eggs', 'ham'], dtype=object) + >>> food = np.array(['green_eggs', 'ham'], dtype=np.object_) >>> # don't eat spoiled food >>> eat = ma.masked_object(food, 'green_eggs') >>> eat @@ -2286,7 +2286,7 @@ def masked_object(x, value, copy=True, shrink=True): fill_value='green_eggs', dtype=object) >>> # plain ol` ham is boring - >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object) + >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=np.object_) >>> eat = ma.masked_object(fresh_food, 'green_eggs') >>> eat masked_array(data=['cheese', 'ham', 'pineapple'], @@ -2403,7 +2403,7 @@ def masked_invalid(a, copy=True): -------- >>> import numpy as np >>> import numpy.ma as ma - >>> a = np.arange(5, dtype=float) + >>> a = np.arange(5, dtype=np.float64) >>> a[2] = np.nan >>> a[3] = np.inf >>> a diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 7387d4f9beb7..769c38fdc900 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -1078,7 +1078,7 @@ def mask_rowcols(a, axis=None): Examples -------- >>> import numpy as np - >>> a = np.zeros((3, 3), dtype=int) + >>> a = np.zeros((3, 3), dtype=np.int_) >>> a[1, 1] = 1 >>> a array([[0, 0, 0], @@ -1135,7 +1135,7 @@ def mask_rows(a, axis=np._NoValue): Examples -------- >>> import numpy as np - >>> a = np.zeros((3, 3), dtype=int) + >>> a = np.zeros((3, 3), dtype=np.int_) >>> a[1, 1] = 1 >>> a array([[0, 0, 0], @@ -1186,7 +1186,7 @@ def mask_cols(a, axis=np._NoValue): Examples -------- >>> import numpy as np - >>> a = np.zeros((3, 3), dtype=int) + >>> a = np.zeros((3, 3), dtype=np.int_) >>> a[1, 1] = 1 >>> a array([[0, 0, 0], diff --git a/numpy/matlib.py b/numpy/matlib.py index f27d503cdbca..151cb6b369b4 100644 --- a/numpy/matlib.py +++ b/numpy/matlib.py @@ -56,7 +56,7 @@ def empty(shape, dtype=None, order='C'): >>> np.matlib.empty((2, 2)) # filled with random data matrix([[ 6.76425276e-320, 9.79033856e-307], # random [ 7.39337286e-309, 3.22135945e-309]]) - >>> np.matlib.empty((2, 2), dtype=int) + >>> np.matlib.empty((2, 2), dtype=np.int_) matrix([[ 6600475, 0], # random [ 6586976, 22740995]]) @@ -177,7 +177,7 @@ def identity(n, dtype=None): Examples -------- >>> 
import numpy.matlib
-    >>> np.matlib.identity(3, dtype=int)
+    >>> np.matlib.identity(3, dtype=np.int_)
     matrix([[1, 0, 0],
             [0, 1, 0],
             [0, 0, 1]])
@@ -222,7 +222,7 @@ def eye(n, M=None, k=0, dtype=float, order='C'):
     Examples
     --------
     >>> import numpy.matlib
-    >>> np.matlib.eye(3, k=1, dtype=float)
+    >>> np.matlib.eye(3, k=1, dtype=np.float64)
     matrix([[0., 1., 0.],
             [0., 0., 1.],
             [0., 0., 0.]])
diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py
index 39b9a935500e..d706e09ed947 100644
--- a/numpy/matrixlib/defmatrix.py
+++ b/numpy/matrixlib/defmatrix.py
@@ -315,11 +315,11 @@ def sum(self, axis=None, dtype=None, out=None):
         >>> x.sum(axis=1)
         matrix([[3],
                 [7]])
-        >>> x.sum(axis=1, dtype='float')
+        >>> x.sum(axis=1, dtype=np.float64)
         matrix([[3.],
                 [7.]])
-        >>> out = np.zeros((2, 1), dtype='float')
-        >>> x.sum(axis=1, dtype='float', out=np.asmatrix(out))
+        >>> out = np.zeros((2, 1), dtype=np.float64)
+        >>> x.sum(axis=1, dtype=np.float64, out=np.asmatrix(out))
         matrix([[3.],
                 [7.]])
diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx
index ed9b6a3d12a8..35794bdfca6a 100644
--- a/numpy/random/_generator.pyx
+++ b/numpy/random/_generator.pyx
@@ -793,7 +793,7 @@ cdef class Generator:
         than the optimized sampler even if each element of ``p`` is 1 / len(a).

         ``p`` must sum to 1 when cast to ``float64``. To ensure this, you may wish
-        to normalize using ``p = p / np.sum(p, dtype=float)``.
+        to normalize using ``p = p / np.sum(p, dtype=np.float64)``.

         When passing ``a`` as an integer type and ``size`` is not specified, the
         return type is a native Python ``int``.

From 7bd9dbc31c51b5e710428a24a73245edc32ccf68 Mon Sep 17 00:00:00 2001
From: Sayed Awad
Date: Mon, 8 Dec 2025 22:44:20 +0200
Subject: [PATCH 0981/1018] BENCH: Add `--cpu-affinity` option to set CPU
 affinity for running the benchmark (#30393)

---
 .spin/cmds.py | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/.spin/cmds.py b/.spin/cmds.py
index e522108a5107..5c4d5e90f6d7 100644
--- a/.spin/cmds.py
+++ b/.spin/cmds.py
@@ -397,6 +397,11 @@ def lint(ctx, fix):
     help="The factor above or below which a benchmark result is "
     "considered reportable. This is passed on to the asv command."
 )
+@click.option(
+    '--cpu-affinity', default=None, multiple=False,
+    help="Set CPU affinity for running the benchmark, in format: 0 or 0,1,2 or 0-3. "
+    "Default: not set"
+)
 @click.argument(
     'commits', metavar='',
     required=False,
     nargs=-1
 )
 @meson.build_dir_option
 @click.pass_context
-def bench(ctx, tests, compare, verbose, quick, factor, commits, build_dir):
+def bench(ctx, tests, compare, verbose, quick, factor, cpu_affinity,
+          commits, build_dir):
     """🏋 Run benchmarks.
\b @@ -447,6 +453,9 @@ def bench(ctx, tests, compare, verbose, quick, factor, commits, build_dir): if quick: bench_args = ['--quick'] + bench_args + if cpu_affinity: + bench_args += ['--cpu-affinity', cpu_affinity] + if not compare: # No comparison requested; we build and benchmark the current version From b70fc7747edacb5644d32f8d1d22259d24a3167b Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 9 Dec 2025 02:08:34 +0100 Subject: [PATCH 0982/1018] TYP: simplified char code literals, ``ctypeslib``, and ``dtype.__new__`` (#30396) * TYP: simplfied dtype char code literals * TYP: remove duplicate and redundant uses of char code literals * TYP: rewrite `ctypeslib` in terms of fixed-width types * TYP: `dtype.__new__` cleanup and complex `ctypes` support * TYP: add missing `type ` keyword --- numpy/__init__.pyi | 119 ++++++---- numpy/_core/numeric.pyi | 10 +- numpy/_typing/__init__.py | 11 - numpy/_typing/_char_codes.py | 227 +++++++------------ numpy/ctypeslib/_ctypeslib.pyi | 187 ++++++++------- numpy/random/_generator.pyi | 6 +- numpy/random/mtrand.pyi | 8 +- numpy/typing/tests/data/reveal/ctypeslib.pyi | 103 +++++---- numpy/typing/tests/data/reveal/dtype.pyi | 8 +- 9 files changed, 313 insertions(+), 366 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index b8ede6c2a85a..aad324f54c4d 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3,6 +3,7 @@ import builtins import ctypes as ct import datetime as dt import inspect +import sys from abc import abstractmethod from types import EllipsisType, ModuleType, TracebackType, MappingProxyType, GenericAlias from decimal import Decimal @@ -87,24 +88,15 @@ from numpy._typing import ( # type: ignore[deprecated] _Float64Codes, _Complex64Codes, _Complex128Codes, - _ByteCodes, - _ShortCodes, _IntCCodes, _IntPCodes, _LongCodes, _LongLongCodes, - _UByteCodes, - _UShortCodes, _UIntCCodes, _UIntPCodes, _ULongCodes, _ULongLongCodes, - _HalfCodes, - _SingleCodes, - _DoubleCodes, _LongDoubleCodes, - _CSingleCodes, - _CDoubleCodes, _CLongDoubleCodes, _DT64Codes, _TD64Codes, @@ -1105,7 +1097,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @overload def __new__( cls, - dtype: type[float64 | ct.c_double] | _Float64Codes | _DoubleCodes | None, + dtype: type[float64 | ct.c_double] | _Float64Codes | None, align: builtins.bool = False, copy: builtins.bool = False, *, @@ -1151,7 +1143,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 copy: builtins.bool = False, *, metadata: dict[str, Any] = ..., - ) -> dtype[int_ | np.bool]: ... + ) -> dtype[int_ | Any]: ... @overload def __new__( cls, @@ -1160,7 +1152,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 copy: builtins.bool = False, *, metadata: dict[str, Any] = ..., - ) -> dtype[float64 | int_ | np.bool]: ... + ) -> dtype[float64 | Any]: ... @overload def __new__( cls, @@ -1169,7 +1161,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 copy: builtins.bool = False, *, metadata: dict[str, Any] = ..., - ) -> dtype[complex128 | float64 | int_ | np.bool]: ... + ) -> dtype[complex128 | Any]: ... 
@overload def __new__( cls, @@ -1219,7 +1211,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @overload def __new__( cls, - dtype: _UInt8Codes | _UByteCodes | type[ct.c_uint8 | ct.c_ubyte], + dtype: _UInt8Codes | type[ct.c_uint8], align: builtins.bool = False, copy: builtins.bool = False, *, @@ -1228,7 +1220,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @overload def __new__( cls, - dtype: _UInt16Codes | _UShortCodes | type[ct.c_uint16 | ct.c_ushort], + dtype: _UInt16Codes | type[ct.c_uint16 | ct.c_ushort], align: builtins.bool = False, copy: builtins.bool = False, *, @@ -1269,13 +1261,13 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 copy: builtins.bool = False, *, metadata: dict[builtins.str, Any] = ..., - ) -> dtype[ulong]: ... + ) -> dtype[uint32 | uint64]: ... # `signedinteger` string-based representations and ctypes @overload def __new__( cls, - dtype: _Int8Codes | _ByteCodes | type[ct.c_int8 | ct.c_byte], + dtype: _Int8Codes | type[ct.c_int8], align: builtins.bool = False, copy: builtins.bool = False, *, @@ -1284,7 +1276,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @overload def __new__( cls, - dtype: _Int16Codes | _ShortCodes | type[ct.c_int16 | ct.c_short], + dtype: _Int16Codes | type[ct.c_int16 | ct.c_short], align: builtins.bool = False, copy: builtins.bool = False, *, @@ -1325,13 +1317,13 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 copy: builtins.bool = False, *, metadata: dict[builtins.str, Any] = ..., - ) -> dtype[long]: ... + ) -> dtype[int32 | int64]: ... # `floating` string-based representations and ctypes @overload def __new__( cls, - dtype: _Float16Codes | _HalfCodes, + dtype: _Float16Codes, align: builtins.bool = False, copy: builtins.bool = False, *, @@ -1340,7 +1332,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @overload def __new__( cls, - dtype: _Float32Codes | _SingleCodes, + dtype: _Float32Codes | type[ct.c_float], align: builtins.bool = False, copy: builtins.bool = False, *, @@ -1357,34 +1349,63 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 metadata: dict[builtins.str, Any] = ..., ) -> dtype[longdouble]: ... - # `complexfloating` string-based representations - @overload - def __new__( - cls, - dtype: _Complex64Codes | _CSingleCodes, - align: builtins.bool = False, - copy: builtins.bool = False, - *, - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[complex64]: ... - @overload - def __new__( - cls, - dtype: _Complex128Codes | _CDoubleCodes, - align: builtins.bool = False, - copy: builtins.bool = False, - *, - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[complex128]: ... - @overload - def __new__( - cls, - dtype: _CLongDoubleCodes, - align: builtins.bool = False, - copy: builtins.bool = False, - *, - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[clongdouble]: ... + # `complexfloating` string-based representations and ctypes + if sys.version_info >= (3, 14) and sys.platform != "win32": + @overload + def __new__( + cls, + dtype: _Complex64Codes | type[ct.c_float_complex], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[complex64]: ... + @overload + def __new__( + cls, + dtype: _Complex128Codes | type[ct.c_double_complex], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[complex128]: ... 
+ @overload + def __new__( + cls, + dtype: _CLongDoubleCodes | type[ct.c_longdouble_complex], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[clongdouble]: ... + else: + @overload + def __new__( + cls, + dtype: _Complex64Codes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[complex64]: ... + @overload + def __new__( + cls, + dtype: _Complex128Codes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[complex128]: ... + @overload + def __new__( + cls, + dtype: _CLongDoubleCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[clongdouble]: ... # Miscellaneous string-based representations and ctypes @overload diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index f7399ef44856..ddf0bfa31977 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -36,13 +36,11 @@ from numpy._typing import ( _ArrayLikeInt_co, _ArrayLikeNumber_co, _ArrayLikeTD64_co, - _CDoubleCodes, _Complex128Codes, - _DoubleCodes, _DTypeLike, _DTypeLikeBool, _Float64Codes, - _IntCodes, + _IntPCodes, _NestedSequence, _NumberLike_co, _ScalarLike_co, @@ -660,9 +658,9 @@ type _ArrayLike1DNumber_co = _SupportsArray[np.dtype[_Number_co]] | Sequence[com type _ArrayLike1DTD64_co = _ArrayLike1D[_TD64_co] type _ArrayLike1DObject_co = _ArrayLike1D[np.object_] -type _DTypeLikeInt = type[int] | _IntCodes -type _DTypeLikeFloat64 = type[float] | _Float64Codes | _DoubleCodes -type _DTypeLikeComplex128 = type[complex] | _Complex128Codes | _CDoubleCodes +type _DTypeLikeInt = type[int] | _IntPCodes +type _DTypeLikeFloat64 = type[float] | _Float64Codes +type _DTypeLikeComplex128 = type[complex] | _Complex128Codes ### diff --git a/numpy/_typing/__init__.py b/numpy/_typing/__init__.py index d6b6105e79b8..4de797bd4e37 100644 --- a/numpy/_typing/__init__.py +++ b/numpy/_typing/__init__.py @@ -29,16 +29,12 @@ # from ._char_codes import ( _BoolCodes as _BoolCodes, - _ByteCodes as _ByteCodes, _BytesCodes as _BytesCodes, - _CDoubleCodes as _CDoubleCodes, _CharacterCodes as _CharacterCodes, _CLongDoubleCodes as _CLongDoubleCodes, _Complex64Codes as _Complex64Codes, _Complex128Codes as _Complex128Codes, _ComplexFloatingCodes as _ComplexFloatingCodes, - _CSingleCodes as _CSingleCodes, - _DoubleCodes as _DoubleCodes, _DT64Codes as _DT64Codes, _FlexibleCodes as _FlexibleCodes, _Float16Codes as _Float16Codes, @@ -46,14 +42,12 @@ _Float64Codes as _Float64Codes, _FloatingCodes as _FloatingCodes, _GenericCodes as _GenericCodes, - _HalfCodes as _HalfCodes, _InexactCodes as _InexactCodes, _Int8Codes as _Int8Codes, _Int16Codes as _Int16Codes, _Int32Codes as _Int32Codes, _Int64Codes as _Int64Codes, _IntCCodes as _IntCCodes, - _IntCodes as _IntCodes, _IntegerCodes as _IntegerCodes, _IntPCodes as _IntPCodes, _LongCodes as _LongCodes, @@ -61,24 +55,19 @@ _LongLongCodes as _LongLongCodes, _NumberCodes as _NumberCodes, _ObjectCodes as _ObjectCodes, - _ShortCodes as _ShortCodes, _SignedIntegerCodes as _SignedIntegerCodes, - _SingleCodes as _SingleCodes, _StrCodes as _StrCodes, _StringCodes as _StringCodes, _TD64Codes as _TD64Codes, - _UByteCodes as _UByteCodes, _UInt8Codes as _UInt8Codes, _UInt16Codes as _UInt16Codes, _UInt32Codes as _UInt32Codes, _UInt64Codes as _UInt64Codes, _UIntCCodes as _UIntCCodes, - _UIntCodes as _UIntCodes, _UIntPCodes as 
_UIntPCodes, _ULongCodes as _ULongCodes, _ULongLongCodes as _ULongLongCodes, _UnsignedIntegerCodes as _UnsignedIntegerCodes, - _UShortCodes as _UShortCodes, _VoidCodes as _VoidCodes, ) diff --git a/numpy/_typing/_char_codes.py b/numpy/_typing/_char_codes.py index 6d1e06dc894c..518f9b473e4a 100644 --- a/numpy/_typing/_char_codes.py +++ b/numpy/_typing/_char_codes.py @@ -1,51 +1,43 @@ from typing import Literal -type _BoolCodes = Literal[ - "bool", "bool_", - "?", "|?", "=?", "?", - "b1", "|b1", "=b1", "b1", -] # fmt: skip - -type _UInt8Codes = Literal["uint8", "u1", "|u1", "=u1", "u1"] -type _UInt16Codes = Literal["uint16", "u2", "|u2", "=u2", "u2"] -type _UInt32Codes = Literal["uint32", "u4", "|u4", "=u4", "u4"] -type _UInt64Codes = Literal["uint64", "u8", "|u8", "=u8", "u8"] +type _BoolCodes = Literal["bool", "bool_", "?", "b1", "|b1", "=b1", "b1"] -type _Int8Codes = Literal["int8", "i1", "|i1", "=i1", "i1"] -type _Int16Codes = Literal["int16", "i2", "|i2", "=i2", "i2"] +type _Int8Codes = Literal["int8", "byte", "b", "i1", "|i1", "=i1", "i1"] +type _Int16Codes = Literal["int16", "short", "h", "i2", "|i2", "=i2", "i2"] type _Int32Codes = Literal["int32", "i4", "|i4", "=i4", "i4"] type _Int64Codes = Literal["int64", "i8", "|i8", "=i8", "i8"] -type _Float16Codes = Literal["float16", "f2", "|f2", "=f2", "f2"] -type _Float32Codes = Literal["float32", "f4", "|f4", "=f4", "f4"] -type _Float64Codes = Literal["float64", "f8", "|f8", "=f8", "f8"] - -type _Complex64Codes = Literal["complex64", "c8", "|c8", "=c8", "c8"] -type _Complex128Codes = Literal["complex128", "c16", "|c16", "=c16", "c16"] +type _UInt8Codes = Literal["uint8", "ubyte", "B", "u1", "|u1", "=u1", "u1"] +type _UInt16Codes = Literal["uint16", "ushort", "H", "u2", "|u2", "=u2", "u2"] +type _UInt32Codes = Literal["uint32", "u4", "|u4", "=u4", "u4"] +type _UInt64Codes = Literal["uint64", "u8", "|u8", "=u8", "u8"] -type _ByteCodes = Literal["byte", "b", "|b", "=b", "b"] -type _ShortCodes = Literal["short", "h", "|h", "=h", "h"] type _IntCCodes = Literal["intc", "i", "|i", "=i", "i"] -type _IntPCodes = Literal["intp", "int", "int_", "n", "|n", "=n", "n"] type _LongCodes = Literal["long", "l", "|l", "=l", "l"] -type _IntCodes = _IntPCodes type _LongLongCodes = Literal["longlong", "q", "|q", "=q", "q"] +type _IntPCodes = Literal["intp", "int", "int_", "n", "|n", "=n", "n"] -type _UByteCodes = Literal["ubyte", "B", "|B", "=B", "B"] -type _UShortCodes = Literal["ushort", "H", "|H", "=H", "H"] type _UIntCCodes = Literal["uintc", "I", "|I", "=I", "I"] -type _UIntPCodes = Literal["uintp", "uint", "N", "|N", "=N", "N"] type _ULongCodes = Literal["ulong", "L", "|L", "=L", "L"] -type _UIntCodes = _UIntPCodes type _ULongLongCodes = Literal["ulonglong", "Q", "|Q", "=Q", "Q"] +type _UIntPCodes = Literal["uintp", "uint", "N", "|N", "=N", "N"] + +type _Float16Codes = Literal["float16", "half", "e", "f2", "|f2", "=f2", "f2"] +type _Float32Codes = Literal["float32", "single", "f", "f4", "|f4", "=f4", "f4"] +type _Float64Codes = Literal[ + "float64", "float", "double", "d", "f8", "|f8", "=f8", "f8" +] -type _HalfCodes = Literal["half", "e", "|e", "=e", "e"] -type _SingleCodes = Literal["single", "f", "|f", "=f", "f"] -type _DoubleCodes = Literal["double", "float", "d", "|d", "=d", "d"] type _LongDoubleCodes = Literal["longdouble", "g", "|g", "=g", "g"] -type _CSingleCodes = Literal["csingle", "F", "|F", "=F", "F"] -type _CDoubleCodes = Literal["cdouble", "complex", "D", "|D", "=D", "D"] +type _Complex64Codes = Literal[ + "complex64", "csingle", "F", "c8", 
"|c8", "=c8", "c8" +] + +type _Complex128Codes = Literal[ + "complex128", "complex", "cdouble", "D", "c16", "|c16", "=c16", "c16" +] + type _CLongDoubleCodes = Literal["clongdouble", "G", "|G", "=G", "G"] type _StrCodes = Literal["str", "str_", "unicode", "U", "|U", "=U", "U"] @@ -53,97 +45,57 @@ type _VoidCodes = Literal["void", "V", "|V", "=V", "V"] type _ObjectCodes = Literal["object", "object_", "O", "|O", "=O", "O"] +# datetime64 +type _DT64Codes_any = Literal["datetime64", "M", "M8", "|M8", "=M8", "M8"] +type _DT64Codes_date = Literal[ + "datetime64[Y]", "M8[Y]", "|M8[Y]", "=M8[Y]", "M8[Y]", + "datetime64[M]", "M8[M]", "|M8[M]", "=M8[M]", "M8[M]", + "datetime64[W]", "M8[W]", "|M8[W]", "=M8[W]", "M8[W]", + "datetime64[D]", "M8[D]", "|M8[D]", "=M8[D]", "M8[D]", +] # fmt: skip +type _DT64Codes_datetime = Literal[ + "datetime64[h]", "M8[h]", "|M8[h]", "=M8[h]", "M8[h]", + "datetime64[m]", "M8[m]", "|M8[m]", "=M8[m]", "M8[m]", + "datetime64[s]", "M8[s]", "|M8[s]", "=M8[s]", "M8[s]", + "datetime64[ms]", "M8[ms]", "|M8[ms]", "=M8[ms]", "M8[ms]", + "datetime64[us]", "M8[us]", "|M8[us]", "=M8[us]", "M8[us]", + "datetime64[μs]", "M8[μs]", "|M8[μs]", "=M8[μs]", "M8[μs]", +] # fmt: skip +type _DT64Codes_int = Literal[ + "datetime64[ns]", "M8[ns]", "|M8[ns]", "=M8[ns]", "M8[ns]", + "datetime64[ps]", "M8[ps]", "|M8[ps]", "=M8[ps]", "M8[ps]", + "datetime64[fs]", "M8[fs]", "|M8[fs]", "=M8[fs]", "M8[fs]", + "datetime64[as]", "M8[as]", "|M8[as]", "=M8[as]", "M8[as]", +] # fmt: skip type _DT64Codes = Literal[ - "datetime64", "|datetime64", "=datetime64", - "datetime64", - "datetime64[Y]", "|datetime64[Y]", "=datetime64[Y]", - "datetime64[Y]", - "datetime64[M]", "|datetime64[M]", "=datetime64[M]", - "datetime64[M]", - "datetime64[W]", "|datetime64[W]", "=datetime64[W]", - "datetime64[W]", - "datetime64[D]", "|datetime64[D]", "=datetime64[D]", - "datetime64[D]", - "datetime64[h]", "|datetime64[h]", "=datetime64[h]", - "datetime64[h]", - "datetime64[m]", "|datetime64[m]", "=datetime64[m]", - "datetime64[m]", - "datetime64[s]", "|datetime64[s]", "=datetime64[s]", - "datetime64[s]", - "datetime64[ms]", "|datetime64[ms]", "=datetime64[ms]", - "datetime64[ms]", - "datetime64[us]", "|datetime64[us]", "=datetime64[us]", - "datetime64[us]", - "datetime64[ns]", "|datetime64[ns]", "=datetime64[ns]", - "datetime64[ns]", - "datetime64[ps]", "|datetime64[ps]", "=datetime64[ps]", - "datetime64[ps]", - "datetime64[fs]", "|datetime64[fs]", "=datetime64[fs]", - "datetime64[fs]", - "datetime64[as]", "|datetime64[as]", "=datetime64[as]", - "datetime64[as]", - "M", "|M", "=M", "M", - "M8", "|M8", "=M8", "M8", - "M8[Y]", "|M8[Y]", "=M8[Y]", "M8[Y]", - "M8[M]", "|M8[M]", "=M8[M]", "M8[M]", - "M8[W]", "|M8[W]", "=M8[W]", "M8[W]", - "M8[D]", "|M8[D]", "=M8[D]", "M8[D]", - "M8[h]", "|M8[h]", "=M8[h]", "M8[h]", - "M8[m]", "|M8[m]", "=M8[m]", "M8[m]", - "M8[s]", "|M8[s]", "=M8[s]", "M8[s]", - "M8[ms]", "|M8[ms]", "=M8[ms]", "M8[ms]", - "M8[us]", "|M8[us]", "=M8[us]", "M8[us]", - "M8[ns]", "|M8[ns]", "=M8[ns]", "M8[ns]", - "M8[ps]", "|M8[ps]", "=M8[ps]", "M8[ps]", - "M8[fs]", "|M8[fs]", "=M8[fs]", "M8[fs]", - "M8[as]", "|M8[as]", "=M8[as]", "M8[as]", -] -type _TD64Codes = Literal[ - "timedelta64", "|timedelta64", "=timedelta64", - "timedelta64", - "timedelta64[Y]", "|timedelta64[Y]", "=timedelta64[Y]", - "timedelta64[Y]", - "timedelta64[M]", "|timedelta64[M]", "=timedelta64[M]", - "timedelta64[M]", - "timedelta64[W]", "|timedelta64[W]", "=timedelta64[W]", - "timedelta64[W]", - "timedelta64[D]", "|timedelta64[D]", "=timedelta64[D]", 
- "timedelta64[D]", - "timedelta64[h]", "|timedelta64[h]", "=timedelta64[h]", - "timedelta64[h]", - "timedelta64[m]", "|timedelta64[m]", "=timedelta64[m]", - "timedelta64[m]", - "timedelta64[s]", "|timedelta64[s]", "=timedelta64[s]", - "timedelta64[s]", - "timedelta64[ms]", "|timedelta64[ms]", "=timedelta64[ms]", - "timedelta64[ms]", - "timedelta64[us]", "|timedelta64[us]", "=timedelta64[us]", - "timedelta64[us]", - "timedelta64[ns]", "|timedelta64[ns]", "=timedelta64[ns]", - "timedelta64[ns]", - "timedelta64[ps]", "|timedelta64[ps]", "=timedelta64[ps]", - "timedelta64[ps]", - "timedelta64[fs]", "|timedelta64[fs]", "=timedelta64[fs]", - "timedelta64[fs]", - "timedelta64[as]", "|timedelta64[as]", "=timedelta64[as]", - "timedelta64[as]", - "m", "|m", "=m", "m", - "m8", "|m8", "=m8", "m8", - "m8[Y]", "|m8[Y]", "=m8[Y]", "m8[Y]", - "m8[M]", "|m8[M]", "=m8[M]", "m8[M]", - "m8[W]", "|m8[W]", "=m8[W]", "m8[W]", - "m8[D]", "|m8[D]", "=m8[D]", "m8[D]", - "m8[h]", "|m8[h]", "=m8[h]", "m8[h]", - "m8[m]", "|m8[m]", "=m8[m]", "m8[m]", - "m8[s]", "|m8[s]", "=m8[s]", "m8[s]", - "m8[ms]", "|m8[ms]", "=m8[ms]", "m8[ms]", - "m8[us]", "|m8[us]", "=m8[us]", "m8[us]", - "m8[ns]", "|m8[ns]", "=m8[ns]", "m8[ns]", - "m8[ps]", "|m8[ps]", "=m8[ps]", "m8[ps]", - "m8[fs]", "|m8[fs]", "=m8[fs]", "m8[fs]", - "m8[as]", "|m8[as]", "=m8[as]", "m8[as]", + _DT64Codes_any, + _DT64Codes_date, + _DT64Codes_datetime, + _DT64Codes_int, ] +# timedelta64 +type _TD64Codes_any = Literal["timedelta64", "m", "m8", "|m8", "=m8", "m8"] +type _TD64Codes_int = Literal[ + "timedelta64[Y]", "m8[Y]", "|m8[Y]", "=m8[Y]", "m8[Y]", + "timedelta64[M]", "m8[M]", "|m8[M]", "=m8[M]", "m8[M]", + "timedelta64[ns]", "m8[ns]", "|m8[ns]", "=m8[ns]", "m8[ns]", + "timedelta64[ps]", "m8[ps]", "|m8[ps]", "=m8[ps]", "m8[ps]", + "timedelta64[fs]", "m8[fs]", "|m8[fs]", "=m8[fs]", "m8[fs]", + "timedelta64[as]", "m8[as]", "|m8[as]", "=m8[as]", "m8[as]", +] # fmt: skip +type _TD64Codes_timedelta = Literal[ + "timedelta64[W]", "m8[W]", "|m8[W]", "=m8[W]", "m8[W]", + "timedelta64[D]", "m8[D]", "|m8[D]", "=m8[D]", "m8[D]", + "timedelta64[h]", "m8[h]", "|m8[h]", "=m8[h]", "m8[h]", + "timedelta64[m]", "m8[m]", "|m8[m]", "=m8[m]", "m8[m]", + "timedelta64[s]", "m8[s]", "|m8[s]", "=m8[s]", "m8[s]", + "timedelta64[ms]", "m8[ms]", "|m8[ms]", "=m8[ms]", "m8[ms]", + "timedelta64[us]", "m8[us]", "|m8[us]", "=m8[us]", "m8[us]", + "timedelta64[μs]", "m8[μs]", "|m8[μs]", "=m8[μs]", "m8[μs]", +] # fmt: skip +type _TD64Codes = Literal[_TD64Codes_any, _TD64Codes_int, _TD64Codes_timedelta] + # NOTE: `StringDType' has no scalar type, and therefore has no name that can # be passed to the `dtype` constructor type _StringCodes = Literal["T", "|T", "=T", "T"] @@ -154,52 +106,43 @@ # Another advantage of nesting, is that they always have a "flat" # `Literal.__args__`, which is a tuple of *literally* all its literal values. 
-type _UnsignedIntegerCodes = Literal[ - _UInt8Codes, - _UInt16Codes, - _UInt32Codes, - _UInt64Codes, - _UIntCodes, - _UByteCodes, - _UShortCodes, - _UIntCCodes, - _ULongCodes, - _ULongLongCodes, -] type _SignedIntegerCodes = Literal[ _Int8Codes, _Int16Codes, _Int32Codes, _Int64Codes, - _IntCodes, - _ByteCodes, - _ShortCodes, _IntCCodes, _LongCodes, _LongLongCodes, + _IntPCodes, +] +type _UnsignedIntegerCodes = Literal[ + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _UIntCCodes, + _ULongCodes, + _ULongLongCodes, + _UIntPCodes, ] type _FloatingCodes = Literal[ _Float16Codes, _Float32Codes, _Float64Codes, - _HalfCodes, - _SingleCodes, - _DoubleCodes, - _LongDoubleCodes + _LongDoubleCodes, ] type _ComplexFloatingCodes = Literal[ _Complex64Codes, _Complex128Codes, - _CSingleCodes, - _CDoubleCodes, _CLongDoubleCodes, ] type _IntegerCodes = Literal[_UnsignedIntegerCodes, _SignedIntegerCodes] type _InexactCodes = Literal[_FloatingCodes, _ComplexFloatingCodes] type _NumberCodes = Literal[_IntegerCodes, _InexactCodes] -type _CharacterCodes = Literal[_StrCodes, _BytesCodes] -type _FlexibleCodes = Literal[_VoidCodes, _CharacterCodes] +type _CharacterCodes = Literal[_BytesCodes, _StrCodes] +type _FlexibleCodes = Literal[_CharacterCodes, _VoidCodes] type _GenericCodes = Literal[ _BoolCodes, diff --git a/numpy/ctypeslib/_ctypeslib.pyi b/numpy/ctypeslib/_ctypeslib.pyi index 1d4780aeb422..2e88d7d9464f 100644 --- a/numpy/ctypeslib/_ctypeslib.pyi +++ b/numpy/ctypeslib/_ctypeslib.pyi @@ -1,28 +1,9 @@ -# NOTE: Numpy's mypy plugin is used for importing the correct -# platform-specific `ctypes._SimpleCData[int]` sub-type -import ctypes +import ctypes as ct from _typeshed import StrOrBytesPath from collections.abc import Iterable, Sequence -from ctypes import c_int64 as _c_intp from typing import Any, ClassVar, Literal as L, overload import numpy as np -from numpy import ( - byte, - double, - intc, - long, - longdouble, - longlong, - short, - single, - ubyte, - uintc, - ulong, - ulonglong, - ushort, - void, -) from numpy._core._internal import _ctypes from numpy._core.multiarray import flagsobj from numpy._typing import ( @@ -31,21 +12,27 @@ from numpy._typing import ( _AnyShape, _ArrayLike, _BoolCodes, - _ByteCodes, - _DoubleCodes, _DTypeLike, + _Float32Codes, + _Float64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, _IntCCodes, + _IntPCodes, _LongCodes, _LongDoubleCodes, _LongLongCodes, _ShapeLike, - _ShortCodes, - _SingleCodes, - _UByteCodes, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, _UIntCCodes, + _UIntPCodes, _ULongCodes, _ULongLongCodes, - _UShortCodes, _VoidDTypeLike, ) @@ -61,7 +48,7 @@ type _FlagsKind = L[ ] # TODO: Add a shape type parameter -class _ndptr[OptionalDTypeT: np.dtype | None](ctypes.c_void_p): +class _ndptr[OptionalDTypeT: np.dtype | None](ct.c_void_p): # In practice these 4 classvars are defined in the dynamic class # returned by `ndpointer` _dtype_: OptionalDTypeT = ... @@ -83,10 +70,11 @@ class _concrete_ndptr[DTypeT: np.dtype](_ndptr[DTypeT]): @property def contents(self) -> np.ndarray[_AnyShape, DTypeT]: ... -def load_library(libname: StrOrBytesPath, loader_path: StrOrBytesPath) -> ctypes.CDLL: ... +def load_library(libname: StrOrBytesPath, loader_path: StrOrBytesPath) -> ct.CDLL: ... 
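For orientation, the `_ndptr` classes and `ndpointer` stubs in this file type the classic pattern of declaring C function argument types from array specs. A hypothetical sketch (the library and function names are invented, not from the patch):

    import ctypes as ct
    import numpy as np

    # Pointer type accepting only 1-D, C-contiguous float64 arrays:
    f64_ptr = np.ctypeslib.ndpointer(dtype=np.float64, ndim=1, flags="C_CONTIGUOUS")

    # lib = np.ctypeslib.load_library("libexample", ".")  # hypothetical .so
    # lib.process.argtypes = [f64_ptr, ct.c_size_t]       # hypothetical C function
    # lib.process(np.zeros(4), 4)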
-c_intp = _c_intp +c_intp = ct.c_int64 # most platforms are 64-bit nowadays +# @overload def ndpointer( dtype: None = None, @@ -125,105 +113,110 @@ def ndpointer( flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, ) -> type[_ndptr[np.dtype]]: ... -@overload -def as_ctypes_type(dtype: _BoolCodes | _DTypeLike[np.bool] | type[ctypes.c_bool]) -> type[ctypes.c_bool]: ... -@overload -def as_ctypes_type(dtype: _ByteCodes | _DTypeLike[byte] | type[ctypes.c_byte]) -> type[ctypes.c_byte]: ... -@overload -def as_ctypes_type(dtype: _ShortCodes | _DTypeLike[short] | type[ctypes.c_short]) -> type[ctypes.c_short]: ... -@overload -def as_ctypes_type(dtype: _IntCCodes | _DTypeLike[intc] | type[ctypes.c_int]) -> type[ctypes.c_int]: ... -@overload -def as_ctypes_type(dtype: _LongCodes | _DTypeLike[long] | type[ctypes.c_long]) -> type[ctypes.c_long]: ... -@overload -def as_ctypes_type(dtype: type[int]) -> type[c_intp]: ... -@overload -def as_ctypes_type(dtype: _LongLongCodes | _DTypeLike[longlong] | type[ctypes.c_longlong]) -> type[ctypes.c_longlong]: ... -@overload -def as_ctypes_type(dtype: _UByteCodes | _DTypeLike[ubyte] | type[ctypes.c_ubyte]) -> type[ctypes.c_ubyte]: ... -@overload -def as_ctypes_type(dtype: _UShortCodes | _DTypeLike[ushort] | type[ctypes.c_ushort]) -> type[ctypes.c_ushort]: ... -@overload -def as_ctypes_type(dtype: _UIntCCodes | _DTypeLike[uintc] | type[ctypes.c_uint]) -> type[ctypes.c_uint]: ... -@overload -def as_ctypes_type(dtype: _ULongCodes | _DTypeLike[ulong] | type[ctypes.c_ulong]) -> type[ctypes.c_ulong]: ... -@overload -def as_ctypes_type(dtype: _ULongLongCodes | _DTypeLike[ulonglong] | type[ctypes.c_ulonglong]) -> type[ctypes.c_ulonglong]: ... -@overload -def as_ctypes_type(dtype: _SingleCodes | _DTypeLike[single] | type[ctypes.c_float]) -> type[ctypes.c_float]: ... -@overload -def as_ctypes_type(dtype: _DoubleCodes | _DTypeLike[double] | type[float | ctypes.c_double]) -> type[ctypes.c_double]: ... -@overload -def as_ctypes_type(dtype: _LongDoubleCodes | _DTypeLike[longdouble] | type[ctypes.c_longdouble]) -> type[ctypes.c_longdouble]: ... -@overload -def as_ctypes_type(dtype: _VoidDTypeLike) -> type[Any]: ... # `ctypes.Union` or `ctypes.Structure` -@overload +# +@overload # bool +def as_ctypes_type(dtype: _BoolCodes | _DTypeLike[np.bool] | type[ct.c_bool]) -> type[ct.c_bool]: ... +@overload # int8 +def as_ctypes_type(dtype: _Int8Codes | _DTypeLike[np.int8] | type[ct.c_int8]) -> type[ct.c_int8]: ... +@overload # int16 +def as_ctypes_type(dtype: _Int16Codes | _DTypeLike[np.int16] | type[ct.c_int16]) -> type[ct.c_int16]: ... +@overload # int32 +def as_ctypes_type(dtype: _Int32Codes | _DTypeLike[np.int32] | type[ct.c_int32]) -> type[ct.c_int32]: ... +@overload # int64 +def as_ctypes_type(dtype: _Int64Codes | _DTypeLike[np.int64] | type[ct.c_int64]) -> type[ct.c_int64]: ... +@overload # intc +def as_ctypes_type(dtype: _IntCCodes | type[ct.c_int]) -> type[ct.c_int]: ... +@overload # long +def as_ctypes_type(dtype: _LongCodes | type[ct.c_long]) -> type[ct.c_long]: ... +@overload # longlong +def as_ctypes_type(dtype: _LongLongCodes | type[ct.c_longlong]) -> type[ct.c_longlong]: ... +@overload # intp +def as_ctypes_type(dtype: _IntPCodes | type[ct.c_ssize_t] | type[int]) -> type[ct.c_ssize_t]: ... +@overload # uint8 +def as_ctypes_type(dtype: _UInt8Codes | _DTypeLike[np.uint8] | type[ct.c_uint8]) -> type[ct.c_uint8]: ... +@overload # uint16 +def as_ctypes_type(dtype: _UInt16Codes | _DTypeLike[np.uint16] | type[ct.c_uint16]) -> type[ct.c_uint16]: ... 
+@overload # uint32 +def as_ctypes_type(dtype: _UInt32Codes | _DTypeLike[np.uint32] | type[ct.c_uint32]) -> type[ct.c_uint32]: ... +@overload # uint64 +def as_ctypes_type(dtype: _UInt64Codes | _DTypeLike[np.uint64] | type[ct.c_uint64]) -> type[ct.c_uint64]: ... +@overload # uintc +def as_ctypes_type(dtype: _UIntCCodes | type[ct.c_uint]) -> type[ct.c_uint]: ... +@overload # ulong +def as_ctypes_type(dtype: _ULongCodes | type[ct.c_ulong]) -> type[ct.c_ulong]: ... +@overload # ulonglong +def as_ctypes_type(dtype: _ULongLongCodes | type[ct.c_ulonglong]) -> type[ct.c_ulonglong]: ... +@overload # uintp +def as_ctypes_type(dtype: _UIntPCodes | type[ct.c_size_t]) -> type[ct.c_size_t]: ... +@overload # float32 +def as_ctypes_type(dtype: _Float32Codes | _DTypeLike[np.float32] | type[ct.c_float]) -> type[ct.c_float]: ... +@overload # float64 +def as_ctypes_type(dtype: _Float64Codes | _DTypeLike[np.float64] | type[float | ct.c_double]) -> type[ct.c_double]: ... +@overload # longdouble +def as_ctypes_type(dtype: _LongDoubleCodes | _DTypeLike[np.longdouble] | type[ct.c_longdouble]) -> type[ct.c_longdouble]: ... +@overload # void +def as_ctypes_type(dtype: _VoidDTypeLike) -> type[Any]: ... # `ct.Union` or `ct.Structure` +@overload # fallback def as_ctypes_type(dtype: str) -> type[Any]: ... +# @overload -def as_array(obj: ctypes._PointerLike, shape: Sequence[int]) -> NDArray[Any]: ... +def as_array(obj: ct._PointerLike, shape: Sequence[int]) -> NDArray[Any]: ... @overload def as_array[ScalarT: np.generic](obj: _ArrayLike[ScalarT], shape: _ShapeLike | None = None) -> NDArray[ScalarT]: ... @overload def as_array(obj: object, shape: _ShapeLike | None = None) -> NDArray[Any]: ... +# @overload -def as_ctypes(obj: np.bool) -> ctypes.c_bool: ... -@overload -def as_ctypes(obj: byte) -> ctypes.c_byte: ... -@overload -def as_ctypes(obj: short) -> ctypes.c_short: ... -@overload -def as_ctypes(obj: intc) -> ctypes.c_int: ... -@overload -def as_ctypes(obj: long) -> ctypes.c_long: ... +def as_ctypes(obj: np.bool) -> ct.c_bool: ... @overload -def as_ctypes(obj: longlong) -> ctypes.c_longlong: ... # type: ignore[overload-cannot-match] +def as_ctypes(obj: np.int8) -> ct.c_int8: ... @overload -def as_ctypes(obj: ubyte) -> ctypes.c_ubyte: ... +def as_ctypes(obj: np.int16) -> ct.c_int16: ... @overload -def as_ctypes(obj: ushort) -> ctypes.c_ushort: ... +def as_ctypes(obj: np.int32) -> ct.c_int32: ... @overload -def as_ctypes(obj: uintc) -> ctypes.c_uint: ... +def as_ctypes(obj: np.int64) -> ct.c_int64: ... @overload -def as_ctypes(obj: ulong) -> ctypes.c_ulong: ... +def as_ctypes(obj: np.uint8) -> ct.c_uint8: ... @overload -def as_ctypes(obj: ulonglong) -> ctypes.c_ulonglong: ... # type: ignore[overload-cannot-match] +def as_ctypes(obj: np.uint16) -> ct.c_uint16: ... @overload -def as_ctypes(obj: single) -> ctypes.c_float: ... +def as_ctypes(obj: np.uint32) -> ct.c_uint32: ... @overload -def as_ctypes(obj: double) -> ctypes.c_double: ... +def as_ctypes(obj: np.uint64) -> ct.c_uint64: ... @overload -def as_ctypes(obj: longdouble) -> ctypes.c_longdouble: ... +def as_ctypes(obj: np.float32) -> ct.c_float: ... @overload -def as_ctypes(obj: void) -> Any: ... # `ctypes.Union` or `ctypes.Structure` +def as_ctypes(obj: np.float64) -> ct.c_double: ... @overload -def as_ctypes(obj: NDArray[np.bool]) -> ctypes.Array[ctypes.c_bool]: ... +def as_ctypes(obj: np.longdouble) -> ct.c_longdouble: ... @overload -def as_ctypes(obj: NDArray[byte]) -> ctypes.Array[ctypes.c_byte]: ... +def as_ctypes(obj: np.void) -> Any: ... 
# `ct.Union` or `ct.Structure` @overload -def as_ctypes(obj: NDArray[short]) -> ctypes.Array[ctypes.c_short]: ... +def as_ctypes(obj: NDArray[np.bool]) -> ct.Array[ct.c_bool]: ... @overload -def as_ctypes(obj: NDArray[intc]) -> ctypes.Array[ctypes.c_int]: ... +def as_ctypes(obj: NDArray[np.int8]) -> ct.Array[ct.c_int8]: ... @overload -def as_ctypes(obj: NDArray[long]) -> ctypes.Array[ctypes.c_long]: ... +def as_ctypes(obj: NDArray[np.int16]) -> ct.Array[ct.c_int16]: ... @overload -def as_ctypes(obj: NDArray[longlong]) -> ctypes.Array[ctypes.c_longlong]: ... # type: ignore[overload-cannot-match] +def as_ctypes(obj: NDArray[np.int32]) -> ct.Array[ct.c_int32]: ... @overload -def as_ctypes(obj: NDArray[ubyte]) -> ctypes.Array[ctypes.c_ubyte]: ... +def as_ctypes(obj: NDArray[np.int64]) -> ct.Array[ct.c_int64]: ... @overload -def as_ctypes(obj: NDArray[ushort]) -> ctypes.Array[ctypes.c_ushort]: ... +def as_ctypes(obj: NDArray[np.uint8]) -> ct.Array[ct.c_uint8]: ... @overload -def as_ctypes(obj: NDArray[uintc]) -> ctypes.Array[ctypes.c_uint]: ... +def as_ctypes(obj: NDArray[np.uint16]) -> ct.Array[ct.c_uint16]: ... @overload -def as_ctypes(obj: NDArray[ulong]) -> ctypes.Array[ctypes.c_ulong]: ... +def as_ctypes(obj: NDArray[np.uint32]) -> ct.Array[ct.c_uint32]: ... @overload -def as_ctypes(obj: NDArray[ulonglong]) -> ctypes.Array[ctypes.c_ulonglong]: ... # type: ignore[overload-cannot-match] +def as_ctypes(obj: NDArray[np.uint64]) -> ct.Array[ct.c_uint64]: ... @overload -def as_ctypes(obj: NDArray[single]) -> ctypes.Array[ctypes.c_float]: ... +def as_ctypes(obj: NDArray[np.float32]) -> ct.Array[ct.c_float]: ... @overload -def as_ctypes(obj: NDArray[double]) -> ctypes.Array[ctypes.c_double]: ... +def as_ctypes(obj: NDArray[np.float64]) -> ct.Array[ct.c_double]: ... @overload -def as_ctypes(obj: NDArray[longdouble]) -> ctypes.Array[ctypes.c_longdouble]: ... +def as_ctypes(obj: NDArray[np.longdouble]) -> ct.Array[ct.c_longdouble]: ... @overload -def as_ctypes(obj: NDArray[void]) -> ctypes.Array[Any]: ... # `ctypes.Union` or `ctypes.Structure` +def as_ctypes(obj: NDArray[np.void]) -> ct.Array[Any]: ... 
# `ct.Union` or `ct.Structure` diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index f3f911b98ce7..f3fb9bb7baf5 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -9,7 +9,6 @@ from numpy._typing import ( _ArrayLike, _ArrayLikeFloat_co, _ArrayLikeInt_co, - _DoubleCodes, _DTypeLike, _Float32Codes, _Float64Codes, @@ -17,7 +16,6 @@ from numpy._typing import ( _Int64Codes, _NestedSequence, _ShapeLike, - _SingleCodes, ) from .bit_generator import BitGenerator, SeedSequence @@ -27,8 +25,8 @@ type _ArrayF32 = NDArray[np.float32] type _ArrayF64 = NDArray[np.float64] type _DTypeLikeI64 = _DTypeLike[np.int64] | _Int64Codes -type _DTypeLikeF32 = _DTypeLike[np.float32] | _Float32Codes | _SingleCodes -type _DTypeLikeF64 = type[float] | _DTypeLike[np.float64] | _Float64Codes | _DoubleCodes +type _DTypeLikeF32 = _DTypeLike[np.float32] | _Float32Codes +type _DTypeLikeF64 = type[float] | _DTypeLike[np.float64] | _Float64Codes # we use `str` to avoid type-checker performance issues because of the many `Literal` variants type _DTypeLikeFloat = type[float] | _DTypeLike[np.float32 | np.float64] | str diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index c2cbb17aa98d..7a654971f19b 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -29,7 +29,7 @@ from numpy._typing import ( _Int16Codes, _Int32Codes, _Int64Codes, - _IntCodes, + _IntPCodes, _LongCodes, _ShapeLike, _SupportsDType, @@ -37,7 +37,7 @@ from numpy._typing import ( _UInt16Codes, _UInt32Codes, _UInt64Codes, - _UIntCodes, + _UIntPCodes, _ULongCodes, ) from numpy.random.bit_generator import BitGenerator @@ -209,7 +209,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., # noqa: E501 + dtype: dtype[uint] | type[uint] | _UIntPCodes | _SupportsDType[dtype[uint]] = ..., # noqa: E501 ) -> uint: ... @overload def randint( @@ -257,7 +257,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: dtype[int_] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., # noqa: E501 + dtype: dtype[int_] | type[int_] | _IntPCodes | _SupportsDType[dtype[int_]] = ..., # noqa: E501 ) -> int_: ... 
@overload def randint( diff --git a/numpy/typing/tests/data/reveal/ctypeslib.pyi b/numpy/typing/tests/data/reveal/ctypeslib.pyi index 0564d725cf62..e3558925e4d0 100644 --- a/numpy/typing/tests/data/reveal/ctypeslib.pyi +++ b/numpy/typing/tests/data/reveal/ctypeslib.pyi @@ -6,19 +6,17 @@ import numpy.typing as npt from numpy import ctypeslib AR_bool: npt.NDArray[np.bool] -AR_ubyte: npt.NDArray[np.ubyte] -AR_ushort: npt.NDArray[np.ushort] -AR_uintc: npt.NDArray[np.uintc] -AR_ulong: npt.NDArray[np.ulong] -AR_ulonglong: npt.NDArray[np.ulonglong] -AR_byte: npt.NDArray[np.byte] -AR_short: npt.NDArray[np.short] -AR_intc: npt.NDArray[np.intc] -AR_long: npt.NDArray[np.long] -AR_longlong: npt.NDArray[np.longlong] -AR_single: npt.NDArray[np.single] -AR_double: npt.NDArray[np.double] -AR_longdouble: npt.NDArray[np.longdouble] +AR_i8: npt.NDArray[np.int8] +AR_u8: npt.NDArray[np.uint8] +AR_i16: npt.NDArray[np.int16] +AR_u16: npt.NDArray[np.uint16] +AR_i32: npt.NDArray[np.int32] +AR_u32: npt.NDArray[np.uint32] +AR_i64: npt.NDArray[np.int64] +AR_u64: npt.NDArray[np.uint64] +AR_f32: npt.NDArray[np.float32] +AR_f64: npt.NDArray[np.float64] +AR_f80: npt.NDArray[np.longdouble] AR_void: npt.NDArray[np.void] pointer: ct._Pointer[Any] @@ -33,49 +31,56 @@ assert_type(np.ctypeslib.ndpointer(np.int64, shape=(10, 3)), type[ctypeslib._con assert_type(np.ctypeslib.ndpointer(int, shape=(1,)), type[np.ctypeslib._concrete_ndptr[np.dtype]]) assert_type(np.ctypeslib.as_ctypes_type(np.bool), type[ct.c_bool]) -assert_type(np.ctypeslib.as_ctypes_type(np.ubyte), type[ct.c_ubyte]) -assert_type(np.ctypeslib.as_ctypes_type(np.ushort), type[ct.c_ushort]) -assert_type(np.ctypeslib.as_ctypes_type(np.uintc), type[ct.c_uint]) -assert_type(np.ctypeslib.as_ctypes_type(np.byte), type[ct.c_byte]) -assert_type(np.ctypeslib.as_ctypes_type(np.short), type[ct.c_short]) -assert_type(np.ctypeslib.as_ctypes_type(np.intc), type[ct.c_int]) -assert_type(np.ctypeslib.as_ctypes_type(np.single), type[ct.c_float]) -assert_type(np.ctypeslib.as_ctypes_type(np.double), type[ct.c_double]) -assert_type(np.ctypeslib.as_ctypes_type(ct.c_double), type[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes_type(np.int8), type[ct.c_int8]) +assert_type(np.ctypeslib.as_ctypes_type(np.uint8), type[ct.c_uint8]) +assert_type(np.ctypeslib.as_ctypes_type(np.int16), type[ct.c_int16]) +assert_type(np.ctypeslib.as_ctypes_type(np.uint16), type[ct.c_uint16]) +assert_type(np.ctypeslib.as_ctypes_type(np.int32), type[ct.c_int32]) +assert_type(np.ctypeslib.as_ctypes_type(np.uint32), type[ct.c_uint32]) +assert_type(np.ctypeslib.as_ctypes_type(np.int64), type[ct.c_int64]) +assert_type(np.ctypeslib.as_ctypes_type(np.uint64), type[ct.c_uint64]) +assert_type(np.ctypeslib.as_ctypes_type(np.float32), type[ct.c_float]) +assert_type(np.ctypeslib.as_ctypes_type(np.float64), type[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes_type(np.longdouble), type[ct.c_longdouble]) +assert_type(np.ctypeslib.as_ctypes_type("?"), type[ct.c_bool]) +assert_type(np.ctypeslib.as_ctypes_type("intp"), type[ct.c_ssize_t]) assert_type(np.ctypeslib.as_ctypes_type("q"), type[ct.c_longlong]) +assert_type(np.ctypeslib.as_ctypes_type("i8"), type[ct.c_int64]) +assert_type(np.ctypeslib.as_ctypes_type("f8"), type[ct.c_double]) assert_type(np.ctypeslib.as_ctypes_type([("i8", np.int64), ("f8", np.float64)]), type[Any]) -assert_type(np.ctypeslib.as_ctypes_type("i8"), type[Any]) -assert_type(np.ctypeslib.as_ctypes_type("f8"), type[Any]) assert_type(np.ctypeslib.as_ctypes(AR_bool.take(0)), ct.c_bool) 
-assert_type(np.ctypeslib.as_ctypes(AR_ubyte.take(0)), ct.c_ubyte) -assert_type(np.ctypeslib.as_ctypes(AR_ushort.take(0)), ct.c_ushort) -assert_type(np.ctypeslib.as_ctypes(AR_uintc.take(0)), ct.c_uint) +assert_type(np.ctypeslib.as_ctypes(AR_u8.take(0)), ct.c_uint8) +assert_type(np.ctypeslib.as_ctypes(AR_u16.take(0)), ct.c_uint16) +assert_type(np.ctypeslib.as_ctypes(AR_u32.take(0)), ct.c_uint32) -assert_type(np.ctypeslib.as_ctypes(AR_byte.take(0)), ct.c_byte) -assert_type(np.ctypeslib.as_ctypes(AR_short.take(0)), ct.c_short) -assert_type(np.ctypeslib.as_ctypes(AR_intc.take(0)), ct.c_int) -assert_type(np.ctypeslib.as_ctypes(AR_single.take(0)), ct.c_float) -assert_type(np.ctypeslib.as_ctypes(AR_double.take(0)), ct.c_double) -assert_type(np.ctypeslib.as_ctypes(AR_void.take(0)), Any) +assert_type(np.ctypeslib.as_ctypes(np.bool()), ct.c_bool) +assert_type(np.ctypeslib.as_ctypes(np.int8()), ct.c_int8) +assert_type(np.ctypeslib.as_ctypes(np.uint8()), ct.c_uint8) +assert_type(np.ctypeslib.as_ctypes(np.int16()), ct.c_int16) +assert_type(np.ctypeslib.as_ctypes(np.uint16()), ct.c_uint16) +assert_type(np.ctypeslib.as_ctypes(np.int32()), ct.c_int32) +assert_type(np.ctypeslib.as_ctypes(np.uint32()), ct.c_uint32) +assert_type(np.ctypeslib.as_ctypes(np.int64()), ct.c_int64) +assert_type(np.ctypeslib.as_ctypes(np.uint64()), ct.c_uint64) +assert_type(np.ctypeslib.as_ctypes(np.float32()), ct.c_float) +assert_type(np.ctypeslib.as_ctypes(np.float64()), ct.c_double) +assert_type(np.ctypeslib.as_ctypes(np.longdouble()), ct.c_longdouble) +assert_type(np.ctypeslib.as_ctypes(np.void(b"")), Any) assert_type(np.ctypeslib.as_ctypes(AR_bool), ct.Array[ct.c_bool]) -assert_type(np.ctypeslib.as_ctypes(AR_ubyte), ct.Array[ct.c_ubyte]) -assert_type(np.ctypeslib.as_ctypes(AR_ushort), ct.Array[ct.c_ushort]) -assert_type(np.ctypeslib.as_ctypes(AR_uintc), ct.Array[ct.c_uint]) -assert_type(np.ctypeslib.as_ctypes(AR_byte), ct.Array[ct.c_byte]) -assert_type(np.ctypeslib.as_ctypes(AR_short), ct.Array[ct.c_short]) -assert_type(np.ctypeslib.as_ctypes(AR_intc), ct.Array[ct.c_int]) -assert_type(np.ctypeslib.as_ctypes(AR_single), ct.Array[ct.c_float]) -assert_type(np.ctypeslib.as_ctypes(AR_double), ct.Array[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes(AR_i8), ct.Array[ct.c_int8]) +assert_type(np.ctypeslib.as_ctypes(AR_u8), ct.Array[ct.c_uint8]) +assert_type(np.ctypeslib.as_ctypes(AR_i16), ct.Array[ct.c_int16]) +assert_type(np.ctypeslib.as_ctypes(AR_u16), ct.Array[ct.c_uint16]) +assert_type(np.ctypeslib.as_ctypes(AR_i32), ct.Array[ct.c_int32]) +assert_type(np.ctypeslib.as_ctypes(AR_u32), ct.Array[ct.c_uint32]) +assert_type(np.ctypeslib.as_ctypes(AR_i64), ct.Array[ct.c_int64]) +assert_type(np.ctypeslib.as_ctypes(AR_u64), ct.Array[ct.c_uint64]) +assert_type(np.ctypeslib.as_ctypes(AR_f32), ct.Array[ct.c_float]) +assert_type(np.ctypeslib.as_ctypes(AR_f64), ct.Array[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes(AR_f80), ct.Array[ct.c_longdouble]) assert_type(np.ctypeslib.as_ctypes(AR_void), ct.Array[Any]) -assert_type(np.ctypeslib.as_array(AR_ubyte), npt.NDArray[np.ubyte]) +assert_type(np.ctypeslib.as_array(AR_u8), npt.NDArray[np.ubyte]) assert_type(np.ctypeslib.as_array(1), npt.NDArray[Any]) assert_type(np.ctypeslib.as_array(pointer), npt.NDArray[Any]) - -assert_type(np.ctypeslib.as_ctypes_type(np.long), type[ct.c_long]) -assert_type(np.ctypeslib.as_ctypes_type(np.ulong), type[ct.c_ulong]) -assert_type(np.ctypeslib.as_ctypes(AR_ulong), ct.Array[ct.c_ulong]) -assert_type(np.ctypeslib.as_ctypes(AR_long), ct.Array[ct.c_long]) 
-assert_type(np.ctypeslib.as_ctypes(AR_long.take(0)), ct.c_long) -assert_type(np.ctypeslib.as_ctypes(AR_ulong.take(0)), ct.c_ulong) diff --git a/numpy/typing/tests/data/reveal/dtype.pyi b/numpy/typing/tests/data/reveal/dtype.pyi index f8f939d4609f..48e9d54b7951 100644 --- a/numpy/typing/tests/data/reveal/dtype.pyi +++ b/numpy/typing/tests/data/reveal/dtype.pyi @@ -44,9 +44,9 @@ assert_type(np.dtype("str"), np.dtype[np.str_]) # Python types assert_type(np.dtype(bool), np.dtype[np.bool]) -assert_type(np.dtype(int), np.dtype[np.int_ | np.bool]) -assert_type(np.dtype(float), np.dtype[np.float64 | np.int_ | np.bool]) -assert_type(np.dtype(complex), np.dtype[np.complex128 | np.float64 | np.int_ | np.bool]) +assert_type(np.dtype(int), np.dtype[np.int_ | Any]) +assert_type(np.dtype(float), np.dtype[np.float64 | Any]) +assert_type(np.dtype(complex), np.dtype[np.complex128 | Any]) assert_type(np.dtype(py_object), np.dtype[np.object_]) assert_type(np.dtype(str), np.dtype[np.str_]) assert_type(np.dtype(bytes), np.dtype[np.bytes_]) @@ -64,7 +64,7 @@ assert_type(np.dtype(Fraction), np.dtype[np.object_]) assert_type(np.dtype("?"), np.dtype[np.bool]) assert_type(np.dtype("|b1"), np.dtype[np.bool]) assert_type(np.dtype("u1"), np.dtype[np.uint8]) -assert_type(np.dtype("l"), np.dtype[np.long]) +assert_type(np.dtype("l"), np.dtype[np.int32 | np.int64]) assert_type(np.dtype("longlong"), np.dtype[np.longlong]) assert_type(np.dtype(">g"), np.dtype[np.longdouble]) assert_type(np.dtype(cs_integer), np.dtype[np.integer]) From a48fb8562cce7e564f50f84f3717621b08edf490 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Tue, 9 Dec 2025 04:20:42 -0500 Subject: [PATCH 0983/1018] BUG: Add missing return status check of NpyIter_EnableExternalLoop(). (#30404) --- numpy/_core/src/multiarray/nditer_pywrap.c | 5 ++++- numpy/_core/tests/test_nditer.py | 7 +++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/nditer_pywrap.c b/numpy/_core/src/multiarray/nditer_pywrap.c index a0e1b09a584c..992bc013af3a 100644 --- a/numpy/_core/src/multiarray/nditer_pywrap.c +++ b/numpy/_core/src/multiarray/nditer_pywrap.c @@ -1423,7 +1423,10 @@ npyiter_enable_external_loop( return NULL; } - NpyIter_EnableExternalLoop(self->iter); + if (NpyIter_EnableExternalLoop(self->iter) != NPY_SUCCEED) { + return NULL; + } + /* EnableExternalLoop invalidates cached values */ if (npyiter_cache_values(self) < 0) { return NULL; diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index 943c25cdaa13..d2fc69a03b5f 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -3206,6 +3206,13 @@ def test_iter_too_large_with_multiindex(): with assert_raises(ValueError): _multiarray_tests.test_nditer_too_large(arrays, i * 2 + 1, mode) + +def test_invalid_call_of_enable_external_loop(): + with pytest.raises(ValueError, + match='Iterator flag EXTERNAL_LOOP cannot be used'): + np.nditer(([[1], [2]], [3, 4]), ['multi_index']).enable_external_loop() + + def test_writebacks(): a = np.arange(6, dtype='f4') au = a.byteswap() From 3a3dc5e5d12da326d518ae080bd977a8874108de Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 9 Dec 2025 15:33:50 +0100 Subject: [PATCH 0984/1018] TYP: `linalg`: improved `*inv` and `*vals` return types --- numpy/linalg/_linalg.pyi | 229 +++++++++++++--------- numpy/typing/tests/data/fail/linalg.pyi | 19 +- numpy/typing/tests/data/reveal/linalg.pyi | 32 +-- 3 files changed, 163 insertions(+), 117 deletions(-) diff --git a/numpy/linalg/_linalg.pyi 
b/numpy/linalg/_linalg.pyi index 6cbd3b43a07a..00135bc5d15b 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -11,7 +11,6 @@ from typing import ( import numpy as np from numpy import ( - complex128, complexfloating, float64, floating, @@ -23,7 +22,7 @@ from numpy import ( vecdot, ) from numpy._core.fromnumeric import matrix_transpose -from numpy._globals import _NoValueType +from numpy._globals import _NoValue, _NoValueType from numpy._typing import ( ArrayLike, DTypeLike, @@ -33,7 +32,6 @@ from numpy._typing import ( _ArrayLikeComplex_co, _ArrayLikeFloat_co, _ArrayLikeInt_co, - _ArrayLikeNumber_co, _ArrayLikeObject_co, _ArrayLikeTD64_co, _ArrayLikeUInt_co, @@ -78,34 +76,49 @@ __all__ = [ ] type _ModeKind = L["reduced", "complete", "r", "raw"] +type _SideKind = L["L", "U", "l", "u"] + +type _inexact64 = np.float32 | np.complex64 + +# anything that safe-casts (from floating) into float64/complex128 +type _ToArrayF64 = _ArrayLike[np.float64 | np.integer | np.bool] | _NestedSequence[float] +type _ToArrayC128 = _ArrayLike[np.complex128 | np.float64 | np.integer | np.bool] | _NestedSequence[complex] +# the invariant `list` type avoids overlap with `_IntoArrayF64` +type _AsArrayC128 = _ArrayLike[np.complex128] | list[complex] | _NestedSequence[list[complex]] ### fortran_int = np.intc +# TODO: generic class EigResult(NamedTuple): eigenvalues: NDArray[Any] eigenvectors: NDArray[Any] +# TODO: generic class EighResult(NamedTuple): eigenvalues: NDArray[Any] eigenvectors: NDArray[Any] +# TODO: generic class QRResult(NamedTuple): Q: NDArray[Any] R: NDArray[Any] +# TODO: generic class SlogdetResult(NamedTuple): # TODO: `sign` and `logabsdet` are scalars for input 2D arrays and # a `(x.ndim - 2)`` dimensional arrays otherwise sign: Any logabsdet: Any +# TODO: generic class SVDResult(NamedTuple): U: NDArray[Any] S: NDArray[Any] Vh: NDArray[Any] +# TODO: narrow return types @overload def tensorsolve( a: _ArrayLikeInt_co, @@ -125,6 +138,7 @@ def tensorsolve( axes: Iterable[int] | None = None, ) -> NDArray[complexfloating]: ... +# TODO: narrow return types @overload def solve( a: _ArrayLikeInt_co, @@ -141,28 +155,69 @@ def solve( b: _ArrayLikeComplex_co, ) -> NDArray[complexfloating]: ... -@overload -def tensorinv( - a: _ArrayLikeInt_co, - ind: int = 2, -) -> NDArray[float64]: ... -@overload -def tensorinv( - a: _ArrayLikeFloat_co, - ind: int = 2, -) -> NDArray[floating]: ... -@overload -def tensorinv( +# keep in sync with the other inverse functions and cholesky +@overload # inexact32 array-likes +def tensorinv[ScalarT: _inexact64](a: _ArrayLike[ScalarT], ind: int = 2) -> NDArray[ScalarT]: ... +@overload # +float64 array-likes +def tensorinv(a: _ToArrayF64, ind: int = 2) -> NDArray[np.float64]: ... +@overload # ~complex128 array-likes +def tensorinv(a: _AsArrayC128, ind: int = 2) -> NDArray[np.complex128]: ... +@overload # fallback +def tensorinv(a: _ArrayLikeComplex_co, ind: int = 2) -> np.ndarray: ... + +# keep in sync with the other inverse functions and cholesky +@overload # inexact32 array-likes +def inv[ScalarT: _inexact64](a: _ArrayLike[ScalarT]) -> NDArray[ScalarT]: ... +@overload # +float64 array-likes +def inv(a: _ToArrayF64) -> NDArray[np.float64]: ... +@overload # ~complex128 array-likes +def inv(a: _AsArrayC128) -> NDArray[np.complex128]: ... +@overload # fallback +def inv(a: _ArrayLikeComplex_co) -> np.ndarray: ... 
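To make the intent of the new inverse-family overloads concrete, here is a reveal-style sketch of how calls should now resolve (consistent with the reveal tests updated later in this patch):

    import numpy as np

    np.linalg.inv(np.eye(2, dtype=np.float32))   # NDArray[float32]: inexact32 preserved
    np.linalg.inv([[1, 2], [3, 4]])              # NDArray[float64]: +float64 input
    np.linalg.inv([[1 + 0j, 0j], [0j, 1 + 0j]])  # NDArray[complex128]: ~complex128 input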
+ +# keep in sync with the other inverse functions and cholesky +@overload # inexact32 array-likes +def pinv[ScalarT: _inexact64]( + a: _ArrayLike[ScalarT], + rcond: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | _NoValueType = _NoValue, +) -> NDArray[ScalarT]: ... +@overload # +float64 array-likes +def pinv( + a: _ToArrayF64, + rcond: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | _NoValueType = _NoValue, +) -> NDArray[np.float64]: ... +@overload # ~complex128 array-likes +def pinv( + a: _AsArrayC128, + rcond: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | _NoValueType = _NoValue, +) -> NDArray[np.complex128]: ... +@overload # fallback +def pinv( a: _ArrayLikeComplex_co, - ind: int = 2, -) -> NDArray[complexfloating]: ... + rcond: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | _NoValueType = _NoValue, +) -> NDArray[Any]: ... -@overload -def inv(a: _ArrayLikeInt_co) -> NDArray[float64]: ... -@overload -def inv(a: _ArrayLikeFloat_co) -> NDArray[floating]: ... -@overload -def inv(a: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... +# keep in sync with the inverse functions +@overload # inexact32 array-likes +def cholesky[ScalarT: _inexact64](a: _ArrayLike[ScalarT], /, *, upper: bool = False) -> NDArray[ScalarT]: ... +@overload # +float64 array-likes +def cholesky(a: _ToArrayF64, /, *, upper: bool = False) -> NDArray[np.float64]: ... +@overload # ~complex128 array-likes +def cholesky(a: _AsArrayC128, /, *, upper: bool = False) -> NDArray[np.complex128]: ... +@overload # fallback +def cholesky(a: _ArrayLikeComplex_co, /, *, upper: bool = False) -> np.ndarray: ... # TODO: The supported input and output dtypes are dependent on the value of `n`. # For example: `n < 0` always casts integer types to float64 @@ -171,13 +226,7 @@ def matrix_power( n: SupportsIndex, ) -> NDArray[Any]: ... -@overload -def cholesky(a: _ArrayLikeInt_co, /, *, upper: bool = False) -> NDArray[float64]: ... -@overload -def cholesky(a: _ArrayLikeFloat_co, /, *, upper: bool = False) -> NDArray[floating]: ... -@overload -def cholesky(a: _ArrayLikeComplex_co, /, *, upper: bool = False) -> NDArray[complexfloating]: ... - +# TODO: narrow return types @overload def outer(x1: _ArrayLike[Never], x2: _ArrayLike[Never], /) -> NDArray[Any]: ... @overload @@ -203,6 +252,7 @@ def outer( /, ) -> NDArray[Any]: ... +# TODO: narrow return types @overload def qr(a: _ArrayLikeInt_co, mode: _ModeKind = "reduced") -> QRResult: ... @overload @@ -210,18 +260,7 @@ def qr(a: _ArrayLikeFloat_co, mode: _ModeKind = "reduced") -> QRResult: ... @overload def qr(a: _ArrayLikeComplex_co, mode: _ModeKind = "reduced") -> QRResult: ... -@overload -def eigvals(a: _ArrayLikeInt_co) -> NDArray[float64] | NDArray[complex128]: ... -@overload -def eigvals(a: _ArrayLikeFloat_co) -> NDArray[floating] | NDArray[complexfloating]: ... -@overload -def eigvals(a: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... - -@overload -def eigvalsh(a: _ArrayLikeInt_co, UPLO: L["L", "U", "l", "u"] = "L") -> NDArray[float64]: ... -@overload -def eigvalsh(a: _ArrayLikeComplex_co, UPLO: L["L", "U", "l", "u"] = "L") -> NDArray[floating]: ... - +# TODO: narrow return types @overload def eig(a: _ArrayLikeInt_co) -> EigResult: ... @overload @@ -229,22 +268,15 @@ def eig(a: _ArrayLikeFloat_co) -> EigResult: ... @overload def eig(a: _ArrayLikeComplex_co) -> EigResult: ... 
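A brief usage note on the `*Result` classes above: because they are named tuples, results type-check under both unpacking and attribute access. A small sketch (illustrative only):

    import numpy as np

    res = np.linalg.eig(np.eye(2))
    w, v = res  # tuple unpacking
    assert (w == res.eigenvalues).all()
    assert (v == res.eigenvectors).all()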
+# TODO: narrow return types @overload -def eigh( - a: _ArrayLikeInt_co, - UPLO: L["L", "U", "l", "u"] = "L", -) -> EighResult: ... +def eigh(a: _ArrayLikeInt_co, UPLO: _SideKind = "L") -> EighResult: ... @overload -def eigh( - a: _ArrayLikeFloat_co, - UPLO: L["L", "U", "l", "u"] = "L", -) -> EighResult: ... +def eigh(a: _ArrayLikeFloat_co, UPLO: _SideKind = "L") -> EighResult: ... @overload -def eigh( - a: _ArrayLikeComplex_co, - UPLO: L["L", "U", "l", "u"] = "L", -) -> EighResult: ... +def eigh(a: _ArrayLikeComplex_co, UPLO: _SideKind = "L") -> EighResult: ... +# TODO: narrow return types @overload def svd( a: _ArrayLikeInt_co, @@ -297,15 +329,41 @@ def svd( hermitian: bool = False, ) -> NDArray[floating]: ... -# -@overload -def svdvals( - x: _ArrayLike[np.float64 | np.complex128 | np.integer | np.bool] | _NestedSequence[complex], / -) -> NDArray[np.float64]: ... -@overload -def svdvals(x: _ArrayLike[np.float32 | np.complex64], /) -> NDArray[np.float32]: ... -@overload -def svdvals(x: _ArrayLikeNumber_co, /) -> NDArray[floating]: ... +# NOTE: for real input the output dtype (floating/complexfloating) depends on the specific values +@overload # abstract `complexfloating` (excluding concrete types) +def eigvals(a: NDArray[np.complexfloating[Never]]) -> NDArray[np.complexfloating]: ... +@overload # abstract `inexact` and `floating` (excluding concrete types) +def eigvals(a: NDArray[np.inexact[Never]]) -> np.ndarray: ... +@overload # ~complex128 +def eigvals(a: _AsArrayC128) -> NDArray[np.complex128]: ... +@overload # +float64 +def eigvals(a: _ToArrayF64) -> NDArray[np.complex128] | NDArray[np.float64]: ... +@overload # ~complex64 +def eigvals(a: _ArrayLike[np.complex64]) -> NDArray[np.complex64]: ... +@overload # ~float32 +def eigvals(a: _ArrayLike[np.float32]) -> NDArray[np.complex64] | NDArray[np.float32]: ... +@overload # fallback +def eigvals(a: _ArrayLikeComplex_co) -> np.ndarray: ... + +# keep in sync with svdvals +@overload # abstract `inexact` (excluding concrete types) +def eigvalsh(a: NDArray[np.inexact[Never]], UPLO: _SideKind = "L") -> NDArray[np.floating]: ... +@overload # ~inexact32 +def eigvalsh(a: _ArrayLike[_inexact64], UPLO: _SideKind = "L") -> NDArray[np.float32]: ... +@overload # +complex128 +def eigvalsh(a: _ToArrayC128, UPLO: _SideKind = "L") -> NDArray[np.float64]: ... +@overload # fallback +def eigvalsh(a: _ArrayLikeComplex_co, UPLO: _SideKind = "L") -> NDArray[np.floating]: ... + +# keep in sync with eigvalsh +@overload # abstract `inexact` (excluding concrete types) +def svdvals(a: NDArray[np.inexact[Never]], /) -> NDArray[np.floating]: ... +@overload # ~inexact32 +def svdvals(x: _ArrayLike[_inexact64], /) -> NDArray[np.float32]: ... +@overload # +complex128 +def svdvals(x: _ToArrayC128, /) -> NDArray[np.float64]: ... +@overload # fallback +def svdvals(a: _ArrayLikeComplex_co, /) -> NDArray[np.floating]: ... # TODO: Returns a scalar for 2D arrays and # a `(x.ndim - 2)`` dimensional array otherwise @@ -320,31 +378,6 @@ def matrix_rank( rtol: _ArrayLikeFloat_co | None = None, ) -> Any: ... -@overload -def pinv( - a: _ArrayLikeInt_co, - rcond: _ArrayLikeFloat_co | None = None, - hermitian: bool = False, - *, - rtol: _ArrayLikeFloat_co | _NoValueType = ..., -) -> NDArray[float64]: ... -@overload -def pinv( - a: _ArrayLikeFloat_co, - rcond: _ArrayLikeFloat_co | None = None, - hermitian: bool = False, - *, - rtol: _ArrayLikeFloat_co | _NoValueType = ..., -) -> NDArray[floating]: ... 
-@overload -def pinv( - a: _ArrayLikeComplex_co, - rcond: _ArrayLikeFloat_co | None = None, - hermitian: bool = False, - *, - rtol: _ArrayLikeFloat_co | _NoValueType = ..., -) -> NDArray[complexfloating]: ... - # TODO: Returns a 2-tuple of scalars for 2D arrays and # a 2-tuple of `(a.ndim - 2)`` dimensional arrays otherwise def slogdet(a: _ArrayLikeComplex_co) -> SlogdetResult: ... @@ -353,28 +386,36 @@ def slogdet(a: _ArrayLikeComplex_co) -> SlogdetResult: ... # a 2-tuple of `(a.ndim - 2)`` dimensional arrays otherwise def det(a: _ArrayLikeComplex_co) -> Any: ... +# TODO: narrow return types @overload -def lstsq(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, rcond: float | None = None) -> tuple[ +def lstsq( + a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, rcond: float | None = None +) -> tuple[ NDArray[float64], NDArray[float64], int32, NDArray[float64], ]: ... @overload -def lstsq(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: float | None = None) -> tuple[ +def lstsq( + a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: float | None = None +) -> tuple[ NDArray[floating], NDArray[floating], int32, NDArray[floating], ]: ... @overload -def lstsq(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: float | None = None) -> tuple[ +def lstsq( + a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: float | None = None +) -> tuple[ NDArray[complexfloating], NDArray[floating], int32, NDArray[floating], ]: ... +# TODO: narrow return types @overload def norm( x: ArrayLike, @@ -398,6 +439,7 @@ def norm( keepdims: bool = False, ) -> Any: ... +# TODO: narrow return types @overload def matrix_norm( x: ArrayLike, @@ -415,6 +457,7 @@ def matrix_norm( keepdims: bool = False, ) -> Any: ... +# TODO: narrow return types @overload def vector_norm( x: ArrayLike, @@ -483,6 +526,7 @@ def multi_dot( out: NDArray[Any] | None = None, ) -> Any: ... +# TODO: narrow return types def diagonal( x: ArrayLike, # >= 2D array /, @@ -490,6 +534,7 @@ def diagonal( offset: SupportsIndex = 0, ) -> NDArray[Any]: ... +# TODO: narrow return types def trace( x: ArrayLike, # >= 2D array /, @@ -498,6 +543,7 @@ def trace( dtype: DTypeLike | None = None, ) -> Any: ... +# TODO: narrow return types @overload def cross( x1: _ArrayLikeUInt_co, @@ -531,6 +577,7 @@ def cross( axis: int = -1, ) -> NDArray[complexfloating]: ... +# TODO: narrow return types @overload def matmul[ScalarT: np.number](x1: _ArrayLike[ScalarT], x2: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... 
@overload diff --git a/numpy/typing/tests/data/fail/linalg.pyi b/numpy/typing/tests/data/fail/linalg.pyi index 78aceb235f8d..b05327b7436d 100644 --- a/numpy/typing/tests/data/fail/linalg.pyi +++ b/numpy/typing/tests/data/fail/linalg.pyi @@ -9,14 +9,13 @@ np.linalg.tensorsolve(AR_O, AR_O) # type: ignore[arg-type] np.linalg.solve(AR_O, AR_O) # type: ignore[arg-type] -np.linalg.tensorinv(AR_O) # type: ignore[arg-type] - -np.linalg.inv(AR_O) # type: ignore[arg-type] +np.linalg.tensorinv(AR_O) # type: ignore[type-var] +np.linalg.inv(AR_O) # type: ignore[type-var] +np.linalg.pinv(AR_O) # type: ignore[type-var] +np.linalg.cholesky(AR_O) # type: ignore[type-var] np.linalg.matrix_power(AR_M, 5) # type: ignore[arg-type] -np.linalg.cholesky(AR_O) # type: ignore[arg-type] - np.linalg.qr(AR_O) # type: ignore[arg-type] np.linalg.qr(AR_f8, mode="bob") # type: ignore[call-overload] @@ -25,6 +24,10 @@ np.linalg.eigvals(AR_O) # type: ignore[arg-type] np.linalg.eigvalsh(AR_O) # type: ignore[arg-type] np.linalg.eigvalsh(AR_O, UPLO="bob") # type: ignore[call-overload] +np.linalg.svdvals(AR_O) # type: ignore[arg-type] +np.linalg.svdvals(AR_M) # type: ignore[arg-type] +np.linalg.svdvals(x=AR_f8) # type: ignore[call-overload] + np.linalg.eig(AR_O) # type: ignore[arg-type] np.linalg.eigh(AR_O) # type: ignore[arg-type] @@ -32,17 +35,11 @@ np.linalg.eigh(AR_O, UPLO="bob") # type: ignore[call-overload] np.linalg.svd(AR_O) # type: ignore[arg-type] -np.linalg.svdvals(AR_O) # type: ignore[arg-type] -np.linalg.svdvals(AR_M) # type: ignore[arg-type] -np.linalg.svdvals(x=AR_f8) # type: ignore[call-overload] - np.linalg.cond(AR_O) # type: ignore[arg-type] np.linalg.cond(AR_f8, p="bob") # type: ignore[arg-type] np.linalg.matrix_rank(AR_O) # type: ignore[arg-type] -np.linalg.pinv(AR_O) # type: ignore[arg-type] - np.linalg.slogdet(AR_O) # type: ignore[arg-type] np.linalg.det(AR_O) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 3f73e842aa6c..947f0c08f9ef 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -10,7 +10,9 @@ from numpy.linalg._linalg import ( SVDResult, ) +int_list_2d: list[list[int]] float_list_2d: list[list[float]] +complex_list_2d: list[list[complex]] AR_i8: npt.NDArray[np.int64] AR_f4: npt.NDArray[np.float32] AR_f8: npt.NDArray[np.float64] @@ -30,12 +32,12 @@ assert_type(np.linalg.solve(AR_i8, AR_f8), npt.NDArray[np.floating]) assert_type(np.linalg.solve(AR_c16, AR_f8), npt.NDArray[np.complexfloating]) assert_type(np.linalg.tensorinv(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.tensorinv(AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.tensorinv(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.tensorinv(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.tensorinv(AR_c16), npt.NDArray[np.complex128]) assert_type(np.linalg.inv(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.inv(AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.inv(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.inv(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.inv(AR_c16), npt.NDArray[np.complex128]) assert_type(np.linalg.matrix_power(AR_i8, -1), npt.NDArray[Any]) assert_type(np.linalg.matrix_power(AR_f8, 0), npt.NDArray[Any]) @@ -43,8 +45,8 @@ assert_type(np.linalg.matrix_power(AR_c16, 1), npt.NDArray[Any]) assert_type(np.linalg.matrix_power(AR_O, 2), npt.NDArray[Any]) assert_type(np.linalg.cholesky(AR_i8), npt.NDArray[np.float64]) 
-assert_type(np.linalg.cholesky(AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.cholesky(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.cholesky(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.cholesky(AR_c16), npt.NDArray[np.complex128]) assert_type(np.linalg.outer(AR_i8, AR_i8), npt.NDArray[np.int64]) assert_type(np.linalg.outer(AR_f8, AR_f8), npt.NDArray[np.float64]) @@ -59,12 +61,12 @@ assert_type(np.linalg.qr(AR_f8), QRResult) assert_type(np.linalg.qr(AR_c16), QRResult) assert_type(np.linalg.eigvals(AR_i8), npt.NDArray[np.float64] | npt.NDArray[np.complex128]) -assert_type(np.linalg.eigvals(AR_f8), npt.NDArray[np.floating] | npt.NDArray[np.complexfloating]) -assert_type(np.linalg.eigvals(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.eigvals(AR_f8), npt.NDArray[np.float64] | npt.NDArray[np.complex128]) +assert_type(np.linalg.eigvals(AR_c16), npt.NDArray[np.complex128]) assert_type(np.linalg.eigvalsh(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.eigvalsh(AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.eigvalsh(AR_c16), npt.NDArray[np.floating]) +assert_type(np.linalg.eigvalsh(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.eigvalsh(AR_c16), npt.NDArray[np.float64]) assert_type(np.linalg.eig(AR_i8), EigResult) assert_type(np.linalg.eig(AR_f8), EigResult) @@ -89,9 +91,9 @@ assert_type(np.linalg.svdvals(AR_f4), npt.NDArray[np.float32]) assert_type(np.linalg.svdvals(AR_c8), npt.NDArray[np.float32]) assert_type(np.linalg.svdvals(AR_f8), npt.NDArray[np.float64]) assert_type(np.linalg.svdvals(AR_c16), npt.NDArray[np.float64]) -assert_type(np.linalg.svdvals([[1, 2], [3, 4]]), npt.NDArray[np.float64]) -assert_type(np.linalg.svdvals([[1.0, 2.0], [3.0, 4.0]]), npt.NDArray[np.float64]) -assert_type(np.linalg.svdvals([[1j, 2j], [3j, 4j]]), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(int_list_2d), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(float_list_2d), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(complex_list_2d), npt.NDArray[np.float64]) assert_type(np.linalg.cond(AR_i8), Any) assert_type(np.linalg.cond(AR_f8), Any) @@ -102,8 +104,8 @@ assert_type(np.linalg.matrix_rank(AR_f8), Any) assert_type(np.linalg.matrix_rank(AR_c16), Any) assert_type(np.linalg.pinv(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.pinv(AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.pinv(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.pinv(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.pinv(AR_c16), npt.NDArray[np.complex128]) assert_type(np.linalg.slogdet(AR_i8), SlogdetResult) assert_type(np.linalg.slogdet(AR_f8), SlogdetResult) From 4d249878bdb51a38c1fc6c28751644be9ad3ff27 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 9 Dec 2025 16:06:52 +0100 Subject: [PATCH 0985/1018] TYP: `linalg`: improved `matrix_power` --- numpy/linalg/_linalg.pyi | 33 ++++++++++++++++++----- numpy/typing/tests/data/reveal/linalg.pyi | 9 ++++--- 2 files changed, 31 insertions(+), 11 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 00135bc5d15b..be25e8714e67 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -83,9 +83,14 @@ type _inexact64 = np.float32 | np.complex64 # anything that safe-casts (from floating) into float64/complex128 type _ToArrayF64 = _ArrayLike[np.float64 | np.integer | np.bool] | _NestedSequence[float] type _ToArrayC128 = _ArrayLike[np.complex128 | np.float64 | np.integer | np.bool] | _NestedSequence[complex] -# 
the invariant `list` type avoids overlap with `_IntoArrayF64` +# the invariant `list` type avoids overlap with bool, int, etc +type _AsArrayInt = _ArrayLike[np.int_] | list[int] | _NestedSequence[list[int]] +type _AsArrayF64 = _ArrayLike[np.float64] | list[float] | _NestedSequence[list[float]] type _AsArrayC128 = _ArrayLike[np.complex128] | list[complex] | _NestedSequence[list[complex]] +type _PosInt = L[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] +type _NegInt = L[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16] + ### fortran_int = np.intc @@ -156,6 +161,7 @@ def solve( ) -> NDArray[complexfloating]: ... # keep in sync with the other inverse functions and cholesky +# TODO: transparent shape types @overload # inexact32 array-likes def tensorinv[ScalarT: _inexact64](a: _ArrayLike[ScalarT], ind: int = 2) -> NDArray[ScalarT]: ... @overload # +float64 array-likes @@ -176,6 +182,7 @@ def inv(a: _AsArrayC128) -> NDArray[np.complex128]: ... def inv(a: _ArrayLikeComplex_co) -> np.ndarray: ... # keep in sync with the other inverse functions and cholesky +# TODO: transparent shape types @overload # inexact32 array-likes def pinv[ScalarT: _inexact64]( a: _ArrayLike[ScalarT], @@ -210,6 +217,7 @@ def pinv( ) -> NDArray[Any]: ... # keep in sync with the inverse functions +# TODO: transparent shape types @overload # inexact32 array-likes def cholesky[ScalarT: _inexact64](a: _ArrayLike[ScalarT], /, *, upper: bool = False) -> NDArray[ScalarT]: ... @overload # +float64 array-likes @@ -219,12 +227,23 @@ def cholesky(a: _AsArrayC128, /, *, upper: bool = False) -> NDArray[np.complex12 @overload # fallback def cholesky(a: _ArrayLikeComplex_co, /, *, upper: bool = False) -> np.ndarray: ... -# TODO: The supported input and output dtypes are dependent on the value of `n`. -# For example: `n < 0` always casts integer types to float64 -def matrix_power( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - n: SupportsIndex, -) -> NDArray[Any]: ... +# NOTE: Technically this also accepts boolean array-likes, but that case is not very useful, so we skip it. +# If you have a use case for it, please open an issue. +# TODO: transparent shape types +@overload # +int, n ≥ 0 +def matrix_power(a: _NestedSequence[int], n: _PosInt) -> NDArray[np.int_]: ... +@overload # +integer | ~object, n ≥ 0 +def matrix_power[ScalarT: np.integer | np.object_](a: _ArrayLike[ScalarT], n: _PosInt) -> NDArray[ScalarT]: ... +@overload # +float64, n < 0 +def matrix_power(a: _ToArrayF64, n: _NegInt) -> NDArray[np.float64]: ... +@overload # ~float64 +def matrix_power(a: _AsArrayF64, n: SupportsIndex) -> NDArray[np.float64]: ... +@overload # ~complex128 +def matrix_power(a: _AsArrayC128, n: SupportsIndex) -> NDArray[np.complex128]: ... +@overload # ~inexact32 +def matrix_power[ScalarT: _inexact64](a: _ArrayLike[ScalarT], n: SupportsIndex) -> NDArray[ScalarT]: ... +@overload # fallback +def matrix_power(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, n: SupportsIndex) -> np.ndarray: ... 
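The sign-dependent overloads above mirror the runtime rule: a negative exponent routes through `inv`, which promotes integer input to float64, while a non-negative exponent keeps the input dtype. A sketch (the ±16 bound of `_PosInt`/`_NegInt` is the patch's pragmatic cutoff for literal ints):

    import numpy as np

    a = np.array([[1, 2], [3, 4]])       # default integer dtype
    np.linalg.matrix_power(a, 2).dtype   # integer: n >= 0 keeps the dtype
    np.linalg.matrix_power(a, -1).dtype  # float64: n < 0 is computed via inv()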
# TODO: narrow return types @overload diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 947f0c08f9ef..1c046912819b 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -39,10 +39,11 @@ assert_type(np.linalg.inv(AR_i8), npt.NDArray[np.float64]) assert_type(np.linalg.inv(AR_f8), npt.NDArray[np.float64]) assert_type(np.linalg.inv(AR_c16), npt.NDArray[np.complex128]) -assert_type(np.linalg.matrix_power(AR_i8, -1), npt.NDArray[Any]) -assert_type(np.linalg.matrix_power(AR_f8, 0), npt.NDArray[Any]) -assert_type(np.linalg.matrix_power(AR_c16, 1), npt.NDArray[Any]) -assert_type(np.linalg.matrix_power(AR_O, 2), npt.NDArray[Any]) +assert_type(np.linalg.matrix_power(AR_i8, -1), npt.NDArray[np.float64]) +assert_type(np.linalg.matrix_power(AR_i8, 1), npt.NDArray[np.int64]) +assert_type(np.linalg.matrix_power(AR_f8, 0), npt.NDArray[np.float64]) +assert_type(np.linalg.matrix_power(AR_c16, 1), npt.NDArray[np.complex128]) +assert_type(np.linalg.matrix_power(AR_O, 2), npt.NDArray[np.object_]) assert_type(np.linalg.cholesky(AR_i8), npt.NDArray[np.float64]) assert_type(np.linalg.cholesky(AR_f8), npt.NDArray[np.float64]) From 5b2ec65bac624cefb8fbcd0be147f051d25ea0e7 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 9 Dec 2025 16:34:00 +0100 Subject: [PATCH 0986/1018] TYP: `linalg`: improved `eigh` return types --- numpy/linalg/_linalg.pyi | 99 ++++++++++++----------- numpy/typing/tests/data/reveal/linalg.pyi | 13 ++- 2 files changed, 64 insertions(+), 48 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index be25e8714e67..ff4b0254db63 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -1,6 +1,7 @@ from collections.abc import Iterable from typing import ( Any, + Generic, Literal as L, NamedTuple, Never, @@ -8,6 +9,7 @@ from typing import ( SupportsInt, overload, ) +from typing_extensions import TypeVar import numpy as np from numpy import ( @@ -78,7 +80,7 @@ __all__ = [ type _ModeKind = L["reduced", "complete", "r", "raw"] type _SideKind = L["L", "U", "l", "u"] -type _inexact64 = np.float32 | np.complex64 +type _inexact32 = np.float32 | np.complex64 # anything that safe-casts (from floating) into float64/complex128 type _ToArrayF64 = _ArrayLike[np.float64 | np.integer | np.bool] | _NestedSequence[float] @@ -91,24 +93,27 @@ type _AsArrayC128 = _ArrayLike[np.complex128] | list[complex] | _NestedSequence[ type _PosInt = L[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] type _NegInt = L[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16] +_FloatingT_co = TypeVar("_FloatingT_co", bound=np.floating, default=Any, covariant=True) +_InexactT_co = TypeVar("_InexactT_co", bound=np.inexact, default=Any, covariant=True) + ### fortran_int = np.intc -# TODO: generic -class EigResult(NamedTuple): - eigenvalues: NDArray[Any] - eigenvectors: NDArray[Any] +# NOTE: only generic when `typing.TYPE_CHECKING` +class EigResult(NamedTuple, Generic[_InexactT_co]): + eigenvalues: NDArray[_InexactT_co] + eigenvectors: NDArray[_InexactT_co] -# TODO: generic -class EighResult(NamedTuple): - eigenvalues: NDArray[Any] - eigenvectors: NDArray[Any] +# NOTE: only generic when `typing.TYPE_CHECKING` +class EighResult(NamedTuple, Generic[_FloatingT_co, _InexactT_co]): + eigenvalues: NDArray[_FloatingT_co] + eigenvectors: NDArray[_InexactT_co] -# TODO: generic -class QRResult(NamedTuple): - Q: NDArray[Any] - R: NDArray[Any] +# NOTE: only generic when 
`typing.TYPE_CHECKING` +class QRResult(NamedTuple, Generic[_InexactT_co]): + Q: NDArray[_InexactT_co] + R: NDArray[_InexactT_co] # TODO: generic class SlogdetResult(NamedTuple): @@ -162,36 +167,36 @@ def solve( # keep in sync with the other inverse functions and cholesky # TODO: transparent shape types -@overload # inexact32 array-likes -def tensorinv[ScalarT: _inexact64](a: _ArrayLike[ScalarT], ind: int = 2) -> NDArray[ScalarT]: ... -@overload # +float64 array-likes +@overload # inexact32 +def tensorinv[ScalarT: _inexact32](a: _ArrayLike[ScalarT], ind: int = 2) -> NDArray[ScalarT]: ... +@overload # +float64 def tensorinv(a: _ToArrayF64, ind: int = 2) -> NDArray[np.float64]: ... -@overload # ~complex128 array-likes +@overload # ~complex128 def tensorinv(a: _AsArrayC128, ind: int = 2) -> NDArray[np.complex128]: ... @overload # fallback def tensorinv(a: _ArrayLikeComplex_co, ind: int = 2) -> np.ndarray: ... # keep in sync with the other inverse functions and cholesky -@overload # inexact32 array-likes -def inv[ScalarT: _inexact64](a: _ArrayLike[ScalarT]) -> NDArray[ScalarT]: ... -@overload # +float64 array-likes +@overload # inexact32 +def inv[ScalarT: _inexact32](a: _ArrayLike[ScalarT]) -> NDArray[ScalarT]: ... +@overload # +float64 def inv(a: _ToArrayF64) -> NDArray[np.float64]: ... -@overload # ~complex128 array-likes +@overload # ~complex128 def inv(a: _AsArrayC128) -> NDArray[np.complex128]: ... @overload # fallback def inv(a: _ArrayLikeComplex_co) -> np.ndarray: ... # keep in sync with the other inverse functions and cholesky # TODO: transparent shape types -@overload # inexact32 array-likes -def pinv[ScalarT: _inexact64]( +@overload # inexact32 +def pinv[ScalarT: _inexact32]( a: _ArrayLike[ScalarT], rcond: _ArrayLikeFloat_co | None = None, hermitian: bool = False, *, rtol: _ArrayLikeFloat_co | _NoValueType = _NoValue, ) -> NDArray[ScalarT]: ... -@overload # +float64 array-likes +@overload # +float64 def pinv( a: _ToArrayF64, rcond: _ArrayLikeFloat_co | None = None, @@ -199,7 +204,7 @@ def pinv( *, rtol: _ArrayLikeFloat_co | _NoValueType = _NoValue, ) -> NDArray[np.float64]: ... -@overload # ~complex128 array-likes +@overload # ~complex128 def pinv( a: _AsArrayC128, rcond: _ArrayLikeFloat_co | None = None, @@ -218,11 +223,11 @@ def pinv( # keep in sync with the inverse functions # TODO: transparent shape types -@overload # inexact32 array-likes -def cholesky[ScalarT: _inexact64](a: _ArrayLike[ScalarT], /, *, upper: bool = False) -> NDArray[ScalarT]: ... -@overload # +float64 array-likes +@overload # inexact32 +def cholesky[ScalarT: _inexact32](a: _ArrayLike[ScalarT], /, *, upper: bool = False) -> NDArray[ScalarT]: ... +@overload # +float64 def cholesky(a: _ToArrayF64, /, *, upper: bool = False) -> NDArray[np.float64]: ... -@overload # ~complex128 array-likes +@overload # ~complex128 def cholesky(a: _AsArrayC128, /, *, upper: bool = False) -> NDArray[np.complex128]: ... @overload # fallback def cholesky(a: _ArrayLikeComplex_co, /, *, upper: bool = False) -> np.ndarray: ... @@ -241,7 +246,7 @@ def matrix_power(a: _AsArrayF64, n: SupportsIndex) -> NDArray[np.float64]: ... @overload # ~complex128 def matrix_power(a: _AsArrayC128, n: SupportsIndex) -> NDArray[np.complex128]: ... @overload # ~inexact32 -def matrix_power[ScalarT: _inexact64](a: _ArrayLike[ScalarT], n: SupportsIndex) -> NDArray[ScalarT]: ... +def matrix_power[ScalarT: _inexact32](a: _ArrayLike[ScalarT], n: SupportsIndex) -> NDArray[ScalarT]: ... 
@overload # fallback def matrix_power(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, n: SupportsIndex) -> np.ndarray: ... @@ -271,14 +276,6 @@ def outer( /, ) -> NDArray[Any]: ... -# TODO: narrow return types -@overload -def qr(a: _ArrayLikeInt_co, mode: _ModeKind = "reduced") -> QRResult: ... -@overload -def qr(a: _ArrayLikeFloat_co, mode: _ModeKind = "reduced") -> QRResult: ... -@overload -def qr(a: _ArrayLikeComplex_co, mode: _ModeKind = "reduced") -> QRResult: ... - # TODO: narrow return types @overload def eig(a: _ArrayLikeInt_co) -> EigResult: ... @@ -287,13 +284,25 @@ def eig(a: _ArrayLikeFloat_co) -> EigResult: ... @overload def eig(a: _ArrayLikeComplex_co) -> EigResult: ... +# +@overload # workaround for microsoft/pyright#10232 +def eigh(a: NDArray[Never], UPLO: _SideKind = "L") -> EighResult: ... +@overload # ~inexact32 +def eigh[ScalarT: _inexact32](a: _ArrayLike[ScalarT], UPLO: _SideKind = "L") -> EighResult[np.float32, ScalarT]: ... +@overload # +float64 +def eigh(a: _ToArrayF64, UPLO: _SideKind = "L") -> EighResult[np.float64, np.float64]: ... +@overload # ~complex128 +def eigh(a: _AsArrayC128, UPLO: _SideKind = "L") -> EighResult[np.float64, np.complex128]: ... +@overload # fallback +def eigh(a: _ArrayLikeComplex_co, UPLO: _SideKind = "L") -> EighResult: ... + # TODO: narrow return types @overload -def eigh(a: _ArrayLikeInt_co, UPLO: _SideKind = "L") -> EighResult: ... +def qr(a: _ArrayLikeInt_co, mode: _ModeKind = "reduced") -> QRResult: ... @overload -def eigh(a: _ArrayLikeFloat_co, UPLO: _SideKind = "L") -> EighResult: ... +def qr(a: _ArrayLikeFloat_co, mode: _ModeKind = "reduced") -> QRResult: ... @overload -def eigh(a: _ArrayLikeComplex_co, UPLO: _SideKind = "L") -> EighResult: ... +def qr(a: _ArrayLikeComplex_co, mode: _ModeKind = "reduced") -> QRResult: ... # TODO: narrow return types @overload @@ -324,14 +333,14 @@ def svd( *, compute_uv: L[False], hermitian: bool = False, -) -> NDArray[float64]: ... +) -> NDArray[np.float64]: ... @overload def svd( a: _ArrayLikeInt_co, full_matrices: bool, compute_uv: L[False], hermitian: bool = False, -) -> NDArray[float64]: ... +) -> NDArray[np.float64]: ... @overload def svd( a: _ArrayLikeComplex_co, @@ -368,7 +377,7 @@ def eigvals(a: _ArrayLikeComplex_co) -> np.ndarray: ... @overload # abstract `inexact` (excluding concrete types) def eigvalsh(a: NDArray[np.inexact[Never]], UPLO: _SideKind = "L") -> NDArray[np.floating]: ... @overload # ~inexact32 -def eigvalsh(a: _ArrayLike[_inexact64], UPLO: _SideKind = "L") -> NDArray[np.float32]: ... +def eigvalsh(a: _ArrayLike[_inexact32], UPLO: _SideKind = "L") -> NDArray[np.float32]: ... @overload # +complex128 def eigvalsh(a: _ToArrayC128, UPLO: _SideKind = "L") -> NDArray[np.float64]: ... @overload # fallback @@ -378,7 +387,7 @@ def eigvalsh(a: _ArrayLikeComplex_co, UPLO: _SideKind = "L") -> NDArray[np.float @overload # abstract `inexact` (excluding concrete types) def svdvals(a: NDArray[np.inexact[Never]], /) -> NDArray[np.floating]: ... @overload # ~inexact32 -def svdvals(x: _ArrayLike[_inexact64], /) -> NDArray[np.float32]: ... +def svdvals(x: _ArrayLike[_inexact32], /) -> NDArray[np.float32]: ... @overload # +complex128 def svdvals(x: _ToArrayC128, /) -> NDArray[np.float64]: ... 
@overload # fallback diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 1c046912819b..71c02c6b7500 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -13,6 +13,8 @@ from numpy.linalg._linalg import ( int_list_2d: list[list[int]] float_list_2d: list[list[float]] complex_list_2d: list[list[complex]] + +AR_any: np.ndarray AR_i8: npt.NDArray[np.int64] AR_f4: npt.NDArray[np.float32] AR_f8: npt.NDArray[np.float64] @@ -73,9 +75,14 @@ assert_type(np.linalg.eig(AR_i8), EigResult) assert_type(np.linalg.eig(AR_f8), EigResult) assert_type(np.linalg.eig(AR_c16), EigResult) -assert_type(np.linalg.eigh(AR_i8), EighResult) -assert_type(np.linalg.eigh(AR_f8), EighResult) -assert_type(np.linalg.eigh(AR_c16), EighResult) +assert_type(np.linalg.eigh(AR_i8), EighResult[np.float64, np.float64]) +assert_type(np.linalg.eigh(AR_f4), EighResult[np.float32, np.float32]) +assert_type(np.linalg.eigh(AR_f8), EighResult[np.float64, np.float64]) +assert_type(np.linalg.eigh(AR_c8), EighResult[np.float32, np.complex64]) +assert_type(np.linalg.eigh(AR_c16), EighResult[np.float64, np.complex128]) +# mypy is being silly again here: +# > Expression is of type "EighResult[Any, Any]", not "EighResult[Any, Any]" +assert_type(np.linalg.eigh(AR_any), EighResult[Any, Any]) # type: ignore[assert-type] assert_type(np.linalg.svd(AR_i8), SVDResult) assert_type(np.linalg.svd(AR_f8), SVDResult) From 2ce9c89507d54b1994f7be80f8277bfe29ee91b2 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 9 Dec 2025 16:56:13 +0100 Subject: [PATCH 0987/1018] TYP: `linalg`: improved `eig` return types --- numpy/linalg/_linalg.pyi | 20 ++++++++++++-------- numpy/typing/tests/data/reveal/linalg.pyi | 15 ++++++++++++--- 2 files changed, 24 insertions(+), 11 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index ff4b0254db63..645c9f8bfc2d 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -276,12 +276,18 @@ def outer( /, ) -> NDArray[Any]: ... -# TODO: narrow return types -@overload -def eig(a: _ArrayLikeInt_co) -> EigResult: ... -@overload -def eig(a: _ArrayLikeFloat_co) -> EigResult: ... -@overload +# NOTE: for real input the output dtype (floating/complexfloating) depends on the specific values +@overload # abstract `inexact` and `floating` (excluding concrete types) +def eig(a: NDArray[np.inexact[Never]]) -> EigResult: ... +@overload # ~complex128 +def eig(a: _AsArrayC128) -> EigResult[np.complex128]: ... +@overload # +float64 +def eig(a: _ToArrayF64) -> EigResult[np.complex128] | EigResult[np.float64]: ... +@overload # ~complex64 +def eig(a: _ArrayLike[np.complex64]) -> EigResult[np.complex64]: ... +@overload # ~float32 +def eig(a: _ArrayLike[np.float32]) -> EigResult[np.complex64] | EigResult[np.float32]: ... +@overload # fallback def eig(a: _ArrayLikeComplex_co) -> EigResult: ... # @@ -358,8 +364,6 @@ def svd( ) -> NDArray[floating]: ... # NOTE: for real input the output dtype (floating/complexfloating) depends on the specific values -@overload # abstract `complexfloating` (excluding concrete types) -def eigvals(a: NDArray[np.complexfloating[Never]]) -> NDArray[np.complexfloating]: ... @overload # abstract `inexact` and `floating` (excluding concrete types) def eigvals(a: NDArray[np.inexact[Never]]) -> np.ndarray: ... 
@overload # ~complex128 diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 71c02c6b7500..76eebd15470a 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -16,8 +16,10 @@ complex_list_2d: list[list[complex]] AR_any: np.ndarray AR_i8: npt.NDArray[np.int64] +AR_f_: npt.NDArray[np.floating] AR_f4: npt.NDArray[np.float32] AR_f8: npt.NDArray[np.float64] +AR_c_: npt.NDArray[np.complexfloating] AR_c8: npt.NDArray[np.complex64] AR_c16: npt.NDArray[np.complex128] AR_O: npt.NDArray[np.object_] @@ -71,9 +73,16 @@ assert_type(np.linalg.eigvalsh(AR_i8), npt.NDArray[np.float64]) assert_type(np.linalg.eigvalsh(AR_f8), npt.NDArray[np.float64]) assert_type(np.linalg.eigvalsh(AR_c16), npt.NDArray[np.float64]) -assert_type(np.linalg.eig(AR_i8), EigResult) -assert_type(np.linalg.eig(AR_f8), EigResult) -assert_type(np.linalg.eig(AR_c16), EigResult) +assert_type(np.linalg.eig(AR_i8), EigResult[np.float64] | EigResult[np.complex128]) +assert_type(np.linalg.eig(AR_f4), EigResult[np.float32] | EigResult[np.complex64]) +assert_type(np.linalg.eig(AR_f8), EigResult[np.float64] | EigResult[np.complex128]) +assert_type(np.linalg.eig(AR_c8), EigResult[np.complex64]) +assert_type(np.linalg.eig(AR_c16), EigResult[np.complex128]) +# mypy is being silly again here: +# > Expression is of type "EigResult[Any]", not "EigResult[Any]" +assert_type(np.linalg.eig(AR_f_), EigResult) # type: ignore[assert-type] +assert_type(np.linalg.eig(AR_c_), EigResult) # type: ignore[assert-type] +assert_type(np.linalg.eig(AR_any), EigResult) # type: ignore[assert-type] assert_type(np.linalg.eigh(AR_i8), EighResult[np.float64, np.float64]) assert_type(np.linalg.eigh(AR_f4), EighResult[np.float32, np.float32]) From 6a0f1e016f92d6b3ae23a09b0893c8f460f81c1f Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 9 Dec 2025 17:25:38 +0100 Subject: [PATCH 0988/1018] TYP: `linalg`: improved `qr` return types --- numpy/linalg/_linalg.pyi | 34 ++++++++++++++++++----- numpy/typing/tests/data/fail/linalg.pyi | 16 +++++------ numpy/typing/tests/data/reveal/linalg.pyi | 30 +++++++++++++++----- 3 files changed, 58 insertions(+), 22 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 645c9f8bfc2d..1c51277fe3a8 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -93,6 +93,8 @@ type _AsArrayC128 = _ArrayLike[np.complex128] | list[complex] | _NestedSequence[ type _PosInt = L[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] type _NegInt = L[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16] +type _tuple2[T] = tuple[T, T] + _FloatingT_co = TypeVar("_FloatingT_co", bound=np.floating, default=Any, covariant=True) _InexactT_co = TypeVar("_InexactT_co", bound=np.inexact, default=Any, covariant=True) @@ -302,13 +304,31 @@ def eigh(a: _AsArrayC128, UPLO: _SideKind = "L") -> EighResult[np.float64, np.co @overload # fallback def eigh(a: _ArrayLikeComplex_co, UPLO: _SideKind = "L") -> EighResult: ... -# TODO: narrow return types -@overload -def qr(a: _ArrayLikeInt_co, mode: _ModeKind = "reduced") -> QRResult: ... -@overload -def qr(a: _ArrayLikeFloat_co, mode: _ModeKind = "reduced") -> QRResult: ... -@overload -def qr(a: _ArrayLikeComplex_co, mode: _ModeKind = "reduced") -> QRResult: ... +# +@overload # ~inexact32, reduced|complete +def qr[ScalarT: _inexact32](a: _ArrayLike[ScalarT], mode: L["reduced", "complete"] = "reduced") -> QRResult[ScalarT]: ... 
+@overload # +float64, reduced|complete +def qr(a: _ToArrayF64, mode: L["reduced", "complete"] = "reduced") -> QRResult[np.float64]: ... +@overload # ~complex128, reduced|complete +def qr(a: _AsArrayC128, mode: L["reduced", "complete"] = "reduced") -> QRResult[np.complex128]: ... +@overload # fallback, reduced|complete +def qr(a: _ArrayLikeComplex_co, mode: L["reduced", "complete"] = "reduced") -> QRResult: ... +@overload # ~inexact32, r +def qr[ScalarT: _inexact32](a: _ArrayLike[ScalarT], mode: L["r"]) -> NDArray[ScalarT]: ... +@overload # +float64, r +def qr(a: _ToArrayF64, mode: L["r"]) -> NDArray[np.float64]: ... +@overload # ~complex128, r +def qr(a: _AsArrayC128, mode: L["r"]) -> NDArray[np.complex128]: ... +@overload # fallback, r +def qr(a: _ArrayLikeComplex_co, mode: L["r"]) -> np.ndarray: ... +@overload # ~inexact32, raw +def qr[ScalarT: _inexact32](a: _ArrayLike[ScalarT], mode: L["raw"]) -> _tuple2[NDArray[ScalarT]]: ... +@overload # +float64, raw +def qr(a: _ToArrayF64, mode: L["raw"]) -> _tuple2[NDArray[np.float64]]: ... +@overload # ~complex128, raw +def qr(a: _AsArrayC128, mode: L["raw"]) -> _tuple2[NDArray[np.complex128]]: ... +@overload # fallback, raw +def qr(a: _ArrayLikeComplex_co, mode: L["raw"]) -> _tuple2[np.ndarray]: ... # TODO: narrow return types @overload diff --git a/numpy/typing/tests/data/fail/linalg.pyi b/numpy/typing/tests/data/fail/linalg.pyi index b05327b7436d..426c80555d9b 100644 --- a/numpy/typing/tests/data/fail/linalg.pyi +++ b/numpy/typing/tests/data/fail/linalg.pyi @@ -16,9 +16,16 @@ np.linalg.cholesky(AR_O) # type: ignore[type-var] np.linalg.matrix_power(AR_M, 5) # type: ignore[arg-type] -np.linalg.qr(AR_O) # type: ignore[arg-type] +np.linalg.eig(AR_O) # type: ignore[arg-type] + +np.linalg.eigh(AR_O) # type: ignore[arg-type] +np.linalg.eigh(AR_O, UPLO="bob") # type: ignore[call-overload] + +np.linalg.qr(AR_O) # type: ignore[type-var] np.linalg.qr(AR_f8, mode="bob") # type: ignore[call-overload] +np.linalg.svd(AR_O) # type: ignore[arg-type] + np.linalg.eigvals(AR_O) # type: ignore[arg-type] np.linalg.eigvalsh(AR_O) # type: ignore[arg-type] @@ -28,13 +35,6 @@ np.linalg.svdvals(AR_O) # type: ignore[arg-type] np.linalg.svdvals(AR_M) # type: ignore[arg-type] np.linalg.svdvals(x=AR_f8) # type: ignore[call-overload] -np.linalg.eig(AR_O) # type: ignore[arg-type] - -np.linalg.eigh(AR_O) # type: ignore[arg-type] -np.linalg.eigh(AR_O, UPLO="bob") # type: ignore[call-overload] - -np.linalg.svd(AR_O) # type: ignore[arg-type] - np.linalg.cond(AR_O) # type: ignore[arg-type] np.linalg.cond(AR_f8, p="bob") # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 76eebd15470a..0538b45ddc71 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -61,9 +61,27 @@ assert_type(np.linalg.outer(AR_O, AR_O), npt.NDArray[np.object_]) # NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. 
assert_type(np.linalg.outer(AR_i8, AR_m), npt.NDArray[np.timedelta64]) # type: ignore[assert-type] -assert_type(np.linalg.qr(AR_i8), QRResult) -assert_type(np.linalg.qr(AR_f8), QRResult) -assert_type(np.linalg.qr(AR_c16), QRResult) +assert_type(np.linalg.qr(AR_i8), QRResult[np.float64]) +assert_type(np.linalg.qr(AR_i8, "r"), npt.NDArray[np.float64]) +assert_type(np.linalg.qr(AR_i8, "raw"), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.linalg.qr(AR_f4), QRResult[np.float32]) +assert_type(np.linalg.qr(AR_f4, "r"), npt.NDArray[np.float32]) +assert_type(np.linalg.qr(AR_f4, "raw"), tuple[npt.NDArray[np.float32], npt.NDArray[np.float32]]) +assert_type(np.linalg.qr(AR_f8), QRResult[np.float64]) +assert_type(np.linalg.qr(AR_f8, "r"), npt.NDArray[np.float64]) +assert_type(np.linalg.qr(AR_f8, "raw"), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.linalg.qr(AR_c8), QRResult[np.complex64]) +assert_type(np.linalg.qr(AR_c8, "r"), npt.NDArray[np.complex64]) +assert_type(np.linalg.qr(AR_c8, "raw"), tuple[npt.NDArray[np.complex64], npt.NDArray[np.complex64]]) +assert_type(np.linalg.qr(AR_c16), QRResult[np.complex128]) +assert_type(np.linalg.qr(AR_c16, "r"), npt.NDArray[np.complex128]) +assert_type(np.linalg.qr(AR_c16, "raw"), tuple[npt.NDArray[np.complex128], npt.NDArray[np.complex128]]) +# Mypy bug: `Expression is of type "QRResult[Any]", not "QRResult[Any]"` +assert_type(np.linalg.qr(AR_any), QRResult[Any]) # type: ignore[assert-type] +# Mypy bug: `Expression is of type "ndarray[Any, Any]", not "ndarray[tuple[Any, ...], dtype[Any]]"` +assert_type(np.linalg.qr(AR_any, "r"), np.ndarray) # type: ignore[assert-type] +# Mypy bug: `Expression is of type "tuple[Any, ...]", <--snip-->"` +assert_type(np.linalg.qr(AR_any, "raw"), tuple[np.ndarray, np.ndarray]) # type: ignore[assert-type] assert_type(np.linalg.eigvals(AR_i8), npt.NDArray[np.float64] | npt.NDArray[np.complex128]) assert_type(np.linalg.eigvals(AR_f8), npt.NDArray[np.float64] | npt.NDArray[np.complex128]) @@ -78,8 +96,7 @@ assert_type(np.linalg.eig(AR_f4), EigResult[np.float32] | EigResult[np.complex64 assert_type(np.linalg.eig(AR_f8), EigResult[np.float64] | EigResult[np.complex128]) assert_type(np.linalg.eig(AR_c8), EigResult[np.complex64]) assert_type(np.linalg.eig(AR_c16), EigResult[np.complex128]) -# mypy is being silly again here: -# > Expression is of type "EigResult[Any]", not "EigResult[Any]" +# Mypy bug: `Expression is of type "EigResult[Any]", not "EigResult[Any]"` assert_type(np.linalg.eig(AR_f_), EigResult) # type: ignore[assert-type] assert_type(np.linalg.eig(AR_c_), EigResult) # type: ignore[assert-type] assert_type(np.linalg.eig(AR_any), EigResult) # type: ignore[assert-type] @@ -89,8 +106,7 @@ assert_type(np.linalg.eigh(AR_f4), EighResult[np.float32, np.float32]) assert_type(np.linalg.eigh(AR_f8), EighResult[np.float64, np.float64]) assert_type(np.linalg.eigh(AR_c8), EighResult[np.float32, np.complex64]) assert_type(np.linalg.eigh(AR_c16), EighResult[np.float64, np.complex128]) -# mypy is being silly again here: -# > Expression is of type "EighResult[Any, Any]", not "EighResult[Any, Any]" +# Mypy bug: `Expression is of type "EighResult[Any, Any]", not "EighResult[Any, Any]"` assert_type(np.linalg.eigh(AR_any), EighResult[Any, Any]) # type: ignore[assert-type] assert_type(np.linalg.svd(AR_i8), SVDResult) From 3f2b499c64703d7a1c14558db514035427d69f7b Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 9 Dec 2025 17:50:48 +0100 Subject: [PATCH 0989/1018] TYP: `linalg`: improved `svd` 
return types --- numpy/linalg/_linalg.pyi | 125 +++++++++------------- numpy/typing/tests/data/reveal/linalg.pyi | 38 ++++--- 2 files changed, 78 insertions(+), 85 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 1c51277fe3a8..e5293b6c965d 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -102,34 +102,30 @@ _InexactT_co = TypeVar("_InexactT_co", bound=np.inexact, default=Any, covariant= fortran_int = np.intc -# NOTE: only generic when `typing.TYPE_CHECKING` +# NOTE: These names tuples are only generic when `typing.TYPE_CHECKING`. + class EigResult(NamedTuple, Generic[_InexactT_co]): eigenvalues: NDArray[_InexactT_co] eigenvectors: NDArray[_InexactT_co] -# NOTE: only generic when `typing.TYPE_CHECKING` class EighResult(NamedTuple, Generic[_FloatingT_co, _InexactT_co]): eigenvalues: NDArray[_FloatingT_co] eigenvectors: NDArray[_InexactT_co] -# NOTE: only generic when `typing.TYPE_CHECKING` class QRResult(NamedTuple, Generic[_InexactT_co]): Q: NDArray[_InexactT_co] R: NDArray[_InexactT_co] -# TODO: generic +class SVDResult(NamedTuple, Generic[_FloatingT_co, _InexactT_co]): + U: NDArray[_InexactT_co] + S: NDArray[_FloatingT_co] + Vh: NDArray[_InexactT_co] + +# TODO: Make generic class SlogdetResult(NamedTuple): - # TODO: `sign` and `logabsdet` are scalars for input 2D arrays and - # a `(x.ndim - 2)`` dimensional arrays otherwise sign: Any logabsdet: Any -# TODO: generic -class SVDResult(NamedTuple): - U: NDArray[Any] - S: NDArray[Any] - Vh: NDArray[Any] - # TODO: narrow return types @overload def tensorsolve( @@ -307,81 +303,66 @@ def eigh(a: _ArrayLikeComplex_co, UPLO: _SideKind = "L") -> EighResult: ... # @overload # ~inexact32, reduced|complete def qr[ScalarT: _inexact32](a: _ArrayLike[ScalarT], mode: L["reduced", "complete"] = "reduced") -> QRResult[ScalarT]: ... -@overload # +float64, reduced|complete -def qr(a: _ToArrayF64, mode: L["reduced", "complete"] = "reduced") -> QRResult[np.float64]: ... -@overload # ~complex128, reduced|complete -def qr(a: _AsArrayC128, mode: L["reduced", "complete"] = "reduced") -> QRResult[np.complex128]: ... -@overload # fallback, reduced|complete -def qr(a: _ArrayLikeComplex_co, mode: L["reduced", "complete"] = "reduced") -> QRResult: ... @overload # ~inexact32, r def qr[ScalarT: _inexact32](a: _ArrayLike[ScalarT], mode: L["r"]) -> NDArray[ScalarT]: ... -@overload # +float64, r -def qr(a: _ToArrayF64, mode: L["r"]) -> NDArray[np.float64]: ... -@overload # ~complex128, r -def qr(a: _AsArrayC128, mode: L["r"]) -> NDArray[np.complex128]: ... -@overload # fallback, r -def qr(a: _ArrayLikeComplex_co, mode: L["r"]) -> np.ndarray: ... @overload # ~inexact32, raw def qr[ScalarT: _inexact32](a: _ArrayLike[ScalarT], mode: L["raw"]) -> _tuple2[NDArray[ScalarT]]: ... +@overload # +float64, reduced|complete +def qr(a: _ToArrayF64, mode: L["reduced", "complete"] = "reduced") -> QRResult[np.float64]: ... +@overload # +float64, r +def qr(a: _ToArrayF64, mode: L["r"]) -> NDArray[np.float64]: ... @overload # +float64, raw def qr(a: _ToArrayF64, mode: L["raw"]) -> _tuple2[NDArray[np.float64]]: ... +@overload # ~complex128, reduced|complete +def qr(a: _AsArrayC128, mode: L["reduced", "complete"] = "reduced") -> QRResult[np.complex128]: ... +@overload # ~complex128, r +def qr(a: _AsArrayC128, mode: L["r"]) -> NDArray[np.complex128]: ... @overload # ~complex128, raw def qr(a: _AsArrayC128, mode: L["raw"]) -> _tuple2[NDArray[np.complex128]]: ... 
+@overload # fallback, reduced|complete +def qr(a: _ArrayLikeComplex_co, mode: L["reduced", "complete"] = "reduced") -> QRResult: ... +@overload # fallback, r +def qr(a: _ArrayLikeComplex_co, mode: L["r"]) -> np.ndarray: ... @overload # fallback, raw def qr(a: _ArrayLikeComplex_co, mode: L["raw"]) -> _tuple2[np.ndarray]: ... -# TODO: narrow return types -@overload -def svd( - a: _ArrayLikeInt_co, - full_matrices: bool = True, - compute_uv: L[True] = True, - hermitian: bool = False, -) -> SVDResult: ... -@overload +# +@overload # workaround for microsoft/pyright#10232, compute_uv=True (default) +def svd(a: NDArray[Never], full_matrices: bool = True, compute_uv: L[True] = True, hermitian: bool = False) -> SVDResult: ... +@overload # workaround for microsoft/pyright#10232, compute_uv=False (positional) +def svd(a: NDArray[Never], full_matrices: bool, compute_uv: L[False], hermitian: bool = False) -> np.ndarray: ... +@overload # workaround for microsoft/pyright#10232, compute_uv=False (keyword) +def svd(a: NDArray[Never], full_matrices: bool = True, *, compute_uv: L[False], hermitian: bool = False) -> np.ndarray: ... +@overload # ~inexact32, compute_uv=True (default) +def svd[ScalarT: _inexact32]( + a: _ArrayLike[ScalarT], full_matrices: bool = True, compute_uv: L[True] = True, hermitian: bool = False +) -> SVDResult[np.float32, ScalarT]: ... +@overload # ~inexact32, compute_uv=False (positional) +def svd(a: _ArrayLike[_inexact32], full_matrices: bool, compute_uv: L[False], hermitian: bool = False) -> NDArray[np.float32]: ... +@overload # ~inexact32, compute_uv=False (keyword) def svd( - a: _ArrayLikeFloat_co, - full_matrices: bool = True, - compute_uv: L[True] = True, - hermitian: bool = False, -) -> SVDResult: ... -@overload + a: _ArrayLike[_inexact32], full_matrices: bool = True, *, compute_uv: L[False], hermitian: bool = False +) -> NDArray[np.float32]: ... +@overload # +float64, compute_uv=True (default) def svd( - a: _ArrayLikeComplex_co, - full_matrices: bool = True, - compute_uv: L[True] = True, - hermitian: bool = False, -) -> SVDResult: ... -@overload + a: _ToArrayF64, full_matrices: bool = True, compute_uv: L[True] = True, hermitian: bool = False +) -> SVDResult[np.float64, np.float64]: ... +@overload # ~complex128, compute_uv=True (default) def svd( - a: _ArrayLikeInt_co, - full_matrices: bool = True, - *, - compute_uv: L[False], - hermitian: bool = False, -) -> NDArray[np.float64]: ... -@overload + a: _AsArrayC128, full_matrices: bool = True, compute_uv: L[True] = True, hermitian: bool = False +) -> SVDResult[np.float64, np.complex128]: ... +@overload # +float64 | ~complex128, compute_uv=False (positional) +def svd(a: _ToArrayC128, full_matrices: bool, compute_uv: L[False], hermitian: bool = False) -> NDArray[np.float64]: ... +@overload # +float64 | ~complex128, compute_uv=False (keyword) +def svd(a: _ToArrayC128, full_matrices: bool = True, *, compute_uv: L[False], hermitian: bool = False) -> NDArray[np.float64]: ... +@overload # fallback, compute_uv=True (default) def svd( - a: _ArrayLikeInt_co, - full_matrices: bool, - compute_uv: L[False], - hermitian: bool = False, -) -> NDArray[np.float64]: ... -@overload -def svd( - a: _ArrayLikeComplex_co, - full_matrices: bool = True, - *, - compute_uv: L[False], - hermitian: bool = False, -) -> NDArray[floating]: ... -@overload -def svd( - a: _ArrayLikeComplex_co, - full_matrices: bool, - compute_uv: L[False], - hermitian: bool = False, -) -> NDArray[floating]: ... 
+ a: _ArrayLikeComplex_co, full_matrices: bool = True, compute_uv: L[True] = True, hermitian: bool = False +) -> SVDResult: ... +@overload # fallback, compute_uv=False (positional) +def svd(a: _ArrayLikeComplex_co, full_matrices: bool, compute_uv: L[False], hermitian: bool = False) -> np.ndarray: ... +@overload # fallback, compute_uv=False (keyword) +def svd(a: _ArrayLikeComplex_co, full_matrices: bool = True, *, compute_uv: L[False], hermitian: bool = False) -> np.ndarray: ... # NOTE: for real input the output dtype (floating/complexfloating) depends on the specific values @overload # abstract `inexact` and `floating` (excluding concrete types) diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 0538b45ddc71..5401e01aad31 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -79,9 +79,9 @@ assert_type(np.linalg.qr(AR_c16, "raw"), tuple[npt.NDArray[np.complex128], npt.N # Mypy bug: `Expression is of type "QRResult[Any]", not "QRResult[Any]"` assert_type(np.linalg.qr(AR_any), QRResult[Any]) # type: ignore[assert-type] # Mypy bug: `Expression is of type "ndarray[Any, Any]", not "ndarray[tuple[Any, ...], dtype[Any]]"` -assert_type(np.linalg.qr(AR_any, "r"), np.ndarray) # type: ignore[assert-type] +assert_type(np.linalg.qr(AR_any, "r"), npt.NDArray[Any]) # type: ignore[assert-type] # Mypy bug: `Expression is of type "tuple[Any, ...]", <--snip-->"` -assert_type(np.linalg.qr(AR_any, "raw"), tuple[np.ndarray, np.ndarray]) # type: ignore[assert-type] +assert_type(np.linalg.qr(AR_any, "raw"), tuple[npt.NDArray[Any], npt.NDArray[Any]]) # type: ignore[assert-type] assert_type(np.linalg.eigvals(AR_i8), npt.NDArray[np.float64] | npt.NDArray[np.complex128]) assert_type(np.linalg.eigvals(AR_f8), npt.NDArray[np.float64] | npt.NDArray[np.complex128]) @@ -97,9 +97,9 @@ assert_type(np.linalg.eig(AR_f8), EigResult[np.float64] | EigResult[np.complex12 assert_type(np.linalg.eig(AR_c8), EigResult[np.complex64]) assert_type(np.linalg.eig(AR_c16), EigResult[np.complex128]) # Mypy bug: `Expression is of type "EigResult[Any]", not "EigResult[Any]"` -assert_type(np.linalg.eig(AR_f_), EigResult) # type: ignore[assert-type] -assert_type(np.linalg.eig(AR_c_), EigResult) # type: ignore[assert-type] -assert_type(np.linalg.eig(AR_any), EigResult) # type: ignore[assert-type] +assert_type(np.linalg.eig(AR_f_), EigResult[Any]) # type: ignore[assert-type] +assert_type(np.linalg.eig(AR_c_), EigResult[Any]) # type: ignore[assert-type] +assert_type(np.linalg.eig(AR_any), EigResult[Any]) # type: ignore[assert-type] assert_type(np.linalg.eigh(AR_i8), EighResult[np.float64, np.float64]) assert_type(np.linalg.eigh(AR_f4), EighResult[np.float32, np.float32]) @@ -109,20 +109,32 @@ assert_type(np.linalg.eigh(AR_c16), EighResult[np.float64, np.complex128]) # Mypy bug: `Expression is of type "EighResult[Any, Any]", not "EighResult[Any, Any]"` assert_type(np.linalg.eigh(AR_any), EighResult[Any, Any]) # type: ignore[assert-type] -assert_type(np.linalg.svd(AR_i8), SVDResult) -assert_type(np.linalg.svd(AR_f8), SVDResult) -assert_type(np.linalg.svd(AR_c16), SVDResult) +assert_type(np.linalg.svd(AR_i8), SVDResult[np.float64, np.float64]) assert_type(np.linalg.svd(AR_i8, compute_uv=False), npt.NDArray[np.float64]) -assert_type(np.linalg.svd(AR_i8, True, False), npt.NDArray[np.float64]) -assert_type(np.linalg.svd(AR_f8, compute_uv=False), npt.NDArray[np.floating]) -assert_type(np.linalg.svd(AR_c16, compute_uv=False), npt.NDArray[np.floating]) 
-assert_type(np.linalg.svd(AR_c16, True, False), npt.NDArray[np.floating]) +assert_type(np.linalg.svd(AR_f4), SVDResult[np.float32, np.float32]) +assert_type(np.linalg.svd(AR_f4, compute_uv=False), npt.NDArray[np.float32]) +assert_type(np.linalg.svd(AR_f8), SVDResult[np.float64, np.float64]) +assert_type(np.linalg.svd(AR_f8, compute_uv=False), npt.NDArray[np.float64]) +assert_type(np.linalg.svd(AR_c8), SVDResult[np.float32, np.complex64]) +assert_type(np.linalg.svd(AR_c8, compute_uv=False), npt.NDArray[np.float32]) +assert_type(np.linalg.svd(AR_c16), SVDResult[np.float64, np.complex128]) +assert_type(np.linalg.svd(AR_c16, compute_uv=False), npt.NDArray[np.float64]) +assert_type(np.linalg.svd(int_list_2d), SVDResult[np.float64, np.float64]) +assert_type(np.linalg.svd(int_list_2d, compute_uv=False), npt.NDArray[np.float64]) +assert_type(np.linalg.svd(float_list_2d), SVDResult[np.float64, np.float64]) +assert_type(np.linalg.svd(float_list_2d, compute_uv=False), npt.NDArray[np.float64]) +assert_type(np.linalg.svd(complex_list_2d), SVDResult[np.float64, np.complex128]) +assert_type(np.linalg.svd(complex_list_2d, compute_uv=False), npt.NDArray[np.float64]) +# Mypy bug: `Expression is of type "SVDResult[Any, Any]", not "SVDResult[Any, Any]"` +assert_type(np.linalg.svd(AR_any), SVDResult[Any]) # type: ignore[assert-type] +# Mypy bug: `Expression is of type "ndarray[Any, Any]", not "ndarray[tuple[Any, ...], dtype[Any]]"` +assert_type(np.linalg.svd(AR_any, compute_uv=False), npt.NDArray[Any]) # type: ignore[assert-type] assert_type(np.linalg.svdvals(AR_b), npt.NDArray[np.float64]) assert_type(np.linalg.svdvals(AR_i8), npt.NDArray[np.float64]) assert_type(np.linalg.svdvals(AR_f4), npt.NDArray[np.float32]) -assert_type(np.linalg.svdvals(AR_c8), npt.NDArray[np.float32]) assert_type(np.linalg.svdvals(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(AR_c8), npt.NDArray[np.float32]) assert_type(np.linalg.svdvals(AR_c16), npt.NDArray[np.float64]) assert_type(np.linalg.svdvals(int_list_2d), npt.NDArray[np.float64]) assert_type(np.linalg.svdvals(float_list_2d), npt.NDArray[np.float64]) From 86c7f6e86467169a578db8676f45ceaa7110aaea Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 9 Dec 2025 17:55:18 +0100 Subject: [PATCH 0990/1018] TYP: `linalg`: optionally generic `SlogdetResult` --- numpy/linalg/_linalg.pyi | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index e5293b6c965d..c5ac21745c31 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -96,13 +96,15 @@ type _NegInt = L[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -1 type _tuple2[T] = tuple[T, T] _FloatingT_co = TypeVar("_FloatingT_co", bound=np.floating, default=Any, covariant=True) +_FloatingOrArrayT_co = TypeVar("_FloatingOrArrayT_co", bound=np.floating | NDArray[np.floating], default=Any, covariant=True) _InexactT_co = TypeVar("_InexactT_co", bound=np.inexact, default=Any, covariant=True) +_InexactOrArrayT_co = TypeVar("_InexactOrArrayT_co", bound=np.inexact | NDArray[np.inexact], default=Any, covariant=True) ### fortran_int = np.intc -# NOTE: These names tuples are only generic when `typing.TYPE_CHECKING`. 
+# NOTE: These named tuple types are only generic when `typing.TYPE_CHECKING` class EigResult(NamedTuple, Generic[_InexactT_co]): eigenvalues: NDArray[_InexactT_co] @@ -121,10 +123,9 @@ class SVDResult(NamedTuple, Generic[_FloatingT_co, _InexactT_co]): S: NDArray[_FloatingT_co] Vh: NDArray[_InexactT_co] -# TODO: Make generic -class SlogdetResult(NamedTuple): - sign: Any - logabsdet: Any +class SlogdetResult(NamedTuple, Generic[_FloatingOrArrayT_co, _InexactOrArrayT_co]): + sign: _FloatingOrArrayT_co + logabsdet: _InexactOrArrayT_co # TODO: narrow return types @overload @@ -398,10 +399,6 @@ def svdvals(x: _ToArrayC128, /) -> NDArray[np.float64]: ... @overload # fallback def svdvals(a: _ArrayLikeComplex_co, /) -> NDArray[np.floating]: ... -# TODO: Returns a scalar for 2D arrays and -# a `(x.ndim - 2)`` dimensional array otherwise -def cond(x: _ArrayLikeComplex_co, p: float | L["fro", "nuc"] | None = None) -> Any: ... - # TODO: Returns `int` for <2D arrays and `intp` otherwise def matrix_rank( A: _ArrayLikeComplex_co, @@ -411,6 +408,10 @@ def matrix_rank( rtol: _ArrayLikeFloat_co | None = None, ) -> Any: ... +# TODO: Returns a scalar for 2D arrays and +# a `(x.ndim - 2)`` dimensional array otherwise +def cond(x: _ArrayLikeComplex_co, p: float | L["fro", "nuc"] | None = None) -> Any: ... + # TODO: Returns a 2-tuple of scalars for 2D arrays and # a 2-tuple of `(a.ndim - 2)`` dimensional arrays otherwise def slogdet(a: _ArrayLikeComplex_co) -> SlogdetResult: ... From f1805ba20cc805c6da8436003f691b699f9dff5c Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 9 Dec 2025 18:23:23 +0100 Subject: [PATCH 0991/1018] TYP: `linalg`: improved `matrix_rank` return (shape-)types --- numpy/linalg/_linalg.pyi | 55 ++++++++++++++++++++--- numpy/typing/tests/data/reveal/linalg.pyi | 27 +++++++++-- 2 files changed, 72 insertions(+), 10 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index c5ac21745c31..73602fcdb29d 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -1,4 +1,4 @@ -from collections.abc import Iterable +from collections.abc import Iterable, Sequence from typing import ( Any, Generic, @@ -77,8 +77,10 @@ __all__ = [ "vecdot", ] -type _ModeKind = L["reduced", "complete", "r", "raw"] -type _SideKind = L["L", "U", "l", "u"] +type _AtMost1D = tuple[()] | tuple[int] +type _AtLeast4D = tuple[int, int, int, int, *tuple[int, ...]] + +type _tuple2[T] = tuple[T, T] type _inexact32 = np.float32 | np.complex64 @@ -86,15 +88,13 @@ type _inexact32 = np.float32 | np.complex64 type _ToArrayF64 = _ArrayLike[np.float64 | np.integer | np.bool] | _NestedSequence[float] type _ToArrayC128 = _ArrayLike[np.complex128 | np.float64 | np.integer | np.bool] | _NestedSequence[complex] # the invariant `list` type avoids overlap with bool, int, etc -type _AsArrayInt = _ArrayLike[np.int_] | list[int] | _NestedSequence[list[int]] type _AsArrayF64 = _ArrayLike[np.float64] | list[float] | _NestedSequence[list[float]] type _AsArrayC128 = _ArrayLike[np.complex128] | list[complex] | _NestedSequence[list[complex]] +type _SideKind = L["L", "U", "l", "u"] type _PosInt = L[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] type _NegInt = L[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16] -type _tuple2[T] = tuple[T, T] - _FloatingT_co = TypeVar("_FloatingT_co", bound=np.floating, default=Any, covariant=True) _FloatingOrArrayT_co = TypeVar("_FloatingOrArrayT_co", bound=np.floating | NDArray[np.floating], default=Any, covariant=True) _InexactT_co = 
TypeVar("_InexactT_co", bound=np.inexact, default=Any, covariant=True) @@ -399,7 +399,48 @@ def svdvals(x: _ToArrayC128, /) -> NDArray[np.float64]: ... @overload # fallback def svdvals(a: _ArrayLikeComplex_co, /) -> NDArray[np.floating]: ... -# TODO: Returns `int` for <2D arrays and `intp` otherwise +# +@overload # workaround for microsoft/pyright#10232 +def matrix_rank( + A: np.ndarray[tuple[Never, ...], np.dtype[np.number]], + tol: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | None = None, +) -> Any: ... +@overload # <2d +def matrix_rank( + A: complex | np.number | Sequence[complex | np.number] | np.ndarray[_AtMost1D, np.dtype[np.number]], + tol: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | None = None, +) -> L[0, 1]: ... +@overload # =2d +def matrix_rank( + A: Sequence[Sequence[complex | np.number]] | np.ndarray[tuple[int, int], np.dtype[np.number]], + tol: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | None = None, +) -> np.int_: ... +@overload # =3d +def matrix_rank( + A: Sequence[Sequence[Sequence[complex | np.number]]] | np.ndarray[tuple[int, int, int], np.dtype[np.number]], + tol: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | None = None, +) -> np.ndarray[tuple[int], np.dtype[np.int_]]: ... +@overload # ≥4d +def matrix_rank( + A: Sequence[Sequence[Sequence[_NestedSequence[complex | np.number]]]] | np.ndarray[_AtLeast4D, np.dtype[np.number]], + tol: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.int_]: ... +@overload # ?d def matrix_rank( A: _ArrayLikeComplex_co, tol: _ArrayLikeFloat_co | None = None, diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 5401e01aad31..7ffb8df594ad 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -1,4 +1,4 @@ -from typing import Any, assert_type +from typing import Any, Literal, assert_type import numpy as np import numpy.typing as npt @@ -11,15 +11,18 @@ from numpy.linalg._linalg import ( ) int_list_2d: list[list[int]] +float_list_1d: list[float] float_list_2d: list[list[float]] +float_list_3d: list[list[list[float]]] +float_list_4d: list[list[list[list[float]]]] complex_list_2d: list[list[complex]] AR_any: np.ndarray -AR_i8: npt.NDArray[np.int64] AR_f_: npt.NDArray[np.floating] +AR_c_: npt.NDArray[np.complexfloating] +AR_i8: npt.NDArray[np.int64] AR_f4: npt.NDArray[np.float32] AR_f8: npt.NDArray[np.float64] -AR_c_: npt.NDArray[np.complexfloating] AR_c8: npt.NDArray[np.complex64] AR_c16: npt.NDArray[np.complex128] AR_O: npt.NDArray[np.object_] @@ -27,6 +30,15 @@ AR_m: npt.NDArray[np.timedelta64] AR_S: npt.NDArray[np.str_] AR_b: npt.NDArray[np.bool] +SC_f8: np.float64 +AR_f8_0d: np.ndarray[tuple[()], np.dtype[np.float64]] +AR_f8_1d: np.ndarray[tuple[int], np.dtype[np.float64]] +AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] +AR_f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] +AR_f8_4d: np.ndarray[tuple[int, int, int, int], np.dtype[np.float64]] + +### + assert_type(np.linalg.tensorsolve(AR_i8, AR_i8), npt.NDArray[np.float64]) assert_type(np.linalg.tensorsolve(AR_i8, AR_f8), npt.NDArray[np.floating]) assert_type(np.linalg.tensorsolve(AR_c16, AR_f8), npt.NDArray[np.complexfloating]) @@ -147,6 +159,15 @@ assert_type(np.linalg.cond(AR_c16), Any) 
assert_type(np.linalg.matrix_rank(AR_i8), Any) assert_type(np.linalg.matrix_rank(AR_f8), Any) assert_type(np.linalg.matrix_rank(AR_c16), Any) +assert_type(np.linalg.matrix_rank(SC_f8), Literal[0, 1]) +assert_type(np.linalg.matrix_rank(AR_f8_1d), Literal[0, 1]) +assert_type(np.linalg.matrix_rank(float_list_1d), Literal[0, 1]) +assert_type(np.linalg.matrix_rank(AR_f8_2d), np.int_) +assert_type(np.linalg.matrix_rank(float_list_2d), np.int_) +assert_type(np.linalg.matrix_rank(AR_f8_3d), np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.linalg.matrix_rank(float_list_3d), np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.linalg.matrix_rank(AR_f8_4d), npt.NDArray[np.int_]) +assert_type(np.linalg.matrix_rank(float_list_4d), npt.NDArray[np.int_]) assert_type(np.linalg.pinv(AR_i8), npt.NDArray[np.float64]) assert_type(np.linalg.pinv(AR_f8), npt.NDArray[np.float64]) From eb2b304fe8a355d8f4d980667af0aec73bb6c958 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 9 Dec 2025 18:46:53 +0100 Subject: [PATCH 0992/1018] TYP: `linalg`: improved `cond` return types (shape-type-dependent) --- numpy/linalg/_linalg.pyi | 37 ++++++++++++++++++----- numpy/typing/tests/data/fail/linalg.pyi | 2 +- numpy/typing/tests/data/reveal/linalg.pyi | 21 ++++++++----- 3 files changed, 44 insertions(+), 16 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 73602fcdb29d..46927bc7b044 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -78,19 +78,26 @@ __all__ = [ ] type _AtMost1D = tuple[()] | tuple[int] +type _AtLeast3D = tuple[int, int, int, *tuple[int, ...]] type _AtLeast4D = tuple[int, int, int, int, *tuple[int, ...]] +type _JustAnyShape = tuple[Never, ...] # workaround for microsoft/pyright#10232 type _tuple2[T] = tuple[T, T] type _inexact32 = np.float32 | np.complex64 +type _to_inexact64 = np.complex128 | np.float64 | np.integer | np.bool # anything that safe-casts (from floating) into float64/complex128 type _ToArrayF64 = _ArrayLike[np.float64 | np.integer | np.bool] | _NestedSequence[float] -type _ToArrayC128 = _ArrayLike[np.complex128 | np.float64 | np.integer | np.bool] | _NestedSequence[complex] +type _ToArrayC128 = _ArrayLike[_to_inexact64] | _NestedSequence[complex] # the invariant `list` type avoids overlap with bool, int, etc type _AsArrayF64 = _ArrayLike[np.float64] | list[float] | _NestedSequence[list[float]] type _AsArrayC128 = _ArrayLike[np.complex128] | list[complex] | _NestedSequence[list[complex]] +type _ToArrayC128_2d = np.ndarray[tuple[int, int], np.dtype[_to_inexact64]] | Sequence[Sequence[complex]] +type _ToArrayC128_3nd = np.ndarray[_AtLeast3D, np.dtype[_to_inexact64]] | Sequence[Sequence[_NestedSequence[complex]]] + +type _OrderKind = L[1, -1, 2, -2, "fro", "nuc"] | float # only accepts `-inf` and `inf` as `float` type _SideKind = L["L", "U", "l", "u"] type _PosInt = L[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] type _NegInt = L[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16] @@ -410,7 +417,7 @@ def matrix_rank( ) -> Any: ... @overload # <2d def matrix_rank( - A: complex | np.number | Sequence[complex | np.number] | np.ndarray[_AtMost1D, np.dtype[np.number]], + A: complex | Sequence[complex] | np.ndarray[_AtMost1D, np.dtype[np.number]], tol: _ArrayLikeFloat_co | None = None, hermitian: bool = False, *, @@ -418,7 +425,7 @@ def matrix_rank( ) -> L[0, 1]: ... 
@overload # =2d def matrix_rank( - A: Sequence[Sequence[complex | np.number]] | np.ndarray[tuple[int, int], np.dtype[np.number]], + A: Sequence[Sequence[complex]] | np.ndarray[tuple[int, int], np.dtype[np.number]], tol: _ArrayLikeFloat_co | None = None, hermitian: bool = False, *, @@ -426,7 +433,7 @@ def matrix_rank( ) -> np.int_: ... @overload # =3d def matrix_rank( - A: Sequence[Sequence[Sequence[complex | np.number]]] | np.ndarray[tuple[int, int, int], np.dtype[np.number]], + A: Sequence[Sequence[Sequence[complex]]] | np.ndarray[tuple[int, int, int], np.dtype[np.number]], tol: _ArrayLikeFloat_co | None = None, hermitian: bool = False, *, @@ -434,7 +441,7 @@ def matrix_rank( ) -> np.ndarray[tuple[int], np.dtype[np.int_]]: ... @overload # ≥4d def matrix_rank( - A: Sequence[Sequence[Sequence[_NestedSequence[complex | np.number]]]] | np.ndarray[_AtLeast4D, np.dtype[np.number]], + A: Sequence[Sequence[Sequence[_NestedSequence[complex]]]] | np.ndarray[_AtLeast4D, np.dtype[np.number]], tol: _ArrayLikeFloat_co | None = None, hermitian: bool = False, *, @@ -449,9 +456,23 @@ def matrix_rank( rtol: _ArrayLikeFloat_co | None = None, ) -> Any: ... -# TODO: Returns a scalar for 2D arrays and -# a `(x.ndim - 2)`` dimensional array otherwise -def cond(x: _ArrayLikeComplex_co, p: float | L["fro", "nuc"] | None = None) -> Any: ... +# +@overload # workaround for microsoft/pyright#10232 +def cond(x: np.ndarray[_JustAnyShape, np.dtype[np.number]], p: _OrderKind | None = None) -> Any: ... +@overload # 2d ~inexact32 +def cond(x: np.ndarray[tuple[int, int], np.dtype[_inexact32]], p: _OrderKind | None = None) -> np.float32: ... +@overload # 2d +inexact64 +def cond(x: _ToArrayC128_2d, p: _OrderKind | None = None) -> np.float64: ... +@overload # 2d ~number +def cond(x: np.ndarray[tuple[int, int], np.dtype[np.number]], p: _OrderKind | None = None) -> np.floating: ... +@overload # >2d ~inexact32 +def cond(x: np.ndarray[_AtLeast3D, np.dtype[_inexact32]], p: _OrderKind | None = None) -> NDArray[np.float32]: ... +@overload # >2d +inexact64 +def cond(x: _ToArrayC128_3nd, p: _OrderKind | None = None) -> NDArray[np.float64]: ... +@overload # >2d ~number +def cond(x: np.ndarray[_AtLeast3D, np.dtype[np.number]], p: _OrderKind | None = None) -> NDArray[np.floating]: ... +@overload # fallback +def cond(x: _ArrayLikeComplex_co, p: _OrderKind | None = None) -> Any: ... 
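# [Editor's aside; illustrative only, not part of the patch.] A sketch of the
# shape-dependent `cond` results these overloads encode, assuming explicitly
# shape-annotated arrays with hypothetical names, mirroring the reveal tests
# added below:
#
#     m2: np.ndarray[tuple[int, int], np.dtype[np.float64]]       # a single matrix
#     m3: np.ndarray[tuple[int, int, int], np.dtype[np.float64]]  # a stack of matrices
#     reveal_type(np.linalg.cond(m2))  # np.float64 -- a scalar condition number
#     reveal_type(np.linalg.cond(m3))  # npt.NDArray[np.float64] -- one value per stacked matrix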
# TODO: Returns a 2-tuple of scalars for 2D arrays and # a 2-tuple of `(a.ndim - 2)`` dimensional arrays otherwise diff --git a/numpy/typing/tests/data/fail/linalg.pyi b/numpy/typing/tests/data/fail/linalg.pyi index 426c80555d9b..ec79af1d90e0 100644 --- a/numpy/typing/tests/data/fail/linalg.pyi +++ b/numpy/typing/tests/data/fail/linalg.pyi @@ -36,7 +36,7 @@ np.linalg.svdvals(AR_M) # type: ignore[arg-type] np.linalg.svdvals(x=AR_f8) # type: ignore[call-overload] np.linalg.cond(AR_O) # type: ignore[arg-type] -np.linalg.cond(AR_f8, p="bob") # type: ignore[arg-type] +np.linalg.cond(AR_f8, p="bob") # type: ignore[call-overload] np.linalg.matrix_rank(AR_O) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 7ffb8df594ad..c8c281cee77c 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -37,6 +37,9 @@ AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] AR_f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] AR_f8_4d: np.ndarray[tuple[int, int, int, int], np.dtype[np.float64]] +AR_f4_2d: np.ndarray[tuple[int, int], np.dtype[np.float32]] +AR_f4_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float32]] + ### assert_type(np.linalg.tensorsolve(AR_i8, AR_i8), npt.NDArray[np.float64]) @@ -55,6 +58,10 @@ assert_type(np.linalg.inv(AR_i8), npt.NDArray[np.float64]) assert_type(np.linalg.inv(AR_f8), npt.NDArray[np.float64]) assert_type(np.linalg.inv(AR_c16), npt.NDArray[np.complex128]) +assert_type(np.linalg.pinv(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.pinv(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.pinv(AR_c16), npt.NDArray[np.complex128]) + assert_type(np.linalg.matrix_power(AR_i8, -1), npt.NDArray[np.float64]) assert_type(np.linalg.matrix_power(AR_i8, 1), npt.NDArray[np.int64]) assert_type(np.linalg.matrix_power(AR_f8, 0), npt.NDArray[np.float64]) @@ -152,10 +159,6 @@ assert_type(np.linalg.svdvals(int_list_2d), npt.NDArray[np.float64]) assert_type(np.linalg.svdvals(float_list_2d), npt.NDArray[np.float64]) assert_type(np.linalg.svdvals(complex_list_2d), npt.NDArray[np.float64]) -assert_type(np.linalg.cond(AR_i8), Any) -assert_type(np.linalg.cond(AR_f8), Any) -assert_type(np.linalg.cond(AR_c16), Any) - assert_type(np.linalg.matrix_rank(AR_i8), Any) assert_type(np.linalg.matrix_rank(AR_f8), Any) assert_type(np.linalg.matrix_rank(AR_c16), Any) @@ -169,9 +172,13 @@ assert_type(np.linalg.matrix_rank(float_list_3d), np.ndarray[tuple[int], np.dtyp assert_type(np.linalg.matrix_rank(AR_f8_4d), npt.NDArray[np.int_]) assert_type(np.linalg.matrix_rank(float_list_4d), npt.NDArray[np.int_]) -assert_type(np.linalg.pinv(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.pinv(AR_f8), npt.NDArray[np.float64]) -assert_type(np.linalg.pinv(AR_c16), npt.NDArray[np.complex128]) +assert_type(np.linalg.cond(AR_i8), Any) +assert_type(np.linalg.cond(AR_f8), Any) +assert_type(np.linalg.cond(AR_c16), Any) +assert_type(np.linalg.cond(AR_f4_2d), np.float32) +assert_type(np.linalg.cond(AR_f8_2d), np.float64) +assert_type(np.linalg.cond(AR_f4_3d), npt.NDArray[np.float32]) +assert_type(np.linalg.cond(AR_f8_3d), npt.NDArray[np.float64]) assert_type(np.linalg.slogdet(AR_i8), SlogdetResult) assert_type(np.linalg.slogdet(AR_f8), SlogdetResult) From 884494b757ed91153cad58ca7440e344f388cc64 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 9 Dec 2025 19:08:11 +0100 Subject: [PATCH 0993/1018] TYP: `linalg`: improved `det` and `slogdet` return types 
(shape-type-dependent) --- numpy/linalg/_linalg.pyi | 62 +++++++++++++++++------ numpy/typing/tests/data/fail/linalg.pyi | 5 +- numpy/typing/tests/data/reveal/linalg.pyi | 13 +++++ 3 files changed, 62 insertions(+), 18 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 46927bc7b044..f54ed43662d6 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -85,17 +85,23 @@ type _JustAnyShape = tuple[Never, ...] # workaround for microsoft/pyright#10232 type _tuple2[T] = tuple[T, T] type _inexact32 = np.float32 | np.complex64 -type _to_inexact64 = np.complex128 | np.float64 | np.integer | np.bool +type _to_float64 = np.float64 | np.integer | np.bool +type _to_inexact64 = np.complex128 | _to_float64 + +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +type _Array3ND[ScalarT: np.generic] = np.ndarray[_AtLeast3D, np.dtype[ScalarT]] # anything that safe-casts (from floating) into float64/complex128 -type _ToArrayF64 = _ArrayLike[np.float64 | np.integer | np.bool] | _NestedSequence[float] +type _ToArrayF64 = _ArrayLike[_to_float64] | _NestedSequence[float] type _ToArrayC128 = _ArrayLike[_to_inexact64] | _NestedSequence[complex] # the invariant `list` type avoids overlap with bool, int, etc type _AsArrayF64 = _ArrayLike[np.float64] | list[float] | _NestedSequence[list[float]] type _AsArrayC128 = _ArrayLike[np.complex128] | list[complex] | _NestedSequence[list[complex]] -type _ToArrayC128_2d = np.ndarray[tuple[int, int], np.dtype[_to_inexact64]] | Sequence[Sequence[complex]] -type _ToArrayC128_3nd = np.ndarray[_AtLeast3D, np.dtype[_to_inexact64]] | Sequence[Sequence[_NestedSequence[complex]]] +type _ToArrayF64_2d = _Array2D[_to_float64] | Sequence[Sequence[float]] +type _ToArrayF64_3nd = _Array3ND[_to_float64] | Sequence[Sequence[_NestedSequence[float]]] +type _ToArrayC128_2d = _Array2D[_to_inexact64] | Sequence[Sequence[complex]] +type _ToArrayC128_3nd = _Array3ND[_to_inexact64] | Sequence[Sequence[_NestedSequence[complex]]] type _OrderKind = L[1, -1, 2, -2, "fro", "nuc"] | float # only accepts `-inf` and `inf` as `float` type _SideKind = L["L", "U", "l", "u"] @@ -172,7 +178,6 @@ def solve( ) -> NDArray[complexfloating]: ... # keep in sync with the other inverse functions and cholesky -# TODO: transparent shape types @overload # inexact32 def tensorinv[ScalarT: _inexact32](a: _ArrayLike[ScalarT], ind: int = 2) -> NDArray[ScalarT]: ... @overload # +float64 @@ -193,7 +198,6 @@ def inv(a: _AsArrayC128) -> NDArray[np.complex128]: ... def inv(a: _ArrayLikeComplex_co) -> np.ndarray: ... # keep in sync with the other inverse functions and cholesky -# TODO: transparent shape types @overload # inexact32 def pinv[ScalarT: _inexact32]( a: _ArrayLike[ScalarT], @@ -228,7 +232,6 @@ def pinv( ) -> NDArray[Any]: ... # keep in sync with the inverse functions -# TODO: transparent shape types @overload # inexact32 def cholesky[ScalarT: _inexact32](a: _ArrayLike[ScalarT], /, *, upper: bool = False) -> NDArray[ScalarT]: ... @overload # +float64 @@ -240,7 +243,6 @@ def cholesky(a: _ArrayLikeComplex_co, /, *, upper: bool = False) -> np.ndarray: # NOTE: Technically this also accepts boolean array-likes, but that case is not very useful, so we skip it. # If you have a use case for it, please open an issue. -# TODO: transparent shape types @overload # +int, n ≥ 0 def matrix_power(a: _NestedSequence[int], n: _PosInt) -> NDArray[np.int_]: ... @overload # +integer | ~object, n ≥ 0 @@ -425,7 +427,7 @@ def matrix_rank( ) -> L[0, 1]: ... 
@overload # =2d def matrix_rank( - A: Sequence[Sequence[complex]] | np.ndarray[tuple[int, int], np.dtype[np.number]], + A: Sequence[Sequence[complex]] | _Array2D[np.number], tol: _ArrayLikeFloat_co | None = None, hermitian: bool = False, *, @@ -460,11 +462,11 @@ def matrix_rank( @overload # workaround for microsoft/pyright#10232 def cond(x: np.ndarray[_JustAnyShape, np.dtype[np.number]], p: _OrderKind | None = None) -> Any: ... @overload # 2d ~inexact32 -def cond(x: np.ndarray[tuple[int, int], np.dtype[_inexact32]], p: _OrderKind | None = None) -> np.float32: ... +def cond(x: _Array2D[_inexact32], p: _OrderKind | None = None) -> np.float32: ... @overload # 2d +inexact64 def cond(x: _ToArrayC128_2d, p: _OrderKind | None = None) -> np.float64: ... @overload # 2d ~number -def cond(x: np.ndarray[tuple[int, int], np.dtype[np.number]], p: _OrderKind | None = None) -> np.floating: ... +def cond(x: _Array2D[np.number], p: _OrderKind | None = None) -> np.floating: ... @overload # >2d ~inexact32 def cond(x: np.ndarray[_AtLeast3D, np.dtype[_inexact32]], p: _OrderKind | None = None) -> NDArray[np.float32]: ... @overload # >2d +inexact64 @@ -474,12 +476,42 @@ def cond(x: np.ndarray[_AtLeast3D, np.dtype[np.number]], p: _OrderKind | None = @overload # fallback def cond(x: _ArrayLikeComplex_co, p: _OrderKind | None = None) -> Any: ... -# TODO: Returns a 2-tuple of scalars for 2D arrays and -# a 2-tuple of `(a.ndim - 2)`` dimensional arrays otherwise +# keep in sync with `det` +@overload # workaround for microsoft/pyright#10232 +def slogdet(a: np.ndarray[_JustAnyShape, np.dtype[np.number]]) -> SlogdetResult: ... +@overload # 2d ~inexact32 +def slogdet[ScalarT: _inexact32](a: _Array2D[ScalarT]) -> SlogdetResult[np.float32, ScalarT]: ... +@overload # >2d ~inexact32 +def slogdet[ScalarT: _inexact32](a: _Array3ND[ScalarT]) -> SlogdetResult[NDArray[np.float32], NDArray[ScalarT]]: ... +@overload # 2d +float64 +def slogdet(a: _Array2D[_to_float64]) -> SlogdetResult[np.float64, np.float64]: ... +@overload # >2d +float64 +def slogdet(a: _Array3ND[_to_float64]) -> SlogdetResult[NDArray[np.float64], NDArray[np.float64]]: ... +@overload # 2d ~complex128 +def slogdet(a: _Array2D[np.complex128] | Sequence[list[complex]]) -> SlogdetResult[np.float64, np.complex128]: ... +@overload # >2d ~complex128 +def slogdet( + a: _Array3ND[np.complex128] | _NestedSequence[Sequence[list[complex]]] +) -> SlogdetResult[NDArray[np.float64], NDArray[np.complex128]]: ... +@overload # fallback def slogdet(a: _ArrayLikeComplex_co) -> SlogdetResult: ... -# TODO: Returns a 2-tuple of scalars for 2D arrays and -# a 2-tuple of `(a.ndim - 2)`` dimensional arrays otherwise +# keep in sync with `slogdet` +@overload # workaround for microsoft/pyright#10232 +def det(a: np.ndarray[_JustAnyShape, np.dtype[np.number]]) -> Any: ... +@overload # 2d ~inexact32 +def det[ScalarT: _inexact32](a: _Array2D[ScalarT]) -> ScalarT: ... +@overload # >2d ~inexact32 +def det[ScalarT: _inexact32](a: _Array3ND[ScalarT]) -> NDArray[ScalarT]: ... +@overload # 2d +float64 +def det(a: _Array2D[_to_float64]) -> np.float64: ... +@overload # >2d +float64 +def det(a: _Array3ND[_to_float64]) -> NDArray[np.float64]: ... +@overload # 2d ~complex128 +def det(a: _Array2D[np.complex128] | Sequence[list[complex]]) -> np.complex128: ... +@overload # >2d ~complex128 +def det(a: _Array3ND[np.complex128] | _NestedSequence[Sequence[list[complex]]]) -> NDArray[np.complex128]: ... +@overload # fallback def det(a: _ArrayLikeComplex_co) -> Any: ... 
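# A sketch of what these new overloads let a type checker infer (it mirrors
# the reveal tests added further below; not part of the stub itself):
#
#     a2: np.ndarray[tuple[int, int], np.dtype[np.float32]]
#     reveal_type(np.linalg.det(a2))      # -> float32
#     reveal_type(np.linalg.slogdet(a2))  # -> SlogdetResult[float32, float32]
#
#     a3: np.ndarray[tuple[int, int, int], np.dtype[np.float32]]
#     reveal_type(np.linalg.det(a3))      # -> ndarray[..., dtype[float32]]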
# TODO: narrow return types diff --git a/numpy/typing/tests/data/fail/linalg.pyi b/numpy/typing/tests/data/fail/linalg.pyi index ec79af1d90e0..eda82c48c85a 100644 --- a/numpy/typing/tests/data/fail/linalg.pyi +++ b/numpy/typing/tests/data/fail/linalg.pyi @@ -35,13 +35,12 @@ np.linalg.svdvals(AR_O) # type: ignore[arg-type] np.linalg.svdvals(AR_M) # type: ignore[arg-type] np.linalg.svdvals(x=AR_f8) # type: ignore[call-overload] +np.linalg.matrix_rank(AR_O) # type: ignore[arg-type] + np.linalg.cond(AR_O) # type: ignore[arg-type] np.linalg.cond(AR_f8, p="bob") # type: ignore[call-overload] -np.linalg.matrix_rank(AR_O) # type: ignore[arg-type] - np.linalg.slogdet(AR_O) # type: ignore[arg-type] - np.linalg.det(AR_O) # type: ignore[arg-type] np.linalg.norm(AR_f8, ord="bob") # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index c8c281cee77c..d638166df386 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -16,6 +16,7 @@ float_list_2d: list[list[float]] float_list_3d: list[list[list[float]]] float_list_4d: list[list[list[list[float]]]] complex_list_2d: list[list[complex]] +complex_list_3d: list[list[list[complex]]] AR_any: np.ndarray AR_f_: npt.NDArray[np.floating] @@ -183,10 +184,22 @@ assert_type(np.linalg.cond(AR_f8_3d), npt.NDArray[np.float64]) assert_type(np.linalg.slogdet(AR_i8), SlogdetResult) assert_type(np.linalg.slogdet(AR_f8), SlogdetResult) assert_type(np.linalg.slogdet(AR_c16), SlogdetResult) +assert_type(np.linalg.slogdet(AR_f4_2d), SlogdetResult[np.float32, np.float32]) +assert_type(np.linalg.slogdet(AR_f8_2d), SlogdetResult[np.float64, np.float64]) +assert_type(np.linalg.slogdet(AR_f4_3d), SlogdetResult[npt.NDArray[np.float32], npt.NDArray[np.float32]]) +assert_type(np.linalg.slogdet(AR_f8_3d), SlogdetResult[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.linalg.slogdet(complex_list_2d), SlogdetResult[np.float64, np.complex128]) +assert_type(np.linalg.slogdet(complex_list_3d), SlogdetResult[npt.NDArray[np.float64], npt.NDArray[np.complex128]]) assert_type(np.linalg.det(AR_i8), Any) assert_type(np.linalg.det(AR_f8), Any) assert_type(np.linalg.det(AR_c16), Any) +assert_type(np.linalg.det(AR_f4_2d), np.float32) +assert_type(np.linalg.det(AR_f8_2d), np.float64) +assert_type(np.linalg.det(AR_f4_3d), npt.NDArray[np.float32]) +assert_type(np.linalg.det(AR_f8_3d), npt.NDArray[np.float64]) +assert_type(np.linalg.det(complex_list_2d), np.complex128) +assert_type(np.linalg.det(complex_list_3d), npt.NDArray[np.complex128]) assert_type(np.linalg.lstsq(AR_i8, AR_i8), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64], np.int32, npt.NDArray[np.float64]]) assert_type(np.linalg.lstsq(AR_i8, AR_f8), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating], np.int32, npt.NDArray[np.floating]]) From 6715fe9c65be186df907a347ff25db20dcc3fb1c Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 9 Dec 2025 19:53:23 +0100 Subject: [PATCH 0994/1018] TYP: apply copilot's nits --- numpy/linalg/_linalg.pyi | 10 +++++----- numpy/typing/tests/data/reveal/linalg.pyi | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index f54ed43662d6..38926e84d0a6 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -105,7 +105,7 @@ type _ToArrayC128_3nd = _Array3ND[_to_inexact64] | Sequence[Sequence[_NestedSequ type _OrderKind = L[1, -1, 2, -2, "fro", "nuc"] | float # only accepts `-inf` 
and `inf` as `float` type _SideKind = L["L", "U", "l", "u"] -type _PosInt = L[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] +type _NonNegInt = L[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] type _NegInt = L[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16] _FloatingT_co = TypeVar("_FloatingT_co", bound=np.floating, default=Any, covariant=True) @@ -244,9 +244,9 @@ def cholesky(a: _ArrayLikeComplex_co, /, *, upper: bool = False) -> np.ndarray: # NOTE: Technically this also accepts boolean array-likes, but that case is not very useful, so we skip it. # If you have a use case for it, please open an issue. @overload # +int, n ≥ 0 -def matrix_power(a: _NestedSequence[int], n: _PosInt) -> NDArray[np.int_]: ... +def matrix_power(a: _NestedSequence[int], n: _NonNegInt) -> NDArray[np.int_]: ... @overload # +integer | ~object, n ≥ 0 -def matrix_power[ScalarT: np.integer | np.object_](a: _ArrayLike[ScalarT], n: _PosInt) -> NDArray[ScalarT]: ... +def matrix_power[ScalarT: np.integer | np.object_](a: _ArrayLike[ScalarT], n: _NonNegInt) -> NDArray[ScalarT]: ... @overload # +float64, n < 0 def matrix_power(a: _ToArrayF64, n: _NegInt) -> NDArray[np.float64]: ... @overload # ~float64 @@ -402,9 +402,9 @@ def eigvalsh(a: _ArrayLikeComplex_co, UPLO: _SideKind = "L") -> NDArray[np.float @overload # abstract `inexact` (excluding concrete types) def svdvals(a: NDArray[np.inexact[Never]], /) -> NDArray[np.floating]: ... @overload # ~inexact32 -def svdvals(x: _ArrayLike[_inexact32], /) -> NDArray[np.float32]: ... +def svdvals(a: _ArrayLike[_inexact32], /) -> NDArray[np.float32]: ... @overload # +complex128 -def svdvals(x: _ToArrayC128, /) -> NDArray[np.float64]: ... +def svdvals(a: _ToArrayC128, /) -> NDArray[np.float64]: ... @overload # fallback def svdvals(a: _ArrayLikeComplex_co, /) -> NDArray[np.floating]: ... 
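The `_PosInt` -> `_NonNegInt` rename matches the runtime behaviour the
overloads encode: ``matrix_power`` accepts ``n = 0`` (returning the identity),
so the bound is non-negative rather than strictly positive, while negative
powers go through the matrix inverse and therefore yield floats for integer
input. A minimal runtime sketch (plain public NumPy, independent of these
stub changes)::

    >>> import numpy as np
    >>> a = np.array([[1, 2], [3, 4]])
    >>> np.linalg.matrix_power(a, 0)         # n = 0 is valid, hence "non-negative"
    array([[1, 0],
           [0, 1]])
    >>> np.linalg.matrix_power(a, -1).dtype  # negative n inverts first
    dtype('float64')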
diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index d638166df386..a40aa84d9c05 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -146,7 +146,7 @@ assert_type(np.linalg.svd(float_list_2d, compute_uv=False), npt.NDArray[np.float assert_type(np.linalg.svd(complex_list_2d), SVDResult[np.float64, np.complex128]) assert_type(np.linalg.svd(complex_list_2d, compute_uv=False), npt.NDArray[np.float64]) # Mypy bug: `Expression is of type "SVDResult[Any, Any]", not "SVDResult[Any, Any]"` -assert_type(np.linalg.svd(AR_any), SVDResult[Any]) # type: ignore[assert-type] +assert_type(np.linalg.svd(AR_any), SVDResult[Any, Any]) # type: ignore[assert-type] # Mypy bug: `Expression is of type "ndarray[Any, Any]", not "ndarray[tuple[Any, ...], dtype[Any]]"` assert_type(np.linalg.svd(AR_any, compute_uv=False), npt.NDArray[Any]) # type: ignore[assert-type] From a64d310e17f67df3f7ef0383362c944eb8003e17 Mon Sep 17 00:00:00 2001 From: Maanas Arora Date: Wed, 10 Dec 2025 03:34:23 -0500 Subject: [PATCH 0995/1018] ENH,REF: New-style generic sorting ArrayMethod loops (#30328) * ENH,REF: New arraymethod-style default sorting loops * ENH: Implement single default sorting loops in npy_sort.c * STYLE: Add missing newline --- numpy/_core/meson.build | 1 + numpy/_core/src/common/npy_sort.c | 67 +++++++++++++++++++ numpy/_core/src/common/npy_sort.h.src | 13 ++++ numpy/_core/src/multiarray/item_selection.c | 2 + .../_core/src/multiarray/stringdtype/dtype.c | 20 ++---- 5 files changed, 89 insertions(+), 14 deletions(-) create mode 100644 numpy/_core/src/common/npy_sort.c diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index b5695c8f3cde..aa4da9c11146 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1121,6 +1121,7 @@ src_multiarray_umath_common = [ 'src/common/numpyos.c', 'src/common/npy_cpu_features.c', 'src/common/npy_cpu_dispatch.c', + 'src/common/npy_sort.c', src_file.process('src/common/templ_common.h.src') ] if have_blas diff --git a/numpy/_core/src/common/npy_sort.c b/numpy/_core/src/common/npy_sort.c new file mode 100644 index 000000000000..632962e884dd --- /dev/null +++ b/numpy/_core/src/common/npy_sort.c @@ -0,0 +1,67 @@ +#include +#include +#include +#include "npy_sort.h" +#include "dtypemeta.h" + +#ifdef __cplusplus +extern "C" { +#endif + +NPY_NO_EXPORT int +npy_default_sort_loop(PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata) +{ + PyArray_CompareFunc *cmp = (PyArray_CompareFunc *)context->method->static_data; + + PyArrayMethod_SortParameters *sort_params = + (PyArrayMethod_SortParameters *)context->parameters; + PyArray_SortImpl *sort_func = NULL; + + switch (sort_params->flags) { + case NPY_SORT_DEFAULT: + sort_func = npy_quicksort_impl; + break; + case NPY_SORT_STABLE: + sort_func = npy_mergesort_impl; + break; + default: + PyErr_SetString(PyExc_ValueError, "Invalid sort kind"); + return -1; + } + + return sort_func(data[0], dimensions[0], context, + context->descriptors[0]->elsize, cmp); +} + +NPY_NO_EXPORT int +npy_default_argsort_loop(PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata) +{ + PyArray_CompareFunc *cmp = (PyArray_CompareFunc *)context->method->static_data; + + PyArrayMethod_SortParameters *sort_params = + (PyArrayMethod_SortParameters *)context->parameters; + PyArray_ArgSortImpl 
*argsort_func = NULL; + + switch (sort_params->flags) { + case NPY_SORT_DEFAULT: + argsort_func = npy_aquicksort_impl; + break; + case NPY_SORT_STABLE: + argsort_func = npy_amergesort_impl; + break; + default: + PyErr_SetString(PyExc_ValueError, "Invalid sort kind"); + return -1; + } + + return argsort_func(data[0], (npy_intp *)data[1], dimensions[0], context, + context->descriptors[0]->elsize, cmp); +} + +#ifdef __cplusplus +} +#endif diff --git a/numpy/_core/src/common/npy_sort.h.src b/numpy/_core/src/common/npy_sort.h.src index 1f82b07659f4..95d6f9d1ee70 100644 --- a/numpy/_core/src/common/npy_sort.h.src +++ b/numpy/_core/src/common/npy_sort.h.src @@ -5,6 +5,7 @@ #include #include #include +#include #define NPY_ENOMEM 1 #define NPY_ECOMP 2 @@ -107,6 +108,18 @@ NPY_NO_EXPORT int npy_aheapsort(void *vec, npy_intp *ind, npy_intp cnt, void *ar NPY_NO_EXPORT int npy_amergesort(void *vec, npy_intp *ind, npy_intp cnt, void *arr); NPY_NO_EXPORT int npy_atimsort(void *vec, npy_intp *ind, npy_intp cnt, void *arr); +/* + ***************************************************************************** + ** NEW-STYLE GENERIC SORT ** + ***************************************************************************** + */ + +NPY_NO_EXPORT int npy_default_sort_loop(PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata); +NPY_NO_EXPORT int npy_default_argsort_loop(PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata); /* ***************************************************************************** diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index f9d753f9e7ba..ff100c3d9d5d 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -3179,6 +3179,7 @@ PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND flags) } context.descriptors = loop_descrs; context.parameters = &sort_params; + context.method = sort_method; // Arrays are always contiguous for sorting npy_intp strides[2] = {loop_descrs[0]->elsize, loop_descrs[1]->elsize}; @@ -3290,6 +3291,7 @@ PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND flags) } context.descriptors = loop_descrs; context.parameters = &sort_params; + context.method = argsort_method; // Arrays are always contiguous for sorting npy_intp strides[2] = {loop_descrs[0]->elsize, loop_descrs[1]->elsize}; diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index 85514ef15df6..77216d25d46f 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -692,14 +692,9 @@ stringdtype_wrap_sort_loop( { PyArray_StringDTypeObject *sdescr = (PyArray_StringDTypeObject *)context->descriptors[0]; - PyArray_SortImpl *sort_loop = - ((PyArrayMethod_SortParameters *)context->parameters)->flags - == NPY_SORT_STABLE ? 
&npy_mergesort_impl : &npy_quicksort_impl; npy_string_allocator *allocator = NpyString_acquire_allocator(sdescr); - int ret = sort_loop( - data[0], dimensions[0], context, - context->descriptors[0]->elsize, &_sort_compare); + int ret = npy_default_sort_loop(context, data, dimensions, strides, transferdata); NpyString_release_allocator(allocator); return ret; } @@ -737,14 +732,9 @@ stringdtype_wrap_argsort_loop( { PyArray_StringDTypeObject *sdescr = (PyArray_StringDTypeObject *)context->descriptors[0]; - PyArray_ArgSortImpl *argsort_loop = - ((PyArrayMethod_SortParameters *)context->parameters) - ->flags == NPY_SORT_STABLE ? &npy_amergesort_impl : &npy_aquicksort_impl; npy_string_allocator *allocator = NpyString_acquire_allocator(sdescr); - int ret = argsort_loop( - data[0], (npy_intp *)data[1], dimensions[0], context, - context->descriptors[0]->elsize, &_sort_compare); + int ret = npy_default_argsort_loop(context, data, dimensions, strides, transferdata); NpyString_release_allocator(allocator); return ret; } @@ -965,9 +955,10 @@ init_stringdtype_sorts(void) PyArray_DTypeMeta *stringdtype = &PyArray_StringDType; PyArray_DTypeMeta *sort_dtypes[2] = {stringdtype, stringdtype}; - PyType_Slot sort_slots[3] = { + PyType_Slot sort_slots[4] = { {NPY_METH_resolve_descriptors, &stringdtype_sort_resolve_descriptors}, {NPY_METH_get_loop, &stringdtype_get_sort_loop}, + {_NPY_METH_static_data, &_sort_compare}, {0, NULL} }; PyArrayMethod_Spec sort_spec = { @@ -989,8 +980,9 @@ init_stringdtype_sorts(void) Py_DECREF(sort_method); PyArray_DTypeMeta *argsort_dtypes[2] = {stringdtype, &PyArray_IntpDType}; - PyType_Slot argsort_slots[2] = { + PyType_Slot argsort_slots[3] = { {NPY_METH_get_loop, &stringdtype_get_argsort_loop}, + {_NPY_METH_static_data, &_sort_compare}, {0, NULL} }; PyArrayMethod_Spec argsort_spec = { From 5b62b47946c342f777c55ef445e00e76693a01f3 Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Wed, 10 Dec 2025 20:03:38 +0530 Subject: [PATCH 0996/1018] TST: skip deep recursion tests on TSAN and ASAN builds (#30410) --- numpy/_core/tests/test_dtype.py | 12 ++++-------- numpy/_core/tests/test_multiarray.py | 13 ++++++------- numpy/_core/tests/test_regression.py | 13 +++++++------ numpy/testing/_private/utils.py | 27 +++++++++++++++++++++++++++ 4 files changed, 44 insertions(+), 21 deletions(-) diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 72bcc3e22962..b1f965d5164b 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -21,13 +21,12 @@ HAS_REFCOUNT, IS_64BIT, IS_PYPY, - IS_PYSTON, - IS_WASM, assert_, assert_array_equal, assert_equal, assert_raises, ) +from numpy.testing._private.utils import requires_deep_recursion def assert_dtype_equal(a, b): @@ -978,16 +977,14 @@ def test1(self): ('yi', np.dtype((a, (3, 2))))]) assert_dtype_equal(c, d) - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") - @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + @requires_deep_recursion def test_list_recursion(self): l = [] l.append(('f', l)) with pytest.raises(RecursionError): np.dtype(l) - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") - @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + @requires_deep_recursion def test_tuple_recursion(self): d = np.int32 for i in range(100000): @@ -997,8 +994,7 @@ def test_tuple_recursion(self): with contextlib.suppress(RecursionError): np.dtype(d) - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables 
recursion checking") - @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + @requires_deep_recursion def test_dict_recursion(self): d = {"names": ['self'], "formats": [None], "offsets": [0]} d['formats'][0] = d diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index aa2dee8d58bb..11365d1d1efb 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -38,7 +38,6 @@ HAS_REFCOUNT, IS_64BIT, IS_PYPY, - IS_PYSTON, IS_WASM, assert_, assert_allclose, @@ -55,7 +54,11 @@ runstring, temppath, ) -from numpy.testing._private.utils import _no_tracing, requires_memory +from numpy.testing._private.utils import ( + _no_tracing, + requires_deep_recursion, + requires_memory, +) def assert_arg_sorted(arr, arg): @@ -9349,6 +9352,7 @@ def test_to_bool_scalar(self): assert_equal(bool(np.array([True])), True) assert_equal(bool(np.array([[42]])), True) + @requires_deep_recursion def test_to_bool_scalar_not_convertible(self): class NotConvertible: @@ -9357,11 +9361,6 @@ def __bool__(self): assert_raises(NotImplementedError, bool, np.array(NotConvertible())) assert_raises(NotImplementedError, bool, np.array([NotConvertible()])) - if IS_PYSTON: - pytest.skip("Pyston disables recursion checking") - if IS_WASM: - pytest.skip("Pyodide/WASM has limited stack size") - self_containing = np.array([None]) self_containing[0] = self_containing diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index dc457b2d5fc1..8844c461211a 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -18,7 +18,6 @@ HAS_REFCOUNT, IS_64BIT, IS_PYPY, - IS_PYSTON, IS_WASM, _assert_valid_refcount, assert_, @@ -29,7 +28,11 @@ assert_raises, assert_raises_regex, ) -from numpy.testing._private.utils import _no_tracing, requires_memory +from numpy.testing._private.utils import ( + _no_tracing, + requires_deep_recursion, + requires_memory, +) class TestRegression: @@ -1777,8 +1780,7 @@ def test_reduce_contiguous(self): assert_(a.flags.f_contiguous) assert_(b.flags.c_contiguous) - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") - @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + @requires_deep_recursion def test_object_array_self_reference(self): # Object arrays with references to themselves can cause problems a = np.array(0, dtype=object) @@ -1787,8 +1789,7 @@ def test_object_array_self_reference(self): assert_raises(RecursionError, float, a) a[()] = None - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") - @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + @requires_deep_recursion def test_object_array_circular_reference(self): # Test the same for a circular reference. 
a = np.array(0, dtype=object) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 8a89c464b49d..87d9f0394fb3 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -2830,3 +2830,30 @@ def run_threaded(func, max_workers=8, pass_count=False, barrier.abort() for f in futures: f.result() + + +def requires_deep_recursion(func): + """Decorator to skip test if deep recursion is not supported.""" + import pytest + + @wraps(func) + def wrapper(*args, **kwargs): + if IS_PYSTON: + pytest.skip("Pyston disables recursion checking") + if IS_WASM: + pytest.skip("WASM has limited stack size") + cflags = sysconfig.get_config_var('CFLAGS') or '' + config_args = sysconfig.get_config_var('CONFIG_ARGS') or '' + address_sanitizer = ( + '-fsanitize=address' in cflags or + '--with-address-sanitizer' in config_args + ) + thread_sanitizer = ( + '-fsanitize=thread' in cflags or + '--with-thread-sanitizer' in config_args + ) + if address_sanitizer or thread_sanitizer: + pytest.skip("AddressSanitizer and ThreadSanitizer do not support " + "deep recursion") + return func(*args, **kwargs) + return wrapper From 5ab88be054a9a522ade872c82938e9e8d6206190 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 10 Dec 2025 08:59:17 +0100 Subject: [PATCH 0997/1018] MAINT: clean up `setuptools` installs and `setup.py`-related leftovers No longer used for anything after we dropped Python 3.11 support. There are a few example `setup.py` files in the repo, but it turns out we are not testing those (which is fine). --- .github/workflows/linux.yml | 1 + numpy/_build_utils/__init__.py | 22 ---------------------- numpy/_core/include/numpy/numpyconfig.h | 6 ------ numpy/tests/test_scripts.py | 5 +---- requirements/setuptools_requirement.txt | 2 -- requirements/test_requirements.txt | 9 ++------- 6 files changed, 4 insertions(+), 41 deletions(-) delete mode 100644 numpy/_build_utils/__init__.py delete mode 100644 requirements/setuptools_requirement.txt diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 516f0538572b..c42e3e95a5d7 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -269,6 +269,7 @@ jobs: run: | sudo apt update sudo apt install make swig + pip install setuptools make -C tools/swig/test test array_api_tests: diff --git a/numpy/_build_utils/__init__.py b/numpy/_build_utils/__init__.py deleted file mode 100644 index 10b282d8d9ee..000000000000 --- a/numpy/_build_utils/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Don't use the deprecated NumPy C API. Define this to a fixed version -# instead of NPY_API_VERSION in order not to break compilation for -# released SciPy versions when NumPy introduces a new deprecation. 
Use -# in setup.py:: -# -# config.add_extension('_name', sources=['source_fname'], **numpy_nodepr_api) -# -numpy_nodepr_api = { - "define_macros": [("NPY_NO_DEPRECATED_API", "NPY_1_9_API_VERSION")] -} - - -def import_file(folder, module_name): - """Import a file directly, avoiding importing scipy""" - import importlib - import pathlib - - fname = pathlib.Path(folder) / f'{module_name}.py' - spec = importlib.util.spec_from_file_location(module_name, str(fname)) - module = importlib.util.module_from_spec(spec) - spec.loader.exec_module(module) - return module diff --git a/numpy/_core/include/numpy/numpyconfig.h b/numpy/_core/include/numpy/numpyconfig.h index 0bd934e37a8d..40b5f1454d67 100644 --- a/numpy/_core/include/numpy/numpyconfig.h +++ b/numpy/_core/include/numpy/numpyconfig.h @@ -7,12 +7,6 @@ * On Mac OS X, because there is only one configuration stage for all the archs * in universal builds, any macro which depends on the arch needs to be * hardcoded. - * - * Note that distutils/pip will attempt a universal2 build when Python itself - * is built as universal2, hence this hardcoding is needed even if we do not - * support universal2 wheels anymore (see gh-22796). - * This code block can be removed after we have dropped the setup.py based - * build completely. */ #ifdef __APPLE__ #undef NPY_SIZEOF_LONG diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py index 01b743941cf2..e5f0a07436c8 100644 --- a/numpy/tests/test_scripts.py +++ b/numpy/tests/test_scripts.py @@ -5,15 +5,13 @@ import os import subprocess import sys -from os.path import dirname, isfile, join as pathjoin +from os.path import dirname import pytest import numpy as np from numpy.testing import IS_WASM, assert_equal -is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py')) - def find_f2py_commands(): if sys.platform == 'win32': @@ -33,7 +31,6 @@ def find_f2py_commands(): return ['f2py', 'f2py' + major, 'f2py' + major + '.' + minor] -@pytest.mark.skipif(is_inplace, reason="Cannot test f2py command inplace") @pytest.mark.xfail(reason="Test is unreliable") @pytest.mark.parametrize('f2py_cmd', find_f2py_commands()) def test_f2py(f2py_cmd): diff --git a/requirements/setuptools_requirement.txt b/requirements/setuptools_requirement.txt deleted file mode 100644 index 21f900d46078..000000000000 --- a/requirements/setuptools_requirement.txt +++ /dev/null @@ -1,2 +0,0 @@ -setuptools==65.5.1 ; python_version < '3.12' -setuptools ; python_version >= '3.12' diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 2e2f679cee72..3094d4eb8470 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -1,7 +1,4 @@ Cython -wheel==0.38.1 -setuptools==65.5.1 ; python_version < '3.12' -setuptools ; python_version >= '3.12' hypothesis==6.142.2 pytest==7.4.0 pytest-cov==4.1.0 @@ -9,10 +6,8 @@ meson ninja; sys_platform != "emscripten" pytest-xdist pytest-timeout -# For testing types. 
Notes on the restrictions: -# - Mypy relies on C API features not present in PyPy -# NOTE: Keep mypy in sync with environment.yml -mypy==1.19.0; platform_python_implementation != "PyPy" +# For testing types +mypy==1.19.0 # for optional f2py encoding detection charset-normalizer tzdata From 2df152d2a2410eac37542ed5f4f414e3aa99c1a4 Mon Sep 17 00:00:00 2001 From: Ian Hunt-Isaak Date: Thu, 11 Dec 2025 05:50:02 -0500 Subject: [PATCH 0998/1018] DOC: crosslinking and explanation for (g)ufuncs (#30384) * DOC: crosslinking and explanation for (g)ufuncs Added more crosslinks between gufunc and ufunc pages to increase discoverability. Expanded example of basics.ufuncs, and corrected statement about the number of methods ufuncs possess. * fix doctest * simplify cross linking * Apply suggestion from @mattip Co-authored-by: Matti Picus * Extend "scalar" to "single-element scalar" * Extend "scalar" to "single-element scalar" --------- Co-authored-by: Matti Picus --- .../reference/c-api/generalized-ufuncs.rst | 2 + doc/source/reference/ufuncs.rst | 4 ++ doc/source/user/basics.ufuncs.rst | 43 +++++++++++++++++-- 3 files changed, 46 insertions(+), 3 deletions(-) diff --git a/doc/source/reference/c-api/generalized-ufuncs.rst b/doc/source/reference/c-api/generalized-ufuncs.rst index 755ab2141cbd..b8a37e98b81e 100644 --- a/doc/source/reference/c-api/generalized-ufuncs.rst +++ b/doc/source/reference/c-api/generalized-ufuncs.rst @@ -4,6 +4,8 @@ Generalized universal function API ================================== +.. seealso:: :ref:`ufuncs` + There is a general need for looping over not only functions on scalars but also over functions on vectors (or arrays). This concept is realized in NumPy by generalizing the universal functions diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst index 0c675718818b..cac15b66cf14 100644 --- a/doc/source/reference/ufuncs.rst +++ b/doc/source/reference/ufuncs.rst @@ -19,6 +19,10 @@ that takes a fixed number of specific inputs and produces a fixed number of specific outputs. For detailed information on universal functions, see :ref:`ufuncs-basics`. + +There are also :ref:`generalized ufuncs ` which +are functions over vectors (or arrays) instead of only single-element scalars. + :class:`ufunc` ============== diff --git a/doc/source/user/basics.ufuncs.rst b/doc/source/user/basics.ufuncs.rst index 8607c2abda9b..5c91ab6c0168 100644 --- a/doc/source/user/basics.ufuncs.rst +++ b/doc/source/user/basics.ufuncs.rst @@ -18,6 +18,25 @@ is, a ufunc is a ":term:`vectorized `" wrapper for a function that takes a fixed number of specific inputs and produces a fixed number of specific outputs. +There are also :ref:`generalized ufuncs ` which +are functions over vectors (or arrays) instead of single-element scalars. +For example, :func:`numpy.add` is a ufunc that operates element-by-element, +while :func:`numpy.matmul` is a gufunc that operates on vectors/matrices:: + + >>> a = np.arange(6).reshape(3, 2) + >>> a + array([[0, 1], + [2, 3], + [4, 5]]) + >>> np.add(a, a) # element-wise addition + array([[ 0, 2], + [ 4, 6], + [ 8, 10]]) + >>> np.matmul(a, a.T) # matrix multiplication (3x2) @ (2x3) -> (3x3) + array([[ 1, 3, 5], + [ 3, 13, 23], + [ 5, 23, 41]]) + In NumPy, universal functions are instances of the :class:`numpy.ufunc` class. Many of the built-in functions are implemented in compiled C code. 
The basic ufuncs operate on scalars, but @@ -35,12 +54,30 @@ One can also produce custom :class:`numpy.ufunc` instances using the Ufunc methods ============= -All ufuncs have four methods. They can be found at -:ref:`ufuncs.methods`. However, these methods only make sense on scalar -ufuncs that take two input arguments and return one output argument. +All ufuncs have 5 methods. 4 reduce-like methods +(:meth:`~numpy.ufunc.reduce`, :meth:`~numpy.ufunc.accumulate`, +:meth:`~numpy.ufunc.reduceat`, :meth:`~numpy.ufunc.outer`) and one +for inplace operations (:meth:`~numpy.ufunc.at`). +See :ref:`ufuncs.methods` for more. However, these methods only make sense on +ufuncs that take two input arguments and return one output argument (so-called +"scalar" ufuncs since the inner loop operates on a single scalar value). Attempting to call these methods on other ufuncs will cause a :exc:`ValueError`. +For example, :func:`numpy.add` takes two inputs and returns one output, +so its methods work:: + + >>> np.add.reduce([1, 2, 3]) + 6 + +But :func:`numpy.divmod` returns two outputs (quotient and remainder), +so calling its methods raises an error:: + + >>> np.divmod.reduce([1, 2, 3]) + Traceback (most recent call last): + ... + ValueError: reduce only supported for functions returning a single value + The reduce-like methods all take an *axis* keyword, a *dtype* keyword, and an *out* keyword, and the arrays must all have dimension >= 1. The *axis* keyword specifies the axis of the array over which the reduction From bd7e23aafe5747ff36ea3b3e286e2282f5f09e14 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Thu, 11 Dec 2025 17:09:44 +0200 Subject: [PATCH 0999/1018] MAINT: remove some old work-arounds and test skips for PyPy (#30417) * MAINT: remove some old work-arounds and test skips for PyPy * typos * lint --- .../_core/src/multiarray/stringdtype/dtype.c | 7 ----- numpy/_core/tests/test_array_coercion.py | 3 +- numpy/_core/tests/test_mem_policy.py | 6 ---- numpy/_core/tests/test_multiarray.py | 6 +--- numpy/_core/tests/test_regression.py | 2 -- numpy/_core/tests/test_strings.py | 29 ++++--------------- numpy/lib/tests/test_format.py | 2 -- numpy/lib/tests/test_io.py | 6 ---- numpy/lib/tests/test_loadtxt.py | 22 +------------- 9 files changed, 9 insertions(+), 74 deletions(-) diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index 77216d25d46f..7abf4b9303af 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -399,14 +399,7 @@ stringdtype_getitem(PyArray_StringDTypeObject *descr, char **dataptr) } } else { -#ifndef PYPY_VERSION val_obj = PyUnicode_FromStringAndSize(sdata.buf, sdata.size); -#else - // work around pypy issue #4046, can delete this when the fix is in - // a released version of pypy - val_obj = PyUnicode_FromStringAndSize( - sdata.buf == NULL ? 
"" : sdata.buf, sdata.size); -#endif if (val_obj == NULL) { goto fail; } diff --git a/numpy/_core/tests/test_array_coercion.py b/numpy/_core/tests/test_array_coercion.py index ba28e886282f..9c8c4a09cfc9 100644 --- a/numpy/_core/tests/test_array_coercion.py +++ b/numpy/_core/tests/test_array_coercion.py @@ -12,7 +12,7 @@ import numpy as np import numpy._core._multiarray_umath as ncu from numpy._core._rational_tests import rational -from numpy.testing import IS_64BIT, IS_PYPY, assert_array_equal +from numpy.testing import IS_64BIT, assert_array_equal def arraylikes(): @@ -282,7 +282,6 @@ def test_scalar_coercion(self, scalar): assert_array_equal(arr, arr3) assert_array_equal(arr, arr4) - @pytest.mark.xfail(IS_PYPY, reason="`int(np.complex128(3))` fails on PyPy") @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") @pytest.mark.parametrize("cast_to", scalar_instances()) def test_scalar_coercion_same_as_cast_and_assignment(self, cast_to): diff --git a/numpy/_core/tests/test_mem_policy.py b/numpy/_core/tests/test_mem_policy.py index 313d3efe779a..720ea1aa91b8 100644 --- a/numpy/_core/tests/test_mem_policy.py +++ b/numpy/_core/tests/test_mem_policy.py @@ -343,9 +343,6 @@ async def async_test_context_locality(get_module): def test_context_locality(get_module): - if (sys.implementation.name == 'pypy' - and sys.pypy_version_info[:3] < (7, 3, 6)): - pytest.skip('no context-locality support in PyPy < 7.3.6') asyncio.run(async_test_context_locality(get_module)) @@ -411,9 +408,6 @@ def test_new_policy(get_module): assert np._core.multiarray.get_handler_name(c) == orig_policy_name -@pytest.mark.xfail(sys.implementation.name == "pypy", - reason=("bad interaction between getenv and " - "os.environ inside pytest")) @pytest.mark.parametrize("policy", ["0", "1", None]) @pytest.mark.thread_unsafe(reason="modifies environment variables") def test_switch_owner(get_module, policy): diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 11365d1d1efb..62f3bd4a77c4 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -4017,7 +4017,7 @@ def test_inplace(self): # - defer if other has __array_ufunc__ and it is None # or other is not a subclass and has higher array priority # - else, call ufunc - @pytest.mark.xfail(IS_PYPY, reason="Bug in pypy3.{9, 10}-v7.3.13, #24862") + @pytest.mark.xfail(IS_PYPY, reason="Bug in pypy, #24862") def test_ufunc_binop_interaction(self): # Python method name (without underscores) # -> (numpy ufunc, has_in_place_version, preferred_dtype) @@ -5928,7 +5928,6 @@ def test_fromfile_offset(self, tmp_path, param_filename): np.fromfile, tmp_filename, dtype=x.dtype, sep=",", offset=1) - @pytest.mark.skipif(IS_PYPY, reason="bug in PyPy's PyNumber_AsSsize_t") def test_fromfile_bad_dup(self, tmp_path, param_filename, monkeypatch): def dup_str(fd): return 'abc' @@ -9824,9 +9823,6 @@ def test_1d_format(self): assert_raises(TypeError, '{:30}'.format, a) -from numpy.testing import IS_PYPY - - class TestCTypes: def test_ctypes_is_available(self): diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index 8844c461211a..66b462df9784 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -2291,8 +2291,6 @@ def test_reshape_size_overflow(self): new_shape = (2, 7, 7, 43826197) assert_raises(ValueError, a.reshape, new_shape) - @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error 
formatting") def test_invalid_structured_dtypes(self): # gh-2865 # mapping python objects to other dtypes diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index b36d1db76b20..d8d23d47b5b8 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -5,7 +5,7 @@ import numpy as np from numpy._core._exceptions import _UFuncNoLoopError -from numpy.testing import IS_PYPY, assert_array_equal, assert_raises +from numpy.testing import assert_array_equal, assert_raises from numpy.testing._private.utils import requires_memory COMPARISONS = [ @@ -19,8 +19,6 @@ MAX = np.iinfo(np.int64).max -IS_PYPY_LT_7_3_16 = IS_PYPY and sys.implementation.version < (7, 3, 16) - @pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS) def test_mixed_string_comparison_ufuncs_fail(op, ufunc, sym): arr_string = np.array(["a", "b"], dtype="S") @@ -1123,10 +1121,7 @@ def test_replace_unicode(self, buf, old, new, count, res, dt): '\U0001D7F6', '\U00011066', '\U000104A0', - pytest.param('\U0001F107', marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY_LT_7_3_16, - reason="PYPY bug in Py_UNICODE_ISALNUM", - strict=True)), + '\U0001F107', ]) def test_isalnum_unicode(self, in_, dt): in_ = np.array(in_, dtype=dt) @@ -1140,10 +1135,7 @@ def test_isalnum_unicode(self, in_, dt): ('\U0001F40D', False), ('\U0001F46F', False), ('\u2177', True), - pytest.param('\U00010429', True, marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY_LT_7_3_16, - reason="PYPY bug in Py_UNICODE_ISLOWER", - strict=True)), + ('\U00010429', True), ('\U0001044E', True), ]) def test_islower_unicode(self, in_, out, dt): @@ -1158,10 +1150,7 @@ def test_islower_unicode(self, in_, out, dt): ('\U0001F40D', False), ('\U0001F46F', False), ('\u2177', False), - pytest.param('\U00010429', False, marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY_LT_7_3_16, - reason="PYPY bug in Py_UNICODE_ISUPPER", - strict=True)), + ('\U00010429', False), ('\U0001044E', False), ]) def test_isupper_unicode(self, in_, out, dt): @@ -1171,15 +1160,9 @@ def test_isupper_unicode(self, in_, out, dt): @pytest.mark.parametrize("in_,out", [ ('\u1FFc', True), ('Greek \u1FFcitlecases ...', True), - pytest.param('\U00010401\U00010429', True, marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY_LT_7_3_16, - reason="PYPY bug in Py_UNICODE_ISISTITLE", - strict=True)), + ('\U00010401\U00010429', True), ('\U00010427\U0001044E', True), - pytest.param('\U00010429', False, marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY_LT_7_3_16, - reason="PYPY bug in Py_UNICODE_ISISTITLE", - strict=True)), + ('\U00010429', False), ('\U0001044E', False), ('\U0001F40D', False), ('\U0001F46F', False), diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index c70e3d5ebd43..52994f13bd05 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -1035,8 +1035,6 @@ def test_header_growth_axis(): float, np.dtype({'names': ['c'], 'formats': [np.dtype(int, metadata={})]}) ]}), ]) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_metadata_dtype(dt): # gh-14142 arr = np.ones(10, dtype=dt) diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 5ba634b9f612..4051e203dacf 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -843,8 +843,6 @@ def test_comments_multiple(self): a = np.array([[1, 2, 3], [4, 5, 6]], int) assert_array_equal(x, a) - 
@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_comments_multi_chars(self): c = TextIO() c.write('/* comment\n1,2,3,5\n') @@ -1061,8 +1059,6 @@ def test_from_float_hex(self): c, dtype=dt, converters=float.fromhex, encoding="latin1") assert_equal(res, tgt, err_msg=f"{dt}") - @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_default_float_converter_no_default_hex_conversion(self): """ Ensure that fromhex is only used for values with the correct prefix and @@ -1073,8 +1069,6 @@ def test_default_float_converter_no_default_hex_conversion(self): match=".*convert string 'a' to float64 at row 0, column 1"): np.loadtxt(c) - @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_default_float_converter_exception(self): """ Ensure that the exception message raised during failed floating point diff --git a/numpy/lib/tests/test_loadtxt.py b/numpy/lib/tests/test_loadtxt.py index 7a4ed17e7f07..a164bf38f189 100644 --- a/numpy/lib/tests/test_loadtxt.py +++ b/numpy/lib/tests/test_loadtxt.py @@ -13,7 +13,7 @@ import numpy as np from numpy.ma.testutils import assert_equal -from numpy.testing import HAS_REFCOUNT, IS_PYPY, assert_array_equal +from numpy.testing import HAS_REFCOUNT, assert_array_equal def test_scientific_notation(): @@ -204,8 +204,6 @@ def test_maxrows_no_blank_lines(dtype): assert_equal(res, np.array([["1.5", "2.5"], ["3.0", "4.0"]], dtype=dtype)) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") @pytest.mark.parametrize("dtype", (np.dtype("f8"), np.dtype("i2"))) def test_exception_message_bad_values(dtype): txt = StringIO("1,2\n3,XXX\n5,6") @@ -393,8 +391,6 @@ def test_bool(): assert_array_equal(res.view(np.uint8), [[1, 0], [1, 1]]) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) @pytest.mark.filterwarnings("error:.*integer via a float.*:DeprecationWarning") def test_integer_signs(dtype): @@ -411,8 +407,6 @@ def test_integer_signs(dtype): np.loadtxt([f"{sign}2\n"], dtype=dtype) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) @pytest.mark.filterwarnings("error:.*integer via a float.*:DeprecationWarning") def test_implicit_cast_float_to_int_fails(dtype): @@ -483,8 +477,6 @@ def conv(x): assert sys.getrefcount(sentinel) == 2 -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_character_not_bytes_compatible(): """Test exception when a character cannot be encoded as 'S'.""" data = StringIO("–") # == \u2013 @@ -502,8 +494,6 @@ def test_invalid_converter(conv): np.loadtxt(StringIO("1 2\n3 4"), converters=conv) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_converters_dict_raises_non_integer_key(): with pytest.raises(TypeError, match="keys of the converters dict"): np.loadtxt(StringIO("1 2\n3 4"), converters={"a": int}) @@ -569,8 +559,6 @@ def test_quote_support_default(): assert_array_equal(res, expected) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def 
test_quotechar_multichar_error():
     txt = StringIO("1,2\n3,4")
     msg = r".*must be a single unicode character or None"
@@ -730,8 +718,6 @@ def test_unicode_whitespace_stripping_complex(dtype):
     assert_array_equal(res, np.array([[1, 2 + 3j, 4 + 5j, 6 - 7j, 8j, 9j]] * 2))
 
 
-@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
-                    reason="PyPy bug in error formatting")
 @pytest.mark.parametrize("dtype", "FD")
 @pytest.mark.parametrize("field", ["1 +2j", "1+ 2j", "1+2 j", "1+-+3",
                                    "(1j", "(1", "(1+2j", "1+2j)"])
@@ -740,8 +726,6 @@ def test_bad_complex(dtype, field):
         np.loadtxt([field + "\n"], dtype=dtype, delimiter=",")
 
 
-@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
-                    reason="PyPy bug in error formatting")
 @pytest.mark.parametrize("dtype",
         np.typecodes["AllInteger"] + "efgdFDG" + "?")
 def test_nul_character_error(dtype):
@@ -753,8 +737,6 @@ def test_nul_character_error(dtype):
         np.loadtxt(["1\000"], dtype=dtype, delimiter=",", quotechar='"')
 
 
-@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
-                    reason="PyPy bug in error formatting")
 @pytest.mark.parametrize("dtype",
         np.typecodes["AllInteger"] + "efgdFDG" + "?")
 def test_no_thousands_support(dtype):
@@ -1022,8 +1004,6 @@ def test_str_dtype_unit_discovery_with_converter():
     assert_equal(a, expected)
 
 
-@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
-                    reason="PyPy bug in error formatting")
 def test_control_character_empty():
     with pytest.raises(TypeError, match="Text reading control character must"):
         np.loadtxt(StringIO("1 2 3"), delimiter="")

From a64d310e17f67df3f7ef0383362c944eb8003e17 Mon Sep 17 00:00:00 2001
From: M Bussonnier
Date: Thu, 11 Dec 2025 17:42:18 +0100
Subject: [PATCH 1000/1018] DOC: Improve cross-links in thread safety
 documentation (#30373)

* DOC: Improve cross-links in thread safety documentation

Those are mostly cosmetic changes and extra cross-links to improve the
documentation.

In particular I'd like to use this page as an example of good top-level
documentation for threading/freethreading in the py-threading-guide
https://py-free-threading.github.io/

This also does a number of updates to the misc.rst file, which was
basically too indented and missing a bunch of links.

I think much of the info in misc.rst on how to interop with C/C++ could
be removed, and likely all the information on seterr/geterr moved to
routines.err.rst, but I don't want to include those moves as part of
this PR as they are likely more subject to discussion.

There are minor updates in conf.py to avoid a sphinx warning, and I
know the thread safety backlinks in geterr/seterr/etc. are a bit
overkill, but as numpy will be used as an example and in general is
referred to as one of the core scientific Python projects, I think it's
OK to go a bit further than usual.

[skip actions]
[skip cirrus]

* typo

---
 doc/neps/conf.py                         |   6 +-
 doc/source/conf.py                       |   2 +-
 doc/source/reference/c-api/array.rst     |   2 +
 .../reference/random/multithreading.rst  |   3 +
 doc/source/reference/routines.err.rst    |  72 +++++++
 doc/source/reference/routines.io.rst     |   7 +
 doc/source/reference/thread_safety.rst   |  49 ++++-
 doc/source/user/misc.rst                 | 197 ++----------------
 numpy/_core/_ufunc_config.py             |  24 +++
 numpy/_core/arrayprint.py                |   7 +
 10 files changed, 178 insertions(+), 191 deletions(-)

diff --git a/doc/neps/conf.py b/doc/neps/conf.py
index 33faaf17ff64..056002135dbd 100644
--- a/doc/neps/conf.py
+++ b/doc/neps/conf.py
@@ -39,10 +39,10 @@ templates_path = ['../source/_templates/']
 
 # The suffix(es) of source filenames.
-# You can specify multiple suffix as a list of string: +# You can specify multiple suffix as a dict mapping suffixes to parsers: # -# source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +# source_suffix = {'.rst': 'restructuredtext', '.md': 'markdown'} +source_suffix = {'.rst': 'restructuredtext'} # The master toctree document. master_doc = 'content' diff --git a/doc/source/conf.py b/doc/source/conf.py index 238a7d11f8a4..f6e7fc57bde7 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -114,7 +114,7 @@ class PyTypeObject(ctypes.Structure): templates_path = ['_templates'] # The suffix of source filenames. -source_suffix = '.rst' +source_suffix = {'.rst': 'restructuredtext'} # General substitutions. project = 'NumPy' diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 885bdb17181e..d28e535f9428 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -4272,6 +4272,8 @@ Memory management Returns 0 if nothing was done, -1 on error, and 1 if action was taken. +.. _array.ndarray.capi.threading: + Threading support ~~~~~~~~~~~~~~~~~ diff --git a/doc/source/reference/random/multithreading.rst b/doc/source/reference/random/multithreading.rst index 73d2fc9ee5ad..28e045f10dc0 100644 --- a/doc/source/reference/random/multithreading.rst +++ b/doc/source/reference/random/multithreading.rst @@ -9,6 +9,9 @@ well-behaved (writable and aligned). Under normal circumstances, arrays created using the common constructors such as :meth:`numpy.empty` will satisfy these requirements. +.. seealso:: + :ref:`thread_safety` for general information about thread safety in NumPy. + This example makes use of :mod:`concurrent.futures` to fill an array using multiple threads. Threads are long-lived so that repeated calls do not require any additional overheads from thread creation. diff --git a/doc/source/reference/routines.err.rst b/doc/source/reference/routines.err.rst index 5272073a3b00..f46634793fa3 100644 --- a/doc/source/reference/routines.err.rst +++ b/doc/source/reference/routines.err.rst @@ -1,8 +1,80 @@ +.. _fp_error_handling: + Floating point error handling ============================= .. currentmodule:: numpy +Error handling settings are stored in :py:mod:`python:contextvars` +allowing different threads or async tasks to have independent configurations. +For more information, see :ref:`thread_safety`. + +.. _misc-error-handling: + +How numpy handles numerical exceptions +-------------------------------------- + +The default is to ``'warn'`` for ``invalid``, ``divide``, and ``overflow`` +and ``'ignore'`` for ``underflow``. But this can be changed, and it can be +set individually for different kinds of exceptions. The different behaviors +are: + +- ``'ignore'`` : Take no action when the exception occurs. +- ``'warn'`` : Print a :py:exc:`RuntimeWarning` (via the Python :py:mod:`warnings` module). +- ``'raise'`` : Raise a :py:exc:`FloatingPointError`. +- ``'call'`` : Call a specified function. +- ``'print'`` : Print a warning directly to ``stdout``. +- ``'log'`` : Record error in a Log object. + +These behaviors can be set for all kinds of errors or specific ones: + +- ``all`` : apply to all numeric exceptions +- ``invalid`` : when NaNs are generated +- ``divide`` : divide by zero (for integers as well!) +- ``overflow`` : floating point overflows +- ``underflow`` : floating point underflows + +Note that integer divide-by-zero is handled by the same machinery. 
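+
+For instance, integer division by zero is reported through the ``divide``
+channel just like its floating-point counterpart (a minimal sketch; the
+exact message text may vary between versions)::
+
+    >>> with np.errstate(divide='raise'):
+    ...     np.array([1, 2]) // 0
+    Traceback (most recent call last):
+        ...
+    FloatingPointError: divide by zero encountered in floor_divide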
+
+The error handling mode can be configured using the :func:`numpy.errstate`
+context manager.
+
+Examples
+--------
+
+::
+
+    >>> with np.errstate(all='warn'):
+    ...     np.zeros(5, dtype=np.float32) / 0.0
+    :2: RuntimeWarning: invalid value encountered in divide
+    array([nan, nan, nan, nan, nan], dtype=float32)
+
+::
+
+    >>> with np.errstate(under='ignore'):
+    ...     np.array([1.e-100])**10
+    array([0.])
+
+::
+
+    >>> with np.errstate(invalid='raise'):
+    ...     np.sqrt(np.array([-1.]))
+    ...
+    Traceback (most recent call last):
+      File "", line 2, in
+        np.sqrt(np.array([-1.]))
+        ~~~~~~~^^^^^^^^^^^^^^^^^
+    FloatingPointError: invalid value encountered in sqrt
+
+::
+
+    >>> def errorhandler(errstr, errflag):
+    ...     print("saw stupid error!")
+    >>> with np.errstate(call=errorhandler, all='call'):
+    ...     np.zeros(5, dtype=np.int32) / 0
+    saw stupid error!
+    array([nan, nan, nan, nan, nan])
+
 Setting and getting error handling
 ----------------------------------
 
diff --git a/doc/source/reference/routines.io.rst b/doc/source/reference/routines.io.rst
index 2b8dd98f36a4..ccd4467af545 100644
--- a/doc/source/reference/routines.io.rst
+++ b/doc/source/reference/routines.io.rst
@@ -59,8 +59,15 @@ Memory mapping files
   memmap
   lib.format.open_memmap
 
+.. _text_formatting_options:
+
 Text formatting options
 -----------------------
+
+Text formatting settings are maintained in a :py:mod:`context variable `,
+allowing different threads or async tasks to have independent configurations.
+For more information, see :ref:`thread_safety`.
+
 .. autosummary::
    :toctree: generated/
 
diff --git a/doc/source/reference/thread_safety.rst b/doc/source/reference/thread_safety.rst
index 6b6b9b0ea054..b07419259690 100644
--- a/doc/source/reference/thread_safety.rst
+++ b/doc/source/reference/thread_safety.rst
@@ -5,7 +5,7 @@ Thread Safety
 *************
 
 NumPy supports use in a multithreaded context via the `threading` module in the
-standard library. Many NumPy operations release the GIL, so unlike many
+standard library. Many NumPy operations release the :term:`python:GIL`, so unlike many
 situations in Python, it is possible to improve parallel performance by
 exploiting multithreaded parallelism in Python.
 
@@ -22,7 +22,7 @@ are not reproducible, let alone correct. It is also possible to crash the Python
 interpreter by, for example, resizing an array while another thread is reading
 from it to compute a ufunc operation.
 
-In the future, we may add locking to ndarray to make writing multithreaded
+In the future, we may add locking to :class:`~numpy.ndarray` to make writing multithreaded
 algorithms using NumPy arrays safer, but for now we suggest focusing on
 read-only access of arrays that are shared between threads, or adding your own
 locking if you need mutation and multithreading.
@@ -32,6 +32,18 @@ from use of the `threading` module, and instead might be better served with
 `multiprocessing`. In particular, operations on arrays with ``dtype=np.object_``
 do not release the GIL.
 
+Context-local state
+-------------------
+
+NumPy maintains some state for ufuncs on a context-local basis, which means each
+thread in a multithreaded program or task in an asyncio program has its own
+independent configuration of `numpy.errstate` (see
+:doc:`/reference/routines.err`), and of :ref:`text_formatting_options`.
+
+You can update state stored in a context variable by entering a context manager.
+As soon as the context manager exits, the state will be reset to its value
+before entering the context manager.
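+
+A minimal sketch of that pattern, using :func:`numpy.errstate` (the same
+scoping applies in a single-threaded program)::
+
+    >>> import numpy as np
+    >>> with np.errstate(divide='ignore'):
+    ...     np.array([1.0]) / 0.0   # silenced inside the context
+    array([inf])
+    >>> # on exit, the previous behaviour (default: warn) is restored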
+ Free-threaded Python -------------------- @@ -40,12 +52,27 @@ Free-threaded Python Starting with NumPy 2.1 and CPython 3.13, NumPy also has experimental support for python runtimes with the GIL disabled. See https://py-free-threading.github.io for more information about installing and -using free-threaded Python, as well as information about supporting it in -libraries that depend on NumPy. - -Because free-threaded Python does not have a global interpreter lock to -serialize access to Python objects, there are more opportunities for threads to -mutate shared state and create thread safety issues. In addition to the -limitations about locking of the ndarray object noted above, this also means -that arrays with ``dtype=np.object_`` are not protected by the GIL, creating data -races for python objects that are not possible outside free-threaded python. +using :py:term:`free-threaded ` Python, as well as +information about supporting it in libraries that depend on NumPy. + +Because free-threaded Python does not have a +global interpreter lock to serialize access to Python objects, there are more +opportunities for threads to mutate shared state and create thread safety +issues. In addition to the limitations about locking of the +:class:`~numpy.ndarray` object noted above, this also means that arrays with +``dtype=np.object_`` are not protected by the GIL, creating data races for python +objects that are not possible outside free-threaded python. + +C-API Threading Support +----------------------- + +For developers writing C extensions that interact with NumPy, several parts of +the :doc:`C-API array documentation ` provide detailed +information about multithreading considerations. + +See Also +-------- + +* :doc:`/reference/random/multithreading` - Practical example of using NumPy's + random number generators in a multithreaded context with + :mod:`concurrent.futures`. diff --git a/doc/source/user/misc.rst b/doc/source/user/misc.rst index 6d652e3ca67f..a882afa37afd 100644 --- a/doc/source/user/misc.rst +++ b/doc/source/user/misc.rst @@ -7,7 +7,7 @@ Miscellaneous IEEE 754 floating point special values -------------------------------------- -Special values defined in numpy: nan, inf, +Special values defined in numpy: :data:`~numpy.nan`, :data:`~numpy.inf` NaNs can be used as a poor-man's mask (if you don't care what the original value was) @@ -17,29 +17,39 @@ Note: cannot use equality to test NaNs. E.g.: :: >>> myarr = np.array([1., 0., np.nan, 3.]) >>> np.nonzero(myarr == np.nan) (array([], dtype=int64),) + +:: + >>> np.nan == np.nan # is always False! Use special numpy functions instead. False + +:: + >>> myarr[myarr == np.nan] = 0. # doesn't work >>> myarr array([ 1., 0., nan, 3.]) + +:: + >>> myarr[np.isnan(myarr)] = 0. 
  >>> myarr
   array([1., 0., 0., 3.])
 
-Other related special value functions: ::
+Other related special value functions:
 
-  isinf():    True if value is inf
-  isfinite(): True if not nan or inf
-  nan_to_num(): Map nan to 0, inf to max float, -inf to min float
+- :func:`~numpy.isnan` - True if value is nan
+- :func:`~numpy.isinf` - True if value is inf
+- :func:`~numpy.isfinite` - True if not nan or inf
+- :func:`~numpy.nan_to_num` - Map nan to 0, inf to max float, -inf to min float
 
 The following corresponds to the usual functions except that nans are excluded
-from the results: ::
+from the results:
 
-  nansum()
-  nanmax()
-  nanmin()
-  nanargmax()
-  nanargmin()
+- :func:`~numpy.nansum`
+- :func:`~numpy.nanmax`
+- :func:`~numpy.nanmin`
+- :func:`~numpy.nanargmax`
+- :func:`~numpy.nanargmin`
 
 >>> x = np.arange(10.)
 >>> x[3] = np.nan
@@ -47,168 +57,3 @@ from the results: ::
 nan
 >>> np.nansum(x)
 42.0
-
-How numpy handles numerical exceptions
---------------------------------------
-
-The default is to ``'warn'`` for ``invalid``, ``divide``, and ``overflow``
-and ``'ignore'`` for ``underflow``. But this can be changed, and it can be
-set individually for different kinds of exceptions. The different behaviors
-are:
-
- - 'ignore' : Take no action when the exception occurs.
 - - 'warn' : Print a `RuntimeWarning` (via the Python `warnings` module).
 - - 'raise' : Raise a `FloatingPointError`.
 - - 'call' : Call a function specified using the `seterrcall` function.
 - - 'print' : Print a warning directly to ``stdout``.
 - - 'log' : Record error in a Log object specified by `seterrcall`.
-
-These behaviors can be set for all kinds of errors or specific ones:
-
- - all : apply to all numeric exceptions
 - - invalid : when NaNs are generated
 - - divide : divide by zero (for integers as well!)
 - - overflow : floating point overflows
 - - underflow : floating point underflows
-
-Note that integer divide-by-zero is handled by the same machinery.
-These behaviors are set on a per-thread basis.
-
-Examples
---------
-
-::
-
-    >>> oldsettings = np.seterr(all='warn')
-    >>> np.zeros(5,dtype=np.float32)/0.
-    Traceback (most recent call last):
-    ...
-    RuntimeWarning: invalid value encountered in divide
-    >>> j = np.seterr(under='ignore')
-    >>> np.array([1.e-100])**10
-    array([0.])
-    >>> j = np.seterr(invalid='raise')
-    >>> np.sqrt(np.array([-1.]))
-    Traceback (most recent call last):
-    ...
-    FloatingPointError: invalid value encountered in sqrt
-    >>> def errorhandler(errstr, errflag):
-    ...     print("saw stupid error!")
-    >>> np.seterrcall(errorhandler)
-    >>> j = np.seterr(all='call')
-    >>> np.zeros(5, dtype=np.int32)/0
-    saw stupid error!
-    array([nan, nan, nan, nan, nan])
-    >>> j = np.seterr(**oldsettings) # restore previous
-    ...                              # error-handling settings
-
-Interfacing to C
-----------------
-Only a survey of the choices. Little detail on how each works.
-
-1) Bare metal, wrap your own C-code manually.
-
-   - Plusses:
-
-     - Efficient
-     - No dependencies on other tools
-
-   - Minuses:
-
-     - Lots of learning overhead:
-
-       - need to learn basics of Python C API
-       - need to learn basics of numpy C API
-       - need to learn how to handle reference counting and love it.
-
-     - Reference counting often difficult to get right.
- - - getting it wrong leads to memory leaks, and worse, segfaults - -2) Cython - - - Plusses: - - - avoid learning C API's - - no dealing with reference counting - - can code in pseudo python and generate C code - - can also interface to existing C code - - should shield you from changes to Python C api - - has become the de-facto standard within the scientific Python community - - fast indexing support for arrays - - - Minuses: - - - Can write code in non-standard form which may become obsolete - - Not as flexible as manual wrapping - -3) ctypes - - - Plusses: - - - part of Python standard library - - good for interfacing to existing shareable libraries, particularly - Windows DLLs - - avoids API/reference counting issues - - good numpy support: arrays have all these in their ctypes - attribute: :: - - a.ctypes.data - a.ctypes.data_as - a.ctypes.shape - a.ctypes.shape_as - a.ctypes.strides - a.ctypes.strides_as - - - Minuses: - - - can't use for writing code to be turned into C extensions, only a wrapper - tool. - -4) SWIG (automatic wrapper generator) - - - Plusses: - - - around a long time - - multiple scripting language support - - C++ support - - Good for wrapping large (many functions) existing C libraries - - - Minuses: - - - generates lots of code between Python and the C code - - can cause performance problems that are nearly impossible to optimize - out - - interface files can be hard to write - - doesn't necessarily avoid reference counting issues or needing to know - API's - -5) Psyco - - - Plusses: - - - Turns pure python into efficient machine code through jit-like - optimizations - - very fast when it optimizes well - - - Minuses: - - - Only on intel (windows?) - - Doesn't do much for numpy? - -Interfacing to Fortran: ------------------------ -The clear choice to wrap Fortran code is -`f2py `_. - -Pyfort is an older alternative, but not supported any longer. -Fwrap is a newer project that looked promising but isn't being developed any -longer. - -Interfacing to C++: -------------------- - 1) Cython - 2) CXX - 3) Boost.python - 4) SWIG - 5) SIP (used mainly in PyQT) diff --git a/numpy/_core/_ufunc_config.py b/numpy/_core/_ufunc_config.py index b16147c18ee6..6a7476670d95 100644 --- a/numpy/_core/_ufunc_config.py +++ b/numpy/_core/_ufunc_config.py @@ -57,6 +57,7 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None): seterrcall : Set a callback function for the 'call' mode. geterr, geterrcall, errstate + Notes ----- The floating-point exceptions are defined in the IEEE 754 standard [1]_: @@ -68,6 +69,8 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None): - Invalid operation: result is not an expressible number, typically indicates that a NaN was produced. + **Concurrency note:** see :ref:`fp_error_handling` + .. [1] https://en.wikipedia.org/wiki/IEEE_754 Examples @@ -127,6 +130,8 @@ def geterr(): For complete documentation of the types of floating-point exceptions and treatment options, see `seterr`. + **Concurrency note:** see :doc:`/reference/routines.err` + Examples -------- >>> import numpy as np @@ -172,6 +177,10 @@ def setbufsize(size): bufsize : int Previous size of ufunc buffer in bytes. + Notes + ----- + **Concurrency note:** see :doc:`/reference/routines.err` + Examples -------- When exiting a `numpy.errstate` context manager the bufsize is restored: @@ -205,6 +214,12 @@ def getbufsize(): getbufsize : int Size of ufunc buffer in bytes. 
+ Notes + ----- + + **Concurrency note:** see :doc:`/reference/routines.err` + + Examples -------- >>> import numpy as np @@ -256,6 +271,11 @@ def seterrcall(func): -------- seterr, geterr, geterrcall + Notes + ----- + + **Concurrency note:** see :doc:`/reference/routines.err` + Examples -------- Callback upon error: @@ -331,6 +351,8 @@ def geterrcall(): For complete documentation of the types of floating-point exceptions and treatment options, see `seterr`. + **Concurrency note:** see :ref:`fp_error_handling` + Examples -------- >>> import numpy as np @@ -399,6 +421,8 @@ class errstate: For complete documentation of the types of floating-point exceptions and treatment options, see `seterr`. + **Concurrency note:** see :ref:`fp_error_handling` + Examples -------- >>> import numpy as np diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index 8d576d9e1d56..96c17285bb3d 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -248,6 +248,7 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, -------- get_printoptions, printoptions, array2string + Notes ----- @@ -255,6 +256,8 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, * Use `printoptions` as a context manager to set the values temporarily. * These print options apply only to NumPy ndarrays, not to scalars. + **Concurrency note:** see :ref:`text_formatting_options` + Examples -------- Floating point precision can be set: @@ -357,6 +360,8 @@ def get_printoptions(): ----- These print options apply only to NumPy ndarrays, not to scalars. + **Concurrency note:** see :ref:`text_formatting_options` + See Also -------- set_printoptions, printoptions @@ -419,6 +424,8 @@ def printoptions(*args, **kwargs): ----- These print options apply only to NumPy ndarrays, not to scalars. 
+    **Concurrency note:** see :ref:`text_formatting_options`
+
     """
     token = _set_printoptions(*args, **kwargs)
 

From 753dd1f2709c1b6bf3c0c0b084031da16fd1c920 Mon Sep 17 00:00:00 2001
From: Kaiyuan Yang <1079700998@qq.com>
Date: Fri, 12 Dec 2025 03:27:27 +0800
Subject: [PATCH 1001/1018] BUG: fix double evaluation in
 PyArrayScalar_RETURN_BOOL_FROM_LONG (#30418)

* BUG: fix double evaluation in PyArrayScalar_RETURN_BOOL_FROM_LONG

Closes #30389

* fix: lint errors and wasm import problems
---
 numpy/_core/include/numpy/arrayscalars.h  |  8 ++--
 numpy/_core/tests/test_multiprocessing.py | 51 +++++++++++++++++++++++
 2 files changed, 56 insertions(+), 3 deletions(-)
 create mode 100644 numpy/_core/tests/test_multiprocessing.py

diff --git a/numpy/_core/include/numpy/arrayscalars.h b/numpy/_core/include/numpy/arrayscalars.h
index ff048061f70a..46bc58cc2a35 100644
--- a/numpy/_core/include/numpy/arrayscalars.h
+++ b/numpy/_core/include/numpy/arrayscalars.h
@@ -173,9 +173,11 @@ typedef struct {
 #define PyArrayScalar_True ((PyObject *)(&(_PyArrayScalar_BoolValues[1])))
 #define PyArrayScalar_FromLong(i) \
         ((PyObject *)(&(_PyArrayScalar_BoolValues[((i)!=0)])))
-#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) \
-        return Py_INCREF(PyArrayScalar_FromLong(i)), \
-                PyArrayScalar_FromLong(i)
+#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) do { \
+    PyObject *obj = PyArrayScalar_FromLong(i); \
+    Py_INCREF(obj); \
+    return obj; \
+} while (0)
 #define PyArrayScalar_RETURN_FALSE \
         return Py_INCREF(PyArrayScalar_False), \
                 PyArrayScalar_False
diff --git a/numpy/_core/tests/test_multiprocessing.py b/numpy/_core/tests/test_multiprocessing.py
new file mode 100644
index 000000000000..2c5c2fcfb8ed
--- /dev/null
+++ b/numpy/_core/tests/test_multiprocessing.py
@@ -0,0 +1,51 @@
+import pytest
+
+import numpy as np
+from numpy.testing import IS_WASM
+
+pytestmark = pytest.mark.thread_unsafe(
+    reason="tests in this module are explicitly multi-processed"
+)
+
+def bool_array_writer(shm_name, n):
+    # writer routine for test_read_write_bool_array
+    import time
+    from multiprocessing import shared_memory
+    shm = shared_memory.SharedMemory(name=shm_name)
+    arr = np.ndarray(n, dtype=np.bool_, buffer=shm.buf)
+    for i in range(n):
+        arr[i] = True
+        time.sleep(0.00001)
+
+def bool_array_reader(shm_name, n):
+    # reader routine for test_read_write_bool_array
+    from multiprocessing import shared_memory
+    shm = shared_memory.SharedMemory(name=shm_name)
+    arr = np.ndarray(n, dtype=np.bool_, buffer=shm.buf)
+    for i in range(n):
+        while not arr[i]:
+            pass
+
+@pytest.mark.skipif(IS_WASM,
+                    reason="WASM does not support _posixshmem")
+def test_read_write_bool_array():
+    # See: gh-30389
+    #
+    # Prior to Python 3.13, boolean scalar singletons (np.True_ / np.False_)
+    # were regular reference-counted objects. Due to the double evaluation in
+    # PyArrayScalar_RETURN_BOOL_FROM_LONG, concurrent reads and writes of a
+    # boolean array could corrupt their refcounts, potentially causing a crash
+    # (e.g., `free(): invalid pointer`).
+    #
+    # This test creates a multi-process race between a writer and a reader to
+    # ensure that NumPy does not exhibit such failures.
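+    #
+    # A rough sketch of the hazard (paraphrasing the old macro above): in
+    #
+    #     return Py_INCREF(PyArrayScalar_FromLong(i)), PyArrayScalar_FromLong(i);
+    #
+    # the argument `i` is evaluated twice. If the memory backing `i` changes
+    # between the two evaluations, the singleton that gets INCREF'd is not the
+    # singleton that gets returned, so one refcount leaks and the other is
+    # later over-decremented by the caller's DECREF.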
+    from concurrent.futures import ProcessPoolExecutor
+    from multiprocessing import shared_memory
+    n = 10000
+    shm = shared_memory.SharedMemory(create=True, size=n)
+    with ProcessPoolExecutor(max_workers=2) as executor:
+        f_writer = executor.submit(bool_array_writer, shm.name, n)
+        f_reader = executor.submit(bool_array_reader, shm.name, n)
+        shm.unlink()
+        f_writer.result()
+        f_reader.result()

From 54840fb112db7b4cfa0b8dddf180407a01b8da2c Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 12 Dec 2025 17:05:47 +0000
Subject: [PATCH 1002/1018] MAINT: Bump github/codeql-action from 4.31.7 to
 4.31.8

Bumps [github/codeql-action](https://github.com/github/codeql-action) from 4.31.7 to 4.31.8.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](https://github.com/github/codeql-action/compare/cf1bb45a277cb3c205638b2cd5c984db1c46a412...1b168cd39490f61582a9beae412bb7057a6b2c4e)

---
updated-dependencies:
- dependency-name: github/codeql-action
  dependency-version: 4.31.8
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
---
 .github/workflows/codeql.yml     | 6 +++---
 .github/workflows/scorecards.yml | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index d7622b4d6dff..aaa14b37588a 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -47,7 +47,7 @@ jobs:
 
     # Initializes the CodeQL tools for scanning.
     - name: Initialize CodeQL
-      uses: github/codeql-action/init@cf1bb45a277cb3c205638b2cd5c984db1c46a412 # v4.31.7
+      uses: github/codeql-action/init@1b168cd39490f61582a9beae412bb7057a6b2c4e # v4.31.8
      with:
        languages: ${{ matrix.language }}
        # If you wish to specify custom queries, you can do so here or in a config file.
@@ -57,7 +57,7 @@ jobs:
 
     # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
     # If this step fails, then you should remove it and run the build manually (see below)
     - name: Autobuild
-      uses: github/codeql-action/autobuild@cf1bb45a277cb3c205638b2cd5c984db1c46a412 # v4.31.7
+      uses: github/codeql-action/autobuild@1b168cd39490f61582a9beae412bb7057a6b2c4e # v4.31.8
 
     # ℹ️ Command-line programs to run using the OS shell.
     # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
@@ -70,6 +70,6 @@ jobs:
     #   ./location_of_script_within_repo/buildscript.sh
 
     - name: Perform CodeQL Analysis
-      uses: github/codeql-action/analyze@cf1bb45a277cb3c205638b2cd5c984db1c46a412 # v4.31.7
+      uses: github/codeql-action/analyze@1b168cd39490f61582a9beae412bb7057a6b2c4e # v4.31.8
      with:
        category: "/language:${{matrix.language}}"
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index 8488e97ef116..140d92d43e61 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -50,6 +50,6 @@ jobs:
 
     # Upload the results to GitHub's code scanning dashboard.
    - name: "Upload to code-scanning"
-      uses: github/codeql-action/upload-sarif@cf1bb45a277cb3c205638b2cd5c984db1c46a412 # v2.1.27
+      uses: github/codeql-action/upload-sarif@1b168cd39490f61582a9beae412bb7057a6b2c4e # v4.31.8
      with:
        sarif_file: results.sarif

From d601b6cbf93ff8f6316a8f2c30ed35059f8a2808 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 12 Dec 2025 17:05:52 +0000
Subject: [PATCH 1003/1018] MAINT: Bump actions/cache from 4.3.0 to 5.0.1

Bumps [actions/cache](https://github.com/actions/cache) from 4.3.0 to 5.0.1.
- [Release notes](https://github.com/actions/cache/releases)
- [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md)
- [Commits](https://github.com/actions/cache/compare/0057852bfaa89a56745cba8c7296529d2fc39830...9255dc7a253b0ccc959486e2bca901246202afeb)

---
updated-dependencies:
- dependency-name: actions/cache
  dependency-version: 5.0.1
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
---
 .github/workflows/linux_qemu.yml | 4 ++--
 .github/workflows/macos.yml      | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml
index 916417ebc513..f82bf1aa2626 100644
--- a/.github/workflows/linux_qemu.yml
+++ b/.github/workflows/linux_qemu.yml
@@ -96,7 +96,7 @@ jobs:
           sudo apt install -y ninja-build gcc-${TOOLCHAIN_NAME} g++-${TOOLCHAIN_NAME} gfortran-${TOOLCHAIN_NAME}
 
    - name: Cache docker container
-      uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
+      uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
      id: container-cache
      with:
        path: ~/docker_${{ matrix.BUILD_PROP[1] }}
@@ -204,7 +204,7 @@ jobs:
           sudo apt install -y ninja-build gcc-14-${TOOLCHAIN_NAME} g++-14-${TOOLCHAIN_NAME} gfortran-14-${TOOLCHAIN_NAME}
 
    - name: Cache docker container
-      uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
+      uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
      id: container-cache
      with:
        path: ~/docker_${{ matrix.BUILD_PROP[1] }}
diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml
index f2af68370b99..15d3cf947222 100644
--- a/.github/workflows/macos.yml
+++ b/.github/workflows/macos.yml
@@ -49,7 +49,7 @@ jobs:
         echo "today=$(/bin/date -u '+%Y%m%d')" >> $GITHUB_OUTPUT
 
    - name: Setup compiler cache
-      uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
+      uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
      id: cache-ccache
      with:
        path: ${{ steps.prep-ccache.outputs.dir }}
@@ -73,7 +73,7 @@ jobs:
       # ensure we re-solve once a day (since we don't lock versions). Could be
       # replaced by a conda-lock based approach in the future.
- name: Cache conda environment - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 + uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 env: # Increase this value to reset cache if environment.yml has not changed CACHE_NUMBER: 1 From 0fe53eacf0aa1b5c931661d68b8668e34874cd98 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 13 Dec 2025 23:26:55 +0100 Subject: [PATCH 1004/1018] TYP: avoid using `np.` in `__init__.pyi` --- numpy/__init__.pyi | 275 ++++++++++++++++++++++----------------------- 1 file changed, 137 insertions(+), 138 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index aad324f54c4d..af13ddd20cce 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -10,7 +10,6 @@ from decimal import Decimal from fractions import Fraction from uuid import UUID -import numpy as np from numpy.__config__ import show as show_config from numpy._pytesttester import PytestTester from numpy._core._internal import _ctypes @@ -729,8 +728,8 @@ _DT64ItemT_co = TypeVar("_DT64ItemT_co", bound=_DT64Item, default=Any, covariant ### Type Aliases (for internal use only) -type _Falsy = L[False, 0] | np.bool[L[False]] -type _Truthy = L[True, 1] | np.bool[L[True]] +type _Falsy = L[False, 0] | bool_[L[False]] +type _Truthy = L[True, 1] | bool_[L[True]] type _1D = tuple[int] type _2D = tuple[int, int] @@ -750,8 +749,8 @@ type _ArrayNumeric = NDArray[number | timedelta64 | object_] type _ScalarNotObject = bool_ | number | flexible | datetime64 | timedelta64 -type _Float64_co = float | floating[_64Bit] | float32 | float16 | integer | np.bool -type _Complex64_co = number[_32Bit] | number[_16Bit] | number[_8Bit] | builtins.bool | np.bool +type _Float64_co = float | floating[_64Bit] | float32 | float16 | integer | bool_ +type _Complex64_co = number[_32Bit] | number[_16Bit] | number[_8Bit] | builtins.bool | bool_ type _Complex128_co = complex | number[_64Bit] | _Complex64_co type _ToIndex = SupportsIndex | slice | EllipsisType | _ArrayLikeInt_co | None @@ -888,8 +887,8 @@ type _SortSide = L["left", "right"] type _ConvertibleToInt = SupportsInt | SupportsIndex | _CharLike_co type _ConvertibleToFloat = SupportsFloat | SupportsIndex | _CharLike_co type _ConvertibleToComplex = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co -type _ConvertibleToTD64 = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None -type _ConvertibleToDT64 = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None +type _ConvertibleToTD64 = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | bool_ | None +type _ConvertibleToDT64 = dt.date | int | _CharLike_co | character | number | datetime64 | bool_ | None type _NDIterFlagsKind = L[ "buffered", @@ -1063,8 +1062,8 @@ pi: Final[float] = ... inf: Final[float] = ... nan: Final[float] = ... little_endian: Final[builtins.bool] = ... -False_: Final[np.bool[L[False]]] = ... -True_: Final[np.bool[L[True]]] = ... +False_: Final[bool_[L[False]]] = ... +True_: Final[bool_[L[True]]] = ... newaxis: Final[None] = None # not in __all__ @@ -1129,12 +1128,12 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @overload def __new__( cls, - dtype: type[builtins.bool | np.bool | ct.c_bool] | _BoolCodes, + dtype: type[builtins.bool | bool_ | ct.c_bool] | _BoolCodes, align: builtins.bool = False, copy: builtins.bool = False, *, metadata: dict[str, Any] = ..., - ) -> dtype[np.bool]: ... + ) -> dtype[bool_]: ... 
@overload def __new__( cls, @@ -1732,7 +1731,7 @@ class _ArrayOrScalarCommon: SupportsIndex, # version _ShapeLike, # Shape _DTypeT_co, # DType - np.bool, # F-continuous + bool_, # F-continuous bytes | list[Any], # Data ], /) -> None: ... @@ -2168,7 +2167,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload # flexible | object_ | bool def __setitem__( - self: ndarray[Any, dtype[flexible | object_ | np.bool] | dtypes.StringDType], + self: ndarray[Any, dtype[flexible | object_ | bool_] | dtypes.StringDType], key: _ToIndices, value: object, /, @@ -2286,7 +2285,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): keepdims: L[False, 0] = False, *, where: _ArrayLikeBool_co = True - ) -> np.bool: ... + ) -> bool_: ... @overload def all( self, @@ -2295,7 +2294,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): keepdims: SupportsIndex = False, *, where: _ArrayLikeBool_co = True, - ) -> np.bool | NDArray[np.bool]: ... + ) -> bool_ | NDArray[bool_]: ... @overload def all[ArrayT: ndarray]( self, @@ -2323,7 +2322,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): keepdims: L[False, 0] = False, *, where: _ArrayLikeBool_co = True - ) -> np.bool: ... + ) -> bool_: ... @overload def any( self, @@ -2332,7 +2331,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): keepdims: SupportsIndex = False, *, where: _ArrayLikeBool_co = True, - ) -> np.bool | NDArray[np.bool]: ... + ) -> bool_ | NDArray[bool_]: ... @overload def any[ArrayT: ndarray]( self, @@ -2410,7 +2409,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def dot[ArrayT: ndarray](self, b: ArrayLike, /, out: ArrayT) -> ArrayT: ... # `nonzero()` raises for 0d arrays/generics - def nonzero(self) -> tuple[ndarray[tuple[int], np.dtype[intp]], ...]: ... + def nonzero(self) -> tuple[ndarray[tuple[int], dtype[intp]], ...]: ... @overload def searchsorted( @@ -2657,7 +2656,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def getfield(self, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> NDArray[Any]: ... def __index__(self: NDArray[integer], /) -> int: ... - def __complex__(self: NDArray[number | np.bool | object_], /) -> complex: ... + def __complex__(self: NDArray[number | bool_ | object_], /) -> complex: ... def __len__(self) -> int: ... def __contains__(self, value: object, /) -> builtins.bool: ... @@ -2682,67 +2681,67 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # @overload - def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[bool_]: ... @overload - def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[bool_]: ... @overload - def __lt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + def __lt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[bool_]: ... @overload - def __lt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + def __lt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bool_]: ... @overload - def __lt__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[np.bool]: ... + def __lt__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[bool_]: ... 
@overload - def __lt__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + def __lt__(self: NDArray[object_], other: object, /) -> NDArray[bool_]: ... @overload - def __lt__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + def __lt__(self, other: _ArrayLikeObject_co, /) -> NDArray[bool_]: ... # @overload - def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[bool_]: ... @overload - def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[bool_]: ... @overload - def __le__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + def __le__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[bool_]: ... @overload - def __le__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + def __le__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bool_]: ... @overload - def __le__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[np.bool]: ... + def __le__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[bool_]: ... @overload - def __le__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + def __le__(self: NDArray[object_], other: object, /) -> NDArray[bool_]: ... @overload - def __le__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + def __le__(self, other: _ArrayLikeObject_co, /) -> NDArray[bool_]: ... # @overload - def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[bool_]: ... @overload - def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[bool_]: ... @overload - def __gt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + def __gt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[bool_]: ... @overload - def __gt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + def __gt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bool_]: ... @overload - def __gt__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[np.bool]: ... + def __gt__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[bool_]: ... @overload - def __gt__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + def __gt__(self: NDArray[object_], other: object, /) -> NDArray[bool_]: ... @overload - def __gt__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + def __gt__(self, other: _ArrayLikeObject_co, /) -> NDArray[bool_]: ... # @overload - def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[bool_]: ... @overload - def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[bool_]: ... @overload - def __ge__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + def __ge__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[bool_]: ... 
@overload - def __ge__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + def __ge__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bool_]: ... @overload - def __ge__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[np.bool]: ... + def __ge__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[bool_]: ... @overload - def __ge__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + def __ge__(self: NDArray[object_], other: object, /) -> NDArray[bool_]: ... @overload - def __ge__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + def __ge__(self, other: _ArrayLikeObject_co, /) -> NDArray[bool_]: ... # Unary ops @@ -2772,9 +2771,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __matmul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __matmul__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __matmul__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... + def __matmul__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __matmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2801,9 +2800,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload # signature equivalent to __matmul__ def __rmatmul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __rmatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __rmatmul__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __rmatmul__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... + def __rmatmul__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __rmatmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2829,14 +2828,14 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __mod__[ScalarT: floating | integer]( - self: NDArray[ScalarT], other: int | np.bool, / + self: NDArray[ScalarT], other: int | bool_, / ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload def __mod__[ScalarT: floating | integer](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __mod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + def __mod__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __mod__[ScalarT: floating | integer](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... + def __mod__[ScalarT: floating | integer](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __mod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... 
@overload @@ -2856,14 +2855,14 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload # signature equivalent to __mod__ def __rmod__[ScalarT: floating | integer]( - self: NDArray[ScalarT], other: int | np.bool, / + self: NDArray[ScalarT], other: int | bool_, / ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload def __rmod__[ScalarT: floating | integer](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __rmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + def __rmod__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rmod__[ScalarT: floating | integer](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... + def __rmod__[ScalarT: floating | integer](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __rmod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2883,17 +2882,17 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __divmod__[ScalarT: floating | integer]( - self: NDArray[ScalarT], rhs: int | np.bool, / + self: NDArray[ScalarT], rhs: int | bool_, / ) -> _2Tuple[ndarray[_ShapeT_co, dtype[ScalarT]]]: ... @overload def __divmod__[ScalarT: floating | integer]( self: NDArray[ScalarT], rhs: _ArrayLikeBool_co, / ) -> _2Tuple[NDArray[ScalarT]]: ... @overload - def __divmod__(self: NDArray[np.bool], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... + def __divmod__(self: NDArray[bool_], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... @overload def __divmod__[ScalarT: floating | integer]( - self: NDArray[np.bool], rhs: _ArrayLike[ScalarT], / + self: NDArray[bool_], rhs: _ArrayLike[ScalarT], / ) -> _2Tuple[NDArray[ScalarT]]: ... @overload def __divmod__(self: NDArray[float64], rhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... @@ -2910,17 +2909,17 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload # signature equivalent to __divmod__ def __rdivmod__[ScalarT: floating | integer]( - self: NDArray[ScalarT], lhs: int | np.bool, / + self: NDArray[ScalarT], lhs: int | bool_, / ) -> _2Tuple[ndarray[_ShapeT_co, dtype[ScalarT]]]: ... @overload def __rdivmod__[ScalarT: floating | integer]( self: NDArray[ScalarT], lhs: _ArrayLikeBool_co, / ) -> _2Tuple[NDArray[ScalarT]]: ... @overload - def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... + def __rdivmod__(self: NDArray[bool_], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... @overload def __rdivmod__[ScalarT: floating | integer]( - self: NDArray[np.bool], lhs: _ArrayLike[ScalarT], / + self: NDArray[bool_], lhs: _ArrayLike[ScalarT], / ) -> _2Tuple[NDArray[ScalarT]]: ... @overload def __rdivmod__(self: NDArray[float64], lhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... @@ -2937,13 +2936,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__add__` @overload - def __add__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + def __add__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload def __add__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... 
@overload - def __add__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __add__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __add__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... + def __add__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __add__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2985,13 +2984,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__radd__` @overload # signature equivalent to __add__ - def __radd__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + def __radd__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload def __radd__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __radd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __radd__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __radd__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... + def __radd__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __radd__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3033,13 +3032,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__sub__` @overload - def __sub__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + def __sub__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload def __sub__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __sub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... + def __sub__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __sub__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... + def __sub__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __sub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3071,13 +3070,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__rsub__` @overload - def __rsub__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + def __rsub__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload def __rsub__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __rsub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... + def __rsub__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __rsub__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... 
+ def __rsub__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __rsub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3109,13 +3108,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__mul__` @overload - def __mul__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + def __mul__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload def __mul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __mul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __mul__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __mul__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... + def __mul__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __mul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3151,13 +3150,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__rmul__` @overload # signature equivalent to __mul__ - def __rmul__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + def __rmul__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload def __rmul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __rmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __rmul__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __rmul__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... + def __rmul__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __rmul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3256,14 +3255,14 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__floordiv__` @overload def __floordiv__[ScalarT: integer | floating]( - self: NDArray[ScalarT], other: int | np.bool, / + self: NDArray[ScalarT], other: int | bool_, / ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload def __floordiv__[ScalarT: integer | floating](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __floordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + def __floordiv__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __floordiv__[ScalarT: integer | floating](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... + def __floordiv__[ScalarT: integer | floating](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __floordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... 
@overload @@ -3288,14 +3287,14 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__rfloordiv__` @overload def __rfloordiv__[ScalarT: integer | floating]( - self: NDArray[ScalarT], other: int | np.bool, / + self: NDArray[ScalarT], other: int | bool_, / ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload def __rfloordiv__[ScalarT: integer | floating](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + def __rfloordiv__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rfloordiv__[ScalarT: integer | floating](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... + def __rfloordiv__[ScalarT: integer | floating](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __rfloordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3317,13 +3316,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__pow__` @overload - def __pow__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + def __pow__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload def __pow__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[ScalarT]: ... @overload - def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... + def __pow__(self: NDArray[bool_], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... @overload - def __pow__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], mod: None = None, /) -> NDArray[ScalarT]: ... + def __pow__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], mod: None = None, /) -> NDArray[ScalarT]: ... @overload def __pow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ... @overload @@ -3351,13 +3350,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__rpow__` @overload - def __rpow__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + def __rpow__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload def __rpow__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[ScalarT]: ... @overload - def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... + def __rpow__(self: NDArray[bool_], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... @overload - def __rpow__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], mod: None = None, /) -> NDArray[ScalarT]: ... + def __rpow__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], mod: None = None, /) -> NDArray[ScalarT]: ... @overload def __rpow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ... 
@overload @@ -3384,7 +3383,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... @overload - def __lshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + def __lshift__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload @@ -3395,7 +3394,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __lshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rlshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + def __rlshift__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload @@ -3406,7 +3405,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rlshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + def __rshift__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload @@ -3417,7 +3416,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rrshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + def __rrshift__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload @@ -3428,7 +3427,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rrshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __and__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __and__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload @@ -3439,7 +3438,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __and__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __rand__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload @@ -3450,7 +3449,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rand__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __xor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __xor__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload @@ -3461,7 +3460,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __xor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
@overload - def __rxor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __rxor__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload @@ -3472,7 +3471,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rxor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __or__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __or__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload @@ -3483,7 +3482,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __or__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __ror__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __ror__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload @@ -3944,8 +3943,8 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): out: None = None, keepdims: SupportsIndex = False, *, - where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True - ) -> np.bool: ... + where: builtins.bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True + ) -> bool_: ... @overload def all[ScalarT: generic]( self, @@ -3954,7 +3953,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): out: ndarray[tuple[()], dtype[ScalarT]], keepdims: SupportsIndex = False, *, - where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, + where: builtins.bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, ) -> ScalarT: ... @overload def all[ScalarT: generic]( @@ -3964,7 +3963,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): *, out: ndarray[tuple[()], dtype[ScalarT]], keepdims: SupportsIndex = False, - where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, + where: builtins.bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, ) -> ScalarT: ... @overload @@ -3975,8 +3974,8 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): out: None = None, keepdims: SupportsIndex = False, *, - where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True - ) -> np.bool: ... + where: builtins.bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True + ) -> bool_: ... @overload def any[ScalarT: generic]( self, @@ -3985,7 +3984,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): out: ndarray[tuple[()], dtype[ScalarT]], keepdims: SupportsIndex = False, *, - where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, + where: builtins.bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, ) -> ScalarT: ... @overload def any[ScalarT: generic]( @@ -3995,7 +3994,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): *, out: ndarray[tuple[()], dtype[ScalarT]], keepdims: SupportsIndex = False, - where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, + where: builtins.bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, ) -> ScalarT: ... 
# Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` @@ -4058,36 +4057,36 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @property def real(self) -> Self: ... @property - def imag(self) -> np.bool[L[False]]: ... + def imag(self) -> bool_[L[False]]: ... @overload # mypy bug workaround: https://github.com/numpy/numpy/issues/29245 - def __new__(cls, value: Never, /) -> np.bool[builtins.bool]: ... + def __new__(cls, value: Never, /) -> bool_[builtins.bool]: ... @overload - def __new__(cls, value: _Falsy = ..., /) -> np.bool[L[False]]: ... + def __new__(cls, value: _Falsy = ..., /) -> bool_[L[False]]: ... @overload - def __new__(cls, value: _Truthy, /) -> np.bool[L[True]]: ... + def __new__(cls, value: _Truthy, /) -> bool_[L[True]]: ... @overload - def __new__(cls, value: object, /) -> np.bool[builtins.bool]: ... + def __new__(cls, value: object, /) -> bool_[builtins.bool]: ... def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... def __bool__(self, /) -> _BoolItemT_co: ... @overload - def __int__(self: np.bool[L[False]], /) -> L[0]: ... + def __int__(self: bool_[L[False]], /) -> L[0]: ... @overload - def __int__(self: np.bool[L[True]], /) -> L[1]: ... + def __int__(self: bool_[L[True]], /) -> L[1]: ... @overload def __int__(self, /) -> L[0, 1]: ... def __abs__(self) -> Self: ... @overload - def __invert__(self: np.bool[L[False]], /) -> np.bool[L[True]]: ... + def __invert__(self: bool_[L[False]], /) -> bool_[L[True]]: ... @overload - def __invert__(self: np.bool[L[True]], /) -> np.bool[L[False]]: ... + def __invert__(self: bool_[L[True]], /) -> bool_[L[False]]: ... @overload - def __invert__(self, /) -> np.bool: ... + def __invert__(self, /) -> bool_: ... @overload def __add__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @@ -4276,45 +4275,45 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __rrshift__(self, other: int, /) -> int_: ... @overload - def __and__(self: np.bool[L[False]], other: builtins.bool | np.bool, /) -> np.bool[L[False]]: ... + def __and__(self: bool_[L[False]], other: builtins.bool | bool_, /) -> bool_[L[False]]: ... @overload - def __and__(self, other: L[False] | np.bool[L[False]], /) -> np.bool[L[False]]: ... + def __and__(self, other: L[False] | bool_[L[False]], /) -> bool_[L[False]]: ... @overload - def __and__(self, other: L[True] | np.bool[L[True]], /) -> Self: ... + def __and__(self, other: L[True] | bool_[L[True]], /) -> Self: ... @overload - def __and__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + def __and__(self, other: builtins.bool | bool_, /) -> bool_: ... @overload def __and__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload - def __and__(self, other: int, /) -> np.bool | intp: ... + def __and__(self, other: int, /) -> bool_ | intp: ... __rand__ = __and__ @overload - def __xor__[ItemT: builtins.bool](self: np.bool[L[False]], other: ItemT | np.bool[ItemT], /) -> np.bool[ItemT]: ... + def __xor__[ItemT: builtins.bool](self: bool_[L[False]], other: ItemT | bool_[ItemT], /) -> bool_[ItemT]: ... @overload - def __xor__(self: np.bool[L[True]], other: L[True] | np.bool[L[True]], /) -> np.bool[L[False]]: ... + def __xor__(self: bool_[L[True]], other: L[True] | bool_[L[True]], /) -> bool_[L[False]]: ... @overload - def __xor__(self, other: L[False] | np.bool[L[False]], /) -> Self: ... + def __xor__(self, other: L[False] | bool_[L[False]], /) -> Self: ... @overload - def __xor__(self, other: builtins.bool | np.bool, /) -> np.bool: ... 
+ def __xor__(self, other: builtins.bool | bool_, /) -> bool_: ... @overload def __xor__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload - def __xor__(self, other: int, /) -> np.bool | intp: ... + def __xor__(self, other: int, /) -> bool_ | intp: ... __rxor__ = __xor__ @overload - def __or__(self: np.bool[L[True]], other: builtins.bool | np.bool, /) -> np.bool[L[True]]: ... + def __or__(self: bool_[L[True]], other: builtins.bool | bool_, /) -> bool_[L[True]]: ... @overload - def __or__(self, other: L[False] | np.bool[L[False]], /) -> Self: ... + def __or__(self, other: L[False] | bool_[L[False]], /) -> Self: ... @overload - def __or__(self, other: L[True] | np.bool[L[True]], /) -> np.bool[L[True]]: ... + def __or__(self, other: L[True] | bool_[L[True]], /) -> bool_[L[True]]: ... @overload - def __or__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + def __or__(self, other: builtins.bool | bool_, /) -> bool_: ... @overload def __or__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload - def __or__(self, other: int, /) -> np.bool | intp: ... + def __or__(self, other: int, /) -> bool_ | intp: ... __ror__ = __or__ @overload @@ -5487,7 +5486,7 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __mul__(self, x: _IntLike_co, /) -> Self: ... @overload - def __mul__(self, x: float | np.floating, /) -> timedelta64[_TD64ItemT_co | None]: ... + def __mul__(self, x: float | floating, /) -> timedelta64[_TD64ItemT_co | None]: ... @overload def __mul__(self, x: _FloatLike_co, /) -> timedelta64: ... __rmul__ = __mul__ @@ -6265,4 +6264,4 @@ def from_dlpack( *, device: L["cpu"] | None = None, copy: builtins.bool | None = None, -) -> NDArray[number | np.bool]: ... +) -> NDArray[number | bool_]: ... 
From d1bd5371258a5391295e901b0adbf5b8c5669c58 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 13 Dec 2025 23:29:35 +0100 Subject: [PATCH 1005/1018] TYP: alias `builtins.bool` as `py_bool` in `__init__.pyi` --- numpy/__init__.pyi | 425 +++++++++++++++++++++++---------------------- 1 file changed, 213 insertions(+), 212 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index af13ddd20cce..bae953b7194e 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -5,9 +5,10 @@ import datetime as dt import inspect import sys from abc import abstractmethod -from types import EllipsisType, ModuleType, TracebackType, MappingProxyType, GenericAlias +from builtins import bool as py_bool from decimal import Decimal from fractions import Fraction +from types import EllipsisType, ModuleType, TracebackType, MappingProxyType, GenericAlias from uuid import UUID from numpy.__config__ import show as show_config @@ -713,7 +714,7 @@ _NBitT1 = TypeVar("_NBitT1", bound=NBitBase, default=Any) # pyright: ignore[rep _NBitT2 = TypeVar("_NBitT2", bound=NBitBase, default=_NBitT1) # pyright: ignore[reportDeprecated] _ItemT_co = TypeVar("_ItemT_co", default=Any, covariant=True) -_BoolItemT_co = TypeVar("_BoolItemT_co", bound=builtins.bool, default=builtins.bool, covariant=True) +_BoolItemT_co = TypeVar("_BoolItemT_co", bound=py_bool, default=py_bool, covariant=True) _NumberItemT_co = TypeVar("_NumberItemT_co", bound=complex, default=int | float | complex, covariant=True) _InexactItemT_co = TypeVar("_InexactItemT_co", bound=complex, default=float | complex, covariant=True) _FlexibleItemT_co = TypeVar( @@ -750,7 +751,7 @@ type _ArrayNumeric = NDArray[number | timedelta64 | object_] type _ScalarNotObject = bool_ | number | flexible | datetime64 | timedelta64 type _Float64_co = float | floating[_64Bit] | float32 | float16 | integer | bool_ -type _Complex64_co = number[_32Bit] | number[_16Bit] | number[_8Bit] | builtins.bool | bool_ +type _Complex64_co = number[_32Bit] | number[_16Bit] | number[_8Bit] | py_bool | bool_ type _Complex128_co = complex | number[_64Bit] | _Complex64_co type _ToIndex = SupportsIndex | slice | EllipsisType | _ArrayLikeInt_co | None @@ -1061,7 +1062,7 @@ euler_gamma: Final[float] = ... pi: Final[float] = ... inf: Final[float] = ... nan: Final[float] = ... -little_endian: Final[builtins.bool] = ... +little_endian: Final[py_bool] = ... False_: Final[bool_[L[False]]] = ... True_: Final[bool_[L[True]]] = ... newaxis: Final[None] = None @@ -1097,8 +1098,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: type[float64 | ct.c_double] | _Float64Codes | None, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ... ) -> dtype[float64]: ... @@ -1109,8 +1110,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__[ScalarT: generic]( cls, dtype: _DTypeLike[ScalarT], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[ScalarT]: ... 
@@ -1128,18 +1129,18 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @overload def __new__( cls, - dtype: type[builtins.bool | bool_ | ct.c_bool] | _BoolCodes, - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: type[py_bool | bool_ | ct.c_bool] | _BoolCodes, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[str, Any] = ..., ) -> dtype[bool_]: ... @overload def __new__( cls, - dtype: type[int], # also accepts `type[builtins.bool]` - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: type[int], # also accepts `type[py_bool]` + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[str, Any] = ..., ) -> dtype[int_ | Any]: ... @@ -1147,8 +1148,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: type[float], # also accepts `type[int | bool]` - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[str, Any] = ..., ) -> dtype[float64 | Any]: ... @@ -1156,8 +1157,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: type[complex], # also accepts `type[float | int | bool]` - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[str, Any] = ..., ) -> dtype[complex128 | Any]: ... @@ -1165,8 +1166,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: type[bytes | ct.c_char] | _BytesCodes, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[str, Any] = ..., ) -> dtype[bytes_]: ... @@ -1174,8 +1175,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: type[str] | _StrCodes, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[str, Any] = ..., ) -> dtype[str_]: ... @@ -1189,8 +1190,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: type[void | memoryview] | _VoidDTypeLike | _VoidCodes, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[str, Any] = ..., ) -> dtype[void]: ... @@ -1200,8 +1201,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: type[object_ | _BuiltinObjectLike | ct.py_object[Any]] | _ObjectCodes, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[str, Any] = ..., ) -> dtype[object_]: ... @@ -1211,8 +1212,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _UInt8Codes | type[ct.c_uint8], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[uint8]: ... @@ -1220,8 +1221,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _UInt16Codes | type[ct.c_uint16 | ct.c_ushort], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[uint16]: ... 
@@ -1229,8 +1230,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _UInt32Codes | _UIntCCodes | type[ct.c_uint32 | ct.c_uint], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[uint32]: ... @@ -1238,8 +1239,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _UInt64Codes | _ULongLongCodes | type[ct.c_uint64 | ct.c_ulonglong], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[uint64]: ... @@ -1247,8 +1248,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _UIntPCodes | type[ct.c_void_p | ct.c_size_t], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[uintp]: ... @@ -1256,8 +1257,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _ULongCodes | type[ct.c_ulong], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[uint32 | uint64]: ... @@ -1267,8 +1268,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _Int8Codes | type[ct.c_int8], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[int8]: ... @@ -1276,8 +1277,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _Int16Codes | type[ct.c_int16 | ct.c_short], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[int16]: ... @@ -1285,8 +1286,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _Int32Codes | _IntCCodes | type[ct.c_int32 | ct.c_int], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[int32]: ... @@ -1294,8 +1295,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _Int64Codes | _LongLongCodes | type[ct.c_int64 | ct.c_longlong], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[int64]: ... @@ -1303,8 +1304,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _IntPCodes | type[intp | ct.c_ssize_t], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[intp]: ... @@ -1312,8 +1313,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _LongCodes | type[ct.c_long], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[int32 | int64]: ... 
@@ -1323,8 +1324,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _Float16Codes, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[float16]: ... @@ -1332,8 +1333,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _Float32Codes | type[ct.c_float], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[float32]: ... @@ -1342,8 +1343,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _LongDoubleCodes | type[ct.c_longdouble], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[longdouble]: ... @@ -1354,8 +1355,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _Complex64Codes | type[ct.c_float_complex], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[complex64]: ... @@ -1363,8 +1364,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _Complex128Codes | type[ct.c_double_complex], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[complex128]: ... @@ -1372,8 +1373,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _CLongDoubleCodes | type[ct.c_longdouble_complex], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[clongdouble]: ... @@ -1382,8 +1383,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _Complex64Codes, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[complex64]: ... @@ -1391,8 +1392,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _Complex128Codes, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[complex128]: ... @@ -1400,8 +1401,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _CLongDoubleCodes, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[clongdouble]: ... @@ -1411,8 +1412,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _TD64Codes, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[timedelta64]: ... 
@@ -1420,8 +1421,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _DT64Codes, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[datetime64]: ... @@ -1431,8 +1432,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: dtypes.StringDType | _StringCodes, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtypes.StringDType: ... @@ -1442,8 +1443,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _UnsignedIntegerCodes | _UnsignedIntegerCType, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[unsignedinteger]: ... @@ -1451,8 +1452,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _SignedIntegerCodes | _SignedIntegerCType, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[signedinteger]: ... @@ -1460,8 +1461,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _IntegerCodes | _IntegerCType, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[integer]: ... @@ -1469,8 +1470,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _FloatingCodes | _FloatingCType, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[floating]: ... @@ -1478,8 +1479,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _ComplexFloatingCodes, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[complexfloating]: ... @@ -1487,8 +1488,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _InexactCodes | _FloatingCType, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[inexact]: ... @@ -1496,8 +1497,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _CharacterCodes | type[bytes | builtins.str | ct.c_char], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[str, Any] = ..., ) -> dtype[character]: ... @@ -1507,8 +1508,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: builtins.str, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype: ... 
@@ -1523,8 +1524,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: type[object], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[object_ | Any]: ... @@ -1552,16 +1553,16 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @overload def __rmul__(self, value: SupportsIndex, /) -> dtype: ... - def __gt__(self, other: DTypeLike | None, /) -> builtins.bool: ... - def __ge__(self, other: DTypeLike | None, /) -> builtins.bool: ... - def __lt__(self, other: DTypeLike | None, /) -> builtins.bool: ... - def __le__(self, other: DTypeLike | None, /) -> builtins.bool: ... + def __gt__(self, other: DTypeLike | None, /) -> py_bool: ... + def __ge__(self, other: DTypeLike | None, /) -> py_bool: ... + def __lt__(self, other: DTypeLike | None, /) -> py_bool: ... + def __le__(self, other: DTypeLike | None, /) -> py_bool: ... # Explicitly defined `__eq__` and `__ne__` to get around mypy's # `strict_equality` option; even though their signatures are # identical to their `object`-based counterpart - def __eq__(self, other: Any, /) -> builtins.bool: ... - def __ne__(self, other: Any, /) -> builtins.bool: ... + def __eq__(self, other: Any, /) -> py_bool: ... + def __ne__(self, other: Any, /) -> py_bool: ... @property def alignment(self) -> int: ... @@ -1578,13 +1579,13 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @property def flags(self) -> int: ... @property - def hasobject(self) -> builtins.bool: ... + def hasobject(self) -> py_bool: ... @property def isbuiltin(self) -> _DTypeBuiltinKind: ... @property - def isnative(self) -> builtins.bool: ... + def isnative(self) -> py_bool: ... @property - def isalignedstruct(self) -> builtins.bool: ... + def isalignedstruct(self) -> py_bool: ... @property def itemsize(self) -> int: ... @property @@ -1686,7 +1687,7 @@ class _ArrayOrScalarCommon: @property def device(self) -> L["cpu"]: ... - def __bool__(self, /) -> builtins.bool: ... + def __bool__(self, /) -> py_bool: ... def __int__(self, /) -> int: ... def __float__(self, /) -> float: ... def __copy__(self) -> Self: ... @@ -1715,9 +1716,9 @@ class _ArrayOrScalarCommon: self, /, *, - write: builtins.bool | None = None, - align: builtins.bool | None = None, - uic: builtins.bool | None = None, + write: py_bool | None = None, + align: py_bool | None = None, + uic: py_bool | None = None, ) -> None: ... @property @@ -1744,33 +1745,33 @@ class _ArrayOrScalarCommon: kind: _SortKind | None = ..., order: str | Sequence[str] | None = ..., *, - stable: builtins.bool | None = ..., + stable: py_bool | None = ..., ) -> NDArray[intp]: ... @overload # axis=None (default), out=None (default), keepdims=False (default) def argmax(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ... @overload # axis=index, out=None (default) - def argmax(self, /, axis: SupportsIndex, out: None = None, *, keepdims: builtins.bool = False) -> Any: ... + def argmax(self, /, axis: SupportsIndex, out: None = None, *, keepdims: py_bool = False) -> Any: ... @overload # axis=index, out=ndarray def argmax[OutT: _ArrayInt_co]( - self, /, axis: SupportsIndex | None, out: OutT, *, keepdims: builtins.bool = False + self, /, axis: SupportsIndex | None, out: OutT, *, keepdims: py_bool = False ) -> OutT: ... 
@overload def argmax[OutT: _ArrayInt_co]( - self, /, axis: SupportsIndex | None = None, *, out: OutT, keepdims: builtins.bool = False + self, /, axis: SupportsIndex | None = None, *, out: OutT, keepdims: py_bool = False ) -> OutT: ... @overload # axis=None (default), out=None (default), keepdims=False (default) def argmin(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ... @overload # axis=index, out=None (default) - def argmin(self, /, axis: SupportsIndex, out: None = None, *, keepdims: builtins.bool = False) -> Any: ... + def argmin(self, /, axis: SupportsIndex, out: None = None, *, keepdims: py_bool = False) -> Any: ... @overload # axis=index, out=ndarray def argmin[OutT: _ArrayInt_co]( - self, /, axis: SupportsIndex | None, out: OutT, *, keepdims: builtins.bool = False + self, /, axis: SupportsIndex | None, out: OutT, *, keepdims: py_bool = False ) -> OutT: ... @overload def argmin[OutT: _ArrayInt_co]( - self, /, axis: SupportsIndex | None = None, *, out: OutT, keepdims: builtins.bool = False + self, /, axis: SupportsIndex | None = None, *, out: OutT, keepdims: py_bool = False ) -> OutT: ... # Keep in sync with `MaskedArray.round` @@ -1838,7 +1839,7 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None = None, out: None = None, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @@ -1849,7 +1850,7 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None, out: ArrayT, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... @@ -1860,7 +1861,7 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None = None, *, out: ArrayT, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... @@ -1872,7 +1873,7 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None = None, out: None = None, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @@ -1883,7 +1884,7 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None, out: ArrayT, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... @@ -1894,7 +1895,7 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None = None, *, out: ArrayT, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... @@ -1907,7 +1908,7 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, out: None = None, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... 
@@ -1919,7 +1920,7 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None, out: ArrayT, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... @@ -1931,7 +1932,7 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, *, out: ArrayT, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... @@ -1944,7 +1945,7 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, out: None = None, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @@ -1956,7 +1957,7 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None, out: ArrayT, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... @@ -1968,7 +1969,7 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, *, out: ArrayT, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... @@ -1980,7 +1981,7 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, out: None = None, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload @@ -1991,7 +1992,7 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None, out: ArrayT, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... @overload @@ -2002,7 +2003,7 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, *, out: ArrayT, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... 
@@ -2014,7 +2015,7 @@ class _ArrayOrScalarCommon: out: None = None, ddof: float = 0, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., correction: float | _NoValueType = ..., @@ -2027,7 +2028,7 @@ class _ArrayOrScalarCommon: out: ArrayT, ddof: float = 0, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., correction: float | _NoValueType = ..., @@ -2040,7 +2041,7 @@ class _ArrayOrScalarCommon: *, out: ArrayT, ddof: float = 0, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., correction: float | _NoValueType = ..., @@ -2054,7 +2055,7 @@ class _ArrayOrScalarCommon: out: None = None, ddof: float = 0, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., correction: float | _NoValueType = ..., @@ -2067,7 +2068,7 @@ class _ArrayOrScalarCommon: out: ArrayT, ddof: float = 0, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., correction: float | _NoValueType = ..., @@ -2080,7 +2081,7 @@ class _ArrayOrScalarCommon: *, out: ArrayT, ddof: float = 0, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., correction: float | _NoValueType = ..., @@ -2120,9 +2121,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... @overload - def __array__(self, dtype: None = None, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __array__(self, dtype: None = None, /, *, copy: py_bool | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __array__[DTypeT: dtype](self, dtype: DTypeT, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, DTypeT]: ... + def __array__[DTypeT: dtype](self, dtype: DTypeT, /, *, copy: py_bool | None = None) -> ndarray[_ShapeT_co, DTypeT]: ... def __array_ufunc__( self, @@ -2149,7 +2150,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): self, array: ndarray[ShapeT, DTypeT], context: tuple[ufunc, tuple[Any, ...], int] | None = ..., - return_scalar: builtins.bool = ..., + return_scalar: py_bool = ..., /, ) -> ndarray[ShapeT, DTypeT]: ... @@ -2230,7 +2231,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def strides(self, value: _ShapeLike) -> None: ... # - def byteswap(self, inplace: builtins.bool = ...) -> Self: ... + def byteswap(self, inplace: py_bool = ...) -> Self: ... @property def flat(self) -> flatiter[Self]: ... @@ -2259,9 +2260,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def tolist(self, /) -> Any: ... @overload - def resize(self, new_shape: _ShapeLike, /, *, refcheck: builtins.bool = True) -> None: ... + def resize(self, new_shape: _ShapeLike, /, *, refcheck: py_bool = True) -> None: ... 
@overload - def resize(self, /, *new_shape: SupportsIndex, refcheck: builtins.bool = True) -> None: ... + def resize(self, /, *new_shape: SupportsIndex, refcheck: py_bool = True) -> None: ... # keep in sync with `ma.MaskedArray.squeeze` def squeeze( @@ -2435,7 +2436,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): kind: _SortKind | None = None, order: str | Sequence[str] | None = None, *, - stable: builtins.bool | None = None, + stable: py_bool | None = None, ) -> None: ... # Keep in sync with `MaskedArray.trace` @@ -2522,7 +2523,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.reshape` # NOTE: reshape also accepts negative integers, so we can't use integer literals @overload # (None) - def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: builtins.bool | None = None) -> Self: ... + def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: py_bool | None = None) -> Self: ... @overload # (empty_sequence) def reshape( # mypy false positive self, @@ -2530,7 +2531,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[()], _DTypeT_co]: ... @overload # (() | (int) | (int, int) | ....) # up to 8-d def reshape[ @@ -2551,7 +2552,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[AnyShapeT, _DTypeT_co]: ... @overload # (index) def reshape( @@ -2560,7 +2561,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[int], _DTypeT_co]: ... @overload # (index, index) def reshape( @@ -2570,7 +2571,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[int, int], _DTypeT_co]: ... @overload # (index, index, index) def reshape( @@ -2581,7 +2582,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[int, int, int], _DTypeT_co]: ... @overload # (index, index, index, index) def reshape( @@ -2593,7 +2594,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[int, int, int, int], _DTypeT_co]: ... @overload # (int, *(index, ...)) def reshape( @@ -2602,7 +2603,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, *shape: SupportsIndex, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload # (sequence[index]) def reshape( @@ -2611,7 +2612,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[_AnyShape, _DTypeT_co]: ... 
@overload @@ -2620,8 +2621,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): dtype: _DTypeLike[ScalarT], order: _OrderKACF = ..., casting: _CastingKind = ..., - subok: builtins.bool = ..., - copy: builtins.bool | _CopyMode = ..., + subok: py_bool = ..., + copy: py_bool | _CopyMode = ..., ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload def astype( @@ -2629,8 +2630,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): dtype: DTypeLike | None, order: _OrderKACF = ..., casting: _CastingKind = ..., - subok: builtins.bool = ..., - copy: builtins.bool | _CopyMode = ..., + subok: py_bool = ..., + copy: py_bool | _CopyMode = ..., ) -> ndarray[_ShapeT_co, dtype]: ... # @@ -2659,7 +2660,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __complex__(self: NDArray[number | bool_ | object_], /) -> complex: ... def __len__(self) -> int: ... - def __contains__(self, value: object, /) -> builtins.bool: ... + def __contains__(self, value: object, /) -> py_bool: ... # NOTE: This weird `Never` tuple works around a strange mypy issue where it assigns # `tuple[int]` to `tuple[Never]` or `tuple[int, int]` to `tuple[Never, Never]`. @@ -3644,7 +3645,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): stream: int | Any | None = None, max_version: tuple[int, int] | None = None, dl_device: tuple[int, int] | None = None, - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> CapsuleType: ... def __dlpack_device__(self, /) -> tuple[L[1], L[0]]: ... @@ -3764,9 +3765,9 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): # NOTE: this wont't raise, but won't do anything either @overload - def resize(self, /, *, refcheck: builtins.bool = True) -> None: ... + def resize(self, /, *, refcheck: py_bool = True) -> None: ... @overload - def resize(self, new_shape: L[0, -1] | tuple[L[0, -1]] | tuple[()], /, *, refcheck: builtins.bool = True) -> None: ... + def resize(self, new_shape: L[0, -1] | tuple[L[0, -1]] | tuple[()], /, *, refcheck: py_bool = True) -> None: ... # def byteswap(self, /, inplace: L[False] = False) -> Self: ... @@ -3779,8 +3780,8 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): dtype: _DTypeLike[ScalarT], order: _OrderKACF = "K", casting: _CastingKind = "unsafe", - subok: builtins.bool = True, - copy: builtins.bool | _CopyMode = True, + subok: py_bool = True, + copy: py_bool | _CopyMode = True, ) -> ScalarT: ... @overload def astype( @@ -3789,8 +3790,8 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): dtype: DTypeLike | None, order: _OrderKACF = "K", casting: _CastingKind = "unsafe", - subok: builtins.bool = True, - copy: builtins.bool | _CopyMode = True, + subok: py_bool = True, + copy: py_bool | _CopyMode = True, ) -> Incomplete: ... # NOTE: `view` will perform a 0D->scalar cast, @@ -3856,7 +3857,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> Self: ... @overload # (ShapeT: (index, ...)) def reshape[ShapeT: tuple[int, *tuple[int, ...]]]( @@ -3865,7 +3866,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[ShapeT, dtype[Self]]: ... 
@overload # (Sequence[index, ...]) # not recommended def reshape( @@ -3874,7 +3875,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> NDArray[Self] | Any: ... @overload # _(index) def reshape( @@ -3883,7 +3884,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[int], dtype[Self]]: ... @overload # _(index, index) def reshape( @@ -3893,7 +3894,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[int, int], dtype[Self]]: ... @overload # _(index, index, index) def reshape( @@ -3904,7 +3905,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[int, int, int], dtype[Self]]: ... @overload # _(index, index, index, index) def reshape( @@ -3916,7 +3917,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[int, int, int, int], dtype[Self]]: ... @overload # _(index, index, index, index, index, *index) # ndim >= 5 def reshape( @@ -3929,7 +3930,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, *sizes6_: SupportsIndex, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[int, int, int, int, int, *tuple[int, ...]], dtype[Self]]: ... def squeeze(self, axis: L[0] | tuple[()] | None = ...) -> Self: ... @@ -3943,7 +3944,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): out: None = None, keepdims: SupportsIndex = False, *, - where: builtins.bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True + where: py_bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True ) -> bool_: ... @overload def all[ScalarT: generic]( @@ -3953,7 +3954,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): out: ndarray[tuple[()], dtype[ScalarT]], keepdims: SupportsIndex = False, *, - where: builtins.bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, + where: py_bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, ) -> ScalarT: ... @overload def all[ScalarT: generic]( @@ -3963,7 +3964,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): *, out: ndarray[tuple[()], dtype[ScalarT]], keepdims: SupportsIndex = False, - where: builtins.bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, + where: py_bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, ) -> ScalarT: ... @overload @@ -3974,7 +3975,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): out: None = None, keepdims: SupportsIndex = False, *, - where: builtins.bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True + where: py_bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True ) -> bool_: ... @overload def any[ScalarT: generic]( @@ -3984,7 +3985,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): out: ndarray[tuple[()], dtype[ScalarT]], keepdims: SupportsIndex = False, *, - where: builtins.bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, + where: py_bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, ) -> ScalarT: ... 
@overload def any[ScalarT: generic]( @@ -3994,7 +3995,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): *, out: ndarray[tuple[()], dtype[ScalarT]], keepdims: SupportsIndex = False, - where: builtins.bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, + where: py_bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, ) -> ScalarT: ... # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` @@ -4060,13 +4061,13 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def imag(self) -> bool_[L[False]]: ... @overload # mypy bug workaround: https://github.com/numpy/numpy/issues/29245 - def __new__(cls, value: Never, /) -> bool_[builtins.bool]: ... + def __new__(cls, value: Never, /) -> bool_[py_bool]: ... @overload def __new__(cls, value: _Falsy = ..., /) -> bool_[L[False]]: ... @overload def __new__(cls, value: _Truthy, /) -> bool_[L[True]]: ... @overload - def __new__(cls, value: object, /) -> bool_[builtins.bool]: ... + def __new__(cls, value: object, /) -> bool_[py_bool]: ... def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... @@ -4091,7 +4092,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __add__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload - def __add__(self, other: builtins.bool | bool_, /) -> bool_: ... + def __add__(self, other: py_bool | bool_, /) -> bool_: ... @overload def __add__(self, other: int, /) -> int_: ... @overload @@ -4102,7 +4103,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __radd__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload - def __radd__(self, other: builtins.bool, /) -> bool_: ... + def __radd__(self, other: py_bool, /) -> bool_: ... @overload def __radd__(self, other: int, /) -> int_: ... @overload @@ -4131,7 +4132,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __mul__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload - def __mul__(self, other: builtins.bool | bool_, /) -> bool_: ... + def __mul__(self, other: py_bool | bool_, /) -> bool_: ... @overload def __mul__(self, other: int, /) -> int_: ... @overload @@ -4142,7 +4143,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __rmul__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload - def __rmul__(self, other: builtins.bool, /) -> bool_: ... + def __rmul__(self, other: py_bool, /) -> bool_: ... @overload def __rmul__(self, other: int, /) -> int_: ... @overload @@ -4153,7 +4154,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __pow__[ScalarT: number](self, other: ScalarT, mod: None = None, /) -> ScalarT: ... @overload - def __pow__(self, other: builtins.bool | bool_, mod: None = None, /) -> int8: ... + def __pow__(self, other: py_bool | bool_, mod: None = None, /) -> int8: ... @overload def __pow__(self, other: int, mod: None = None, /) -> int_: ... @overload @@ -4164,7 +4165,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __rpow__[ScalarT: number](self, other: ScalarT, mod: None = None, /) -> ScalarT: ... @overload - def __rpow__(self, other: builtins.bool, mod: None = None, /) -> int8: ... + def __rpow__(self, other: py_bool, mod: None = None, /) -> int8: ... @overload def __rpow__(self, other: int, mod: None = None, /) -> int_: ... 
@overload @@ -4189,7 +4190,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __floordiv__[ScalarT: integer | floating](self, other: ScalarT, /) -> ScalarT: ... @overload - def __floordiv__(self, other: builtins.bool | bool_, /) -> int8: ... + def __floordiv__(self, other: py_bool | bool_, /) -> int8: ... @overload def __floordiv__(self, other: int, /) -> int_: ... @overload @@ -4198,7 +4199,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __rfloordiv__[ScalarT: integer | floating](self, other: ScalarT, /) -> ScalarT: ... @overload - def __rfloordiv__(self, other: builtins.bool, /) -> int8: ... + def __rfloordiv__(self, other: py_bool, /) -> int8: ... @overload def __rfloordiv__(self, other: int, /) -> int_: ... @overload @@ -4208,7 +4209,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __mod__[ScalarT: integer | floating](self, other: ScalarT, /) -> ScalarT: ... @overload - def __mod__(self, other: builtins.bool | bool_, /) -> int8: ... + def __mod__(self, other: py_bool | bool_, /) -> int8: ... @overload def __mod__(self, other: int, /) -> int_: ... @overload @@ -4218,7 +4219,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __rmod__[ScalarT: integer | floating](self, other: ScalarT, /) -> ScalarT: ... @overload - def __rmod__(self, other: builtins.bool, /) -> int8: ... + def __rmod__(self, other: py_bool, /) -> int8: ... @overload def __rmod__(self, other: int, /) -> int_: ... @overload @@ -4228,7 +4229,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __divmod__[ScalarT: integer | floating](self, other: ScalarT, /) -> _2Tuple[ScalarT]: ... @overload - def __divmod__(self, other: builtins.bool | bool_, /) -> _2Tuple[int8]: ... + def __divmod__(self, other: py_bool | bool_, /) -> _2Tuple[int8]: ... @overload def __divmod__(self, other: int, /) -> _2Tuple[int_]: ... @overload @@ -4238,7 +4239,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __rdivmod__[ScalarT: integer | floating](self, other: ScalarT, /) -> _2Tuple[ScalarT]: ... @overload - def __rdivmod__(self, other: builtins.bool, /) -> _2Tuple[int8]: ... + def __rdivmod__(self, other: py_bool, /) -> _2Tuple[int8]: ... @overload def __rdivmod__(self, other: int, /) -> _2Tuple[int_]: ... @overload @@ -4247,14 +4248,14 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __lshift__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload - def __lshift__(self, other: builtins.bool | bool_, /) -> int8: ... + def __lshift__(self, other: py_bool | bool_, /) -> int8: ... @overload def __lshift__(self, other: int, /) -> int_: ... @overload def __rlshift__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload - def __rlshift__(self, other: builtins.bool, /) -> int8: ... + def __rlshift__(self, other: py_bool, /) -> int8: ... @overload def __rlshift__(self, other: int, /) -> int_: ... @@ -4262,7 +4263,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __rshift__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload - def __rshift__(self, other: builtins.bool | bool_, /) -> int8: ... + def __rshift__(self, other: py_bool | bool_, /) -> int8: ... @overload def __rshift__(self, other: int, /) -> int_: ... @@ -4270,18 +4271,18 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __rrshift__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... 
@overload - def __rrshift__(self, other: builtins.bool, /) -> int8: ... + def __rrshift__(self, other: py_bool, /) -> int8: ... @overload def __rrshift__(self, other: int, /) -> int_: ... @overload - def __and__(self: bool_[L[False]], other: builtins.bool | bool_, /) -> bool_[L[False]]: ... + def __and__(self: bool_[L[False]], other: py_bool | bool_, /) -> bool_[L[False]]: ... @overload def __and__(self, other: L[False] | bool_[L[False]], /) -> bool_[L[False]]: ... @overload def __and__(self, other: L[True] | bool_[L[True]], /) -> Self: ... @overload - def __and__(self, other: builtins.bool | bool_, /) -> bool_: ... + def __and__(self, other: py_bool | bool_, /) -> bool_: ... @overload def __and__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload @@ -4289,13 +4290,13 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): __rand__ = __and__ @overload - def __xor__[ItemT: builtins.bool](self: bool_[L[False]], other: ItemT | bool_[ItemT], /) -> bool_[ItemT]: ... + def __xor__[ItemT: py_bool](self: bool_[L[False]], other: ItemT | bool_[ItemT], /) -> bool_[ItemT]: ... @overload def __xor__(self: bool_[L[True]], other: L[True] | bool_[L[True]], /) -> bool_[L[False]]: ... @overload def __xor__(self, other: L[False] | bool_[L[False]], /) -> Self: ... @overload - def __xor__(self, other: builtins.bool | bool_, /) -> bool_: ... + def __xor__(self, other: py_bool | bool_, /) -> bool_: ... @overload def __xor__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload @@ -4303,13 +4304,13 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): __rxor__ = __xor__ @overload - def __or__(self: bool_[L[True]], other: builtins.bool | bool_, /) -> bool_[L[True]]: ... + def __or__(self: bool_[L[True]], other: py_bool | bool_, /) -> bool_[L[True]]: ... @overload def __or__(self, other: L[False] | bool_[L[False]], /) -> Self: ... @overload def __or__(self, other: L[True] | bool_[L[True]], /) -> bool_[L[True]]: ... @overload - def __or__(self, other: builtins.bool | bool_, /) -> bool_: ... + def __or__(self, other: py_bool | bool_, /) -> bool_: ... @overload def __or__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload @@ -5062,7 +5063,7 @@ class floating(_RealMixin, _RoundMixin, inexact[_NBitT, float]): def __rdivmod__(self, other: float, /) -> _2Tuple[Self]: ... # NOTE: `is_integer` and `as_integer_ratio` are technically defined in the concrete subtypes - def is_integer(self, /) -> builtins.bool: ... + def is_integer(self, /) -> py_bool: ... def as_integer_ratio(self, /) -> tuple[int, int]: ... float16 = floating[_16Bit] @@ -5899,7 +5900,7 @@ class ufunc: *, signature: tuple[dtype | None, ...] | None = None, casting: _CastingKind | None = None, - reduction: builtins.bool = False, + reduction: py_bool = False, ) -> tuple[dtype, ...]: ... # Parameters: `__name__`, `ntypes` and `identity` @@ -6101,24 +6102,24 @@ class nditer: def copy(self) -> nditer: ... def debug_print(self) -> None: ... def enable_external_loop(self) -> None: ... - def iternext(self) -> builtins.bool: ... + def iternext(self) -> py_bool: ... def remove_axis(self, i: SupportsIndex, /) -> None: ... def remove_multi_index(self) -> None: ... def reset(self) -> None: ... @property def dtypes(self) -> tuple[dtype, ...]: ... @property - def finished(self) -> builtins.bool: ... + def finished(self) -> py_bool: ... @property - def has_delayed_bufalloc(self) -> builtins.bool: ... + def has_delayed_bufalloc(self) -> py_bool: ... @property - def has_index(self) -> builtins.bool: ... 
+ def has_index(self) -> py_bool: ... @property - def has_multi_index(self) -> builtins.bool: ... + def has_multi_index(self) -> py_bool: ... @property def index(self) -> int: ... @property - def iterationneedsapi(self) -> builtins.bool: ... + def iterationneedsapi(self) -> py_bool: ... @property def iterindex(self) -> int: ... @property @@ -6180,7 +6181,7 @@ class memmap(ndarray[_ShapeT_co, _DTypeT_co]): self, array: memmap[_ShapeT_co, _DTypeT_co], context: tuple[ufunc, tuple[Any, ...], int] | None = None, - return_scalar: builtins.bool = False, + return_scalar: py_bool = False, ) -> Any: ... def flush(self) -> None: ... @@ -6219,9 +6220,9 @@ class poly1d: __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] @overload - def __array__(self, /, t: None = None, copy: builtins.bool | None = None) -> ndarray[tuple[int]]: ... + def __array__(self, /, t: None = None, copy: py_bool | None = None) -> ndarray[tuple[int]]: ... @overload - def __array__[DTypeT: dtype](self, /, t: DTypeT, copy: builtins.bool | None = None) -> ndarray[tuple[int], DTypeT]: ... + def __array__[DTypeT: dtype](self, /, t: DTypeT, copy: py_bool | None = None) -> ndarray[tuple[int], DTypeT]: ... @overload def __call__(self, val: _ScalarLike_co) -> Any: ... @@ -6233,7 +6234,7 @@ class poly1d: def __init__( self, c_or_r: ArrayLike, - r: builtins.bool = False, + r: py_bool = False, variable: str | None = None, ) -> None: ... def __len__(self) -> int: ... @@ -6263,5 +6264,5 @@ def from_dlpack( /, *, device: L["cpu"] | None = None, - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> NDArray[number | bool_]: ... From 81571e7a88d35f0ac4cc179258ecfa5675223ed6 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 13 Dec 2025 23:31:38 +0100 Subject: [PATCH 1006/1018] TYP: prefer `str` over `builtins.str` in `__init__.pyi` --- numpy/__init__.pyi | 83 +++++++++++++++++++++++----------------------- 1 file changed, 41 insertions(+), 42 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index bae953b7194e..4128dd3cb9ec 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1,5 +1,4 @@ # ruff: noqa: I001 -import builtins import ctypes as ct import datetime as dt import inspect @@ -1090,7 +1089,7 @@ class _DTypeMeta(type): @final class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 - names: tuple[builtins.str, ...] | None + names: tuple[str, ...] | None def __hash__(self) -> int: ... # `None` results in the default dtype @@ -1101,7 +1100,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ... + metadata: dict[str, Any] = ... ) -> dtype[float64]: ... # Overload for `dtype` instances, scalar types, and instances that have a @@ -1113,7 +1112,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[ScalarT]: ... # Builtin types @@ -1215,7 +1214,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[uint8]: ... 
@overload def __new__( @@ -1224,7 +1223,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[uint16]: ... @overload def __new__( @@ -1233,7 +1232,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[uint32]: ... @overload def __new__( @@ -1242,7 +1241,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[uint64]: ... @overload def __new__( @@ -1251,7 +1250,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[uintp]: ... @overload def __new__( @@ -1260,7 +1259,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[uint32 | uint64]: ... # `signedinteger` string-based representations and ctypes @@ -1271,7 +1270,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[int8]: ... @overload def __new__( @@ -1280,7 +1279,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[int16]: ... @overload def __new__( @@ -1289,7 +1288,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[int32]: ... @overload def __new__( @@ -1298,7 +1297,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[int64]: ... @overload def __new__( @@ -1307,7 +1306,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[intp]: ... @overload def __new__( @@ -1316,7 +1315,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[int32 | int64]: ... # `floating` string-based representations and ctypes @@ -1327,7 +1326,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[float16]: ... @overload def __new__( @@ -1336,7 +1335,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[float32]: ... 
# float64 codes are covered by overload 1 @overload @@ -1346,7 +1345,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[longdouble]: ... # `complexfloating` string-based representations and ctypes @@ -1358,7 +1357,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[complex64]: ... @overload def __new__( @@ -1367,7 +1366,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[complex128]: ... @overload def __new__( @@ -1376,7 +1375,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[clongdouble]: ... else: @overload @@ -1386,7 +1385,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[complex64]: ... @overload def __new__( @@ -1395,7 +1394,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[complex128]: ... @overload def __new__( @@ -1404,7 +1403,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[clongdouble]: ... # Miscellaneous string-based representations and ctypes @@ -1415,7 +1414,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[timedelta64]: ... @overload def __new__( @@ -1424,7 +1423,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[datetime64]: ... # `StringDType` requires special treatment because it has no scalar type @@ -1435,7 +1434,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtypes.StringDType: ... # Combined char-codes and ctypes, analogous to the scalar-type hierarchy @@ -1446,7 +1445,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[unsignedinteger]: ... @overload def __new__( @@ -1455,7 +1454,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[signedinteger]: ... 
@overload def __new__( @@ -1464,7 +1463,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[integer]: ... @overload def __new__( @@ -1473,7 +1472,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[floating]: ... @overload def __new__( @@ -1482,7 +1481,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[complexfloating]: ... @overload def __new__( @@ -1491,12 +1490,12 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[inexact]: ... @overload def __new__( cls, - dtype: _CharacterCodes | type[bytes | builtins.str | ct.c_char], + dtype: _CharacterCodes | type[bytes | str | ct.c_char], align: py_bool = False, copy: py_bool = False, *, @@ -1507,11 +1506,11 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @overload def __new__( cls, - dtype: builtins.str, + dtype: str, align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype: ... # Catch-all overload for object-likes @@ -1527,15 +1526,15 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[object_ | Any]: ... def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... @overload - def __getitem__(self: dtype[void], key: list[builtins.str], /) -> dtype[void]: ... + def __getitem__(self: dtype[void], key: list[str], /) -> dtype[void]: ... @overload - def __getitem__(self: dtype[void], key: builtins.str | SupportsIndex, /) -> dtype: ... + def __getitem__(self: dtype[void], key: str | SupportsIndex, /) -> dtype: ... # NOTE: In the future 1-based multiplications will also yield `flexible` dtypes @overload @@ -1591,7 +1590,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @property def kind(self) -> _DTypeKind: ... @property - def metadata(self) -> MappingProxyType[builtins.str, Any] | None: ... + def metadata(self) -> MappingProxyType[str, Any] | None: ... @property def name(self) -> LiteralString: ... @property @@ -1726,7 +1725,7 @@ class _ArrayOrScalarCommon: @property def __array_priority__(self) -> float: ... @property - def __array_struct__(self) -> CapsuleType: ... # builtins.PyCapsule + def __array_struct__(self) -> CapsuleType: ... def __array_namespace__(self, /, *, api_version: _ArrayAPIVersion | None = None) -> ModuleType: ... 
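For the two members at the end of this hunk, a short runtime sketch (behaviour as of NumPy 2.x):

import numpy as np

# __array_struct__ exposes the legacy array interface as a PyCapsule
# (hence CapsuleType above); __array_namespace__ returns the array-API
# namespace the array belongs to, which is numpy itself.
a = np.arange(3.0)
assert a.__array_namespace__() is np
print(type(a.__array_struct__).__name__)  # PyCapsule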
def __setstate__(self, state: tuple[ SupportsIndex, # version From 510fea4b61d9ee369f4f33bf5dfe750ca8bd418a Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 14 Dec 2025 00:31:16 +0100 Subject: [PATCH 1007/1018] TYP: ``linalg.diagonal``: shape-typing and transparent dtypes --- numpy/linalg/_linalg.pyi | 47 +++++++++++++++++++---- numpy/typing/tests/data/reveal/linalg.pyi | 14 +++++++ 2 files changed, 54 insertions(+), 7 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 38926e84d0a6..766c8f2468a5 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -5,9 +5,11 @@ from typing import ( Literal as L, NamedTuple, Never, + Protocol, SupportsIndex, SupportsInt, overload, + type_check_only, ) from typing_extensions import TypeVar @@ -29,6 +31,7 @@ from numpy._typing import ( ArrayLike, DTypeLike, NDArray, + _AnyShape, _ArrayLike, _ArrayLikeBool_co, _ArrayLikeComplex_co, @@ -38,6 +41,7 @@ from numpy._typing import ( _ArrayLikeTD64_co, _ArrayLikeUInt_co, _NestedSequence, + _Shape, _ShapeLike, ) from numpy.linalg import LinAlgError @@ -113,6 +117,11 @@ _FloatingOrArrayT_co = TypeVar("_FloatingOrArrayT_co", bound=np.floating | NDArr _InexactT_co = TypeVar("_InexactT_co", bound=np.inexact, default=Any, covariant=True) _InexactOrArrayT_co = TypeVar("_InexactOrArrayT_co", bound=np.inexact | NDArray[np.inexact], default=Any, covariant=True) +# shape-typed variant of numpy._typing._SupportsArray +@type_check_only +class _SupportsArray[ShapeT: _Shape, DTypeT: np.dtype](Protocol): + def __array__(self, /) -> np.ndarray[ShapeT, DTypeT]: ... + ### fortran_int = np.intc @@ -654,13 +663,37 @@ def multi_dot( out: NDArray[Any] | None = None, ) -> Any: ... -# TODO: narrow return types -def diagonal( - x: ArrayLike, # >= 2D array - /, - *, - offset: SupportsIndex = 0, -) -> NDArray[Any]: ... +# +@overload # workaround for microsoft/pyright#10232 +def diagonal[DTypeT: np.dtype]( + x: _SupportsArray[_JustAnyShape, DTypeT], /, *, offset: SupportsIndex = 0 +) -> np.ndarray[_AnyShape, DTypeT]: ... +@overload # 2d, known dtype +def diagonal[DTypeT: np.dtype]( + x: _SupportsArray[tuple[int, int], DTypeT], /, *, offset: SupportsIndex = 0 +) -> np.ndarray[tuple[int], DTypeT]: ... +@overload # 3d, known dtype +def diagonal[DTypeT: np.dtype]( + x: _SupportsArray[tuple[int, int, int], DTypeT], /, *, offset: SupportsIndex = 0 +) -> np.ndarray[tuple[int, int], DTypeT]: ... +@overload # 4d, known dtype +def diagonal[DTypeT: np.dtype]( + x: _SupportsArray[tuple[int, int, int, int], DTypeT], /, *, offset: SupportsIndex = 0 +) -> np.ndarray[tuple[int, int, int], DTypeT]: ... +@overload # nd like ~bool +def diagonal(x: _NestedSequence[list[bool]], /, *, offset: SupportsIndex = 0) -> NDArray[np.bool]: ... +@overload # nd like ~int +def diagonal(x: _NestedSequence[list[int]], /, *, offset: SupportsIndex = 0) -> NDArray[np.int_]: ... +@overload # nd like ~float +def diagonal(x: _NestedSequence[list[float]], /, *, offset: SupportsIndex = 0) -> NDArray[np.float64]: ... +@overload # nd like ~complex +def diagonal(x: _NestedSequence[list[complex]], /, *, offset: SupportsIndex = 0) -> NDArray[np.complex128]: ... +@overload # nd like ~bytes +def diagonal(x: _NestedSequence[list[bytes]], /, *, offset: SupportsIndex = 0) -> NDArray[np.bytes_]: ... +@overload # nd like ~str +def diagonal(x: _NestedSequence[list[str]], /, *, offset: SupportsIndex = 0) -> NDArray[np.str_]: ... +@overload # fallback +def diagonal(x: ArrayLike, /, *, offset: SupportsIndex = 0) -> np.ndarray: ... 
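The reveal tests below exercise these overloads statically; a minimal runtime sketch of the shapes they encode (np.linalg.diagonal requires NumPy >= 2.0):

import numpy as np

# 2-D input -> 1-D diagonal of the same dtype; a stacked 3-D input
# yields the diagonals of the last two axes, one row per matrix.
m = np.eye(3, dtype=np.float32)
assert np.linalg.diagonal(m).shape == (3,)
assert np.linalg.diagonal(m).dtype == np.float32

batch = np.zeros((2, 3, 3))
assert np.linalg.diagonal(batch).shape == (2, 3)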
# TODO: narrow return types def trace( diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index a40aa84d9c05..744f1edbaefa 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -10,6 +10,7 @@ from numpy.linalg._linalg import ( SVDResult, ) +bool_list_2d: list[list[bool]] int_list_2d: list[list[int]] float_list_1d: list[float] float_list_2d: list[list[float]] @@ -17,6 +18,8 @@ float_list_3d: list[list[list[float]]] float_list_4d: list[list[list[list[float]]]] complex_list_2d: list[list[complex]] complex_list_3d: list[list[list[complex]]] +bytes_list_2d: list[list[bytes]] +str_list_2d: list[list[str]] AR_any: np.ndarray AR_f_: npt.NDArray[np.floating] @@ -234,6 +237,17 @@ assert_type(np.linalg.multi_dot([AR_f8, AR_c16]), Any) assert_type(np.linalg.multi_dot([AR_O, AR_O]), Any) assert_type(np.linalg.multi_dot([AR_m, AR_m]), Any) +assert_type(np.linalg.diagonal(AR_any), np.ndarray) +assert_type(np.linalg.diagonal(AR_f4), npt.NDArray[np.float32]) +assert_type(np.linalg.diagonal(AR_f4_2d), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.linalg.diagonal(AR_f8_2d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.linalg.diagonal(bool_list_2d), npt.NDArray[np.bool]) +assert_type(np.linalg.diagonal(int_list_2d), npt.NDArray[np.int_]) +assert_type(np.linalg.diagonal(float_list_2d), npt.NDArray[np.float64]) +assert_type(np.linalg.diagonal(complex_list_2d), npt.NDArray[np.complex128]) +assert_type(np.linalg.diagonal(bytes_list_2d), npt.NDArray[np.bytes_]) +assert_type(np.linalg.diagonal(str_list_2d), npt.NDArray[np.str_]) + assert_type(np.linalg.cross(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) assert_type(np.linalg.cross(AR_f8, AR_f8), npt.NDArray[np.floating]) assert_type(np.linalg.cross(AR_c16, AR_c16), npt.NDArray[np.complexfloating]) From f8082890fb032b0a77d8dbc3f0a90f7956785210 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 14 Dec 2025 00:36:09 +0100 Subject: [PATCH 1008/1018] TYP: disable type-test on mypy due to a bug --- numpy/typing/tests/data/reveal/linalg.pyi | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 744f1edbaefa..194c525871d5 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -237,7 +237,8 @@ assert_type(np.linalg.multi_dot([AR_f8, AR_c16]), Any) assert_type(np.linalg.multi_dot([AR_O, AR_O]), Any) assert_type(np.linalg.multi_dot([AR_m, AR_m]), Any) -assert_type(np.linalg.diagonal(AR_any), np.ndarray) +# Mypy incorrectly inferts `ndarray[Any, Any]`, but pyright behaves correctly. 
+assert_type(np.linalg.diagonal(AR_any), np.ndarray) # type: ignore[assert-type] assert_type(np.linalg.diagonal(AR_f4), npt.NDArray[np.float32]) assert_type(np.linalg.diagonal(AR_f4_2d), np.ndarray[tuple[int], np.dtype[np.float32]]) assert_type(np.linalg.diagonal(AR_f8_2d), np.ndarray[tuple[int], np.dtype[np.float64]]) From 29ad5a11a798150d03d801ccf9e6e75e499c246f Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 14 Dec 2025 14:29:00 +0100 Subject: [PATCH 1009/1018] TYP: ``linalg.trace``: shape-typing and transparent dtypes --- numpy/linalg/_linalg.pyi | 71 +++++++++++++++++++++-- numpy/typing/tests/data/reveal/linalg.pyi | 13 +++++ 2 files changed, 80 insertions(+), 4 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 766c8f2468a5..4e0d075dc181 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -40,6 +40,8 @@ from numpy._typing import ( _ArrayLikeObject_co, _ArrayLikeTD64_co, _ArrayLikeUInt_co, + _ComplexLike_co, + _DTypeLike, _NestedSequence, _Shape, _ShapeLike, @@ -82,6 +84,7 @@ __all__ = [ ] type _AtMost1D = tuple[()] | tuple[int] +type _AtLeast2D = tuple[int, int, *tuple[int, ...]] type _AtLeast3D = tuple[int, int, int, *tuple[int, ...]] type _AtLeast4D = tuple[int, int, int, int, *tuple[int, ...]] type _JustAnyShape = tuple[Never, ...] # workaround for microsoft/pyright#10232 @@ -91,6 +94,7 @@ type _tuple2[T] = tuple[T, T] type _inexact32 = np.float32 | np.complex64 type _to_float64 = np.float64 | np.integer | np.bool type _to_inexact64 = np.complex128 | _to_float64 +type _to_complex = np.number | np.bool type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] type _Array3ND[ScalarT: np.generic] = np.ndarray[_AtLeast3D, np.dtype[ScalarT]] @@ -695,14 +699,73 @@ def diagonal(x: _NestedSequence[list[str]], /, *, offset: SupportsIndex = 0) -> @overload # fallback def diagonal(x: ArrayLike, /, *, offset: SupportsIndex = 0) -> np.ndarray: ... -# TODO: narrow return types +# +@overload # workaround for microsoft/pyright#10232 def trace( - x: ArrayLike, # >= 2D array + x: _SupportsArray[_JustAnyShape, np.dtype[_to_complex]], /, *, offset: SupportsIndex = 0, dtype: DTypeLike | None = None +) -> Any: ... +@overload # 2d known dtype, dtype=None +def trace[ScalarT: _to_complex]( + x: _SupportsArray[tuple[int, int], np.dtype[ScalarT]], /, *, offset: SupportsIndex = 0, dtype: None = None +) -> ScalarT: ... +@overload # 2d, dtype= +def trace[ScalarT: _to_complex]( + x: _SupportsArray[tuple[int, int], np.dtype[_to_complex]] | Sequence[Sequence[_ComplexLike_co]], /, *, offset: SupportsIndex = 0, - dtype: DTypeLike | None = None, -) -> Any: ... + dtype: _DTypeLike[ScalarT], +) -> ScalarT: ... +@overload # 2d bool +def trace(x: Sequence[Sequence[bool]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> np.bool: ... +@overload # 2d int +def trace(x: Sequence[list[int]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> np.int_: ... +@overload # 2d float +def trace(x: Sequence[list[float]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> np.float64: ... +@overload # 2d complex +def trace(x: Sequence[list[complex]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> np.complex128: ... +@overload # 3d known dtype, dtype=None +def trace[DTypeT: np.dtype[_to_complex]]( + x: _SupportsArray[tuple[int, int, int], DTypeT], /, *, offset: SupportsIndex = 0, dtype: None = None +) -> np.ndarray[tuple[int], DTypeT]: ... 
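The remaining trace overloads continue below; first, a short runtime sketch of the reduction they encode (np.linalg.trace requires NumPy >= 2.0):

import numpy as np

# A 2-D input reduces to a scalar of the input dtype; a stacked 3-D
# input yields one trace per matrix along the leading axis.
m = np.eye(3, dtype=np.float32)
assert np.linalg.trace(m) == np.float32(3.0)

batch = np.ones((4, 2, 2))
assert np.linalg.trace(batch).shape == (4,)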
+@overload # 3d, dtype= +def trace[ScalarT: _to_complex]( + x: _SupportsArray[tuple[int, int, int], np.dtype[_to_complex]] | Sequence[Sequence[Sequence[_ComplexLike_co]]], + /, + *, + offset: SupportsIndex = 0, + dtype: _DTypeLike[ScalarT], +) -> np.ndarray[tuple[int], np.dtype[ScalarT]]: ... +@overload # 3d+ known dtype, dtype=None +def trace[DTypeT: np.dtype[_to_complex]]( + x: _SupportsArray[_AtLeast3D, DTypeT], /, *, offset: SupportsIndex = 0, dtype: None = None +) -> np.ndarray[tuple[int, *tuple[Any, ...]], DTypeT]: ... +@overload # 3d+, dtype= +def trace[ScalarT: _to_complex]( + x: _SupportsArray[_AtLeast3D, np.dtype[_to_complex]] | _NestedSequence[Sequence[Sequence[_ComplexLike_co]]], + /, + *, + offset: SupportsIndex = 0, + dtype: _DTypeLike[ScalarT], +) -> np.ndarray[tuple[int, *tuple[Any, ...]], np.dtype[ScalarT]]: ... +@overload # 3d+ bool +def trace( + x: _NestedSequence[Sequence[Sequence[bool]]], /, *, offset: SupportsIndex = 0, dtype: None = None +) -> NDArray[np.bool]: ... +@overload # 3d+ int +def trace( + x: _NestedSequence[Sequence[list[int]]], /, *, offset: SupportsIndex = 0, dtype: None = None +) -> NDArray[np.int_]: ... +@overload # 3d+ float +def trace( + x: _NestedSequence[Sequence[list[float]]], /, *, offset: SupportsIndex = 0, dtype: None = None +) -> NDArray[np.float64]: ... +@overload # 3d+ complex +def trace( + x: _NestedSequence[Sequence[list[complex]]], /, *, offset: SupportsIndex = 0, dtype: None = None +) -> NDArray[np.complex128]: ... +@overload # fallback +def trace(x: _ArrayLikeComplex_co, /, *, offset: SupportsIndex = 0, dtype: DTypeLike | None = None) -> Any: ... # TODO: narrow return types @overload diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 194c525871d5..e782b8e24281 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -249,6 +249,19 @@ assert_type(np.linalg.diagonal(complex_list_2d), npt.NDArray[np.complex128]) assert_type(np.linalg.diagonal(bytes_list_2d), npt.NDArray[np.bytes_]) assert_type(np.linalg.diagonal(str_list_2d), npt.NDArray[np.str_]) +assert_type(np.linalg.trace(AR_any), Any) +assert_type(np.linalg.trace(AR_f4), Any) +assert_type(np.linalg.trace(AR_f4_2d), np.float32) +assert_type(np.linalg.trace(AR_f8_2d), np.float64) +assert_type(np.linalg.trace(AR_f4_3d), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.linalg.trace(AR_f8_3d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.linalg.trace(AR_f8_4d), np.ndarray[tuple[int, *tuple[Any, ...]], np.dtype[np.float64]]) +assert_type(np.linalg.trace(bool_list_2d), np.bool) +assert_type(np.linalg.trace(int_list_2d), np.int_) +assert_type(np.linalg.trace(float_list_2d), np.float64) +assert_type(np.linalg.trace(complex_list_2d), np.complex128) +assert_type(np.linalg.trace(float_list_3d), npt.NDArray[np.float64]) + assert_type(np.linalg.cross(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) assert_type(np.linalg.cross(AR_f8, AR_f8), npt.NDArray[np.floating]) assert_type(np.linalg.cross(AR_c16, AR_c16), npt.NDArray[np.complexfloating]) From b70fc2ef39e007ea6a5393a3dc12c2e1a46b189c Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sun, 14 Dec 2025 14:35:17 +0100 Subject: [PATCH 1010/1018] TYP: fix typo Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- numpy/typing/tests/data/reveal/linalg.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/typing/tests/data/reveal/linalg.pyi 
b/numpy/typing/tests/data/reveal/linalg.pyi index e782b8e24281..81f8ec7c61d9 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -237,7 +237,7 @@ assert_type(np.linalg.multi_dot([AR_f8, AR_c16]), Any) assert_type(np.linalg.multi_dot([AR_O, AR_O]), Any) assert_type(np.linalg.multi_dot([AR_m, AR_m]), Any) -# Mypy incorrectly inferts `ndarray[Any, Any]`, but pyright behaves correctly. +# Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. assert_type(np.linalg.diagonal(AR_any), np.ndarray) # type: ignore[assert-type] assert_type(np.linalg.diagonal(AR_f4), npt.NDArray[np.float32]) assert_type(np.linalg.diagonal(AR_f4_2d), np.ndarray[tuple[int], np.dtype[np.float32]]) From ffd9fd28574028ab1fa323c596f9ed7eddf12da2 Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Mon, 15 Dec 2025 01:02:24 +0530 Subject: [PATCH 1011/1018] fix more data races in mtrand.pyx (#30426) --- numpy/random/mtrand.pyx | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index ae1706eb884b..c69284d0df9a 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -222,12 +222,13 @@ cdef class RandomState: "be instantized.") self._bitgen = ( PyCapsule_GetPointer(capsule, name))[0] self._aug_state.bit_generator = &self._bitgen - self._reset_gauss() self.lock = bit_generator.lock + self._reset_gauss() cdef _reset_gauss(self): - self._aug_state.has_gauss = 0 - self._aug_state.gauss = 0.0 + with self.lock: + self._aug_state.has_gauss = 0 + self._aug_state.gauss = 0.0 def seed(self, seed=None): """ @@ -301,8 +302,9 @@ cdef class RandomState: 'MT19937 BitGenerator. To silence this warning, ' 'set `legacy` to False.', RuntimeWarning) legacy = False - st['has_gauss'] = self._aug_state.has_gauss - st['gauss'] = self._aug_state.gauss + with self.lock: + st['has_gauss'] = self._aug_state.has_gauss + st['gauss'] = self._aug_state.gauss if legacy and not isinstance(self._bit_generator, _MT19937): raise ValueError( "legacy can only be True when the underlying bitgenerator is " From 072a8ee00f87a216517accfc480d73168561c79d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Dec 2025 17:21:21 +0000 Subject: [PATCH 1012/1018] MAINT: Bump actions/upload-artifact from 5.0.0 to 6.0.0 Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 5.0.0 to 6.0.0. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/330a01c490aca151604b8cf639adc76d48f6c5d4...b7c566a772e6b6bfb58ed0dc250532a479d7789f) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-version: 6.0.0 dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/cygwin.yml | 2 +- .github/workflows/mypy_primer.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- .github/workflows/wheels.yml | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index e8f7083336e6..a8bef06a5f5c 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -67,7 +67,7 @@ jobs: cd tools /usr/bin/python3.9 -m pytest --pyargs numpy -n2 -m "not slow" - name: Upload wheel if tests fail - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 if: failure() with: name: numpy-cygwin-wheel diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml index 040a154d2895..b0b57cb93d7b 100644 --- a/.github/workflows/mypy_primer.yml +++ b/.github/workflows/mypy_primer.yml @@ -74,7 +74,7 @@ jobs: run: | echo ${{ github.event.pull_request.number }} | tee pr_number.txt - name: Upload mypy_primer diff + PR number - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 if: ${{ matrix.shard-index == 0 }} with: name: mypy_primer_diffs-${{ matrix.shard-index }} @@ -82,7 +82,7 @@ jobs: diff_${{ matrix.shard-index }}.txt pr_number.txt - name: Upload mypy_primer diff - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 if: ${{ matrix.shard-index != 0 }} with: name: mypy_primer_diffs-${{ matrix.shard-index }} @@ -96,7 +96,7 @@ jobs: contents: read steps: - name: Merge artifacts - uses: actions/upload-artifact/merge@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact/merge@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: mypy_primer_diffs pattern: mypy_primer_diffs-* diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 140d92d43e61..421790b4ae1e 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -42,7 +42,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable # uploads of run results in SARIF format to the repository Actions tab. 
- name: "Upload artifact" - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: SARIF file path: results.sarif diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 436e929fe636..27ee88d80677 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -102,7 +102,7 @@ jobs: env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} - - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + - uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} path: ./wheelhouse/*.whl From 908219cd6dd75c0650e97657a54f61a17428be8c Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 15 Dec 2025 11:12:14 -0700 Subject: [PATCH 1013/1018] MAINT: Update msvc-allowed-warnings.txt Line numbers have changed in numpy/random/_generator.pyx --- .github/check-warnings/msvc-allowed-warnings.txt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/check-warnings/msvc-allowed-warnings.txt b/.github/check-warnings/msvc-allowed-warnings.txt index fa5597b18216..4cc6e6ab2124 100644 --- a/.github/check-warnings/msvc-allowed-warnings.txt +++ b/.github/check-warnings/msvc-allowed-warnings.txt @@ -15,7 +15,7 @@ C:\a\numpy\numpy\numpy\random\src\pcg64\pcg64.h(342): warning C4146: unary minus operator applied to unsigned type, result still unsigned D:\a\numpy\numpy\numpy\random\src\pcg64\pcg64.h(342): warning C4146: unary minus operator applied to unsigned type, result still unsigned cl : Command line warning D9025 : overriding '/arch:SSE2' with '/arch:AVX2' -numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(26290): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data -numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(38314): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data -numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(26290): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data -numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(38314): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data +numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(26329): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data +numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(38353): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data +numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(26329): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data +numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(38353): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data From 3a84feb5731514fd8343543f24b171f581b3a5c6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Dec 2025 12:33:17 -0700 Subject: [PATCH 1014/1018] MAINT: Bump astral-sh/setup-uv from 7.1.5 to 7.1.6 (#30436) Bumps [astral-sh/setup-uv](https://github.com/astral-sh/setup-uv) from 7.1.5 to 7.1.6. 
- [Release notes](https://github.com/astral-sh/setup-uv/releases) - [Commits](https://github.com/astral-sh/setup-uv/compare/ed21f2f24f8dd64503750218de024bcf64c7250a...681c641aba71e4a1c380be3ab5e12ad51f415867) --- updated-dependencies: - dependency-name: astral-sh/setup-uv dependency-version: 7.1.6 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/mypy.yml | 2 +- .github/workflows/stubtest.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index ac87ccacd846..41f421ce3889 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -61,7 +61,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@ed21f2f24f8dd64503750218de024bcf64c7250a # v7.1.5 + - uses: astral-sh/setup-uv@681c641aba71e4a1c380be3ab5e12ad51f415867 # v7.1.6 with: python-version: ${{ matrix.os_python[1] }} activate-environment: true diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml index c91954403b14..526471f799c7 100644 --- a/.github/workflows/stubtest.yml +++ b/.github/workflows/stubtest.yml @@ -43,7 +43,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@ed21f2f24f8dd64503750218de024bcf64c7250a # v7.1.5 + - uses: astral-sh/setup-uv@681c641aba71e4a1c380be3ab5e12ad51f415867 # v7.1.6 with: python-version: ${{ matrix.py }} activate-environment: true From e2649ab111ffa634f2314ed37809e7361db5c1fb Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 16 Dec 2025 16:18:02 +0100 Subject: [PATCH 1015/1018] TYP: ``linalg.lstsq``: shape-typing and transparent dtypes (#30433) --- numpy/linalg/_linalg.pyi | 87 ++++++++++++++++------- numpy/typing/tests/data/reveal/linalg.pyi | 59 ++++++++++++++- 2 files changed, 116 insertions(+), 30 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 4e0d075dc181..c52e8e9b0f44 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -18,7 +18,6 @@ from numpy import ( complexfloating, float64, floating, - int32, object_, signedinteger, timedelta64, @@ -116,6 +115,13 @@ type _SideKind = L["L", "U", "l", "u"] type _NonNegInt = L[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] type _NegInt = L[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16] +type _LstSqResult[ShapeT: tuple[int, ...], InexactT: np.inexact, FloatingT: np.floating] = tuple[ + np.ndarray[ShapeT, np.dtype[InexactT]], # least-squares solution + np.ndarray[tuple[int], np.dtype[FloatingT]], # residuals + np.int32, # rank + np.ndarray[tuple[int], np.dtype[FloatingT]], # singular values +] + _FloatingT_co = TypeVar("_FloatingT_co", bound=np.floating, default=Any, covariant=True) _FloatingOrArrayT_co = TypeVar("_FloatingOrArrayT_co", bound=np.floating | NDArray[np.floating], default=Any, covariant=True) _InexactT_co = TypeVar("_InexactT_co", bound=np.inexact, default=Any, covariant=True) @@ -527,34 +533,61 @@ def det(a: _Array3ND[np.complex128] | _NestedSequence[Sequence[list[complex]]]) @overload # fallback def det(a: _ArrayLikeComplex_co) -> Any: ... -# TODO: narrow return types -@overload -def lstsq( - a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, rcond: float | None = None -) -> tuple[ - NDArray[float64], - NDArray[float64], - int32, - NDArray[float64], -]: ... 
-@overload +# +@overload # +float64, ~float64, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _SupportsArray[tuple[int, int], np.dtype[_to_float64]] | Sequence[Sequence[float]], + b: _SupportsArray[ShapeT, np.dtype[np.floating | np.integer]], + rcond: float | None = None, +) -> _LstSqResult[ShapeT, np.float64, np.float64]: ... +@overload # ~float64, +float64, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _SupportsArray[tuple[int, int], np.dtype[np.floating | np.integer]] | Sequence[Sequence[float]], + b: _SupportsArray[ShapeT, np.dtype[_to_float64]], + rcond: float | None = None, +) -> _LstSqResult[ShapeT, np.float64, np.float64]: ... +@overload # +complex128, ~complex128, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _SupportsArray[tuple[int, int], np.dtype[np.number]] | Sequence[Sequence[complex]], + b: _SupportsArray[ShapeT, np.dtype[np.complex128]], + rcond: float | None = None, +) -> _LstSqResult[ShapeT, np.complex128, np.float64]: ... +@overload # ~complex128, +complex128, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _SupportsArray[tuple[int, int], np.dtype[np.complex128]] | Sequence[list[complex]], + b: _SupportsArray[ShapeT, np.dtype[np.number]], + rcond: float | None = None, +) -> _LstSqResult[ShapeT, np.complex128, np.float64]: ... +@overload # ~float32, ~float32, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _SupportsArray[tuple[int, int], np.dtype[np.float32]], + b: _SupportsArray[ShapeT, np.dtype[np.float32]], + rcond: float | None = None, +) -> _LstSqResult[ShapeT, np.float32, np.float32]: ... +@overload # +complex64, ~complex64, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _SupportsArray[tuple[int, int], np.dtype[np.complex64 | np.float32]], + b: _SupportsArray[ShapeT, np.dtype[np.complex64]], + rcond: float | None = None, +) -> _LstSqResult[ShapeT, np.complex64, np.float32]: ... +@overload # ~complex64, +complex64, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _SupportsArray[tuple[int, int], np.dtype[np.complex64]], + b: _SupportsArray[ShapeT, np.dtype[np.complex64 | np.float32]], + rcond: float | None = None, +) -> _LstSqResult[ShapeT, np.complex64, np.float32]: ... +@overload # +float64, +float64, unknown shape def lstsq( - a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: float | None = None -) -> tuple[ - NDArray[floating], - NDArray[floating], - int32, - NDArray[floating], -]: ... -@overload + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + rcond: float | None = None, +) -> _LstSqResult[_AnyShape, np.float64 | Any, np.float64 | Any]: ... +@overload # +complex128, +complex128, unknown shape def lstsq( - a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: float | None = None -) -> tuple[ - NDArray[complexfloating], - NDArray[floating], - int32, - NDArray[floating], -]: ... + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, + rcond: float | None = None, +) -> _LstSqResult[_AnyShape, np.complex128 | Any, np.float64 | Any]: ... 
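A runtime sketch of the 4-tuple that _LstSqResult describes, on a small deterministic overdetermined system:

import numpy as np

# Solution shaped like b, residuals, rank, and singular values; with
# float64 inputs every floating result stays float64.
A = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
b = np.array([1.0, 2.0, 3.0])
x, res, rank, sv = np.linalg.lstsq(A, b)
assert x.shape == (2,) and sv.shape == (2,)
assert res.shape == (1,) and rank == 2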
# TODO: narrow return types @overload diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 81f8ec7c61d9..1e8098939d43 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -10,6 +10,9 @@ from numpy.linalg._linalg import ( SVDResult, ) +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] + bool_list_2d: list[list[bool]] int_list_2d: list[list[int]] float_list_1d: list[float] @@ -41,6 +44,7 @@ AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] AR_f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] AR_f8_4d: np.ndarray[tuple[int, int, int, int], np.dtype[np.float64]] +AR_f4_1d: np.ndarray[tuple[int], np.dtype[np.float32]] AR_f4_2d: np.ndarray[tuple[int, int], np.dtype[np.float32]] AR_f4_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float32]] @@ -204,9 +208,58 @@ assert_type(np.linalg.det(AR_f8_3d), npt.NDArray[np.float64]) assert_type(np.linalg.det(complex_list_2d), np.complex128) assert_type(np.linalg.det(complex_list_3d), npt.NDArray[np.complex128]) -assert_type(np.linalg.lstsq(AR_i8, AR_i8), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64], np.int32, npt.NDArray[np.float64]]) -assert_type(np.linalg.lstsq(AR_i8, AR_f8), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating], np.int32, npt.NDArray[np.floating]]) -assert_type(np.linalg.lstsq(AR_f8, AR_c16), tuple[npt.NDArray[np.complexfloating], npt.NDArray[np.floating], np.int32, npt.NDArray[np.floating]]) +assert_type( + np.linalg.lstsq(AR_i8, AR_i8), + tuple[npt.NDArray[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f4, AR_f4), + tuple[npt.NDArray[np.float32], _Array1D[np.float32], np.int32, _Array1D[np.float32]], +) +assert_type( + np.linalg.lstsq(AR_i8, AR_f8), + tuple[npt.NDArray[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f4, AR_f8), + tuple[npt.NDArray[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f8, AR_i8), + tuple[npt.NDArray[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f8, AR_f4), + tuple[npt.NDArray[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_c8, AR_c8), + tuple[npt.NDArray[np.complex64], _Array1D[np.float32], np.int32, _Array1D[np.float32]], +) +assert_type( + np.linalg.lstsq(AR_c8, AR_c16), + tuple[npt.NDArray[np.complex128], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_c16, AR_c8), + tuple[npt.NDArray[np.complex128], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f8, AR_f8_1d), + tuple[_Array1D[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f4, AR_f4_1d), + tuple[_Array1D[np.float32], _Array1D[np.float32], np.int32, _Array1D[np.float32]], +) +assert_type( + np.linalg.lstsq(AR_f8, AR_f8_2d), + tuple[_Array2D[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f4, AR_f4_2d), + tuple[_Array2D[np.float32], _Array1D[np.float32], np.int32, _Array1D[np.float32]], +) assert_type(np.linalg.norm(AR_i8), np.floating) assert_type(np.linalg.norm(AR_f8), np.floating) From 
0c0d0970227c0e1c26b8279599b6958313688e1b Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 16 Dec 2025 16:19:17 +0100 Subject: [PATCH 1016/1018] TYP: ``linalg.solve`` and ``tensorsolve``: improved return dtype specialization (#30434) --- numpy/linalg/_linalg.pyi | 75 ++++++++++++----------- numpy/typing/tests/data/reveal/linalg.pyi | 14 +++-- 2 files changed, 49 insertions(+), 40 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index c52e8e9b0f44..af876d911826 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -159,42 +159,45 @@ class SlogdetResult(NamedTuple, Generic[_FloatingOrArrayT_co, _InexactOrArrayT_c sign: _FloatingOrArrayT_co logabsdet: _InexactOrArrayT_co -# TODO: narrow return types -@overload -def tensorsolve( - a: _ArrayLikeInt_co, - b: _ArrayLikeInt_co, - axes: Iterable[int] | None = None, -) -> NDArray[float64]: ... -@overload -def tensorsolve( - a: _ArrayLikeFloat_co, - b: _ArrayLikeFloat_co, - axes: Iterable[int] | None = None, -) -> NDArray[floating]: ... -@overload -def tensorsolve( - a: _ArrayLikeComplex_co, - b: _ArrayLikeComplex_co, - axes: Iterable[int] | None = None, -) -> NDArray[complexfloating]: ... - -# TODO: narrow return types -@overload -def solve( - a: _ArrayLikeInt_co, - b: _ArrayLikeInt_co, -) -> NDArray[float64]: ... -@overload -def solve( - a: _ArrayLikeFloat_co, - b: _ArrayLikeFloat_co, -) -> NDArray[floating]: ... -@overload -def solve( - a: _ArrayLikeComplex_co, - b: _ArrayLikeComplex_co, -) -> NDArray[complexfloating]: ... +# keep in sync with `solve` +@overload # ~float64, +float64 +def tensorsolve(a: _ToArrayF64, b: _ArrayLikeFloat_co, axes: Iterable[int] | None = None) -> NDArray[np.float64]: ... +@overload # +float64, ~float64 +def tensorsolve(a: _ArrayLikeFloat_co, b: _ToArrayF64, axes: Iterable[int] | None = None) -> NDArray[np.float64]: ... +@overload # ~float32, ~float32 +def tensorsolve(a: _ArrayLike[np.float32], b: _ArrayLike[np.float32], axes: Iterable[int] | None = None) -> NDArray[np.float32]: ... +@overload # +float, +float +def tensorsolve(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, axes: Iterable[int] | None = None) -> NDArray[np.float64 | Any]: ... +@overload # ~complex128, +complex128 +def tensorsolve(a: _AsArrayC128, b: _ArrayLikeComplex_co, axes: Iterable[int] | None = None) -> NDArray[np.complex128]: ... +@overload # +complex128, ~complex128 +def tensorsolve(a: _ArrayLikeComplex_co, b: _AsArrayC128, axes: Iterable[int] | None = None) -> NDArray[np.complex128]: ... +@overload # ~complex64, +complex64 +def tensorsolve(a: _ArrayLike[np.complex64], b: _ArrayLike[np.complex64 | np.float32], axes: Iterable[int] | None = None) -> NDArray[np.complex64]: ... +@overload # +complex64, ~complex64 +def tensorsolve(a: _ArrayLike[np.complex64 | np.float32], b: _ArrayLike[np.complex64], axes: Iterable[int] | None = None) -> NDArray[np.complex64]: ... +@overload # +complex, +complex +def tensorsolve(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, axes: Iterable[int] | None = None) -> NDArray[np.complex128 | Any]: ... + +# keep in sync with `tensorsolve` +@overload # ~float64, +float64 +def solve(a: _ToArrayF64, b: _ArrayLikeFloat_co) -> NDArray[np.float64]: ... +@overload # +float64, ~float64 +def solve(a: _ArrayLikeFloat_co, b: _ToArrayF64) -> NDArray[np.float64]: ... +@overload # ~float32, ~float32 +def solve(a: _ArrayLike[np.float32], b: _ArrayLike[np.float32]) -> NDArray[np.float32]: ... 
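The remaining solve overloads follow below, mirroring tensorsolve; a quick runtime sketch of the dtype transparency both encode:

import numpy as np

# float32 operands stay float32 instead of being widened to float64.
A = np.eye(2, dtype=np.float32)
b = np.ones(2, dtype=np.float32)
assert np.linalg.solve(A, b).dtype == np.float32

# Mixing in a complex64 operand keeps the result at complex64.
c = np.ones(2, dtype=np.complex64)
assert np.linalg.solve(A, c).dtype == np.complex64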
+@overload # +float, +float +def solve(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[np.float64 | Any]: ... +@overload # ~complex128, +complex128 +def solve(a: _AsArrayC128, b: _ArrayLikeComplex_co) -> NDArray[np.complex128]: ... +@overload # +complex128, ~complex128 +def solve(a: _ArrayLikeComplex_co, b: _AsArrayC128) -> NDArray[np.complex128]: ... +@overload # ~complex64, +complex64 +def solve(a: _ArrayLike[np.complex64], b: _ArrayLike[np.complex64 | np.float32]) -> NDArray[np.complex64]: ... +@overload # +complex64, ~complex64 +def solve(a: _ArrayLike[np.complex64 | np.float32], b: _ArrayLike[np.complex64]) -> NDArray[np.complex64]: ... +@overload # +complex, +complex +def solve(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[np.complex128 | Any]: ... # keep in sync with the other inverse functions and cholesky @overload # inexact32 diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 1e8098939d43..7bafc58789ff 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -51,12 +51,18 @@ AR_f4_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float32]] ### assert_type(np.linalg.tensorsolve(AR_i8, AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.tensorsolve(AR_i8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.tensorsolve(AR_c16, AR_f8), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.tensorsolve(AR_i8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.tensorsolve(AR_f4, AR_f4), npt.NDArray[np.float32]) +assert_type(np.linalg.tensorsolve(AR_c16, AR_f8), npt.NDArray[np.complex128]) +assert_type(np.linalg.tensorsolve(AR_c8, AR_f4), npt.NDArray[np.complex64]) +assert_type(np.linalg.tensorsolve(AR_f4, AR_c8), npt.NDArray[np.complex64]) assert_type(np.linalg.solve(AR_i8, AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.solve(AR_i8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.solve(AR_c16, AR_f8), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.solve(AR_i8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.solve(AR_f4, AR_f4), npt.NDArray[np.float32]) +assert_type(np.linalg.solve(AR_c16, AR_f8), npt.NDArray[np.complex128]) +assert_type(np.linalg.solve(AR_c8, AR_f4), npt.NDArray[np.complex64]) +assert_type(np.linalg.solve(AR_f4, AR_c8), npt.NDArray[np.complex64]) assert_type(np.linalg.tensorinv(AR_i8), npt.NDArray[np.float64]) assert_type(np.linalg.tensorinv(AR_f8), npt.NDArray[np.float64]) From f159cb3c2dc0c4713a46d7009b569451e30e279a Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 16 Dec 2025 16:21:28 +0100 Subject: [PATCH 1017/1018] MAINT: bump mypy from 1.19.0 to 1.19.1 (#30444) --- environment.yml | 2 +- requirements/test_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index 307dd4631012..774d6c0209ac 100644 --- a/environment.yml +++ b/environment.yml @@ -24,7 +24,7 @@ dependencies: - pytest-xdist - hypothesis # For type annotations - - mypy=1.19.0 + - mypy=1.19.1 - orjson # makes mypy faster # For building docs - sphinx>=4.5.0 diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 3094d4eb8470..e3b17f0fc856 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -7,7 +7,7 @@ ninja; sys_platform != "emscripten" pytest-xdist pytest-timeout # For testing types -mypy==1.19.0 +mypy==1.19.1 # for optional f2py encoding detection charset-normalizer tzdata From 
e934bb97b7c6f07c61fde2e11631c63700014bda Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 16 Dec 2025 19:55:15 +0100 Subject: [PATCH 1018/1018] TYP: ``linalg.outer``: shape-typing and transparent dtypes (#30443) --- numpy/linalg/_linalg.pyi | 95 ++++++++++++++--------- numpy/typing/tests/data/reveal/linalg.pyi | 22 ++++-- 2 files changed, 71 insertions(+), 46 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index af876d911826..cd2653009e89 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -14,16 +14,7 @@ from typing import ( from typing_extensions import TypeVar import numpy as np -from numpy import ( - complexfloating, - float64, - floating, - object_, - signedinteger, - timedelta64, - unsignedinteger, - vecdot, -) +from numpy import complexfloating, floating, signedinteger, unsignedinteger, vecdot from numpy._core.fromnumeric import matrix_transpose from numpy._globals import _NoValue, _NoValueType from numpy._typing import ( @@ -91,13 +82,17 @@ type _JustAnyShape = tuple[Never, ...] # workaround for microsoft/pyright#10232 type _tuple2[T] = tuple[T, T] type _inexact32 = np.float32 | np.complex64 -type _to_float64 = np.float64 | np.integer | np.bool +type _to_integer = np.integer | np.bool +type _to_timedelta64 = np.timedelta64 | _to_integer +type _to_float64 = np.float64 | _to_integer type _to_inexact64 = np.complex128 | _to_float64 type _to_complex = np.number | np.bool type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] type _Array3ND[ScalarT: np.generic] = np.ndarray[_AtLeast3D, np.dtype[ScalarT]] +type _ToArray1D[ScalarT: np.generic] = _SupportsArray[tuple[int], np.dtype[ScalarT]] + # anything that safe-casts (from floating) into float64/complex128 type _ToArrayF64 = _ArrayLike[_to_float64] | _NestedSequence[float] type _ToArrayC128 = _ArrayLike[_to_inexact64] | _NestedSequence[complex] @@ -280,32 +275,6 @@ def matrix_power[ScalarT: _inexact32](a: _ArrayLike[ScalarT], n: SupportsIndex) @overload # fallback def matrix_power(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, n: SupportsIndex) -> np.ndarray: ... -# TODO: narrow return types -@overload -def outer(x1: _ArrayLike[Never], x2: _ArrayLike[Never], /) -> NDArray[Any]: ... -@overload -def outer(x1: _ArrayLikeBool_co, x2: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... -@overload -def outer[ScalarT: np.number](x1: _ArrayLike[ScalarT], x2: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... -@overload -def outer(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... -@overload -def outer(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... -@overload -def outer(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... -@overload -def outer(x1: _ArrayLikeComplex_co, x2: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... -@overload -def outer(x1: _ArrayLikeTD64_co, x2: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... -@overload -def outer(x1: _ArrayLikeObject_co, x2: _ArrayLikeObject_co, /) -> NDArray[object_]: ... -@overload -def outer( - x1: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - x2: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - /, -) -> NDArray[Any]: ... - # NOTE: for real input the output dtype (floating/complexfloating) depends on the specific values @overload # abstract `inexact` and `floating` (excluding concrete types) def eig(a: NDArray[np.inexact[Never]]) -> EigResult: ... 
@@ -670,7 +639,7 @@ def tensordot( /, *, axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[np.bool_]: ... +) -> NDArray[np.bool]: ... @overload def tensordot( a: _ArrayLikeInt_co, @@ -803,6 +772,56 @@ def trace( @overload # fallback def trace(x: _ArrayLikeComplex_co, /, *, offset: SupportsIndex = 0, dtype: DTypeLike | None = None) -> Any: ... +# +@overload # workaround for microsoft/pyright#10232 +def outer(x1: _ToArray1D[Never], x2: _ToArray1D[Never], /) -> np.ndarray[tuple[int, int]]: ... +@overload # +bool, +bool +def outer( + x1: _ToArray1D[np.bool] | Sequence[bool], x2: _ToArray1D[np.bool] | Sequence[bool], / +) -> np.ndarray[tuple[int, int], np.dtype[np.bool]]: ... +@overload # ~int64, +int64 +def outer( + x1: _ToArray1D[np.int64] | list[int], x2: _ToArray1D[_to_integer] | Sequence[int], / +) -> np.ndarray[tuple[int, int], np.dtype[np.int64]]: ... +@overload # +int64, ~int64 +def outer( + x1: _ToArray1D[_to_integer] | Sequence[int], x2: _ToArray1D[np.int64] | list[int], / +) -> np.ndarray[tuple[int, int], np.dtype[np.int64]]: ... +@overload # ~timedelta64, +timedelta64 +def outer( + x1: _ToArray1D[np.timedelta64] | Sequence[np.timedelta64], x2: _ToArray1D[_to_timedelta64] | Sequence[_to_timedelta64], / +) -> np.ndarray[tuple[int, int], np.dtype[np.timedelta64]]: ... +@overload # +timedelta64, ~timedelta64 +def outer( + x1: _ToArray1D[_to_timedelta64] | Sequence[_to_timedelta64], x2: _ToArray1D[np.timedelta64] | Sequence[np.timedelta64], / +) -> np.ndarray[tuple[int, int], np.dtype[np.timedelta64]]: ... +@overload # ~float64, +float64 +def outer( + x1: _ToArray1D[np.float64] | list[float], x2: _ToArray1D[_to_float64] | Sequence[float], / +) -> np.ndarray[tuple[int, int], np.dtype[np.float64]]: ... +@overload # +float64, ~float64 +def outer( + x1: _ToArray1D[_to_float64] | Sequence[float], x2: _ToArray1D[np.float64] | list[float], / +) -> np.ndarray[tuple[int, int], np.dtype[np.float64]]: ... +@overload # ~complex128, +complex128 +def outer( + x1: _ToArray1D[np.complex128] | list[complex], x2: _ToArray1D[_to_complex] | Sequence[complex], / +) -> np.ndarray[tuple[int, int], np.dtype[np.complex128]]: ... +@overload # +complex128, ~complex128 +def outer( + x1: _ToArray1D[_to_complex] | Sequence[complex], x2: _ToArray1D[np.complex128] | list[complex], / +) -> np.ndarray[tuple[int, int], np.dtype[np.complex128]]: ... +@overload # ~ScalarT, ~ScalarT +def outer[ScalarT: np.number | np.object_]( + x1: _ToArray1D[ScalarT] | Sequence[ScalarT], x2: _ToArray1D[ScalarT] | Sequence[ScalarT], / +) -> np.ndarray[tuple[int, int], np.dtype[ScalarT]]: ... +@overload # fallback +def outer( + x1: _ToArray1D[_to_complex] | Sequence[complex | _to_complex], + x2: _ToArray1D[_to_complex] | Sequence[complex | _to_complex], + /, +) -> np.ndarray[tuple[int, int]]: ... 
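A runtime sketch matching the new outer overloads (np.linalg.outer requires NumPy >= 2.0, where the default integer is int64 on all platforms):

import numpy as np

# 1-D inputs produce a 2-D outer product; the dtype follows the inputs.
u = np.arange(3)
v = np.arange(4)
out = np.linalg.outer(u, v)
assert out.shape == (3, 4)
assert out.dtype == np.int64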
+ # TODO: narrow return types @overload def cross( diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 7bafc58789ff..ede41f2862c2 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -13,12 +13,15 @@ from numpy.linalg._linalg import ( type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +bool_list_1d: list[bool] bool_list_2d: list[list[bool]] +int_list_1d: list[int] int_list_2d: list[list[int]] float_list_1d: list[float] float_list_2d: list[list[float]] float_list_3d: list[list[list[float]]] float_list_4d: list[list[list[list[float]]]] +complex_list_1d: list[complex] complex_list_2d: list[list[complex]] complex_list_3d: list[list[list[complex]]] bytes_list_2d: list[list[bytes]] @@ -86,14 +89,6 @@ assert_type(np.linalg.cholesky(AR_i8), npt.NDArray[np.float64]) assert_type(np.linalg.cholesky(AR_f8), npt.NDArray[np.float64]) assert_type(np.linalg.cholesky(AR_c16), npt.NDArray[np.complex128]) -assert_type(np.linalg.outer(AR_i8, AR_i8), npt.NDArray[np.int64]) -assert_type(np.linalg.outer(AR_f8, AR_f8), npt.NDArray[np.float64]) -assert_type(np.linalg.outer(AR_c16, AR_c16), npt.NDArray[np.complex128]) -assert_type(np.linalg.outer(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.linalg.outer(AR_O, AR_O), npt.NDArray[np.object_]) -# NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. -assert_type(np.linalg.outer(AR_i8, AR_m), npt.NDArray[np.timedelta64]) # type: ignore[assert-type] - assert_type(np.linalg.qr(AR_i8), QRResult[np.float64]) assert_type(np.linalg.qr(AR_i8, "r"), npt.NDArray[np.float64]) assert_type(np.linalg.qr(AR_i8, "raw"), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) @@ -321,6 +316,17 @@ assert_type(np.linalg.trace(float_list_2d), np.float64) assert_type(np.linalg.trace(complex_list_2d), np.complex128) assert_type(np.linalg.trace(float_list_3d), npt.NDArray[np.float64]) +assert_type(np.linalg.outer(bool_list_1d, bool_list_1d), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.linalg.outer(int_list_1d, int_list_1d), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(np.linalg.outer(float_list_1d, float_list_1d), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.linalg.outer(complex_list_1d, complex_list_1d), np.ndarray[tuple[int, int], np.dtype[np.complex128]]) +assert_type(np.linalg.outer(AR_i8, AR_i8), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(np.linalg.outer(AR_f8, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.linalg.outer(AR_c16, AR_c16), np.ndarray[tuple[int, int], np.dtype[np.complex128]]) +assert_type(np.linalg.outer(AR_b, AR_b), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.linalg.outer(AR_O, AR_O), np.ndarray[tuple[int, int], np.dtype[np.object_]]) +assert_type(np.linalg.outer(AR_i8, AR_m), np.ndarray[tuple[int, int], np.dtype[np.timedelta64]]) + assert_type(np.linalg.cross(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) assert_type(np.linalg.cross(AR_f8, AR_f8), npt.NDArray[np.floating]) assert_type(np.linalg.cross(AR_c16, AR_c16), npt.NDArray[np.complexfloating])
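A closing note on the reveal-test style used throughout these patches: typing.assert_type is a no-op at runtime, so the checks only bite when mypy or pyright analyses the file (typing.assert_type needs Python >= 3.11; typing_extensions provides it for older versions). A minimal self-contained sketch:

from typing import assert_type

import numpy as np

# At runtime assert_type simply returns its first argument; a static
# checker verifies that the inferred type matches the second argument.
x = np.float64(1.0)
assert_type(x, np.float64)
assert x == 1.0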